 Documentation/DocBook/80211.tmpl                 |  495
 Documentation/DocBook/Makefile                   |    2
 Documentation/DocBook/mac80211.tmpl              |  337
 Documentation/networking/dccp.txt                |   29
 Documentation/networking/ip-sysctl.txt           |   27
 Documentation/networking/timestamping.txt        |   22
 MAINTAINERS                                      |   35
 arch/s390/include/asm/qdio.h                     |   13
 drivers/atm/firestream.c                         |    4
 drivers/atm/iphase.c                             |    2
 drivers/firewire/net.c                           |   13
 drivers/ieee1394/eth1394.c                       |   16
 drivers/isdn/capi/capidrv.c                      |   17
 drivers/isdn/divert/isdn_divert.c                |    6
 drivers/isdn/hisax/hfc_sx.c                      |   13
 drivers/isdn/i4l/isdn_tty.c                      |    4
 drivers/isdn/mISDN/dsp_cmx.c                     |    1
 drivers/isdn/mISDN/l1oip_core.c                  |    2
 drivers/net/3c503.c                              |    8
 drivers/net/3c515.c                              |    2
 drivers/net/3c59x.c                              |    2
 drivers/net/8139cp.c                             |    2
 drivers/net/Kconfig                              |   25
 drivers/net/Makefile                             |    2
 drivers/net/acenic.c                             |    2
 drivers/net/amd8111e.c                           |   18
 drivers/net/amd8111e.h                           |    1
 drivers/net/arm/am79c961a.c                      |   35
 drivers/net/arm/am79c961a.h                      |    1
 drivers/net/arm/ep93xx_eth.c                     |   39
 drivers/net/arm/ether1.c                         |   34
 drivers/net/arm/ether1.h                         |    1
 drivers/net/arm/ether3.c                         |   33
 drivers/net/arm/ether3.h                         |    1
 drivers/net/atl1c/atl1c.h                        |    1
 drivers/net/atl1c/atl1c_hw.c                     |    2
 drivers/net/atl1c/atl1c_main.c                   |    8
 drivers/net/atl1e/atl1e_main.c                   |    4
 drivers/net/atlx/atl1.c                          |    4
 drivers/net/atlx/atl2.c                          |   10
 drivers/net/atp.c                                |    2
 drivers/net/au1000_eth.c                         |  313
 drivers/net/au1000_eth.h                         |   42
 drivers/net/b44.c                                |    2
 drivers/net/bcm63xx_enet.c                       |   62
 drivers/net/bcm63xx_enet.h                       |    1
 drivers/net/benet/be.h                           |   14
 drivers/net/benet/be_ethtool.c                   |    3
 drivers/net/benet/be_main.c                      |   62
 drivers/net/bfin_mac.c                           |   10
 drivers/net/bmac.c                               |    7
 drivers/net/bna/Makefile                         |   11
 drivers/net/bna/bfa_cee.c                        |  291
 drivers/net/bna/bfa_cee.h                        |   64
 drivers/net/bna/bfa_defs.h                       |  243
 drivers/net/bna/bfa_defs_cna.h                   |  223
 drivers/net/bna/bfa_defs_mfg_comm.h              |  244
 drivers/net/bna/bfa_defs_status.h                |  216
 drivers/net/bna/bfa_ioc.c                        | 1738
 drivers/net/bna/bfa_ioc.h                        |  301
 drivers/net/bna/bfa_ioc_ct.c                     |  392
 drivers/net/bna/bfa_sm.h                         |   88
 drivers/net/bna/bfa_wc.h                         |   69
 drivers/net/bna/bfi.h                            |  392
 drivers/net/bna/bfi_cna.h                        |  199
 drivers/net/bna/bfi_ctreg.h                      |  637
 drivers/net/bna/bfi_ll.h                         |  438
 drivers/net/bna/bna.h                            |  654
 drivers/net/bna/bna_ctrl.c                       | 3624
 drivers/net/bna/bna_hw.h                         | 1491
 drivers/net/bna/bna_txrx.c                       | 4209
 drivers/net/bna/bna_types.h                      | 1128
 drivers/net/bna/bnad.c                           | 3266
 drivers/net/bna/bnad.h                           |  333
 drivers/net/bna/bnad_ethtool.c                   | 1277
 drivers/net/bna/cna.h                            |   81
 drivers/net/bna/cna_fwimg.c                      |   64
 drivers/net/bnx2.c                               |   46
 drivers/net/bnx2x/bnx2x.h                        |   14
 drivers/net/bnx2x/bnx2x_cmn.c                    |   36
 drivers/net/bnx2x/bnx2x_cmn.h                    |   12
 drivers/net/bnx2x/bnx2x_ethtool.c                |  223
 drivers/net/bnx2x/bnx2x_hsi.h                    |  130
 drivers/net/bnx2x/bnx2x_link.c                   | 8585
 drivers/net/bnx2x/bnx2x_link.h                   |  238
 drivers/net/bnx2x/bnx2x_main.c                   |  756
 drivers/net/bnx2x/bnx2x_reg.h                    |   53
 drivers/net/bnx2x/bnx2x_stats.c                  |    7
 drivers/net/can/mscan/mpc5xxx_can.c              |    8
 drivers/net/cassini.c                            |    2
 drivers/net/chelsio/sge.c                        |    2
 drivers/net/chelsio/subr.c                       |    2
 drivers/net/cnic.c                               |    2
 drivers/net/cpmac.c                              |   39
 drivers/net/cxgb3/cxgb3_main.c                   |   24
 drivers/net/cxgb3/regs.h                         |    4
 drivers/net/cxgb3/sge.c                          |    2
 drivers/net/cxgb3/t3_hw.c                        |    5
 drivers/net/cxgb4/cxgb4.h                        |    2
 drivers/net/cxgb4/cxgb4_main.c                   |   94
 drivers/net/cxgb4/sge.c                          |   19
 drivers/net/cxgb4/t4_hw.h                        |    1
 drivers/net/cxgb4/t4fw_api.h                     |    5
 drivers/net/cxgb4vf/sge.c                        |    3
 drivers/net/declance.c                           |    2
 drivers/net/dl2k.c                               |    2
 drivers/net/dm9000.c                             |    2
 drivers/net/e1000/e1000_main.c                   |  171
 drivers/net/e1000e/netdev.c                      |    7
 drivers/net/eepro.c                              |    8
 drivers/net/ehea/ehea_main.c                     |   10
 drivers/net/enic/enic.h                          |    2
 drivers/net/enic/enic_main.c                     |   19
 drivers/net/enic/vnic_dev.c                      |   25
 drivers/net/enic/vnic_devcmd.h                   |   12
 drivers/net/enic/vnic_enet.h                     |    2
 drivers/net/enic/vnic_resource.h                 |   13
 drivers/net/enic/vnic_rq.c                       |    6
 drivers/net/enic/vnic_vic.c                      |    7
 drivers/net/enic/vnic_wq.c                       |    6
 drivers/net/epic100.c                            |    2
 drivers/net/ethoc.c                              |    6
 drivers/net/fealnx.c                             |    4
 drivers/net/fec_mpc52xx.c                        |    6
 drivers/net/forcedeth.c                          |    6
 drivers/net/fs_enet/fs_enet-main.c               |    3
 drivers/net/gianfar.c                            |   19
 drivers/net/greth.c                              |    6
 drivers/net/hamachi.c                            |    2
 drivers/net/hamradio/scc.c                       |    3
 drivers/net/hp.c                                 |    8
 drivers/net/hydra.c                              |   13
 drivers/net/ibmlana.c                            |    2
 drivers/net/ibmveth.c                            |  953
 drivers/net/ibmveth.h                            |   59
 drivers/net/igb/igb.h                            |    2
 drivers/net/igb/igb_main.c                       |   17
 drivers/net/igbvf/netdev.c                       |    2
 drivers/net/ioc3-eth.c                           |    2
 drivers/net/ipg.c                                |    6
 drivers/net/irda/mcs7780.c                       |    2
 drivers/net/irda/via-ircc.c                      |    3
 drivers/net/iseries_veth.c                       |    2
 drivers/net/ixgb/ixgb_main.c                     |    6
 drivers/net/ixgbe/ixgbe.h                        |   30
 drivers/net/ixgbe/ixgbe_ethtool.c                |  385
 drivers/net/ixgbe/ixgbe_fcoe.c                   |    7
 drivers/net/ixgbe/ixgbe_main.c                   | 1715
 drivers/net/ixgbe/ixgbe_type.h                   |    2
 drivers/net/ixgbevf/ixgbevf.h                    |    1
 drivers/net/ixgbevf/ixgbevf_main.c               |   30
 drivers/net/ixgbevf/vf.h                         |    2
 drivers/net/jme.c                                |  100
 drivers/net/jme.h                                |    3
 drivers/net/ll_temac_main.c                      |    2
 drivers/net/mac8390.c                            |   48
 drivers/net/macb.c                               |    2
 drivers/net/macvtap.c                            |   99
 drivers/net/mlx4/Makefile                        |    2
 drivers/net/mlx4/alloc.c                         |   17
 drivers/net/mlx4/en_ethtool.c                    |  173
 drivers/net/mlx4/en_main.c                       |   24
 drivers/net/mlx4/en_netdev.c                     |   28
 drivers/net/mlx4/en_port.c                       |   32
 drivers/net/mlx4/en_port.h                       |   14
 drivers/net/mlx4/en_rx.c                         |  104
 drivers/net/mlx4/en_selftest.c                   |  179
 drivers/net/mlx4/en_tx.c                         |   20
 drivers/net/mlx4/eq.c                            |   44
 drivers/net/mlx4/fw.c                            |   15
 drivers/net/mlx4/fw.h                            |    6
 drivers/net/mlx4/main.c                          |    6
 drivers/net/mlx4/mlx4_en.h                       |   39
 drivers/net/mlx4/profile.c                       |    2
 drivers/net/myri10ge/myri10ge.c                  |    4
 drivers/net/natsemi.c                            |    2
 drivers/net/niu.c                                |    6
 drivers/net/ns83820.c                            |   53
 drivers/net/pasemi_mac.c                         |    2
 drivers/net/pasemi_mac_ethtool.c                 |   16
 drivers/net/pci-skeleton.c                       |    2
 drivers/net/pcmcia/3c574_cs.c                    |   88
 drivers/net/pcmcia/3c589_cs.c                    |   15
 drivers/net/pcmcia/axnet_cs.c                    |  187
 drivers/net/pcmcia/com20020_cs.c                 |   32
 drivers/net/pcmcia/fmvj18x_cs.c                  |   60
 drivers/net/pcmcia/ibmtr_cs.c                    |   26
 drivers/net/pcmcia/nmclan_cs.c                   |   54
 drivers/net/pcmcia/pcnet_cs.c                    |  116
 drivers/net/pcmcia/smc91c92_cs.c                 |  103
 drivers/net/pcmcia/xirc2ps_cs.c                  |  104
 drivers/net/plip.c                               |    1
 drivers/net/pptp.c                               |  726
 drivers/net/ps3_gelic_net.c                      |    4
 drivers/net/pxa168_eth.c                         |    3
 drivers/net/qla3xxx.c                            |    4
 drivers/net/qlcnic/qlcnic.h                      |  107
 drivers/net/qlcnic/qlcnic_ctx.c                  |  267
 drivers/net/qlcnic/qlcnic_ethtool.c              |   75
 drivers/net/qlcnic/qlcnic_hdr.h                  |   23
 drivers/net/qlcnic/qlcnic_hw.c                   |   77
 drivers/net/qlcnic/qlcnic_init.c                 |  271
 drivers/net/qlcnic/qlcnic_main.c                 | 1112
 drivers/net/qlge/qlge_main.c                     |   34
 drivers/net/r6040.c                              |   67
 drivers/net/r8169.c                              |   18
 drivers/net/rrunner.c                            |    2
 drivers/net/s2io.c                               |   37
 drivers/net/s2io.h                               |    9
 drivers/net/sb1250-mac.c                         |    2
 drivers/net/sc92031.c                            |   11
 drivers/net/sfc/rx.c                             |    2
 drivers/net/sh_eth.c                             |    4
 drivers/net/sis900.c                             |    2
 drivers/net/skge.c                               |    5
 drivers/net/slip.c                               |   91
 drivers/net/slip.h                               |    9
 drivers/net/smsc911x.c                           |    2
 drivers/net/spider_net.c                         |    4
 drivers/net/starfire.c                           |   10
 drivers/net/stmmac/Kconfig                       |    5
 drivers/net/stmmac/common.h                      |   50
 drivers/net/stmmac/dwmac1000_core.c              |   20
 drivers/net/stmmac/dwmac1000_dma.c               |    8
 drivers/net/stmmac/dwmac100_core.c               |   22
 drivers/net/stmmac/dwmac100_dma.c                |    8
 drivers/net/stmmac/dwmac_dma.h                   |   16
 drivers/net/stmmac/dwmac_lib.c                   |   22
 drivers/net/stmmac/enh_desc.c                    |    2
 drivers/net/stmmac/norm_desc.c                   |    2
 drivers/net/stmmac/stmmac.h                      |    4
 drivers/net/stmmac/stmmac_ethtool.c              |   21
 drivers/net/stmmac/stmmac_main.c                 |  133
 drivers/net/stmmac/stmmac_mdio.c                 |   21
 drivers/net/sunbmac.c                            |    2
 drivers/net/sundance.c                           |    2
 drivers/net/sungem.c                             |  211
 drivers/net/sungem_phy.c                         |    3
 drivers/net/sunhme.c                             |    8
 drivers/net/sunlance.c                           |    2
 drivers/net/sunvnet.c                            |   50
 drivers/net/tehuti.c                             |   34
 drivers/net/tehuti.h                             |    1
 drivers/net/tg3.c                                |    2
 drivers/net/tlan.c                               |    8
 drivers/net/tokenring/tms380tr.c                 |    6
 drivers/net/tulip/dmfe.c                         |    2
 drivers/net/tulip/interrupt.c                    |   77
 drivers/net/tulip/tulip.h                        |    3
 drivers/net/tulip/tulip_core.c                   |   10
 drivers/net/tulip/uli526x.c                      |    2
 drivers/net/tulip/winbond-840.c                  |    2
 drivers/net/tulip/xircom_cb.c                    |   15
 drivers/net/typhoon.c                            |   48
 drivers/net/usb/Kconfig                          |    8
 drivers/net/usb/Makefile                         |    1
 drivers/net/usb/cx82310_eth.c                    |  354
 drivers/net/usb/hso.c                            |    9
 drivers/net/usb/kaweth.c                         |    9
 drivers/net/via-velocity.c                       |    2
 drivers/net/virtio_net.c                         |   14
 drivers/net/vmxnet3/vmxnet3_drv.c                |    4
 drivers/net/vxge/vxge-main.c                     |   34
 drivers/net/vxge/vxge-main.h                     |    1
 drivers/net/wan/c101.c                           |    2
 drivers/net/wan/cycx_drv.c                       |   14
 drivers/net/wan/cycx_main.c                      |    6
 drivers/net/wan/lapbether.c                      |    2
 drivers/net/wan/lmc/lmc_main.c                   |    2
 drivers/net/wan/n2.c                             |    2
 drivers/net/wan/pc300_drv.c                      |    2
 drivers/net/wan/pci200syn.c                      |    2
 drivers/net/wan/z85230.c                         |    4
 drivers/net/wd.c                                 |    8
 drivers/net/wireless/airo.c                      |    5
 drivers/net/wireless/at76c50x-usb.c              |    7
 drivers/net/wireless/ath/ar9170/main.c           |   31
 drivers/net/wireless/ath/ath.h                   |    1
 drivers/net/wireless/ath/ath5k/ani.c             |    6
 drivers/net/wireless/ath/ath5k/ath5k.h           |    7
 drivers/net/wireless/ath/ath5k/attach.c          |    8
 drivers/net/wireless/ath/ath5k/base.c            |   94
 drivers/net/wireless/ath/ath5k/debug.c           |    5
 drivers/net/wireless/ath/ath5k/debug.h           |    2
 drivers/net/wireless/ath/ath5k/dma.c             |    4
 drivers/net/wireless/ath/ath5k/eeprom.c          |    4
 drivers/net/wireless/ath/ath5k/pcu.c             |   36
 drivers/net/wireless/ath/ath5k/phy.c             |   19
 drivers/net/wireless/ath/ath5k/reg.h             |    2
 drivers/net/wireless/ath/ath5k/reset.c           |   30
 drivers/net/wireless/ath/ath5k/rfbuffer.h        |    4
 drivers/net/wireless/ath/ath9k/Makefile          |    3
 drivers/net/wireless/ath/ath9k/ar9003_mac.c      |    2
 drivers/net/wireless/ath/ath9k/ath9k.h           |    9
 drivers/net/wireless/ath/ath9k/beacon.c          |    7
 drivers/net/wireless/ath/ath9k/btcoex.c          |    7
 drivers/net/wireless/ath/ath9k/calib.c           |  109
 drivers/net/wireless/ath/ath9k/calib.h           |    1
 drivers/net/wireless/ath/ath9k/common.c          |   78
 drivers/net/wireless/ath/ath9k/common.h          |   10
 drivers/net/wireless/ath/ath9k/gpio.c            |   40
 drivers/net/wireless/ath/ath9k/hif_usb.c         |    3
 drivers/net/wireless/ath/ath9k/htc.h             |   42
 drivers/net/wireless/ath/ath9k/htc_drv_gpio.c    |  134
 drivers/net/wireless/ath/ath9k/htc_drv_init.c    |   42
 drivers/net/wireless/ath/ath9k/htc_drv_main.c    |   26
 drivers/net/wireless/ath/ath9k/htc_hst.c         |    4
 drivers/net/wireless/ath/ath9k/htc_hst.h         |    2
 drivers/net/wireless/ath/ath9k/hw.h              |    1
 drivers/net/wireless/ath/ath9k/init.c            |    2
 drivers/net/wireless/ath/ath9k/mac.c             |    3
 drivers/net/wireless/ath/ath9k/main.c            |   19
 drivers/net/wireless/ath/ath9k/recv.c            |   19
 drivers/net/wireless/ath/ath9k/wmi.c             |    2
 drivers/net/wireless/ath/ath9k/wmi.h             |    1
 drivers/net/wireless/ath/ath9k/xmit.c            |   18
 drivers/net/wireless/ath/debug.h                 |    2
 drivers/net/wireless/b43/main.c                  |   30
 drivers/net/wireless/b43/phy_n.c                 |   64
 drivers/net/wireless/b43legacy/main.c            |    5
 drivers/net/wireless/hostap/hostap_ioctl.c       |    2
 drivers/net/wireless/ipw2x00/ipw2100.c           |   10
 drivers/net/wireless/ipw2x00/ipw2200.c           |   12
 drivers/net/wireless/iwlwifi/Kconfig             |   10
 drivers/net/wireless/iwlwifi/Makefile            |    1
 drivers/net/wireless/iwlwifi/iwl-1000.c          |    9
 drivers/net/wireless/iwlwifi/iwl-3945-hw.h       |    3
 drivers/net/wireless/iwlwifi/iwl-3945-rs.c       |    7
 drivers/net/wireless/iwlwifi/iwl-3945.c          |   86
 drivers/net/wireless/iwlwifi/iwl-4965.c          |   59
 drivers/net/wireless/iwlwifi/iwl-5000-hw.h       |    2
 drivers/net/wireless/iwlwifi/iwl-5000.c          |   37
 drivers/net/wireless/iwlwifi/iwl-6000-hw.h       |    2
 drivers/net/wireless/iwlwifi/iwl-6000.c          |  163
 drivers/net/wireless/iwlwifi/iwl-agn-calib.c     |   18
 drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c      |  133
 drivers/net/wireless/iwlwifi/iwl-agn-lib.c       |  424
 drivers/net/wireless/iwlwifi/iwl-agn-rs.c        |  332
 drivers/net/wireless/iwlwifi/iwl-agn-rs.h        |    2
 drivers/net/wireless/iwlwifi/iwl-agn-tt.c        |  704
 drivers/net/wireless/iwlwifi/iwl-agn-tt.h        |  129
 drivers/net/wireless/iwlwifi/iwl-agn-tx.c        |  153
 drivers/net/wireless/iwlwifi/iwl-agn-ucode.c     |   95
 drivers/net/wireless/iwlwifi/iwl-agn.c           |  677
 drivers/net/wireless/iwlwifi/iwl-agn.h           |   12
 drivers/net/wireless/iwlwifi/iwl-commands.h      |  518
 drivers/net/wireless/iwlwifi/iwl-core.c          |  887
 drivers/net/wireless/iwlwifi/iwl-core.h          |   92
 drivers/net/wireless/iwlwifi/iwl-debugfs.c       |  112
 drivers/net/wireless/iwlwifi/iwl-dev.h           |  185
 drivers/net/wireless/iwlwifi/iwl-hcmd.c          |   13
 drivers/net/wireless/iwlwifi/iwl-power.c         |  638
 drivers/net/wireless/iwlwifi/iwl-power.h         |   93
 drivers/net/wireless/iwlwifi/iwl-prph.h          |    9
 drivers/net/wireless/iwlwifi/iwl-rx.c            |    9
 drivers/net/wireless/iwlwifi/iwl-scan.c          |   75
 drivers/net/wireless/iwlwifi/iwl-sta.c           |  205
 drivers/net/wireless/iwlwifi/iwl-sta.h           |   64
 drivers/net/wireless/iwlwifi/iwl-tx.c            |   43
 drivers/net/wireless/iwlwifi/iwl3945-base.c      |  240
 drivers/net/wireless/iwmc3200wifi/rx.c           |    7
 drivers/net/wireless/libertas/cfg.c              |   62
 drivers/net/wireless/libertas/decl.h             |   13
 drivers/net/wireless/libertas/if_cs.c            |  130
 drivers/net/wireless/libertas/if_sdio.c          |  161
 drivers/net/wireless/libertas/if_sdio.h          |    4
 drivers/net/wireless/libertas/if_spi.c           |  150
 drivers/net/wireless/libertas/if_spi.h           |    5
 drivers/net/wireless/libertas/if_usb.c           |   60
 drivers/net/wireless/libertas/if_usb.h           |    1
 drivers/net/wireless/libertas/main.c             |  105
 drivers/net/wireless/libertas_tf/if_usb.c        |   57
 drivers/net/wireless/mac80211_hwsim.c            |   20
 drivers/net/wireless/orinoco/hw.c                |    9
 drivers/net/wireless/orinoco/wext.c              |   11
 drivers/net/wireless/p54/Kconfig                 |   18
 drivers/net/wireless/p54/eeprom.c                |   21
 drivers/net/wireless/p54/fwio.c                  |    6
 drivers/net/wireless/p54/main.c                  |    9
 drivers/net/wireless/p54/p54spi.c                |    7
 drivers/net/wireless/p54/p54spi_eeprom.h         |    2
 drivers/net/wireless/p54/p54usb.c                |    2
 drivers/net/wireless/p54/txrx.c                  |   25
 drivers/net/wireless/prism54/isl_ioctl.c         |    2
 drivers/net/wireless/ray_cs.c                    |   28
 drivers/net/wireless/rt2x00/rt2400pci.c          |   33
 drivers/net/wireless/rt2x00/rt2500pci.c          |   39
 drivers/net/wireless/rt2x00/rt2500usb.c          |   37
 drivers/net/wireless/rt2x00/rt2800.h             |   38
 drivers/net/wireless/rt2x00/rt2800lib.c          |  389
 drivers/net/wireless/rt2x00/rt2800lib.h          |   24
 drivers/net/wireless/rt2x00/rt2800pci.c          |  233
 drivers/net/wireless/rt2x00/rt2800usb.c          |  159
 drivers/net/wireless/rt2x00/rt2x00.h             |   31
 drivers/net/wireless/rt2x00/rt2x00config.c       |   12
 drivers/net/wireless/rt2x00/rt2x00crypto.c       |   17
 drivers/net/wireless/rt2x00/rt2x00debug.c        |   11
 drivers/net/wireless/rt2x00/rt2x00dev.c          |   71
 drivers/net/wireless/rt2x00/rt2x00firmware.c     |    3
 drivers/net/wireless/rt2x00/rt2x00ht.c           |   10
 drivers/net/wireless/rt2x00/rt2x00queue.c        |   82
 drivers/net/wireless/rt2x00/rt2x00queue.h        |   56
 drivers/net/wireless/rt2x00/rt2x00usb.c          |  298
 drivers/net/wireless/rt2x00/rt2x00usb.h          |   12
 drivers/net/wireless/rt2x00/rt61pci.c            |   69
 drivers/net/wireless/rt2x00/rt73usb.c            |   41
 drivers/net/wireless/rtl818x/rtl8180_dev.c       |  143
 drivers/net/wireless/rtl818x/rtl8187_dev.c       |    9
 drivers/net/wireless/wl12xx/wl1251.h             |    7
 drivers/net/wireless/wl12xx/wl1251_acx.c         |    2
 drivers/net/wireless/wl12xx/wl1251_acx.h         |   10
 drivers/net/wireless/wl12xx/wl1251_boot.c        |    4
 drivers/net/wireless/wl12xx/wl1251_boot.h        |    2
 drivers/net/wireless/wl12xx/wl1251_cmd.c         |    2
 drivers/net/wireless/wl12xx/wl1251_cmd.h         |    8
 drivers/net/wireless/wl12xx/wl1251_debugfs.c     |    2
 drivers/net/wireless/wl12xx/wl1251_debugfs.h     |    2
 drivers/net/wireless/wl12xx/wl1251_event.c       |   33
 drivers/net/wireless/wl12xx/wl1251_event.h       |    3
 drivers/net/wireless/wl12xx/wl1251_init.c        |    2
 drivers/net/wireless/wl12xx/wl1251_init.h        |    2
 drivers/net/wireless/wl12xx/wl1251_io.c          |    2
 drivers/net/wireless/wl12xx/wl1251_main.c        |   55
 drivers/net/wireless/wl12xx/wl1251_ps.c          |    2
 drivers/net/wireless/wl12xx/wl1251_ps.h          |    8
 drivers/net/wireless/wl12xx/wl1251_reg.h         |    2
 drivers/net/wireless/wl12xx/wl1251_rx.c          |    2
 drivers/net/wireless/wl12xx/wl1251_rx.h          |    2
 drivers/net/wireless/wl12xx/wl1251_sdio.c        |    2
 drivers/net/wireless/wl12xx/wl1251_spi.c         |    4
 drivers/net/wireless/wl12xx/wl1251_spi.h         |    2
 drivers/net/wireless/wl12xx/wl1251_tx.c          |   26
 drivers/net/wireless/wl12xx/wl1251_tx.h          |    2
 drivers/net/wireless/wl12xx/wl1271_acx.c         |    2
 drivers/net/wireless/wl12xx/wl1271_main.c        |   15
 drivers/net/wireless/wl12xx/wl1271_scan.c        |    4
 drivers/net/wireless/wl12xx/wl1271_tx.c          |    4
 drivers/net/wireless/wl3501_cs.c                 |   11
 drivers/net/wireless/zd1211rw/zd_chip.c          |    5
 drivers/net/xen-netfront.c                       |    8
 drivers/net/xilinx_emaclite.c                    |   15
 drivers/net/yellowfin.c                          |    2
 drivers/s390/cio/qdio.h                          |   29
 drivers/s390/cio/qdio_debug.c                    |   33
 drivers/s390/cio/qdio_main.c                     |  138
 drivers/s390/cio/qdio_setup.c                    |    1
 drivers/s390/cio/qdio_thinint.c                  |   66
 drivers/s390/net/Kconfig                         |    2
 drivers/s390/net/qeth_core.h                     |   17
 drivers/s390/net/qeth_core_main.c                |   26
 drivers/s390/net/qeth_l2_main.c                  |  173
 drivers/s390/net/qeth_l3_main.c                  |  189
 drivers/s390/scsi/zfcp_qdio.c                    |    6
 drivers/usb/atm/cxacru.c                         |   18
 include/linux/Kbuild                             |    1
 include/linux/etherdevice.h                      |   18
 include/linux/if.h                               |    2
 include/linux/if_ether.h                         |    2
 include/linux/if_macvlan.h                       |    9
 include/linux/if_pppox.h                         |   50
 include/linux/if_vlan.h                          |    5
 include/linux/in.h                               |   19
 include/linux/mlx4/cmd.h                         |    1
 include/linux/mlx4/device.h                      |    7
 include/linux/netdevice.h                        |   16
 include/linux/nl80211.h                          |  166
 include/linux/pci_ids.h                          |    3
 include/linux/phy.h                              |    4
 include/linux/pkt_cls.h                          |    1
 include/linux/rds.h                              |  115
 include/linux/rtnetlink.h                        |   11
 include/linux/skbuff.h                           |   72
 include/linux/spi/wl12xx.h                       |    2
 include/linux/ssb/ssb_regs.h                     |    1
 include/linux/stmmac.h                           |    2
 include/linux/tc_act/Kbuild                      |    1
 include/linux/tc_act/tc_csum.h                   |   32
 include/linux/tc_ematch/tc_em_meta.h             |    1
 include/linux/tcp.h                              |    1
 include/net/cfg80211.h                           |  203
 include/net/gre.h                                |   18
 include/net/inet_connection_sock.h               |    1
 include/net/ip.h                                 |    2
 include/net/irda/irlan_common.h                  |    1
 include/net/mac80211.h                           |   82
 include/net/raw.h                                |    5
 include/net/sctp/sctp.h                          |   48
 include/net/sock.h                               |    8
 include/net/tc_act/tc_csum.h                     |   15
 include/net/tcp.h                                |    9
 net/8021q/vlan_core.c                            |   14
 net/9p/trans_fd.c                                |    2
 net/atm/common.c                                 |    2
 net/atm/lec.c                                    |    1
 net/ax25/af_ax25.c                               |    2
 net/ax25/ax25_route.c                            |    4
 net/bluetooth/af_bluetooth.c                     |    5
 net/bridge/br_if.c                               |   29
 net/bridge/br_input.c                            |    2
 net/caif/caif_dev.c                              |   22
 net/caif/caif_socket.c                           |   19
 net/caif/cfcnfg.c                                |   49
 net/caif/cfctrl.c                                |   59
 net/caif/cfdbgl.c                                |    4
 net/caif/cfdgml.c                                |   11
 net/caif/cffrml.c                                |   14
 net/caif/cfmuxl.c                                |   14
 net/caif/cfpkt_skbuff.c                          |   48
 net/caif/cfrfml.c                                |   12
 net/caif/cfserl.c                                |    4
 net/caif/cfsrvl.c                                |   17
 net/caif/cfutill.c                               |   12
 net/caif/cfveil.c                                |   11
 net/caif/cfvidl.c                                |    6
 net/caif/chnl_net.c                              |   46
 net/can/raw.c                                    |    4
 net/core/datagram.c                              |    5
 net/core/dev.c                                   |  196
 net/core/ethtool.c                               |   34
 net/core/iovec.c                                 |    6
 net/core/net-sysfs.c                             |    5
 net/core/pktgen.c                                |    2
 net/core/rtnetlink.c                             |   31
 net/core/skbuff.c                                |   92
 net/core/sock.c                                  |    4
 net/dccp/ccids/Kconfig                           |   31
 net/dccp/ccids/ccid2.c                           |  287
 net/dccp/ccids/ccid2.h                           |   35
 net/dccp/ccids/ccid3.c                           |   81
 net/dccp/ccids/ccid3.h                           |   21
 net/decnet/dn_nsp_out.c                          |    8
 net/econet/af_econet.c                           |    2
 net/ethernet/eth.c                               |    6
 net/ipv4/Kconfig                                 |    7
 net/ipv4/Makefile                                |    1
 net/ipv4/af_inet.c                               |    8
 net/ipv4/arp.c                                   |  226
 net/ipv4/gre.c                                   |  151
 net/ipv4/icmp.c                                  |    4
 net/ipv4/ip_fragment.c                           |    2
 net/ipv4/ip_gre.c                                |   14
 net/ipv4/ip_output.c                             |   15
 net/ipv4/ipip.c                                  |    2
 net/ipv4/netfilter/ipt_CLUSTERIP.c               |   31
 net/ipv4/protocol.c                              |   31
 net/ipv4/raw.c                                   |    2
 net/ipv4/route.c                                 |    9
 net/ipv4/tcp.c                                   |   11
 net/ipv4/tcp_input.c                             |   17
 net/ipv4/tcp_ipv4.c                              |    2
 net/ipv4/tcp_output.c                            |   23
 net/ipv4/tcp_timer.c                             |   40
 net/ipv4/tunnel4.c                               |   19
 net/ipv4/udp.c                                   |    4
 net/ipv4/xfrm4_tunnel.c                          |    4
 net/ipv6/addrconf.c                              |    3
 net/ipv6/ip6_output.c                            |    2
 net/ipv6/ip6_tunnel.c                            |    4
 net/ipv6/ndisc.c                                 |   18
 net/ipv6/netfilter/nf_conntrack_reasm.c          |    2
 net/ipv6/protocol.c                              |   32
 net/ipv6/reassembly.c                            |    2
 net/ipv6/sit.c                                   |    2
 net/ipv6/tunnel6.c                               |   17
 net/ipv6/xfrm6_tunnel.c                          |    4
 net/irda/irlan/irlan_eth.c                       |   32
 net/l2tp/l2tp_eth.c                              |    1
 net/mac80211/aes_ccm.c                           |    6
 net/mac80211/aes_cmac.c                          |    6
 net/mac80211/agg-rx.c                            |   22
 net/mac80211/cfg.c                               |  115
 net/mac80211/debugfs.c                           |    6
 net/mac80211/debugfs_key.c                       |   55
 net/mac80211/driver-ops.h                        |   14
 net/mac80211/driver-trace.h                      |   29
 net/mac80211/ht.c                                |   28
 net/mac80211/ibss.c                              |   12
 net/mac80211/ieee80211_i.h                       |   66
 net/mac80211/iface.c                             |  375
 net/mac80211/key.c                               |  111
 net/mac80211/key.h                               |   10
 net/mac80211/main.c                              |  159
 net/mac80211/mlme.c                              |   64
 net/mac80211/offchannel.c                        |   19
 net/mac80211/pm.c                                |    3
 net/mac80211/rate.c                              |    9
 net/mac80211/rc80211_pid_debugfs.c               |    2
 net/mac80211/rx.c                                |  520
 net/mac80211/scan.c                              |   66
 net/mac80211/sta_info.c                          |   21
 net/mac80211/sta_info.h                          |   16
 net/mac80211/status.c                            |   11
 net/mac80211/tx.c                                |   67
 net/mac80211/util.c                              |   17
 net/mac80211/wep.c                               |    2
 net/mac80211/work.c                              |   39
 net/mac80211/wpa.c                               |   32
 net/netfilter/ipvs/ip_vs_core.c                  |   17
 net/netfilter/ipvs/ip_vs_ctl.c                   |   13
 net/netfilter/ipvs/ip_vs_sched.c                 |   22
 net/netfilter/xt_hashlimit.c                     |   15
 net/packet/af_packet.c                           |    4
 net/phonet/pep.c                                 |    8
 net/phonet/pn_dev.c                              |    3
 net/phonet/socket.c                              |    4
 net/rds/af_rds.c                                 |   26
 net/rds/bind.c                                   |   82
 net/rds/cong.c                                   |    8
 net/rds/connection.c                             |  157
 net/rds/ib.c                                     |  194
 net/rds/ib.h                                     |  100
 net/rds/ib_cm.c                                  |  183
 net/rds/ib_rdma.c                                |  314
 net/rds/ib_recv.c                                |  549
 net/rds/ib_send.c                                |  682
 net/rds/ib_stats.c                               |    2
 net/rds/ib_sysctl.c                              |   17
 net/rds/info.c                                   |   12
 net/rds/iw.c                                     |    4
 net/rds/iw.h                                     |   11
 net/rds/iw_cm.c                                  |   14
 net/rds/iw_rdma.c                                |    1
 net/rds/iw_recv.c                                |   24
 net/rds/iw_send.c                                |   93
 net/rds/iw_sysctl.c                              |    4
 net/rds/loop.c                                   |   31
 net/rds/message.c                                |  118
 net/rds/page.c                                   |    5
 net/rds/rdma.c                                   |  339
 net/rds/rdma.h                                   |   85
 net/rds/rdma_transport.c                         |   42
 net/rds/rds.h                                    |  187
 net/rds/recv.c                                   |    9
 net/rds/send.c                                   |  544
 net/rds/stats.c                                  |    6
 net/rds/sysctl.c                                 |    4
 net/rds/tcp.c                                    |    8
 net/rds/tcp.h                                    |    9
 net/rds/tcp_connect.c                            |    2
 net/rds/tcp_listen.c                             |    6
 net/rds/tcp_recv.c                               |   14
 net/rds/tcp_send.c                               |   66
 net/rds/threads.c                                |   69
 net/rds/transport.c                              |   19
 net/rds/xlist.h                                  |   80
 net/sched/Kconfig                                |   10
 net/sched/Makefile                               |    1
 net/sched/act_csum.c                             |  595
 net/sched/cls_flow.c                             |   74
 net/sched/em_meta.c                              |    6
 net/sched/sch_api.c                              |    2
 net/sched/sch_sfq.c                              |   33
 net/sctp/associola.c                             |    2
 net/sctp/chunk.c                                 |    2
 net/sctp/inqueue.c                               |    2
 net/sctp/ipv6.c                                  |    4
 net/sctp/objcnt.c                                |    5
 net/sctp/output.c                                |    2
 net/sctp/outqueue.c                              |   34
 net/sctp/probe.c                                 |    4
 net/sctp/protocol.c                              |   17
 net/sctp/sm_make_chunk.c                         |    2
 net/sctp/sm_sideeffect.c                         |   21
 net/sctp/sm_statefuns.c                          |   20
 net/sctp/sm_statetable.c                         |   42
 net/sctp/socket.c                                |   79
 net/sctp/transport.c                             |    9
 net/socket.c                                     |   30
 net/tipc/bcast.c                                 |   39
 net/tipc/core.c                                  |    6
 net/tipc/discover.c                              |    8
 net/tipc/eth_media.c                             |   48
 net/tipc/link.c                                  |   25
 net/tipc/name_table.c                            |   48
 net/tipc/net.c                                   |    1
 net/tipc/node.c                                  |   22
 net/tipc/node.h                                  |    2
 net/tipc/port.c                                  |   19
 net/tipc/socket.c                                |   81
 net/unix/af_unix.c                               |    5
 net/wireless/core.c                              |   15
 net/wireless/core.h                              |   21
 net/wireless/mlme.c                              |  149
 net/wireless/nl80211.c                           |  150
 net/wireless/nl80211.h                           |   14
 net/wireless/reg.c                               |   16
 net/wireless/sysfs.c                             |    9
 net/wireless/util.c                              |   13
 net/wireless/wext-core.c                         |    2
 net/wireless/wext-sme.c                          |    2
 690 files changed, 48324 insertions(+), 16797 deletions(-)
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
new file mode 100644
index 000000000000..19a1210c2530
--- /dev/null
+++ b/Documentation/DocBook/80211.tmpl
@@ -0,0 +1,495 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE set PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+<set>
+ <setinfo>
+ <title>The 802.11 subsystems &ndash; for kernel developers</title>
+ <subtitle>
+ Explaining wireless 802.11 networking in the Linux kernel
+ </subtitle>
+
+ <copyright>
+ <year>2007-2009</year>
+ <holder>Johannes Berg</holder>
+ </copyright>
+
+ <authorgroup>
+ <author>
+ <firstname>Johannes</firstname>
+ <surname>Berg</surname>
+ <affiliation>
+ <address><email>johannes@sipsolutions.net</email></address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+ <para>
+ This documentation is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this documentation; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+
+ <abstract>
+ <para>
+ These books attempt to give a description of the
+ various subsystems that play a role in 802.11 wireless
+ networking in Linux. Since these books are for kernel
+ developers they attempts to document the structures
+ and functions used in the kernel as well as giving a
+ higher-level overview.
+ </para>
+ <para>
+ The reader is expected to be familiar with the 802.11
+ standard as published by the IEEE in 802.11-2007 (or
+ possibly later versions). References to this standard
+ will be given as "802.11-2007 8.1.5".
+ </para>
+ </abstract>
+ </setinfo>
+ <book id="cfg80211-developers-guide">
+ <bookinfo>
+ <title>The cfg80211 subsystem</title>
+
+ <abstract>
+!Pinclude/net/cfg80211.h Introduction
+ </abstract>
+ </bookinfo>
+ <chapter>
+ <title>Device registration</title>
+!Pinclude/net/cfg80211.h Device registration
+!Finclude/net/cfg80211.h ieee80211_band
+!Finclude/net/cfg80211.h ieee80211_channel_flags
+!Finclude/net/cfg80211.h ieee80211_channel
+!Finclude/net/cfg80211.h ieee80211_rate_flags
+!Finclude/net/cfg80211.h ieee80211_rate
+!Finclude/net/cfg80211.h ieee80211_sta_ht_cap
+!Finclude/net/cfg80211.h ieee80211_supported_band
+!Finclude/net/cfg80211.h cfg80211_signal_type
+!Finclude/net/cfg80211.h wiphy_params_flags
+!Finclude/net/cfg80211.h wiphy_flags
+!Finclude/net/cfg80211.h wiphy
+!Finclude/net/cfg80211.h wireless_dev
+!Finclude/net/cfg80211.h wiphy_new
+!Finclude/net/cfg80211.h wiphy_register
+!Finclude/net/cfg80211.h wiphy_unregister
+!Finclude/net/cfg80211.h wiphy_free
+
+!Finclude/net/cfg80211.h wiphy_name
+!Finclude/net/cfg80211.h wiphy_dev
+!Finclude/net/cfg80211.h wiphy_priv
+!Finclude/net/cfg80211.h priv_to_wiphy
+!Finclude/net/cfg80211.h set_wiphy_dev
+!Finclude/net/cfg80211.h wdev_priv
+ </chapter>
+ <chapter>
+ <title>Actions and configuration</title>
+!Pinclude/net/cfg80211.h Actions and configuration
+!Finclude/net/cfg80211.h cfg80211_ops
+!Finclude/net/cfg80211.h vif_params
+!Finclude/net/cfg80211.h key_params
+!Finclude/net/cfg80211.h survey_info_flags
+!Finclude/net/cfg80211.h survey_info
+!Finclude/net/cfg80211.h beacon_parameters
+!Finclude/net/cfg80211.h plink_actions
+!Finclude/net/cfg80211.h station_parameters
+!Finclude/net/cfg80211.h station_info_flags
+!Finclude/net/cfg80211.h rate_info_flags
+!Finclude/net/cfg80211.h rate_info
+!Finclude/net/cfg80211.h station_info
+!Finclude/net/cfg80211.h monitor_flags
+!Finclude/net/cfg80211.h mpath_info_flags
+!Finclude/net/cfg80211.h mpath_info
+!Finclude/net/cfg80211.h bss_parameters
+!Finclude/net/cfg80211.h ieee80211_txq_params
+!Finclude/net/cfg80211.h cfg80211_crypto_settings
+!Finclude/net/cfg80211.h cfg80211_auth_request
+!Finclude/net/cfg80211.h cfg80211_assoc_request
+!Finclude/net/cfg80211.h cfg80211_deauth_request
+!Finclude/net/cfg80211.h cfg80211_disassoc_request
+!Finclude/net/cfg80211.h cfg80211_ibss_params
+!Finclude/net/cfg80211.h cfg80211_connect_params
+!Finclude/net/cfg80211.h cfg80211_pmksa
+!Finclude/net/cfg80211.h cfg80211_send_rx_auth
+!Finclude/net/cfg80211.h cfg80211_send_auth_timeout
+!Finclude/net/cfg80211.h __cfg80211_auth_canceled
+!Finclude/net/cfg80211.h cfg80211_send_rx_assoc
+!Finclude/net/cfg80211.h cfg80211_send_assoc_timeout
+!Finclude/net/cfg80211.h cfg80211_send_deauth
+!Finclude/net/cfg80211.h __cfg80211_send_deauth
+!Finclude/net/cfg80211.h cfg80211_send_disassoc
+!Finclude/net/cfg80211.h __cfg80211_send_disassoc
+!Finclude/net/cfg80211.h cfg80211_ibss_joined
+!Finclude/net/cfg80211.h cfg80211_connect_result
+!Finclude/net/cfg80211.h cfg80211_roamed
+!Finclude/net/cfg80211.h cfg80211_disconnected
+!Finclude/net/cfg80211.h cfg80211_ready_on_channel
+!Finclude/net/cfg80211.h cfg80211_remain_on_channel_expired
+!Finclude/net/cfg80211.h cfg80211_new_sta
+!Finclude/net/cfg80211.h cfg80211_rx_mgmt
+!Finclude/net/cfg80211.h cfg80211_mgmt_tx_status
+!Finclude/net/cfg80211.h cfg80211_cqm_rssi_notify
+!Finclude/net/cfg80211.h cfg80211_michael_mic_failure
+ </chapter>
+ <chapter>
+ <title>Scanning and BSS list handling</title>
+!Pinclude/net/cfg80211.h Scanning and BSS list handling
+!Finclude/net/cfg80211.h cfg80211_ssid
+!Finclude/net/cfg80211.h cfg80211_scan_request
+!Finclude/net/cfg80211.h cfg80211_scan_done
+!Finclude/net/cfg80211.h cfg80211_bss
+!Finclude/net/cfg80211.h cfg80211_inform_bss_frame
+!Finclude/net/cfg80211.h cfg80211_inform_bss
+!Finclude/net/cfg80211.h cfg80211_unlink_bss
+!Finclude/net/cfg80211.h cfg80211_find_ie
+!Finclude/net/cfg80211.h ieee80211_bss_get_ie
+ </chapter>
+ <chapter>
+ <title>Utility functions</title>
+!Pinclude/net/cfg80211.h Utility functions
+!Finclude/net/cfg80211.h ieee80211_channel_to_frequency
+!Finclude/net/cfg80211.h ieee80211_frequency_to_channel
+!Finclude/net/cfg80211.h ieee80211_get_channel
+!Finclude/net/cfg80211.h ieee80211_get_response_rate
+!Finclude/net/cfg80211.h ieee80211_hdrlen
+!Finclude/net/cfg80211.h ieee80211_get_hdrlen_from_skb
+!Finclude/net/cfg80211.h ieee80211_radiotap_iterator
+ </chapter>
+ <chapter>
+ <title>Data path helpers</title>
+!Pinclude/net/cfg80211.h Data path helpers
+!Finclude/net/cfg80211.h ieee80211_data_to_8023
+!Finclude/net/cfg80211.h ieee80211_data_from_8023
+!Finclude/net/cfg80211.h ieee80211_amsdu_to_8023s
+!Finclude/net/cfg80211.h cfg80211_classify8021d
+ </chapter>
+ <chapter>
+ <title>Regulatory enforcement infrastructure</title>
+!Pinclude/net/cfg80211.h Regulatory enforcement infrastructure
+!Finclude/net/cfg80211.h regulatory_hint
+!Finclude/net/cfg80211.h wiphy_apply_custom_regulatory
+!Finclude/net/cfg80211.h freq_reg_info
+ </chapter>
+ <chapter>
+ <title>RFkill integration</title>
+!Pinclude/net/cfg80211.h RFkill integration
+!Finclude/net/cfg80211.h wiphy_rfkill_set_hw_state
+!Finclude/net/cfg80211.h wiphy_rfkill_start_polling
+!Finclude/net/cfg80211.h wiphy_rfkill_stop_polling
+ </chapter>
+ <chapter>
+ <title>Test mode</title>
+!Pinclude/net/cfg80211.h Test mode
+!Finclude/net/cfg80211.h cfg80211_testmode_alloc_reply_skb
+!Finclude/net/cfg80211.h cfg80211_testmode_reply
+!Finclude/net/cfg80211.h cfg80211_testmode_alloc_event_skb
+!Finclude/net/cfg80211.h cfg80211_testmode_event
+ </chapter>
+ </book>
+ <book id="mac80211-developers-guide">
+ <bookinfo>
+ <title>The mac80211 subsystem</title>
+ <abstract>
+!Pinclude/net/mac80211.h Introduction
+!Pinclude/net/mac80211.h Warning
+ </abstract>
+ </bookinfo>
+
+ <toc></toc>
+
+ <!--
+ Generally, this document shall be ordered by increasing complexity.
+ It is important to note that readers should be able to read only
+ the first few sections to get a working driver and only advanced
+ usage should require reading the full document.
+ -->
+
+ <part>
+ <title>The basic mac80211 driver interface</title>
+ <partintro>
+ <para>
+ You should read and understand the information contained
+ within this part of the book while implementing a driver.
+ In some chapters, advanced usage is noted, that may be
+ skipped at first.
+ </para>
+ <para>
+ This part of the book only covers station and monitor mode
+ functionality, additional information required to implement
+ the other modes is covered in the second part of the book.
+ </para>
+ </partintro>
+
+ <chapter id="basics">
+ <title>Basic hardware handling</title>
+ <para>TBD</para>
+ <para>
+ This chapter shall contain information on getting a hw
+ struct allocated and registered with mac80211.
+ </para>
+ <para>
+ Since it is required to allocate rates/modes before registering
+ a hw struct, this chapter shall also contain information on setting
+ up the rate/mode structs.
+ </para>
+ <para>
+ Additionally, some discussion about the callbacks and
+ the general programming model should be in here, including
+ the definition of ieee80211_ops which will be referred to
+ a lot.
+ </para>
+ <para>
+ Finally, a discussion of hardware capabilities should be done
+ with references to other parts of the book.
+ </para>
+ <!-- intentionally multiple !F lines to get proper order -->
+!Finclude/net/mac80211.h ieee80211_hw
+!Finclude/net/mac80211.h ieee80211_hw_flags
+!Finclude/net/mac80211.h SET_IEEE80211_DEV
+!Finclude/net/mac80211.h SET_IEEE80211_PERM_ADDR
+!Finclude/net/mac80211.h ieee80211_ops
+!Finclude/net/mac80211.h ieee80211_alloc_hw
+!Finclude/net/mac80211.h ieee80211_register_hw
+!Finclude/net/mac80211.h ieee80211_get_tx_led_name
+!Finclude/net/mac80211.h ieee80211_get_rx_led_name
+!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
+!Finclude/net/mac80211.h ieee80211_get_radio_led_name
+!Finclude/net/mac80211.h ieee80211_unregister_hw
+!Finclude/net/mac80211.h ieee80211_free_hw
+ </chapter>
+
+ <chapter id="phy-handling">
+ <title>PHY configuration</title>
+ <para>TBD</para>
+ <para>
+ This chapter should describe PHY handling including
+ start/stop callbacks and the various structures used.
+ </para>
+!Finclude/net/mac80211.h ieee80211_conf
+!Finclude/net/mac80211.h ieee80211_conf_flags
+ </chapter>
+
+ <chapter id="iface-handling">
+ <title>Virtual interfaces</title>
+ <para>TBD</para>
+ <para>
+ This chapter should describe virtual interface basics
+ that are relevant to the driver (VLANs, MGMT etc are not.)
+ It should explain the use of the add_iface/remove_iface
+ callbacks as well as the interface configuration callbacks.
+ </para>
+ <para>Things related to AP mode should be discussed there.</para>
+ <para>
+ Things related to supporting multiple interfaces should be
+ in the appropriate chapter, a BIG FAT note should be here about
+ this though and the recommendation to allow only a single
+ interface in STA mode at first!
+ </para>
+!Finclude/net/mac80211.h ieee80211_vif
+ </chapter>
+
+ <chapter id="rx-tx">
+ <title>Receive and transmit processing</title>
+ <sect1>
+ <title>what should be here</title>
+ <para>TBD</para>
+ <para>
+ This should describe the receive and transmit
+ paths in mac80211/the drivers as well as
+ transmit status handling.
+ </para>
+ </sect1>
+ <sect1>
+ <title>Frame format</title>
+!Pinclude/net/mac80211.h Frame format
+ </sect1>
+ <sect1>
+ <title>Packet alignment</title>
+!Pnet/mac80211/rx.c Packet alignment
+ </sect1>
+ <sect1>
+ <title>Calling into mac80211 from interrupts</title>
+!Pinclude/net/mac80211.h Calling mac80211 from interrupts
+ </sect1>
+ <sect1>
+ <title>functions/definitions</title>
+!Finclude/net/mac80211.h ieee80211_rx_status
+!Finclude/net/mac80211.h mac80211_rx_flags
+!Finclude/net/mac80211.h ieee80211_tx_info
+!Finclude/net/mac80211.h ieee80211_rx
+!Finclude/net/mac80211.h ieee80211_rx_irqsafe
+!Finclude/net/mac80211.h ieee80211_tx_status
+!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
+!Finclude/net/mac80211.h ieee80211_rts_get
+!Finclude/net/mac80211.h ieee80211_rts_duration
+!Finclude/net/mac80211.h ieee80211_ctstoself_get
+!Finclude/net/mac80211.h ieee80211_ctstoself_duration
+!Finclude/net/mac80211.h ieee80211_generic_frame_duration
+!Finclude/net/mac80211.h ieee80211_wake_queue
+!Finclude/net/mac80211.h ieee80211_stop_queue
+!Finclude/net/mac80211.h ieee80211_wake_queues
+!Finclude/net/mac80211.h ieee80211_stop_queues
+ </sect1>
+ </chapter>
+
+ <chapter id="filters">
+ <title>Frame filtering</title>
+!Pinclude/net/mac80211.h Frame filtering
+!Finclude/net/mac80211.h ieee80211_filter_flags
+ </chapter>
+ </part>
+
+ <part id="advanced">
+ <title>Advanced driver interface</title>
+ <partintro>
+ <para>
+ Information contained within this part of the book is
+ of interest only for advanced interaction of mac80211
+ with drivers to exploit more hardware capabilities and
+ improve performance.
+ </para>
+ </partintro>
+
+ <chapter id="hardware-crypto-offload">
+ <title>Hardware crypto acceleration</title>
+!Pinclude/net/mac80211.h Hardware crypto acceleration
+ <!-- intentionally multiple !F lines to get proper order -->
+!Finclude/net/mac80211.h set_key_cmd
+!Finclude/net/mac80211.h ieee80211_key_conf
+!Finclude/net/mac80211.h ieee80211_key_flags
+ </chapter>
+
+ <chapter id="powersave">
+ <title>Powersave support</title>
+!Pinclude/net/mac80211.h Powersave support
+ </chapter>
+
+ <chapter id="beacon-filter">
+ <title>Beacon filter support</title>
+!Pinclude/net/mac80211.h Beacon filter support
+!Finclude/net/mac80211.h ieee80211_beacon_loss
+ </chapter>
+
+ <chapter id="qos">
+ <title>Multiple queues and QoS support</title>
+ <para>TBD</para>
+!Finclude/net/mac80211.h ieee80211_tx_queue_params
+ </chapter>
+
+ <chapter id="AP">
+ <title>Access point mode support</title>
+ <para>TBD</para>
+ <para>Some parts of the if_conf should be discussed here instead</para>
+ <para>
+ Insert notes about VLAN interfaces with hw crypto here or
+ in the hw crypto chapter.
+ </para>
+!Finclude/net/mac80211.h ieee80211_get_buffered_bc
+!Finclude/net/mac80211.h ieee80211_beacon_get
+ </chapter>
+
+ <chapter id="multi-iface">
+ <title>Supporting multiple virtual interfaces</title>
+ <para>TBD</para>
+ <para>
+ Note: WDS with identical MAC address should almost always be OK
+ </para>
+ <para>
+ Insert notes about having multiple virtual interfaces with
+ different MAC addresses here, note which configurations are
+ supported by mac80211, add notes about supporting hw crypto
+ with it.
+ </para>
+ </chapter>
+
+ <chapter id="hardware-scan-offload">
+ <title>Hardware scan offload</title>
+ <para>TBD</para>
+!Finclude/net/mac80211.h ieee80211_scan_completed
+ </chapter>
+ </part>
+
+ <part id="rate-control">
+ <title>Rate control interface</title>
+ <partintro>
+ <para>TBD</para>
+ <para>
+ This part of the book describes the rate control algorithm
+ interface and how it relates to mac80211 and drivers.
+ </para>
+ </partintro>
+ <chapter id="dummy">
+ <title>dummy chapter</title>
+ <para>TBD</para>
+ </chapter>
+ </part>
+
+ <part id="internal">
+ <title>Internals</title>
+ <partintro>
+ <para>TBD</para>
+ <para>
+ This part of the book describes mac80211 internals.
+ </para>
+ </partintro>
+
+ <chapter id="key-handling">
+ <title>Key handling</title>
+ <sect1>
+ <title>Key handling basics</title>
+!Pnet/mac80211/key.c Key handling basics
+ </sect1>
+ <sect1>
+ <title>MORE TBD</title>
+ <para>TBD</para>
+ </sect1>
+ </chapter>
+
+ <chapter id="rx-processing">
+ <title>Receive processing</title>
+ <para>TBD</para>
+ </chapter>
+
+ <chapter id="tx-processing">
+ <title>Transmit processing</title>
+ <para>TBD</para>
+ </chapter>
+
+ <chapter id="sta-info">
+ <title>Station info handling</title>
+ <sect1>
+ <title>Programming information</title>
+!Fnet/mac80211/sta_info.h sta_info
+!Fnet/mac80211/sta_info.h ieee80211_sta_info_flags
+ </sect1>
+ <sect1>
+ <title>STA information lifetime rules</title>
+!Pnet/mac80211/sta_info.c STA information lifetime rules
+ </sect1>
+ </chapter>
+
+ <chapter id="synchronisation">
+ <title>Synchronisation</title>
+ <para>TBD</para>
+ <para>Locking, lots of RCU</para>
+ </chapter>
+ </part>
+ </book>
+</set>
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 34929f24c284..8b6e00a71034 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
 kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
 gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
 genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-mac80211.xml debugobjects.xml sh.xml regulator.xml \
+80211.xml debugobjects.xml sh.xml regulator.xml \
 alsa-driver-api.xml writing-an-alsa-driver.xml \
 tracepoint.xml media.xml drm.xml
 
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
deleted file mode 100644
index affb15a344a1..000000000000
--- a/Documentation/DocBook/mac80211.tmpl
+++ /dev/null
@@ -1,337 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="mac80211-developers-guide">
- <bookinfo>
- <title>The mac80211 subsystem for kernel developers</title>
-
- <authorgroup>
- <author>
- <firstname>Johannes</firstname>
- <surname>Berg</surname>
- <affiliation>
- <address><email>johannes@sipsolutions.net</email></address>
- </affiliation>
- </author>
- </authorgroup>
-
- <copyright>
- <year>2007-2009</year>
- <holder>Johannes Berg</holder>
- </copyright>
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute
- it and/or modify it under the terms of the GNU General Public
- License version 2 as published by the Free Software Foundation.
- </para>
-
- <para>
- This documentation is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this documentation; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
-
- <abstract>
-!Pinclude/net/mac80211.h Introduction
-!Pinclude/net/mac80211.h Warning
- </abstract>
- </bookinfo>
-
- <toc></toc>
-
-<!--
-Generally, this document shall be ordered by increasing complexity.
-It is important to note that readers should be able to read only
-the first few sections to get a working driver and only advanced
-usage should require reading the full document.
--->
-
- <part>
- <title>The basic mac80211 driver interface</title>
- <partintro>
- <para>
- You should read and understand the information contained
- within this part of the book while implementing a driver.
- In some chapters, advanced usage is noted, that may be
- skipped at first.
- </para>
- <para>
- This part of the book only covers station and monitor mode
- functionality, additional information required to implement
- the other modes is covered in the second part of the book.
- </para>
- </partintro>
-
- <chapter id="basics">
- <title>Basic hardware handling</title>
- <para>TBD</para>
- <para>
- This chapter shall contain information on getting a hw
- struct allocated and registered with mac80211.
- </para>
- <para>
- Since it is required to allocate rates/modes before registering
- a hw struct, this chapter shall also contain information on setting
- up the rate/mode structs.
- </para>
- <para>
- Additionally, some discussion about the callbacks and
- the general programming model should be in here, including
- the definition of ieee80211_ops which will be referred to
- a lot.
- </para>
- <para>
- Finally, a discussion of hardware capabilities should be done
- with references to other parts of the book.
- </para>
-<!-- intentionally multiple !F lines to get proper order -->
-!Finclude/net/mac80211.h ieee80211_hw
-!Finclude/net/mac80211.h ieee80211_hw_flags
-!Finclude/net/mac80211.h SET_IEEE80211_DEV
-!Finclude/net/mac80211.h SET_IEEE80211_PERM_ADDR
-!Finclude/net/mac80211.h ieee80211_ops
-!Finclude/net/mac80211.h ieee80211_alloc_hw
-!Finclude/net/mac80211.h ieee80211_register_hw
-!Finclude/net/mac80211.h ieee80211_get_tx_led_name
-!Finclude/net/mac80211.h ieee80211_get_rx_led_name
-!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
-!Finclude/net/mac80211.h ieee80211_get_radio_led_name
-!Finclude/net/mac80211.h ieee80211_unregister_hw
-!Finclude/net/mac80211.h ieee80211_free_hw
- </chapter>
-
- <chapter id="phy-handling">
- <title>PHY configuration</title>
- <para>TBD</para>
- <para>
- This chapter should describe PHY handling including
- start/stop callbacks and the various structures used.
- </para>
-!Finclude/net/mac80211.h ieee80211_conf
-!Finclude/net/mac80211.h ieee80211_conf_flags
- </chapter>
-
- <chapter id="iface-handling">
- <title>Virtual interfaces</title>
- <para>TBD</para>
- <para>
- This chapter should describe virtual interface basics
- that are relevant to the driver (VLANs, MGMT etc are not.)
- It should explain the use of the add_iface/remove_iface
- callbacks as well as the interface configuration callbacks.
- </para>
- <para>Things related to AP mode should be discussed there.</para>
- <para>
- Things related to supporting multiple interfaces should be
- in the appropriate chapter, a BIG FAT note should be here about
- this though and the recommendation to allow only a single
- interface in STA mode at first!
- </para>
-!Finclude/net/mac80211.h ieee80211_vif
- </chapter>
-
- <chapter id="rx-tx">
- <title>Receive and transmit processing</title>
- <sect1>
- <title>what should be here</title>
- <para>TBD</para>
- <para>
- This should describe the receive and transmit
- paths in mac80211/the drivers as well as
- transmit status handling.
- </para>
- </sect1>
- <sect1>
- <title>Frame format</title>
-!Pinclude/net/mac80211.h Frame format
- </sect1>
- <sect1>
- <title>Packet alignment</title>
-!Pnet/mac80211/rx.c Packet alignment
- </sect1>
- <sect1>
- <title>Calling into mac80211 from interrupts</title>
-!Pinclude/net/mac80211.h Calling mac80211 from interrupts
- </sect1>
- <sect1>
- <title>functions/definitions</title>
-!Finclude/net/mac80211.h ieee80211_rx_status
-!Finclude/net/mac80211.h mac80211_rx_flags
-!Finclude/net/mac80211.h ieee80211_tx_info
-!Finclude/net/mac80211.h ieee80211_rx
-!Finclude/net/mac80211.h ieee80211_rx_irqsafe
-!Finclude/net/mac80211.h ieee80211_tx_status
-!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
-!Finclude/net/mac80211.h ieee80211_rts_get
-!Finclude/net/mac80211.h ieee80211_rts_duration
-!Finclude/net/mac80211.h ieee80211_ctstoself_get
-!Finclude/net/mac80211.h ieee80211_ctstoself_duration
-!Finclude/net/mac80211.h ieee80211_generic_frame_duration
-!Finclude/net/mac80211.h ieee80211_wake_queue
-!Finclude/net/mac80211.h ieee80211_stop_queue
-!Finclude/net/mac80211.h ieee80211_wake_queues
-!Finclude/net/mac80211.h ieee80211_stop_queues
- </sect1>
- </chapter>
-
- <chapter id="filters">
- <title>Frame filtering</title>
-!Pinclude/net/mac80211.h Frame filtering
-!Finclude/net/mac80211.h ieee80211_filter_flags
- </chapter>
- </part>
-
- <part id="advanced">
- <title>Advanced driver interface</title>
- <partintro>
- <para>
- Information contained within this part of the book is
- of interest only for advanced interaction of mac80211
- with drivers to exploit more hardware capabilities and
- improve performance.
- </para>
- </partintro>
-
- <chapter id="hardware-crypto-offload">
- <title>Hardware crypto acceleration</title>
-!Pinclude/net/mac80211.h Hardware crypto acceleration
-<!-- intentionally multiple !F lines to get proper order -->
-!Finclude/net/mac80211.h set_key_cmd
-!Finclude/net/mac80211.h ieee80211_key_conf
-!Finclude/net/mac80211.h ieee80211_key_alg
-!Finclude/net/mac80211.h ieee80211_key_flags
- </chapter>
-
- <chapter id="powersave">
- <title>Powersave support</title>
-!Pinclude/net/mac80211.h Powersave support
- </chapter>
-
- <chapter id="beacon-filter">
- <title>Beacon filter support</title>
-!Pinclude/net/mac80211.h Beacon filter support
-!Finclude/net/mac80211.h ieee80211_beacon_loss
- </chapter>
-
- <chapter id="qos">
- <title>Multiple queues and QoS support</title>
- <para>TBD</para>
-!Finclude/net/mac80211.h ieee80211_tx_queue_params
- </chapter>
-
- <chapter id="AP">
- <title>Access point mode support</title>
- <para>TBD</para>
- <para>Some parts of the if_conf should be discussed here instead</para>
- <para>
- Insert notes about VLAN interfaces with hw crypto here or
- in the hw crypto chapter.
- </para>
-!Finclude/net/mac80211.h ieee80211_get_buffered_bc
-!Finclude/net/mac80211.h ieee80211_beacon_get
- </chapter>
-
- <chapter id="multi-iface">
- <title>Supporting multiple virtual interfaces</title>
- <para>TBD</para>
- <para>
- Note: WDS with identical MAC address should almost always be OK
- </para>
- <para>
- Insert notes about having multiple virtual interfaces with
- different MAC addresses here, note which configurations are
- supported by mac80211, add notes about supporting hw crypto
- with it.
- </para>
- </chapter>
-
- <chapter id="hardware-scan-offload">
- <title>Hardware scan offload</title>
- <para>TBD</para>
-!Finclude/net/mac80211.h ieee80211_scan_completed
- </chapter>
- </part>
-
- <part id="rate-control">
- <title>Rate control interface</title>
- <partintro>
- <para>TBD</para>
- <para>
- This part of the book describes the rate control algorithm
- interface and how it relates to mac80211 and drivers.
- </para>
- </partintro>
- <chapter id="dummy">
- <title>dummy chapter</title>
- <para>TBD</para>
- </chapter>
- </part>
-
- <part id="internal">
- <title>Internals</title>
- <partintro>
- <para>TBD</para>
- <para>
- This part of the book describes mac80211 internals.
- </para>
- </partintro>
-
- <chapter id="key-handling">
- <title>Key handling</title>
- <sect1>
- <title>Key handling basics</title>
-!Pnet/mac80211/key.c Key handling basics
- </sect1>
- <sect1>
- <title>MORE TBD</title>
- <para>TBD</para>
- </sect1>
- </chapter>
-
- <chapter id="rx-processing">
- <title>Receive processing</title>
- <para>TBD</para>
- </chapter>
-
- <chapter id="tx-processing">
- <title>Transmit processing</title>
- <para>TBD</para>
- </chapter>
-
- <chapter id="sta-info">
- <title>Station info handling</title>
- <sect1>
- <title>Programming information</title>
-!Fnet/mac80211/sta_info.h sta_info
-!Fnet/mac80211/sta_info.h ieee80211_sta_info_flags
- </sect1>
- <sect1>
- <title>STA information lifetime rules</title>
-!Pnet/mac80211/sta_info.c STA information lifetime rules
- </sect1>
- </chapter>
-
- <chapter id="synchronisation">
- <title>Synchronisation</title>
- <para>TBD</para>
- <para>Locking, lots of RCU</para>
- </chapter>
- </part>
-</book>
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index a62fdf7a6bff..271d524a4c8d 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -1,18 +1,20 @@
 DCCP protocol
-============
+=============
 
 
 Contents
 ========
-
 - Introduction
 - Missing features
 - Socket options
+- Sysctl variables
+- IOCTLs
+- Other tunables
 - Notes
 
+
 Introduction
 ============
-
 Datagram Congestion Control Protocol (DCCP) is an unreliable, connection
 oriented protocol designed to solve issues present in UDP and TCP, particularly
 for real-time and multimedia (streaming) traffic.
@@ -29,9 +31,9 @@ It has a base protocol and pluggable congestion control IDs (CCIDs).
29DCCP is a Proposed Standard (RFC 2026), and the homepage for DCCP as a protocol 31DCCP is a Proposed Standard (RFC 2026), and the homepage for DCCP as a protocol
30is at http://www.ietf.org/html.charters/dccp-charter.html 32is at http://www.ietf.org/html.charters/dccp-charter.html
31 33
34
32Missing features 35Missing features
33================ 36================
34
35The Linux DCCP implementation does not currently support all the features that are 37The Linux DCCP implementation does not currently support all the features that are
36specified in RFCs 4340...42. 38specified in RFCs 4340...42.
37 39
@@ -45,7 +47,6 @@ http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
45 47
46Socket options 48Socket options
47============== 49==============
48
49DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of 50DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
50service codes (RFC 4340, sec. 8.1.2); if this socket option is not set, 51service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
51the socket will fall back to 0 (which means that no meaningful service code 52the socket will fall back to 0 (which means that no meaningful service code
@@ -112,6 +113,7 @@ DCCP_SOCKOPT_CCID_TX_INFO
112On unidirectional connections it is useful to close the unused half-connection 113On unidirectional connections it is useful to close the unused half-connection
113via shutdown (SHUT_WR or SHUT_RD): this will reduce per-packet processing costs. 114via shutdown (SHUT_WR or SHUT_RD): this will reduce per-packet processing costs.
114 115
116
115Sysctl variables 117Sysctl variables
116================ 118================
117Several DCCP default parameters can be managed by the following sysctls 119Several DCCP default parameters can be managed by the following sysctls
@@ -155,15 +157,30 @@ sync_ratelimit = 125 ms
155 sequence-invalid packets on the same socket (RFC 4340, 7.5.4). The unit 157 sequence-invalid packets on the same socket (RFC 4340, 7.5.4). The unit
156 of this parameter is milliseconds; a value of 0 disables rate-limiting. 158 of this parameter is milliseconds; a value of 0 disables rate-limiting.
157 159
160
158IOCTLS 161IOCTLS
159====== 162======
160FIONREAD 163FIONREAD
161 Works as in udp(7): returns in the `int' argument pointer the size of 164 Works as in udp(7): returns in the `int' argument pointer the size of
162 the next pending datagram in bytes, or 0 when no datagram is pending. 165 the next pending datagram in bytes, or 0 when no datagram is pending.
163 166
167
168Other tunables
169==============
170Per-route rto_min support
171 CCID-2 supports the RTAX_RTO_MIN per-route setting for the minimum value
172 of the RTO timer. This setting can be modified via the 'rto_min' option
173 of iproute2; for example:
174 > ip route change 10.0.0.0/24 rto_min 250j dev wlan0
175 > ip route add 10.0.0.254/32 rto_min 800j dev wlan0
176 > ip route show dev wlan0
177 CCID-3 also supports the rto_min setting: it is used to define the lower
178 bound for the expiry of the nofeedback timer. This can be useful on LANs
179 with very low RTTs (e.g., loopback, Gbit ethernet).
180
181
164Notes 182Notes
165===== 183=====
166
167DCCP does not travel through NAT successfully at present on many boxes. This is 184DCCP does not travel through NAT successfully at present on many boxes. This is
168because the checksum covers the pseudo-header as per TCP and UDP. Linux NAT 185because the checksum covers the pseudo-header as per TCP and UDP. Linux NAT
169support for DCCP has been added. 186support for DCCP has been added.
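
As a companion to the socket-option text in the hunks above, a minimal user-space sketch of opening a DCCP socket and setting DCCP_SOCKOPT_SERVICE before connect(). Illustrative only: the service code 42, port, and address are made up, the fallback #defines mirror the kernel's own values in case the libc headers lack them, and the byte order follows common usage rather than anything normative in the text.

    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/dccp.h>

    #ifndef SOCK_DCCP
    #define SOCK_DCCP 6             /* value from linux/net.h */
    #endif
    #ifndef IPPROTO_DCCP
    #define IPPROTO_DCCP 33
    #endif
    #ifndef SOL_DCCP
    #define SOL_DCCP 269            /* value from linux/socket.h */
    #endif

    int main(void)
    {
            __u32 service = htonl(42);      /* illustrative service code */
            struct sockaddr_in sin;
            int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

            if (fd < 0)
                    return 1;
            /* must happen before connect(); otherwise the socket falls
             * back to service code 0, as described above */
            if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
                           &service, sizeof(service)) < 0)
                    return 1;

            memset(&sin, 0, sizeof(sin));
            sin.sin_family = AF_INET;
            sin.sin_port = htons(5001);             /* made up */
            sin.sin_addr.s_addr = inet_addr("127.0.0.1");
            return connect(fd, (struct sockaddr *)&sin, sizeof(sin)) ? 1 : 0;
    }
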
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f350c69b2bb4..c7165f4cb792 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1014,6 +1014,12 @@ conf/interface/*:
1014accept_ra - BOOLEAN 1014accept_ra - BOOLEAN
1015 Accept Router Advertisements; autoconfigure using them. 1015 Accept Router Advertisements; autoconfigure using them.
1016 1016
1017 Possible values are:
1018 0 Do not accept Router Advertisements.
1019 1 Accept Router Advertisements if forwarding is disabled.
1020 2 Overrule forwarding behaviour. Accept Router Advertisements
1021 even if forwarding is enabled.
1022
1017 Functional default: enabled if local forwarding is disabled. 1023 Functional default: enabled if local forwarding is disabled.
1018 disabled if local forwarding is enabled. 1024 disabled if local forwarding is enabled.
1019 1025
@@ -1075,7 +1081,12 @@ forwarding - BOOLEAN
1075 Note: It is recommended to have the same setting on all 1081 Note: It is recommended to have the same setting on all
1076 interfaces; mixed router/host scenarios are rather uncommon. 1082 interfaces; mixed router/host scenarios are rather uncommon.
1077 1083
1078 FALSE: 1084 Possible values are:
1085 0 Forwarding disabled
1086 1 Forwarding enabled
1087 2 Forwarding enabled (Hybrid Mode)
1088
1089 FALSE (0):
1079 1090
1080 By default, Host behaviour is assumed. This means: 1091 By default, Host behaviour is assumed. This means:
1081 1092
@@ -1085,18 +1096,24 @@ forwarding - BOOLEAN
1085 Advertisements (and do autoconfiguration). 1096 Advertisements (and do autoconfiguration).
1086 4. If accept_redirects is TRUE (default), accept Redirects. 1097 4. If accept_redirects is TRUE (default), accept Redirects.
1087 1098
1088 TRUE: 1099 TRUE (1):
1089 1100
1090 If local forwarding is enabled, Router behaviour is assumed. 1101 If local forwarding is enabled, Router behaviour is assumed.
1091 This means exactly the reverse from the above: 1102 This means exactly the reverse from the above:
1092 1103
1093 1. IsRouter flag is set in Neighbour Advertisements. 1104 1. IsRouter flag is set in Neighbour Advertisements.
1094 2. Router Solicitations are not sent. 1105 2. Router Solicitations are not sent.
1095 3. Router Advertisements are ignored. 1106 3. Router Advertisements are ignored unless accept_ra is 2.
1096 4. Redirects are ignored. 1107 4. Redirects are ignored.
1097 1108
1098 Default: FALSE if global forwarding is disabled (default), 1109 TRUE (2):
1099 otherwise TRUE. 1110
1111 Hybrid mode. Same behaviour as TRUE, except for:
1112
1113 2. Router Solicitations are sent when necessary.
1114
1115 Default: 0 (disabled) if global forwarding is disabled (default),
1116 otherwise 1 (enabled).
1100 1117
1101hop_limit - INTEGER 1118hop_limit - INTEGER
1102 Default Hop Limit to set. 1119 Default Hop Limit to set.
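
The tri-state values documented above are plain sysctls, so they can be flipped from user space by writing to /proc/sys. A rough sketch, assuming an interface named eth0 (purely illustrative) and hybrid-router behaviour as the goal:

    #include <stdio.h>

    static int sysctl_write(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* forwarding=2: hybrid mode, Router Solicitations still sent */
            if (sysctl_write("/proc/sys/net/ipv6/conf/eth0/forwarding", "2"))
                    return 1;
            /* accept_ra=2: accept RAs even though forwarding is enabled */
            return sysctl_write("/proc/sys/net/ipv6/conf/eth0/accept_ra",
                                "2") ? 1 : 0;
    }
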
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index e8c8f4f06c67..98097d8cb910 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -172,15 +172,19 @@ struct skb_shared_hwtstamps {
172}; 172};
173 173
174Time stamps for outgoing packets are to be generated as follows: 174Time stamps for outgoing packets are to be generated as follows:
175- In hard_start_xmit(), check if skb_tx(skb)->hardware is set non-zero. 175- In hard_start_xmit(), check if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
176 If yes, then the driver is expected to do hardware time stamping. 176 is set non-zero. If yes, then the driver is expected to do hardware time
177 stamping.
177- If this is possible for the skb and requested, then declare 178- If this is possible for the skb and requested, then declare
178 that the driver is doing the time stamping by setting the field 179 that the driver is doing the time stamping by setting the flag
179 skb_tx(skb)->in_progress non-zero. You might want to keep a pointer 180 SKBTX_IN_PROGRESS in skb_shinfo(skb)->tx_flags, e.g. with
180 to the associated skb for the next step and not free the skb. A driver 181
181 not supporting hardware time stamping doesn't do that. A driver must 182 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
182 never touch sk_buff::tstamp! It is used to store software generated 183
183 time stamps by the network subsystem. 184 You might want to keep a pointer to the associated skb for the next step
185 and not free the skb. A driver not supporting hardware time stamping doesn't
186 do that. A driver must never touch sk_buff::tstamp! It is used to store
187 software generated time stamps by the network subsystem.
184- As soon as the driver has sent the packet and/or obtained a 188- As soon as the driver has sent the packet and/or obtained a
185 hardware time stamp for it, it passes the time stamp back by 189 hardware time stamp for it, it passes the time stamp back by
186 calling skb_hwtstamp_tx() with the original skb, the raw 190 calling skb_hwtstamp_tx() with the original skb, the raw
@@ -191,6 +195,6 @@ Time stamps for outgoing packets are to be generated as follows:
191 this would occur at a later time in the processing pipeline than other 195 this would occur at a later time in the processing pipeline than other
192 software time stamping and therefore could lead to unexpected deltas 196 software time stamping and therefore could lead to unexpected deltas
193 between time stamps. 197 between time stamps.
194- If the driver did not call set skb_tx(skb)->in_progress, then 198- If the driver did not set the SKBTX_IN_PROGRESS flag (see above), then
195 dev_hard_start_xmit() checks whether software time stamping 199 dev_hard_start_xmit() checks whether software time stamping
196 is wanted as fallback and potentially generates the time stamp. 200 is wanted as fallback and potentially generates the time stamp.
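
Condensing the steps above into code, a hardware-time-stamping TX path might be shaped roughly as follows. This is a sketch under stated assumptions: the foo_* names, hw_xmit() and read_tx_hwtstamp() are invented placeholders, the completion helper is the one named in the text, and a real driver must map its own completion machinery onto this outline.

    struct foo_priv {                       /* invented driver state */
            struct sk_buff *tstamp_skb;     /* skb awaiting its hw stamp */
    };

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                    /* declare that the hardware will stamp this skb */
                    skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                    priv->tstamp_skb = skb_get(skb);  /* keep for later */
            }
            /* note: skb->tstamp is never touched, it belongs to the stack */
            hw_xmit(priv, skb);             /* hypothetical hardware queueing */
            return NETDEV_TX_OK;
    }

    /* later, e.g. from the TX-completion interrupt */
    static void foo_tx_complete(struct foo_priv *priv)
    {
            struct skb_shared_hwtstamps hwts = { };

            if (priv->tstamp_skb) {
                    hwts.hwtstamp = read_tx_hwtstamp(priv); /* hypothetical */
                    /* hand the raw stamp back, per the text above */
                    skb_hwtstamp_tx(priv->tstamp_skb, &hwts);
                    dev_kfree_skb_any(priv->tstamp_skb);
                    priv->tstamp_skb = NULL;
            }
    }
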
diff --git a/MAINTAINERS b/MAINTAINERS
index b5fb9bdcabb1..102352e6d61d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1398,6 +1398,13 @@ L: linux-scsi@vger.kernel.org
1398S: Supported 1398S: Supported
1399F: drivers/scsi/bfa/ 1399F: drivers/scsi/bfa/
1400 1400
1401BROCADE BNA 10 GIGABIT ETHERNET DRIVER
1402M: Rasesh Mody <rmody@brocade.com>
1403M: Debashis Dutt <ddutt@brocade.com>
1404L: netdev@vger.kernel.org
1405S: Supported
1406F: drivers/net/bna/
1407
1401BSG (block layer generic sg v4 driver) 1408BSG (block layer generic sg v4 driver)
1402M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> 1409M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
1403L: linux-scsi@vger.kernel.org 1410L: linux-scsi@vger.kernel.org
@@ -2881,6 +2888,12 @@ M: Brian King <brking@us.ibm.com>
2881S: Supported 2888S: Supported
2882F: drivers/scsi/ipr.* 2889F: drivers/scsi/ipr.*
2883 2890
2891IBM Power Virtual Ethernet Device Driver
2892M: Santiago Leon <santil@linux.vnet.ibm.com>
2893L: netdev@vger.kernel.org
2894S: Supported
2895F: drivers/net/ibmveth.*
2896
2884IBM ServeRAID RAID DRIVER 2897IBM ServeRAID RAID DRIVER
2885P: Jack Hammer 2898P: Jack Hammer
2886M: Dave Jeffery <ipslinux@adaptec.com> 2899M: Dave Jeffery <ipslinux@adaptec.com>
@@ -4328,13 +4341,12 @@ F: Documentation/filesystems/dlmfs.txt
4328F: fs/ocfs2/ 4341F: fs/ocfs2/
4329 4342
4330ORINOCO DRIVER 4343ORINOCO DRIVER
4331M: Pavel Roskin <proski@gnu.org>
4332M: David Gibson <hermes@gibson.dropbear.id.au>
4333L: linux-wireless@vger.kernel.org 4344L: linux-wireless@vger.kernel.org
4334L: orinoco-users@lists.sourceforge.net 4345L: orinoco-users@lists.sourceforge.net
4335L: orinoco-devel@lists.sourceforge.net 4346L: orinoco-devel@lists.sourceforge.net
4347W: http://linuxwireless.org/en/users/Drivers/orinoco
4336W: http://www.nongnu.org/orinoco/ 4348W: http://www.nongnu.org/orinoco/
4337S: Maintained 4349S: Orphan
4338F: drivers/net/wireless/orinoco/ 4350F: drivers/net/wireless/orinoco/
4339 4351
4340OSD LIBRARY and FILESYSTEM 4352OSD LIBRARY and FILESYSTEM
@@ -6400,7 +6412,7 @@ S: Maintained
6400F: drivers/input/misc/wistron_btns.c 6412F: drivers/input/misc/wistron_btns.c
6401 6413
6402WL1251 WIRELESS DRIVER 6414WL1251 WIRELESS DRIVER
6403M: Kalle Valo <kalle.valo@iki.fi> 6415M: Kalle Valo <kvalo@adurom.com>
6404L: linux-wireless@vger.kernel.org 6416L: linux-wireless@vger.kernel.org
6405W: http://wireless.kernel.org 6417W: http://wireless.kernel.org
6406T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git 6418T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
@@ -6415,6 +6427,7 @@ W: http://wireless.kernel.org
6415T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git 6427T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
6416S: Maintained 6428S: Maintained
6417F: drivers/net/wireless/wl12xx/wl1271* 6429F: drivers/net/wireless/wl12xx/wl1271*
6430F: include/linux/spi/wl12xx.h
6418 6431
6419WL3501 WIRELESS PCMCIA CARD DRIVER 6432WL3501 WIRELESS PCMCIA CARD DRIVER
6420M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 6433M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
@@ -6559,6 +6572,20 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org>
6559S: Maintained 6572S: Maintained
6560F: drivers/serial/zs.* 6573F: drivers/serial/zs.*
6561 6574
6575GRE DEMULTIPLEXER DRIVER
6576M: Dmitry Kozlov <xeb@mail.ru>
6577L: netdev@vger.kernel.org
6578S: Maintained
6579F: net/ipv4/gre.c
6580F: include/net/gre.h
6581
6582PPTP DRIVER
6583M: Dmitry Kozlov <xeb@mail.ru>
6584L: netdev@vger.kernel.org
6585S: Maintained
6586F: drivers/net/pptp.c
6587W: http://sourceforge.net/projects/accel-pptp
6588
6562THE REST 6589THE REST
6563M: Linus Torvalds <torvalds@linux-foundation.org> 6590M: Linus Torvalds <torvalds@linux-foundation.org>
6564L: linux-kernel@vger.kernel.org 6591L: linux-kernel@vger.kernel.org
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 2ba630276295..46e96bc1f5a1 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -360,6 +360,7 @@ struct qdio_initialize {
360 unsigned int no_output_qs; 360 unsigned int no_output_qs;
361 qdio_handler_t *input_handler; 361 qdio_handler_t *input_handler;
362 qdio_handler_t *output_handler; 362 qdio_handler_t *output_handler;
363 void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
363 unsigned long int_parm; 364 unsigned long int_parm;
364 void **input_sbal_addr_array; 365 void **input_sbal_addr_array;
365 void **output_sbal_addr_array; 366 void **output_sbal_addr_array;
@@ -377,11 +378,13 @@ struct qdio_initialize {
377extern int qdio_allocate(struct qdio_initialize *); 378extern int qdio_allocate(struct qdio_initialize *);
378extern int qdio_establish(struct qdio_initialize *); 379extern int qdio_establish(struct qdio_initialize *);
379extern int qdio_activate(struct ccw_device *); 380extern int qdio_activate(struct ccw_device *);
380 381extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
381extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 382 unsigned int);
382 int q_nr, unsigned int bufnr, unsigned int count); 383extern int qdio_start_irq(struct ccw_device *, int);
383extern int qdio_shutdown(struct ccw_device*, int); 384extern int qdio_stop_irq(struct ccw_device *, int);
385extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
386extern int qdio_shutdown(struct ccw_device *, int);
384extern int qdio_free(struct ccw_device *); 387extern int qdio_free(struct ccw_device *);
385extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*); 388extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
386 389
387#endif /* __QDIO_H__ */ 390#endif /* __QDIO_H__ */
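
The prototypes added above sketch out a polling interface: queue_start_poll lets qdio hand an interrupt-driven input queue over to the driver, which then harvests buffers with qdio_get_next_buffers() and re-arms the interrupt with qdio_start_irq(). A rough NAPI-shaped sketch, in which everything named foo_* is invented and only the qdio_* calls come from the header above:

    struct foo_card {                       /* invented upper-layer state */
            struct ccw_device *cdev;
            struct napi_struct napi;
    };

    static int foo_process_buffers(struct foo_card *card, int bufnr,
                                   int count, int error);  /* invented */

    /* wired up via the new queue_start_poll member of qdio_initialize */
    static void foo_queue_start_poll(struct ccw_device *cdev, int queue,
                                     unsigned long data)
    {
            struct foo_card *card = (struct foo_card *) data;

            napi_schedule(&card->napi);     /* defer into the poll loop */
    }

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_card *card = container_of(napi, struct foo_card, napi);
            int bufnr, error, n, done = 0;

            while (done < budget) {
                    n = qdio_get_next_buffers(card->cdev, 0, &bufnr, &error);
                    if (n <= 0)
                            break;
                    done += foo_process_buffers(card, bufnr, n, error);
            }
            if (done < budget) {
                    napi_complete(napi);
                    /* re-enable the interrupt for input queue 0; assumed
                     * to report whether new buffers raced in meanwhile */
                    if (qdio_start_irq(card->cdev, 0))
                            napi_reschedule(napi);
            }
            return done;
    }
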
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 8717809787fb..5d86bb803e94 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -444,8 +444,8 @@ static inline void fs_kfree_skb (struct sk_buff * skb)
444#define ROUND_NEAREST 3 444#define ROUND_NEAREST 3
445/********** make rate (not quite as much fun as Horizon) **********/ 445/********** make rate (not quite as much fun as Horizon) **********/
446 446
447static unsigned int make_rate (unsigned int rate, int r, 447static int make_rate(unsigned int rate, int r,
448 u16 * bits, unsigned int * actual) 448 u16 *bits, unsigned int *actual)
449{ 449{
450 unsigned char exp = -1; /* hush gcc */ 450 unsigned char exp = -1; /* hush gcc */
451 unsigned int man = -1; /* hush gcc */ 451 unsigned int man = -1; /* hush gcc */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ee9ddeb53417..8b358d7d958f 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -220,7 +220,7 @@ static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
220 while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) { 220 while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
221 dev->ffL.tcq_rd += 2; 221 dev->ffL.tcq_rd += 2;
222 if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 222 if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
223 dev->ffL.tcq_rd = dev->ffL.tcq_st; 223 dev->ffL.tcq_rd = dev->ffL.tcq_st;
224 if (dev->ffL.tcq_rd == dev->host_tcq_wr) 224 if (dev->ffL.tcq_rd == dev->host_tcq_wr)
225 return 0xFFFF; 225 return 0xFFFF;
226 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd); 226 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 33f8421c71cc..18fdd9703b48 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -8,7 +8,6 @@
8 8
9#include <linux/bug.h> 9#include <linux/bug.h>
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/ethtool.h>
12#include <linux/firewire.h> 11#include <linux/firewire.h>
13#include <linux/firewire-constants.h> 12#include <linux/firewire-constants.h>
14#include <linux/highmem.h> 13#include <linux/highmem.h>
@@ -1361,17 +1360,6 @@ static int fwnet_change_mtu(struct net_device *net, int new_mtu)
1361 return 0; 1360 return 0;
1362} 1361}
1363 1362
1364static void fwnet_get_drvinfo(struct net_device *net,
1365 struct ethtool_drvinfo *info)
1366{
1367 strcpy(info->driver, KBUILD_MODNAME);
1368 strcpy(info->bus_info, "ieee1394");
1369}
1370
1371static const struct ethtool_ops fwnet_ethtool_ops = {
1372 .get_drvinfo = fwnet_get_drvinfo,
1373};
1374
1375static const struct net_device_ops fwnet_netdev_ops = { 1363static const struct net_device_ops fwnet_netdev_ops = {
1376 .ndo_open = fwnet_open, 1364 .ndo_open = fwnet_open,
1377 .ndo_stop = fwnet_stop, 1365 .ndo_stop = fwnet_stop,
@@ -1390,7 +1378,6 @@ static void fwnet_init_dev(struct net_device *net)
1390 net->hard_header_len = FWNET_HLEN; 1378 net->hard_header_len = FWNET_HLEN;
1391 net->type = ARPHRD_IEEE1394; 1379 net->type = ARPHRD_IEEE1394;
1392 net->tx_queue_len = 10; 1380 net->tx_queue_len = 10;
1393 SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops);
1394} 1381}
1395 1382
1396/* caller must hold fwnet_device_mutex */ 1383/* caller must hold fwnet_device_mutex */
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index bc289e367e30..63403822330e 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -58,7 +58,6 @@
58#include <linux/tcp.h> 58#include <linux/tcp.h>
59#include <linux/skbuff.h> 59#include <linux/skbuff.h>
60#include <linux/bitops.h> 60#include <linux/bitops.h>
61#include <linux/ethtool.h>
62#include <asm/uaccess.h> 61#include <asm/uaccess.h>
63#include <asm/delay.h> 62#include <asm/delay.h>
64#include <asm/unaligned.h> 63#include <asm/unaligned.h>
@@ -173,8 +172,6 @@ static netdev_tx_t ether1394_tx(struct sk_buff *skb,
173 struct net_device *dev); 172 struct net_device *dev);
174static void ether1394_iso(struct hpsb_iso *iso); 173static void ether1394_iso(struct hpsb_iso *iso);
175 174
176static const struct ethtool_ops ethtool_ops;
177
178static int ether1394_write(struct hpsb_host *host, int srcid, int destid, 175static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
179 quadlet_t *data, u64 addr, size_t len, u16 flags); 176 quadlet_t *data, u64 addr, size_t len, u16 flags);
180static void ether1394_add_host(struct hpsb_host *host); 177static void ether1394_add_host(struct hpsb_host *host);
@@ -525,8 +522,6 @@ static void ether1394_init_dev(struct net_device *dev)
525 dev->header_ops = &ether1394_header_ops; 522 dev->header_ops = &ether1394_header_ops;
526 dev->netdev_ops = &ether1394_netdev_ops; 523 dev->netdev_ops = &ether1394_netdev_ops;
527 524
528 SET_ETHTOOL_OPS(dev, &ethtool_ops);
529
530 dev->watchdog_timeo = ETHER1394_TIMEOUT; 525 dev->watchdog_timeo = ETHER1394_TIMEOUT;
531 dev->flags = IFF_BROADCAST | IFF_MULTICAST; 526 dev->flags = IFF_BROADCAST | IFF_MULTICAST;
532 dev->features = NETIF_F_HIGHDMA; 527 dev->features = NETIF_F_HIGHDMA;
@@ -1695,17 +1690,6 @@ fail:
1695 return NETDEV_TX_OK; 1690 return NETDEV_TX_OK;
1696} 1691}
1697 1692
1698static void ether1394_get_drvinfo(struct net_device *dev,
1699 struct ethtool_drvinfo *info)
1700{
1701 strcpy(info->driver, driver_name);
1702 strcpy(info->bus_info, "ieee1394"); /* FIXME provide more detail? */
1703}
1704
1705static const struct ethtool_ops ethtool_ops = {
1706 .get_drvinfo = ether1394_get_drvinfo
1707};
1708
1709static int __init ether1394_init_module(void) 1693static int __init ether1394_init_module(void)
1710{ 1694{
1711 int err; 1695 int err;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 2978bdaa6b88..e54e79d4e2c1 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1515,8 +1515,13 @@ static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
1515 while (*s) { 1515 while (*s) {
1516 int digit1 = 0; 1516 int digit1 = 0;
1517 int digit2 = 0; 1517 int digit2 = 0;
1518 if (!isdigit(*s)) return -3; 1518 char *endp;
1519 while (isdigit(*s)) { digit1 = digit1*10 + (*s - '0'); s++; } 1519
1520 digit1 = simple_strtoul(s, &endp, 10);
1521 if (s == endp)
1522 return -3;
1523 s = endp;
1524
1520 if (digit1 <= 0 || digit1 > 30) return -4; 1525 if (digit1 <= 0 || digit1 > 30) return -4;
1521 if (*s == 0 || *s == ',' || *s == ' ') { 1526 if (*s == 0 || *s == ',' || *s == ' ') {
1522 bmask |= (1 << digit1); 1527 bmask |= (1 << digit1);
@@ -1526,8 +1531,12 @@ static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
1526 } 1531 }
1527 if (*s != '-') return -5; 1532 if (*s != '-') return -5;
1528 s++; 1533 s++;
1529 if (!isdigit(*s)) return -3; 1534
1530 while (isdigit(*s)) { digit2 = digit2*10 + (*s - '0'); s++; } 1535 digit2 = simple_strtoul(s, &endp, 10);
1536 if (s == endp)
1537 return -3;
1538 s = endp;
1539
1531 if (digit2 <= 0 || digit2 > 30) return -4; 1540 if (digit2 <= 0 || digit2 > 30) return -4;
1532 if (*s == 0 || *s == ',' || *s == ' ') { 1541 if (*s == 0 || *s == ',' || *s == ' ') {
1533 if (digit1 > digit2) 1542 if (digit1 > digit2)
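
The capidrv hunks above replace hand-rolled digit loops with simple_strtoul() plus an end-pointer check, which parses and detects "no digits here" in one step. The idiom in isolation, as a sketch; the -3/-4 error codes mirror the function above:

    #include <linux/kernel.h>       /* simple_strtoul() */

    /* parse one 1..30 number at *sp, advancing *sp past the digits */
    static int parse_channel(char **sp)
    {
            char *endp;
            unsigned long n = simple_strtoul(*sp, &endp, 10);

            if (*sp == endp)        /* no digits were consumed */
                    return -3;
            *sp = endp;
            if (n == 0 || n > 30)   /* range check, as in decodeFVteln() */
                    return -4;
            return n;
    }
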
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 70cf6bac7a5a..48e6d220f62c 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -77,7 +77,7 @@ static void deflect_timer_expire(ulong arg)
77 77
78 case DEFLECT_ALERT: 78 case DEFLECT_ALERT:
79 cs->ics.command = ISDN_CMD_REDIR; /* protocol */ 79 cs->ics.command = ISDN_CMD_REDIR; /* protocol */
80 strcpy(cs->ics.parm.setup.phone,cs->deflect_dest); 80 strlcpy(cs->ics.parm.setup.phone, cs->deflect_dest, sizeof(cs->ics.parm.setup.phone));
81 strcpy(cs->ics.parm.setup.eazmsn,"Testtext delayed"); 81 strcpy(cs->ics.parm.setup.eazmsn,"Testtext delayed");
82 divert_if.ll_cmd(&cs->ics); 82 divert_if.ll_cmd(&cs->ics);
83 spin_lock_irqsave(&divert_lock, flags); 83 spin_lock_irqsave(&divert_lock, flags);
@@ -251,7 +251,7 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
251 251
252 case 2: /* redir */ 252 case 2: /* redir */
253 del_timer(&cs->timer); 253 del_timer(&cs->timer);
254 strcpy(cs->ics.parm.setup.phone, to_nr); 254 strlcpy(cs->ics.parm.setup.phone, to_nr, sizeof(cs->ics.parm.setup.phone));
255 strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual"); 255 strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual");
256 ic.command = ISDN_CMD_REDIR; 256 ic.command = ISDN_CMD_REDIR;
257 if ((i = divert_if.ll_cmd(&ic))) 257 if ((i = divert_if.ll_cmd(&ic)))
@@ -480,7 +480,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
480 if (!cs->timer.expires) 480 if (!cs->timer.expires)
481 { strcpy(ic->parm.setup.eazmsn,"Testtext direct"); 481 { strcpy(ic->parm.setup.eazmsn,"Testtext direct");
482 ic->parm.setup.screen = dv->rule.screen; 482 ic->parm.setup.screen = dv->rule.screen;
483 strcpy(ic->parm.setup.phone,dv->rule.to_nr); 483 strlcpy(ic->parm.setup.phone, dv->rule.to_nr, sizeof(ic->parm.setup.phone));
484 cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */ 484 cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
485 cs->timer.expires = jiffies + (HZ * AUTODEL_TIME); 485 cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
486 retval = 5; 486 retval = 5;
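
The divert hunks above swap strcpy() for strlcpy() wherever the source string is externally supplied and the destination is a fixed-size field. A minimal sketch of the difference; the struct here is a stand-in for the real setup parameters:

    #include <linux/string.h>

    struct setup_example {          /* stand-in for the real struct */
            char phone[32];
    };

    static void set_phone(struct setup_example *setup, const char *nr)
    {
            /*
             * strlcpy() never writes past sizeof(setup->phone) and always
             * NUL-terminates; an over-long number is silently truncated
             * instead of overflowing the buffer as strcpy() would.
             */
            strlcpy(setup->phone, nr, sizeof(setup->phone));
    }
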
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index be5faf4aa868..5aa138eb0b3c 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -234,13 +234,14 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
234 count++; 234 count++;
235 if (count > trans_max) 235 if (count > trans_max)
236 count = trans_max; /* limit length */ 236 count = trans_max; /* limit length */
237 if ((skb = dev_alloc_skb(count))) { 237 skb = dev_alloc_skb(count);
238 dst = skb_put(skb, count); 238 if (skb) {
239 while (count--) 239 dst = skb_put(skb, count);
240 while (count--)
240 *dst++ = Read_hfc(cs, HFCSX_FIF_DRD); 241 *dst++ = Read_hfc(cs, HFCSX_FIF_DRD);
241 return(skb); 242 return skb;
242 } 243 } else
243 else return(NULL); /* no memory */ 244 return NULL; /* no memory */
244 } 245 }
245 246
246 do { 247 do {
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 51dc60da333b..f013ee15327c 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -3515,7 +3515,7 @@ isdn_tty_parse_at(modem_info * info)
3515{ 3515{
3516 atemu *m = &info->emu; 3516 atemu *m = &info->emu;
3517 char *p; 3517 char *p;
3518 char ds[40]; 3518 char ds[ISDN_MSNLEN];
3519 3519
3520#ifdef ISDN_DEBUG_AT 3520#ifdef ISDN_DEBUG_AT
3521 printk(KERN_DEBUG "AT: '%s'\n", m->mdmcmd); 3521 printk(KERN_DEBUG "AT: '%s'\n", m->mdmcmd);
@@ -3594,7 +3594,7 @@ isdn_tty_parse_at(modem_info * info)
3594 break; 3594 break;
3595 case '3': 3595 case '3':
3596 p++; 3596 p++;
3597 sprintf(ds, "\r\n%d", info->emu.charge); 3597 snprintf(ds, sizeof(ds), "\r\n%d", info->emu.charge);
3598 isdn_tty_at_cout(ds, info); 3598 isdn_tty_at_cout(ds, info);
3599 break; 3599 break;
3600 default:; 3600 default:;
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 713ef2b805a2..76d9e673b4e1 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1237,6 +1237,7 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
1237 if (dsp->cmx_delay) 1237 if (dsp->cmx_delay)
1238 dsp->rx_W = (dsp->rx_R + dsp->cmx_delay) 1238 dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
1239 & CMX_BUFF_MASK; 1239 & CMX_BUFF_MASK;
1240 else
1240 dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1)) 1241 dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
1241 & CMX_BUFF_MASK; 1242 & CMX_BUFF_MASK;
1242 } else { 1243 } else {
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 22f38e48ac4e..5b59796ed250 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -972,7 +972,7 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
972 if (debug & DEBUG_L1OIP_SOCKET) 972 if (debug & DEBUG_L1OIP_SOCKET)
973 printk(KERN_DEBUG "%s: got new ip address from user " 973 printk(KERN_DEBUG "%s: got new ip address from user "
974 "space.\n", __func__); 974 "space.\n", __func__);
975 l1oip_socket_open(hc); 975 l1oip_socket_open(hc);
976 break; 976 break;
977 case MISDN_CTRL_UNSETPEER: 977 case MISDN_CTRL_UNSETPEER:
978 if (debug & DEBUG_L1OIP_SOCKET) 978 if (debug & DEBUG_L1OIP_SOCKET)
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index baac246561b9..4777a1cbcd8d 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -337,10 +337,10 @@ el2_probe1(struct net_device *dev, int ioaddr)
337 /* Finish setting the board's parameters. */ 337 /* Finish setting the board's parameters. */
338 ei_status.stop_page = EL2_MB1_STOP_PG; 338 ei_status.stop_page = EL2_MB1_STOP_PG;
339 ei_status.word16 = wordlength; 339 ei_status.word16 = wordlength;
340 ei_status.reset_8390 = &el2_reset_8390; 340 ei_status.reset_8390 = el2_reset_8390;
341 ei_status.get_8390_hdr = &el2_get_8390_hdr; 341 ei_status.get_8390_hdr = el2_get_8390_hdr;
342 ei_status.block_input = &el2_block_input; 342 ei_status.block_input = el2_block_input;
343 ei_status.block_output = &el2_block_output; 343 ei_status.block_output = el2_block_output;
344 344
345 if (dev->irq == 2) 345 if (dev->irq == 2)
346 dev->irq = 9; 346 dev->irq = 9;
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 3bba835f1a21..8a6eb0c44486 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -734,7 +734,7 @@ static int corkscrew_open(struct net_device *dev)
734 init_timer(&vp->timer); 734 init_timer(&vp->timer);
735 vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; 735 vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
736 vp->timer.data = (unsigned long) dev; 736 vp->timer.data = (unsigned long) dev;
737 vp->timer.function = &corkscrew_timer; /* timer handler */ 737 vp->timer.function = corkscrew_timer; /* timer handler */
738 add_timer(&vp->timer); 738 add_timer(&vp->timer);
739 } else 739 } else
740 dev->if_port = vp->default_media; 740 dev->if_port = vp->default_media;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 85671adae455..e31a6d1919c6 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1739,7 +1739,7 @@ vortex_open(struct net_device *dev)
1739 1739
1740 /* Use the now-standard shared IRQ implementation. */ 1740 /* Use the now-standard shared IRQ implementation. */
1741 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 1741 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
1742 &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) { 1742 boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
1743 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); 1743 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
1744 goto err; 1744 goto err;
1745 } 1745 }
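
The three hunks above (3c503, 3c515, 3c59x) all drop a leading '&' from function names. This is purely cosmetic: in C, a function designator used in an expression decays to a pointer to the function, so both spellings yield the same value.

    static void handler(unsigned long data)
    {
            (void) data;
    }

    static void (*fn1)(unsigned long) = &handler;   /* explicit address-of */
    static void (*fn2)(unsigned long) = handler;    /* identical value: the
                                                       name decays anyway */
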
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 4a4f6b81e32d..237d4ea5a416 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -561,7 +561,7 @@ rx_status_loop:
561 if (cp_rx_csum_ok(status)) 561 if (cp_rx_csum_ok(status))
562 skb->ip_summed = CHECKSUM_UNNECESSARY; 562 skb->ip_summed = CHECKSUM_UNNECESSARY;
563 else 563 else
564 skb->ip_summed = CHECKSUM_NONE; 564 skb_checksum_none_assert(skb);
565 565
566 skb_put(skb, len); 566 skb_put(skb, len);
567 567
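
The 8139cp hunk above (and the acenic one further down) converts "skb->ip_summed = CHECKSUM_NONE" into skb_checksum_none_assert(). Freshly allocated skbs already carry CHECKSUM_NONE, so the assignment was redundant; the helper instead documents, and in debug builds verifies, that invariant. Its likely shape as a sketch; the authoritative definition lives in include/linux/skbuff.h:

    static inline void skb_checksum_none_assert(struct sk_buff *skb)
    {
    #ifdef DEBUG
            BUG_ON(skb->ip_summed != CHECKSUM_NONE);
    #endif
    }
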
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2cc81a54cbf3..53c4810b119e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2869,6 +2869,20 @@ config QLGE
2869 To compile this driver as a module, choose M here: the module 2869 To compile this driver as a module, choose M here: the module
2870 will be called qlge. 2870 will be called qlge.
2871 2871
2872config BNA
2873 tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
2874 depends on PCI
2875 ---help---
2876 This driver supports Brocade 1010/1020 10Gb CEE-capable Ethernet
2877 cards.
2878 To compile this driver as a module, choose M here: the module
2879 will be called bna.
2880
2881 For general information and support, go to the Brocade support
2882 website at:
2883
2884 <http://support.brocade.com>
2885
2872source "drivers/net/sfc/Kconfig" 2886source "drivers/net/sfc/Kconfig"
2873 2887
2874source "drivers/net/benet/Kconfig" 2888source "drivers/net/benet/Kconfig"
@@ -3202,6 +3216,17 @@ config PPPOE
3202 which contains instruction on how to use this driver (under 3216 which contains instruction on how to use this driver (under
3203 the heading "Kernel mode PPPoE"). 3217 the heading "Kernel mode PPPoE").
3204 3218
3219config PPTP
3220 tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
3221 depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
3222 help
3223 Support for PPP over IPv4 (Point-to-Point Tunneling Protocol).
3224
3225 This driver requires the pppd plugin to work in client mode or a
3226 modified pptpd (poptop) to work in server mode.
3227 See http://accel-pptp.sourceforge.net/ for information on how to
3228 use this module.
3229
3205config PPPOATM 3230config PPPOATM
3206 tristate "PPP over ATM" 3231 tristate "PPP over ATM"
3207 depends on ATM && PPP 3232 depends on ATM && PPP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3e8f150c4b14..18a277709a2a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_ENIC) += enic/
34obj-$(CONFIG_JME) += jme.o 34obj-$(CONFIG_JME) += jme.o
35obj-$(CONFIG_BE2NET) += benet/ 35obj-$(CONFIG_BE2NET) += benet/
36obj-$(CONFIG_VMXNET3) += vmxnet3/ 36obj-$(CONFIG_VMXNET3) += vmxnet3/
37obj-$(CONFIG_BNA) += bna/
37 38
38gianfar_driver-objs := gianfar.o \ 39gianfar_driver-objs := gianfar.o \
39 gianfar_ethtool.o \ 40 gianfar_ethtool.o \
@@ -162,6 +163,7 @@ obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
162obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o 163obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
163obj-$(CONFIG_PPPOE) += pppox.o pppoe.o 164obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
164obj-$(CONFIG_PPPOL2TP) += pppox.o 165obj-$(CONFIG_PPPOL2TP) += pppox.o
166obj-$(CONFIG_PPTP) += pppox.o pptp.o
165 167
166obj-$(CONFIG_SLIP) += slip.o 168obj-$(CONFIG_SLIP) += slip.o
167obj-$(CONFIG_SLHC) += slhc.o 169obj-$(CONFIG_SLHC) += slhc.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index b9a591604e5b..41d9911202d0 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2033,7 +2033,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2033 skb->csum = htons(csum); 2033 skb->csum = htons(csum);
2034 skb->ip_summed = CHECKSUM_COMPLETE; 2034 skb->ip_summed = CHECKSUM_COMPLETE;
2035 } else { 2035 } else {
2036 skb->ip_summed = CHECKSUM_NONE; 2036 skb_checksum_none_assert(skb);
2037 } 2037 }
2038 2038
2039 /* send it up */ 2039 /* send it up */
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 585c25f4b60c..58a0ab4923ee 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -396,7 +396,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
396 event_count = coal_conf->rx_event_count; 396 event_count = coal_conf->rx_event_count;
397 if( timeout > MAX_TIMEOUT || 397 if( timeout > MAX_TIMEOUT ||
398 event_count > MAX_EVENT_COUNT ) 398 event_count > MAX_EVENT_COUNT )
399 return -EINVAL; 399 return -EINVAL;
400 400
401 timeout = timeout * DELAY_TIMER_CONV; 401 timeout = timeout * DELAY_TIMER_CONV;
402 writel(VAL0|STINTEN, mmio+INTEN0); 402 writel(VAL0|STINTEN, mmio+INTEN0);
@@ -409,7 +409,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
409 event_count = coal_conf->tx_event_count; 409 event_count = coal_conf->tx_event_count;
410 if( timeout > MAX_TIMEOUT || 410 if( timeout > MAX_TIMEOUT ||
411 event_count > MAX_EVENT_COUNT ) 411 event_count > MAX_EVENT_COUNT )
412 return -EINVAL; 412 return -EINVAL;
413 413
414 414
415 timeout = timeout * DELAY_TIMER_CONV; 415 timeout = timeout * DELAY_TIMER_CONV;
@@ -903,18 +903,18 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
903} 903}
904 904
905/* 905/*
906This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values. 906 * This function reads the mib registers and returns the hardware statistics.
907*/ 907 * It updates previous internal driver statistics with new values.
908static struct net_device_stats *amd8111e_get_stats(struct net_device * dev) 908 */
909static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
909{ 910{
910 struct amd8111e_priv *lp = netdev_priv(dev); 911 struct amd8111e_priv *lp = netdev_priv(dev);
911 void __iomem *mmio = lp->mmio; 912 void __iomem *mmio = lp->mmio;
912 unsigned long flags; 913 unsigned long flags;
913 /* struct net_device_stats *prev_stats = &lp->prev_stats; */ 914 struct net_device_stats *new_stats = &dev->stats;
914 struct net_device_stats* new_stats = &lp->stats;
915 915
916 if(!lp->opened) 916 if (!lp->opened)
917 return &lp->stats; 917 return new_stats;
918 spin_lock_irqsave (&lp->lock, flags); 918 spin_lock_irqsave (&lp->lock, flags);
919 919
920 /* stats.rx_packets */ 920 /* stats.rx_packets */
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index ac36eb6981e3..b5926af03a7e 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -787,7 +787,6 @@ struct amd8111e_priv{
787 struct vlan_group *vlgrp; 787 struct vlan_group *vlgrp;
788#endif 788#endif
789 char opened; 789 char opened;
790 struct net_device_stats stats;
791 unsigned int drv_rx_errors; 790 unsigned int drv_rx_errors;
792 struct amd8111e_coalesce_conf coal_conf; 791 struct amd8111e_coalesce_conf coal_conf;
793 792
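
The amd8111e hunks above, and the am79c961a/ep93xx/ether1/ether3 conversions that follow, all apply the same pattern: drop the private struct net_device_stats copy and count directly into the one embedded in struct net_device, which also makes trivial .ndo_get_stats methods unnecessary. In outline, with foo_* invented:

    /* before: duplicate bookkeeping in driver-private state */
    struct foo_priv_old {
            struct net_device_stats stats;
            /* ... */
    };

    /* after: update the counters embedded in struct net_device */
    static void foo_count_rx(struct net_device *dev, unsigned int len)
    {
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += len;
            /* no .ndo_get_stats needed: the core falls back to dev->stats */
    }
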
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 8c496fb1ac9e..62f21106efec 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -300,8 +300,6 @@ am79c961_open(struct net_device *dev)
300 struct dev_priv *priv = netdev_priv(dev); 300 struct dev_priv *priv = netdev_priv(dev);
301 int ret; 301 int ret;
302 302
303 memset (&priv->stats, 0, sizeof (priv->stats));
304
305 ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev); 303 ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
306 if (ret) 304 if (ret)
307 return ret; 305 return ret;
@@ -347,8 +345,7 @@ am79c961_close(struct net_device *dev)
347 */ 345 */
348static struct net_device_stats *am79c961_getstats (struct net_device *dev) 346static struct net_device_stats *am79c961_getstats (struct net_device *dev)
349{ 347{
350 struct dev_priv *priv = netdev_priv(dev); 348 return &dev->stats;
351 return &priv->stats;
352} 349}
353 350
354static void am79c961_mc_hash(char *addr, unsigned short *hash) 351static void am79c961_mc_hash(char *addr, unsigned short *hash)
@@ -510,14 +507,14 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
510 507
511 if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) { 508 if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
512 am_writeword (dev, hdraddr + 2, RMD_OWN); 509 am_writeword (dev, hdraddr + 2, RMD_OWN);
513 priv->stats.rx_errors ++; 510 dev->stats.rx_errors++;
514 if (status & RMD_ERR) { 511 if (status & RMD_ERR) {
515 if (status & RMD_FRAM) 512 if (status & RMD_FRAM)
516 priv->stats.rx_frame_errors ++; 513 dev->stats.rx_frame_errors++;
517 if (status & RMD_CRC) 514 if (status & RMD_CRC)
518 priv->stats.rx_crc_errors ++; 515 dev->stats.rx_crc_errors++;
519 } else if (status & RMD_STP) 516 } else if (status & RMD_STP)
520 priv->stats.rx_length_errors ++; 517 dev->stats.rx_length_errors++;
521 continue; 518 continue;
522 } 519 }
523 520
@@ -531,12 +528,12 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
531 am_writeword(dev, hdraddr + 2, RMD_OWN); 528 am_writeword(dev, hdraddr + 2, RMD_OWN);
532 skb->protocol = eth_type_trans(skb, dev); 529 skb->protocol = eth_type_trans(skb, dev);
533 netif_rx(skb); 530 netif_rx(skb);
534 priv->stats.rx_bytes += len; 531 dev->stats.rx_bytes += len;
535 priv->stats.rx_packets ++; 532 dev->stats.rx_packets++;
536 } else { 533 } else {
537 am_writeword (dev, hdraddr + 2, RMD_OWN); 534 am_writeword (dev, hdraddr + 2, RMD_OWN);
538 printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); 535 printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
539 priv->stats.rx_dropped ++; 536 dev->stats.rx_dropped++;
540 break; 537 break;
541 } 538 }
542 } while (1); 539 } while (1);
@@ -565,7 +562,7 @@ am79c961_tx(struct net_device *dev, struct dev_priv *priv)
565 if (status & TMD_ERR) { 562 if (status & TMD_ERR) {
566 u_int status2; 563 u_int status2;
567 564
568 priv->stats.tx_errors ++; 565 dev->stats.tx_errors++;
569 566
570 status2 = am_readword (dev, hdraddr + 6); 567 status2 = am_readword (dev, hdraddr + 6);
571 568
@@ -575,18 +572,18 @@ am79c961_tx(struct net_device *dev, struct dev_priv *priv)
575 am_writeword (dev, hdraddr + 6, 0); 572 am_writeword (dev, hdraddr + 6, 0);
576 573
577 if (status2 & TST_RTRY) 574 if (status2 & TST_RTRY)
578 priv->stats.collisions += 16; 575 dev->stats.collisions += 16;
579 if (status2 & TST_LCOL) 576 if (status2 & TST_LCOL)
580 priv->stats.tx_window_errors ++; 577 dev->stats.tx_window_errors++;
581 if (status2 & TST_LCAR) 578 if (status2 & TST_LCAR)
582 priv->stats.tx_carrier_errors ++; 579 dev->stats.tx_carrier_errors++;
583 if (status2 & TST_UFLO) 580 if (status2 & TST_UFLO)
584 priv->stats.tx_fifo_errors ++; 581 dev->stats.tx_fifo_errors++;
585 continue; 582 continue;
586 } 583 }
587 priv->stats.tx_packets ++; 584 dev->stats.tx_packets++;
588 len = am_readword (dev, hdraddr + 4); 585 len = am_readword (dev, hdraddr + 4);
589 priv->stats.tx_bytes += -len; 586 dev->stats.tx_bytes += -len;
590 } while (priv->txtail != priv->txhead); 587 } while (priv->txtail != priv->txhead);
591 588
592 netif_wake_queue(dev); 589 netif_wake_queue(dev);
@@ -616,7 +613,7 @@ am79c961_interrupt(int irq, void *dev_id)
616 } 613 }
617 if (status & CSR0_MISS) { 614 if (status & CSR0_MISS) {
618 handled = 1; 615 handled = 1;
619 priv->stats.rx_dropped ++; 616 dev->stats.rx_dropped++;
620 } 617 }
621 if (status & CSR0_CERR) { 618 if (status & CSR0_CERR) {
622 handled = 1; 619 handled = 1;
diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/arm/am79c961a.h
index 483009fe6ec2..fd634d32756b 100644
--- a/drivers/net/arm/am79c961a.h
+++ b/drivers/net/arm/am79c961a.h
@@ -130,7 +130,6 @@
130#define ISALED0_LNKST 0x8000 130#define ISALED0_LNKST 0x8000
131 131
132struct dev_priv { 132struct dev_priv {
133 struct net_device_stats stats;
134 unsigned long rxbuffer[RX_BUFFERS]; 133 unsigned long rxbuffer[RX_BUFFERS];
135 unsigned long txbuffer[TX_BUFFERS]; 134 unsigned long txbuffer[TX_BUFFERS];
136 unsigned char txhead; 135 unsigned char txhead;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 4a5ec9470aa1..5a77001b6d10 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -175,8 +175,6 @@ struct ep93xx_priv
175 struct net_device *dev; 175 struct net_device *dev;
176 struct napi_struct napi; 176 struct napi_struct napi;
177 177
178 struct net_device_stats stats;
179
180 struct mii_if_info mii; 178 struct mii_if_info mii;
181 u8 mdc_divisor; 179 u8 mdc_divisor;
182}; 180};
@@ -230,12 +228,6 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
230 pr_info("mdio write timed out\n"); 228 pr_info("mdio write timed out\n");
231} 229}
232 230
233static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
234{
235 struct ep93xx_priv *ep = netdev_priv(dev);
236 return &(ep->stats);
237}
238
239static int ep93xx_rx(struct net_device *dev, int processed, int budget) 231static int ep93xx_rx(struct net_device *dev, int processed, int budget)
240{ 232{
241 struct ep93xx_priv *ep = netdev_priv(dev); 233 struct ep93xx_priv *ep = netdev_priv(dev);
@@ -267,15 +259,15 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
267 pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1); 259 pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
268 260
269 if (!(rstat0 & RSTAT0_RWE)) { 261 if (!(rstat0 & RSTAT0_RWE)) {
270 ep->stats.rx_errors++; 262 dev->stats.rx_errors++;
271 if (rstat0 & RSTAT0_OE) 263 if (rstat0 & RSTAT0_OE)
272 ep->stats.rx_fifo_errors++; 264 dev->stats.rx_fifo_errors++;
273 if (rstat0 & RSTAT0_FE) 265 if (rstat0 & RSTAT0_FE)
274 ep->stats.rx_frame_errors++; 266 dev->stats.rx_frame_errors++;
275 if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA)) 267 if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
276 ep->stats.rx_length_errors++; 268 dev->stats.rx_length_errors++;
277 if (rstat0 & RSTAT0_CRCE) 269 if (rstat0 & RSTAT0_CRCE)
278 ep->stats.rx_crc_errors++; 270 dev->stats.rx_crc_errors++;
279 goto err; 271 goto err;
280 } 272 }
281 273
@@ -300,10 +292,10 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
300 292
301 netif_receive_skb(skb); 293 netif_receive_skb(skb);
302 294
303 ep->stats.rx_packets++; 295 dev->stats.rx_packets++;
304 ep->stats.rx_bytes += length; 296 dev->stats.rx_bytes += length;
305 } else { 297 } else {
306 ep->stats.rx_dropped++; 298 dev->stats.rx_dropped++;
307 } 299 }
308 300
309err: 301err:
@@ -359,7 +351,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
359 int entry; 351 int entry;
360 352
361 if (unlikely(skb->len > MAX_PKT_SIZE)) { 353 if (unlikely(skb->len > MAX_PKT_SIZE)) {
362 ep->stats.tx_dropped++; 354 dev->stats.tx_dropped++;
363 dev_kfree_skb(skb); 355 dev_kfree_skb(skb);
364 return NETDEV_TX_OK; 356 return NETDEV_TX_OK;
365 } 357 }
@@ -415,17 +407,17 @@ static void ep93xx_tx_complete(struct net_device *dev)
415 if (tstat0 & TSTAT0_TXWE) { 407 if (tstat0 & TSTAT0_TXWE) {
416 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; 408 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
417 409
418 ep->stats.tx_packets++; 410 dev->stats.tx_packets++;
419 ep->stats.tx_bytes += length; 411 dev->stats.tx_bytes += length;
420 } else { 412 } else {
421 ep->stats.tx_errors++; 413 dev->stats.tx_errors++;
422 } 414 }
423 415
424 if (tstat0 & TSTAT0_OW) 416 if (tstat0 & TSTAT0_OW)
425 ep->stats.tx_window_errors++; 417 dev->stats.tx_window_errors++;
426 if (tstat0 & TSTAT0_TXU) 418 if (tstat0 & TSTAT0_TXU)
427 ep->stats.tx_fifo_errors++; 419 dev->stats.tx_fifo_errors++;
428 ep->stats.collisions += (tstat0 >> 16) & 0x1f; 420 dev->stats.collisions += (tstat0 >> 16) & 0x1f;
429 421
430 ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1); 422 ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
431 if (ep->tx_pending == TX_QUEUE_ENTRIES) 423 if (ep->tx_pending == TX_QUEUE_ENTRIES)
@@ -758,7 +750,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
758 .ndo_open = ep93xx_open, 750 .ndo_open = ep93xx_open,
759 .ndo_stop = ep93xx_close, 751 .ndo_stop = ep93xx_close,
760 .ndo_start_xmit = ep93xx_xmit, 752 .ndo_start_xmit = ep93xx_xmit,
761 .ndo_get_stats = ep93xx_get_stats,
762 .ndo_do_ioctl = ep93xx_ioctl, 753 .ndo_do_ioctl = ep93xx_ioctl,
763 .ndo_validate_addr = eth_validate_addr, 754 .ndo_validate_addr = eth_validate_addr,
764 .ndo_change_mtu = eth_change_mtu, 755 .ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index b17ab5153f51..b00781c02d5d 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -68,7 +68,6 @@ static int ether1_open(struct net_device *dev);
68static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); 68static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
69static irqreturn_t ether1_interrupt(int irq, void *dev_id); 69static irqreturn_t ether1_interrupt(int irq, void *dev_id);
70static int ether1_close(struct net_device *dev); 70static int ether1_close(struct net_device *dev);
71static struct net_device_stats *ether1_getstats(struct net_device *dev);
72static void ether1_setmulticastlist(struct net_device *dev); 71static void ether1_setmulticastlist(struct net_device *dev);
73static void ether1_timeout(struct net_device *dev); 72static void ether1_timeout(struct net_device *dev);
74 73
@@ -649,8 +648,6 @@ ether1_open (struct net_device *dev)
649 if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev)) 648 if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
650 return -EAGAIN; 649 return -EAGAIN;
651 650
652 memset (&priv(dev)->stats, 0, sizeof (struct net_device_stats));
653
654 if (ether1_init_for_open (dev)) { 651 if (ether1_init_for_open (dev)) {
655 free_irq (dev->irq, dev); 652 free_irq (dev->irq, dev);
656 return -EAGAIN; 653 return -EAGAIN;
@@ -673,7 +670,7 @@ ether1_timeout(struct net_device *dev)
673 if (ether1_init_for_open (dev)) 670 if (ether1_init_for_open (dev))
674 printk (KERN_ERR "%s: unable to restart interface\n", dev->name); 671 printk (KERN_ERR "%s: unable to restart interface\n", dev->name);
675 672
676 priv(dev)->stats.tx_errors++; 673 dev->stats.tx_errors++;
677 netif_wake_queue(dev); 674 netif_wake_queue(dev);
678} 675}
679 676
@@ -802,21 +799,21 @@ again:
802 799
803 while (nop.nop_status & STAT_COMPLETE) { 800 while (nop.nop_status & STAT_COMPLETE) {
804 if (nop.nop_status & STAT_OK) { 801 if (nop.nop_status & STAT_OK) {
805 priv(dev)->stats.tx_packets ++; 802 dev->stats.tx_packets++;
806 priv(dev)->stats.collisions += (nop.nop_status & STAT_COLLISIONS); 803 dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
807 } else { 804 } else {
808 priv(dev)->stats.tx_errors ++; 805 dev->stats.tx_errors++;
809 806
810 if (nop.nop_status & STAT_COLLAFTERTX) 807 if (nop.nop_status & STAT_COLLAFTERTX)
811 priv(dev)->stats.collisions ++; 808 dev->stats.collisions++;
812 if (nop.nop_status & STAT_NOCARRIER) 809 if (nop.nop_status & STAT_NOCARRIER)
813 priv(dev)->stats.tx_carrier_errors ++; 810 dev->stats.tx_carrier_errors++;
814 if (nop.nop_status & STAT_TXLOSTCTS) 811 if (nop.nop_status & STAT_TXLOSTCTS)
815 printk (KERN_WARNING "%s: cts lost\n", dev->name); 812 printk (KERN_WARNING "%s: cts lost\n", dev->name);
816 if (nop.nop_status & STAT_TXSLOWDMA) 813 if (nop.nop_status & STAT_TXSLOWDMA)
817 priv(dev)->stats.tx_fifo_errors ++; 814 dev->stats.tx_fifo_errors++;
818 if (nop.nop_status & STAT_COLLEXCESSIVE) 815 if (nop.nop_status & STAT_COLLEXCESSIVE)
819 priv(dev)->stats.collisions += 16; 816 dev->stats.collisions += 16;
820 } 817 }
821 818
822 if (nop.nop_link == caddr) { 819 if (nop.nop_link == caddr) {
@@ -879,13 +876,13 @@ ether1_recv_done (struct net_device *dev)
879 876
880 skb->protocol = eth_type_trans (skb, dev); 877 skb->protocol = eth_type_trans (skb, dev);
881 netif_rx (skb); 878 netif_rx (skb);
882 priv(dev)->stats.rx_packets ++; 879 dev->stats.rx_packets++;
883 } else 880 } else
884 priv(dev)->stats.rx_dropped ++; 881 dev->stats.rx_dropped++;
885 } else { 882 } else {
886 printk(KERN_WARNING "%s: %s\n", dev->name, 883 printk(KERN_WARNING "%s: %s\n", dev->name,
887 (rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid"); 884 (rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
888 priv(dev)->stats.rx_dropped ++; 885 dev->stats.rx_dropped++;
889 } 886 }
890 887
891 nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS); 888 nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
@@ -939,7 +936,7 @@ ether1_interrupt (int irq, void *dev_id)
939 printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name); 936 printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
940 ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS); 937 ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
941 writeb(CTRL_CA, REG_CONTROL); 938 writeb(CTRL_CA, REG_CONTROL);
942 priv(dev)->stats.rx_dropped ++; /* we suspended due to lack of buffer space */ 939 dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */
943 } else 940 } else
944 printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name, 941 printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
945 ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS)); 942 ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
@@ -962,12 +959,6 @@ ether1_close (struct net_device *dev)
962 return 0; 959 return 0;
963} 960}
964 961
965static struct net_device_stats *
966ether1_getstats (struct net_device *dev)
967{
968 return &priv(dev)->stats;
969}
970
971/* 962/*
972 * Set or clear the multicast filter for this adaptor. 963 * Set or clear the multicast filter for this adaptor.
973 * num_addrs == -1 Promiscuous mode, receive all packets. 964 * num_addrs == -1 Promiscuous mode, receive all packets.
@@ -994,7 +985,6 @@ static const struct net_device_ops ether1_netdev_ops = {
994 .ndo_open = ether1_open, 985 .ndo_open = ether1_open,
995 .ndo_stop = ether1_close, 986 .ndo_stop = ether1_close,
996 .ndo_start_xmit = ether1_sendpacket, 987 .ndo_start_xmit = ether1_sendpacket,
997 .ndo_get_stats = ether1_getstats,
998 .ndo_set_multicast_list = ether1_setmulticastlist, 988 .ndo_set_multicast_list = ether1_setmulticastlist,
999 .ndo_tx_timeout = ether1_timeout, 989 .ndo_tx_timeout = ether1_timeout,
1000 .ndo_validate_addr = eth_validate_addr, 990 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/ether1.h b/drivers/net/arm/ether1.h
index c8a4b2389d85..3a5830ab3dc7 100644
--- a/drivers/net/arm/ether1.h
+++ b/drivers/net/arm/ether1.h
@@ -38,7 +38,6 @@
38 38
39struct ether1_priv { 39struct ether1_priv {
40 void __iomem *base; 40 void __iomem *base;
41 struct net_device_stats stats;
42 unsigned int tx_link; 41 unsigned int tx_link;
43 unsigned int tx_head; 42 unsigned int tx_head;
44 volatile unsigned int tx_tail; 43 volatile unsigned int tx_tail;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1361b7367c28..44a8746f4014 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -81,7 +81,6 @@ static int ether3_open (struct net_device *dev);
 static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t ether3_interrupt (int irq, void *dev_id);
 static int ether3_close (struct net_device *dev);
-static struct net_device_stats *ether3_getstats (struct net_device *dev);
 static void ether3_setmulticastlist (struct net_device *dev);
 static void ether3_timeout(struct net_device *dev);
 
@@ -323,8 +322,6 @@ ether3_init_for_open(struct net_device *dev)
 {
 	int i;
 
-	memset(&priv(dev)->stats, 0, sizeof(struct net_device_stats));
-
 	/* Reset the chip */
 	ether3_outw(CFG2_RESET, REG_CONFIG2);
 	udelay(4);
@@ -442,15 +439,6 @@ ether3_close(struct net_device *dev)
 }
 
 /*
- * Get the current statistics. This may be called with the card open or
- * closed.
- */
-static struct net_device_stats *ether3_getstats(struct net_device *dev)
-{
-	return &priv(dev)->stats;
-}
-
-/*
  * Set or clear promiscuous/multicast mode filter for this adaptor.
 *
 * We don't attempt any packet filtering. The card may have a SEEQ 8004
@@ -490,7 +478,7 @@ static void ether3_timeout(struct net_device *dev)
 	local_irq_restore(flags);
 
 	priv(dev)->regs.config2 |= CFG2_CTRLO;
-	priv(dev)->stats.tx_errors += 1;
+	dev->stats.tx_errors += 1;
 	ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
 	priv(dev)->tx_head = priv(dev)->tx_tail = 0;
 
@@ -509,7 +497,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
 
 	if (priv(dev)->broken) {
 		dev_kfree_skb(skb);
-		priv(dev)->stats.tx_dropped ++;
+		dev->stats.tx_dropped++;
 		netif_start_queue(dev);
 		return NETDEV_TX_OK;
 	}
@@ -673,7 +661,7 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
 		} else
 			goto dropping;
 	} else {
-		struct net_device_stats *stats = &priv(dev)->stats;
+		struct net_device_stats *stats = &dev->stats;
 		ether3_outw(next_ptr >> 8, REG_RECVEND);
 		if (status & RXSTAT_OVERSIZE)	stats->rx_over_errors ++;
 		if (status & RXSTAT_CRCERROR)	stats->rx_crc_errors ++;
@@ -685,14 +673,14 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
 	while (-- maxcnt);
 
 done:
-	priv(dev)->stats.rx_packets += received;
+	dev->stats.rx_packets += received;
 	priv(dev)->rx_head = next_ptr;
 	/*
 	 * If rx went off line, then that means that the buffer may be full. We
 	 * have dropped at least one packet.
 	 */
 	if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
-		priv(dev)->stats.rx_dropped ++;
+		dev->stats.rx_dropped++;
 		ether3_outw(next_ptr, REG_RECVPTR);
 		ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
 	}
@@ -710,7 +698,7 @@ dropping:{
 		last_warned = jiffies;
 		printk("%s: memory squeeze, dropping packet.\n", dev->name);
 	}
-	priv(dev)->stats.rx_dropped ++;
+	dev->stats.rx_dropped++;
 	goto done;
 	}
 }
@@ -743,13 +731,13 @@ static void ether3_tx(struct net_device *dev)
 	 * Update errors
 	 */
 	if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS)))
-		priv(dev)->stats.tx_packets++;
+		dev->stats.tx_packets++;
 	else {
-		priv(dev)->stats.tx_errors ++;
+		dev->stats.tx_errors++;
 		if (status & TXSTAT_16COLLISIONS)
-			priv(dev)->stats.collisions += 16;
+			dev->stats.collisions += 16;
 		if (status & TXSTAT_BABBLED)
-			priv(dev)->stats.tx_fifo_errors ++;
+			dev->stats.tx_fifo_errors++;
 	}
 
 	tx_tail = (tx_tail + 1) & 15;
@@ -773,7 +761,6 @@ static const struct net_device_ops ether3_netdev_ops = {
 	.ndo_open		= ether3_open,
 	.ndo_stop		= ether3_close,
 	.ndo_start_xmit		= ether3_sendpacket,
-	.ndo_get_stats		= ether3_getstats,
 	.ndo_set_multicast_list	= ether3_setmulticastlist,
 	.ndo_tx_timeout		= ether3_timeout,
 	.ndo_validate_addr	= eth_validate_addr,
diff --git a/drivers/net/arm/ether3.h b/drivers/net/arm/ether3.h
index 1921a3a07da7..2db63b08bdf3 100644
--- a/drivers/net/arm/ether3.h
+++ b/drivers/net/arm/ether3.h
@@ -164,7 +164,6 @@ struct dev_priv {
 	unsigned char tx_head;		/* buffer nr to insert next packet */
 	unsigned char tx_tail;		/* buffer nr of transmitting packet */
 	unsigned int rx_head;		/* address to fetch next packet from */
-	struct net_device_stats stats;
 	struct timer_list timer;
 	int broken;			/* 0 = ok, 1 = something went wrong */
 };
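
The ether3 hunks above show the conversion this series applies driver by driver: the private struct net_device_stats copy and its ndo_get_stats accessor go away, and counters are bumped directly in the stats member already embedded in struct net_device, which the core reports by default. A minimal sketch of the pattern, using a hypothetical foo_xmit() rather than anything from these drivers:

/*
 * Sketch only: with counters kept in dev->stats, no private copy and
 * no .ndo_get_stats hook are needed; the core falls back to dev->stats.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... hand the skb to hardware here ... */
	dev->stats.tx_packets++;	/* was: priv->stats.tx_packets++ */
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
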
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 52abbbdf8a08..ef4115b897bf 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -559,7 +559,6 @@ struct atl1c_adapter {
559 struct napi_struct napi; 559 struct napi_struct napi;
560 struct atl1c_hw hw; 560 struct atl1c_hw hw;
561 struct atl1c_hw_stats hw_stats; 561 struct atl1c_hw_stats hw_stats;
562 struct net_device_stats net_stats;
563 struct mii_if_info mii; /* MII interface info */ 562 struct mii_if_info mii; /* MII interface info */
564 u16 rx_buffer_len; 563 u16 rx_buffer_len;
565 564
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index d8501f060957..919080b2c3a5 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -480,7 +480,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
 		atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
 	}
 	if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
-		|| hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) {
+		|| hw->nic_type == athr_l2c) {
 		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
 		atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
 	}
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index c7b8ef507ebd..553230eb365c 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1562,7 +1562,7 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
-	struct net_device_stats *net_stats = &adapter->net_stats;
+	struct net_device_stats *net_stats = &netdev->stats;
 
 	atl1c_update_hw_stats(adapter);
 	net_stats->rx_packets = hw_stats->rx_ok;
@@ -1590,7 +1590,7 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
 	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
 	net_stats->tx_window_errors = hw_stats->tx_late_col;
 
-	return &adapter->net_stats;
+	return net_stats;
 }
 
 static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
@@ -1700,7 +1700,7 @@ static irqreturn_t atl1c_intr(int irq, void *data)
 
 		/* link event */
 		if (status & (ISR_GPHY | ISR_MANUAL)) {
-			adapter->net_stats.tx_carrier_errors++;
+			netdev->stats.tx_carrier_errors++;
 			atl1c_link_chg_event(adapter);
 			break;
 		}
@@ -1719,7 +1719,7 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
 	 * cannot figure out if the packet is fragmented or not,
 	 * so we tell the KERNEL CHECKSUM_NONE
 	 */
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
}
 
 static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
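
skb_checksum_none_assert() is the helper used in place of the open-coded skb->ip_summed = CHECKSUM_NONE stores in this and the following hunks. A freshly received skb already carries CHECKSUM_NONE, so the helper merely asserts that invariant on debug builds instead of rewriting the field; its shape in the skbuff.h of this era is roughly the following (paraphrased for reference, not part of this patch):

/* Approximate definition, for reference only */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}
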
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 1acea5774e89..56ace3fbe40d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1331,7 +1331,7 @@ static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
 	u16 pkt_flags;
 	u16 err_flags;
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 	pkt_flags = prrs->pkt_flag;
 	err_flags = prrs->err_flag;
 	if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
@@ -2316,7 +2316,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
 	netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
 
 	init_timer(&adapter->phy_config_timer);
-	adapter->phy_config_timer.function = &atl1e_phy_config;
+	adapter->phy_config_timer.function = atl1e_phy_config;
 	adapter->phy_config_timer.data = (unsigned long) adapter;
 
 	/* get user settings */
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 63b9ba0cc67e..e1e0171d6e62 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1805,7 +1805,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 	 * the higher layers and let it be sorted out there.
 	 */
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
 	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
 		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
@@ -3036,7 +3036,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
 
-	setup_timer(&adapter->phy_config_timer, &atl1_phy_config,
+	setup_timer(&adapter->phy_config_timer, atl1_phy_config,
 			(unsigned long)adapter);
 	adapter->phy_timer_pending = false;
 
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 8da87383fb39..29c0265ccc5d 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -51,10 +51,10 @@
 
 #define ATL2_DRV_VERSION "2.2.3"
 
-static char atl2_driver_name[] = "atl2";
+static const char atl2_driver_name[] = "atl2";
 static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
-static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
-static char atl2_driver_version[] = ATL2_DRV_VERSION;
+static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
+static const char atl2_driver_version[] = ATL2_DRV_VERSION;
 
 MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
 MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -1444,11 +1444,11 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
 	atl2_check_options(adapter);
 
 	init_timer(&adapter->watchdog_timer);
-	adapter->watchdog_timer.function = &atl2_watchdog;
+	adapter->watchdog_timer.function = atl2_watchdog;
 	adapter->watchdog_timer.data = (unsigned long) adapter;
 
 	init_timer(&adapter->phy_config_timer);
-	adapter->phy_config_timer.function = &atl2_phy_config;
+	adapter->phy_config_timer.function = atl2_phy_config;
 	adapter->phy_config_timer.data = (unsigned long) adapter;
 
 	INIT_WORK(&adapter->reset_task, atl2_reset_task);
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index bd2f9d331dac..dfd96b20547f 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -445,7 +445,7 @@ static int net_open(struct net_device *dev)
 	init_timer(&lp->timer);
 	lp->timer.expires = jiffies + TIMED_CHECKER;
 	lp->timer.data = (unsigned long)dev;
-	lp->timer.function = &atp_timed_checker;    /* timer handler */
+	lp->timer.function = atp_timed_checker;    /* timer handler */
 	add_timer(&lp->timer);
 
 	netif_start_queue(dev);
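
The timer hunks in the atl1e, atl1, atl2 and atp changes are behavior-neutral: in C a function designator decays to a pointer, so &atp_timed_checker and atp_timed_checker yield the same value, and kernel style simply omits the &. A standalone illustration (plain C, not kernel code):

#include <assert.h>

static void handler(unsigned long data)
{
	(void)data;
}

int main(void)
{
	void (*f1)(unsigned long) = &handler;	/* explicit address-of */
	void (*f2)(unsigned long) = handler;	/* implicit decay */

	assert(f1 == f2);	/* same pointer either way */
	return 0;
}
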
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 15ae6df2ff00..43489f89c142 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -13,7 +13,7 @@
  *  converted to use linux-2.6.x's PHY framework
  *
  * Author: MontaVista Software, Inc.
- * 	ppopov@mvista.com or source@mvista.com
+ *	ppopov@mvista.com or source@mvista.com
 *
 * ########################################################################
 *
@@ -34,6 +34,8 @@
 *
 *
 */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
@@ -56,11 +58,11 @@
 #include <linux/crc32.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
 
-#include <asm/cpu.h>
 #include <asm/mipsregs.h>
 #include <asm/irq.h>
-#include <asm/io.h>
 #include <asm/processor.h>
 
 #include <au1000.h>
@@ -152,11 +154,11 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
 
 	spin_lock_irqsave(&aup->lock, flags);
 
-	if(force_reset || (!aup->mac_enabled)) {
-		*aup->enable = MAC_EN_CLOCK_ENABLE;
+	if (force_reset || (!aup->mac_enabled)) {
+		writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
 		au_sync_delay(2);
-		*aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
-				| MAC_EN_CLOCK_ENABLE);
+		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
+				| MAC_EN_CLOCK_ENABLE), &aup->enable);
 		au_sync_delay(2);
 
 		aup->mac_enabled = 1;
@@ -171,12 +173,12 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
 static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
 {
 	struct au1000_private *aup = netdev_priv(dev);
-	volatile u32 *const mii_control_reg = &aup->mac->mii_control;
-	volatile u32 *const mii_data_reg = &aup->mac->mii_data;
+	u32 *const mii_control_reg = &aup->mac->mii_control;
+	u32 *const mii_data_reg = &aup->mac->mii_data;
 	u32 timedout = 20;
 	u32 mii_control;
 
-	while (*mii_control_reg & MAC_MII_BUSY) {
+	while (readl(mii_control_reg) & MAC_MII_BUSY) {
 		mdelay(1);
 		if (--timedout == 0) {
 			netdev_err(dev, "read_MII busy timeout!!\n");
@@ -187,29 +189,29 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
 	mii_control = MAC_SET_MII_SELECT_REG(reg) |
 		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
 
-	*mii_control_reg = mii_control;
+	writel(mii_control, mii_control_reg);
 
 	timedout = 20;
-	while (*mii_control_reg & MAC_MII_BUSY) {
+	while (readl(mii_control_reg) & MAC_MII_BUSY) {
 		mdelay(1);
 		if (--timedout == 0) {
 			netdev_err(dev, "mdio_read busy timeout!!\n");
 			return -1;
 		}
 	}
-	return (int)*mii_data_reg;
+	return readl(mii_data_reg);
 }
 
 static void au1000_mdio_write(struct net_device *dev, int phy_addr,
 			      int reg, u16 value)
 {
 	struct au1000_private *aup = netdev_priv(dev);
-	volatile u32 *const mii_control_reg = &aup->mac->mii_control;
-	volatile u32 *const mii_data_reg = &aup->mac->mii_data;
+	u32 *const mii_control_reg = &aup->mac->mii_control;
+	u32 *const mii_data_reg = &aup->mac->mii_data;
 	u32 timedout = 20;
 	u32 mii_control;
 
-	while (*mii_control_reg & MAC_MII_BUSY) {
+	while (readl(mii_control_reg) & MAC_MII_BUSY) {
 		mdelay(1);
 		if (--timedout == 0) {
 			netdev_err(dev, "mdio_write busy timeout!!\n");
@@ -220,18 +222,22 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
 	mii_control = MAC_SET_MII_SELECT_REG(reg) |
 		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
 
-	*mii_data_reg = value;
-	*mii_control_reg = mii_control;
+	writel(value, mii_data_reg);
+	writel(mii_control, mii_control_reg);
 }
 
 static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
 {
 	/* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
-	 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
+	 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
+	 */
 	struct net_device *const dev = bus->priv;
 
-	au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
-				    * mii_bus is enabled */
+	/* make sure the MAC associated with this
+	 * mii_bus is enabled
+	 */
+	au1000_enable_mac(dev, 0);
+
 	return au1000_mdio_read(dev, phy_addr, regnum);
 }
 
@@ -240,8 +246,11 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
 {
 	struct net_device *const dev = bus->priv;
 
-	au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
-				    * mii_bus is enabled */
+	/* make sure the MAC associated with this
+	 * mii_bus is enabled
+	 */
+	au1000_enable_mac(dev, 0);
+
 	au1000_mdio_write(dev, phy_addr, regnum, value);
 	return 0;
 }
@@ -250,28 +259,37 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
 {
 	struct net_device *const dev = bus->priv;
 
-	au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
-				    * mii_bus is enabled */
+	/* make sure the MAC associated with this
+	 * mii_bus is enabled
+	 */
+	au1000_enable_mac(dev, 0);
+
 	return 0;
 }
 
 static void au1000_hard_stop(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
+	u32 reg;
 
 	netif_dbg(aup, drv, dev, "hard stop\n");
 
-	aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+	reg = readl(&aup->mac->control);
+	reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+	writel(reg, &aup->mac->control);
 	au_sync_delay(10);
 }
 
 static void au1000_enable_rx_tx(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
+	u32 reg;
 
 	netif_dbg(aup, hw, dev, "enable_rx_tx\n");
 
-	aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+	reg = readl(&aup->mac->control);
+	reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+	writel(reg, &aup->mac->control);
 	au_sync_delay(10);
 }
 
@@ -281,6 +299,7 @@ au1000_adjust_link(struct net_device *dev)
 	struct au1000_private *aup = netdev_priv(dev);
 	struct phy_device *phydev = aup->phy_dev;
 	unsigned long flags;
+	u32 reg;
 
 	int status_change = 0;
 
@@ -312,14 +331,15 @@ au1000_adjust_link(struct net_device *dev)
 		/* switching duplex mode requires to disable rx and tx! */
 		au1000_hard_stop(dev);
 
-		if (DUPLEX_FULL == phydev->duplex)
-			aup->mac->control = ((aup->mac->control
-					     | MAC_FULL_DUPLEX)
-					     & ~MAC_DISABLE_RX_OWN);
-		else
-			aup->mac->control = ((aup->mac->control
-					      & ~MAC_FULL_DUPLEX)
-					     | MAC_DISABLE_RX_OWN);
+		reg = readl(&aup->mac->control);
+		if (DUPLEX_FULL == phydev->duplex) {
+			reg |= MAC_FULL_DUPLEX;
+			reg &= ~MAC_DISABLE_RX_OWN;
+		} else {
+			reg &= ~MAC_FULL_DUPLEX;
+			reg |= MAC_DISABLE_RX_OWN;
+		}
+		writel(reg, &aup->mac->control);
 		au_sync_delay(1);
 
 		au1000_enable_rx_tx(dev);
@@ -353,10 +373,11 @@ au1000_adjust_link(struct net_device *dev)
 	}
 }
 
-static int au1000_mii_probe (struct net_device *dev)
+static int au1000_mii_probe(struct net_device *dev)
 {
 	struct au1000_private *const aup = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
+	int phy_addr;
 
 	if (aup->phy_static_config) {
 		BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
@@ -366,42 +387,46 @@ static int au1000_mii_probe (struct net_device *dev)
 	else
 		netdev_info(dev, "using PHY-less setup\n");
 	return 0;
-	} else {
-		int phy_addr;
-
-		/* find the first (lowest address) PHY on the current MAC's MII bus */
-		for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
-			if (aup->mii_bus->phy_map[phy_addr]) {
-				phydev = aup->mii_bus->phy_map[phy_addr];
-				if (!aup->phy_search_highest_addr)
-					break; /* break out with first one found */
-			}
-
-		if (aup->phy1_search_mac0) {
-			/* try harder to find a PHY */
-			if (!phydev && (aup->mac_id == 1)) {
-				/* no PHY found, maybe we have a dual PHY? */
-				dev_info(&dev->dev, ": no PHY found on MAC1, "
-					"let's see if it's attached to MAC0...\n");
-
-				/* find the first (lowest address) non-attached PHY on
-				 * the MAC0 MII bus */
-				for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
-					struct phy_device *const tmp_phydev =
-						aup->mii_bus->phy_map[phy_addr];
-
-					if (aup->mac_id == 1)
-						break;
-
-					if (!tmp_phydev)
-						continue; /* no PHY here... */
+	}
 
-					if (tmp_phydev->attached_dev)
-						continue; /* already claimed by MAC0 */
+	/* find the first (lowest address) PHY
+	 * on the current MAC's MII bus
+	 */
+	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+		if (aup->mii_bus->phy_map[phy_addr]) {
+			phydev = aup->mii_bus->phy_map[phy_addr];
+			if (!aup->phy_search_highest_addr)
+				/* break out with first one found */
+				break;
+		}
 
-					phydev = tmp_phydev;
-					break; /* found it */
-				}
+	if (aup->phy1_search_mac0) {
+		/* try harder to find a PHY */
+		if (!phydev && (aup->mac_id == 1)) {
+			/* no PHY found, maybe we have a dual PHY? */
+			dev_info(&dev->dev, ": no PHY found on MAC1, "
+				"let's see if it's attached to MAC0...\n");
+
+			/* find the first (lowest address) non-attached
+			 * PHY on the MAC0 MII bus
+			 */
+			for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+				struct phy_device *const tmp_phydev =
+					aup->mii_bus->phy_map[phy_addr];
+
+				if (aup->mac_id == 1)
+					break;
+
+				/* no PHY here... */
+				if (!tmp_phydev)
+					continue;
+
+				/* already claimed by MAC0 */
+				if (tmp_phydev->attached_dev)
+					continue;
+
+				phydev = tmp_phydev;
+				break; /* found it */
 			}
 		}
 	}
@@ -452,20 +477,20 @@ static int au1000_mii_probe (struct net_device *dev)
 * has the virtual and dma address of a buffer suitable for
 * both, receive and transmit operations.
 */
-static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
+static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
 {
-	db_dest_t *pDB;
+	struct db_dest *pDB;
 	pDB = aup->pDBfree;
 
-	if (pDB) {
+	if (pDB)
 		aup->pDBfree = pDB->pnext;
-	}
+
 	return pDB;
 }
 
-void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
 {
-	db_dest_t *pDBfree = aup->pDBfree;
+	struct db_dest *pDBfree = aup->pDBfree;
 	if (pDBfree)
 		pDBfree->pnext = pDB;
 	aup->pDBfree = pDB;
@@ -478,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
 
 	au1000_hard_stop(dev);
 
-	*aup->enable = MAC_EN_CLOCK_ENABLE;
+	writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
 	au_sync_delay(2);
-	*aup->enable = 0;
+	writel(0, &aup->enable);
 	au_sync_delay(2);
 
 	aup->tx_full = 0;
@@ -507,7 +532,7 @@ static void au1000_reset_mac(struct net_device *dev)
 
 	spin_lock_irqsave(&aup->lock, flags);
 
-	au1000_reset_mac_unlocked (dev);
+	au1000_reset_mac_unlocked(dev);
 
 	spin_unlock_irqrestore(&aup->lock, flags);
 }
@@ -524,11 +549,13 @@ au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
 
 	for (i = 0; i < NUM_RX_DMA; i++) {
 		aup->rx_dma_ring[i] =
-			(volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
+			(struct rx_dma *)
+					(rx_base + sizeof(struct rx_dma)*i);
 	}
 	for (i = 0; i < NUM_TX_DMA; i++) {
 		aup->tx_dma_ring[i] =
-			(volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
+			(struct tx_dma *)
+					(tx_base + sizeof(struct tx_dma)*i);
 	}
 }
 
@@ -616,18 +643,21 @@ static int au1000_init(struct net_device *dev)
 
 	spin_lock_irqsave(&aup->lock, flags);
 
-	aup->mac->control = 0;
+	writel(0, &aup->mac->control);
 	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
 	aup->tx_tail = aup->tx_head;
 	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
 
-	aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
-	aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
-		dev->dev_addr[1]<<8 | dev->dev_addr[0];
+	writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
+				&aup->mac->mac_addr_high);
+	writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
+		dev->dev_addr[1]<<8 | dev->dev_addr[0],
+				&aup->mac->mac_addr_low);
 
-	for (i = 0; i < NUM_RX_DMA; i++) {
+
+	for (i = 0; i < NUM_RX_DMA; i++)
 		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
-	}
+
 	au_sync();
 
 	control = MAC_RX_ENABLE | MAC_TX_ENABLE;
@@ -643,8 +673,8 @@ static int au1000_init(struct net_device *dev)
 		control |= MAC_FULL_DUPLEX;
 	}
 
-	aup->mac->control = control;
-	aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
+	writel(control, &aup->mac->control);
+	writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
 	au_sync();
 
 	spin_unlock_irqrestore(&aup->lock, flags);
@@ -681,9 +711,9 @@ static int au1000_rx(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
 	struct sk_buff *skb;
-	volatile rx_dma_t *prxd;
+	struct rx_dma *prxd;
 	u32 buff_stat, status;
-	db_dest_t *pDB;
+	struct db_dest *pDB;
 	u32 frmlen;
 
 	netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
@@ -713,24 +743,26 @@ static int au1000_rx(struct net_device *dev)
 			netif_rx(skb);	/* pass the packet to upper layers */
 		} else {
 			if (au1000_debug > 4) {
+				pr_err("rx_error(s):");
 				if (status & RX_MISSED_FRAME)
-					printk("rx miss\n");
+					pr_cont(" miss");
 				if (status & RX_WDOG_TIMER)
-					printk("rx wdog\n");
+					pr_cont(" wdog");
 				if (status & RX_RUNT)
-					printk("rx runt\n");
+					pr_cont(" runt");
 				if (status & RX_OVERLEN)
-					printk("rx overlen\n");
+					pr_cont(" overlen");
 				if (status & RX_COLL)
-					printk("rx coll\n");
+					pr_cont(" coll");
 				if (status & RX_MII_ERROR)
-					printk("rx mii error\n");
+					pr_cont(" mii error");
 				if (status & RX_CRC_ERROR)
-					printk("rx crc error\n");
+					pr_cont(" crc error");
 				if (status & RX_LEN_ERROR)
-					printk("rx len error\n");
+					pr_cont(" len error");
 				if (status & RX_U_CNTRL_FRAME)
-					printk("rx u control frame\n");
+					pr_cont(" u control frame");
+				pr_cont("\n");
 			}
 		}
 		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
@@ -753,7 +785,8 @@ static void au1000_update_tx_stats(struct net_device *dev, u32 status)
 	if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
 		if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
 			/* any other tx errors are only valid
-			 * in half duplex mode */
+			 * in half duplex mode
+			 */
 			ps->tx_errors++;
 			ps->tx_aborted_errors++;
 		}
@@ -774,7 +807,7 @@ static void au1000_tx_ack(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
-	volatile tx_dma_t *ptxd;
+	struct tx_dma *ptxd;
 
 	ptxd = aup->tx_dma_ring[aup->tx_tail];
 
@@ -854,7 +887,7 @@ static int au1000_close(struct net_device *dev)
 
 	spin_lock_irqsave(&aup->lock, flags);
 
-	au1000_reset_mac_unlocked (dev);
+	au1000_reset_mac_unlocked(dev);
 
 	/* stop the device */
 	netif_stop_queue(dev);
@@ -873,9 +906,9 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
 	struct net_device_stats *ps = &dev->stats;
-	volatile tx_dma_t *ptxd;
+	struct tx_dma *ptxd;
 	u32 buff_stat;
-	db_dest_t *pDB;
+	struct db_dest *pDB;
 	int i;
 
 	netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
@@ -902,9 +935,9 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
 	pDB = aup->tx_db_inuse[aup->tx_head];
 	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
 	if (skb->len < ETH_ZLEN) {
-		for (i = skb->len; i < ETH_ZLEN; i++) {
+		for (i = skb->len; i < ETH_ZLEN; i++)
 			((char *)pDB->vaddr)[i] = 0;
-		}
+
 		ptxd->len = ETH_ZLEN;
 	} else
 		ptxd->len = skb->len;
@@ -935,15 +968,16 @@ static void au1000_tx_timeout(struct net_device *dev)
 static void au1000_multicast_list(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
+	u32 reg;
 
-	netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
-
+	netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
+	reg = readl(&aup->mac->control);
 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
-		aup->mac->control |= MAC_PROMISCUOUS;
+		reg |= MAC_PROMISCUOUS;
 	} else if ((dev->flags & IFF_ALLMULTI) ||
 			netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
-		aup->mac->control |= MAC_PASS_ALL_MULTI;
-		aup->mac->control &= ~MAC_PROMISCUOUS;
+		reg |= MAC_PASS_ALL_MULTI;
+		reg &= ~MAC_PROMISCUOUS;
 		netdev_info(dev, "Pass all multicast\n");
 	} else {
 		struct netdev_hw_addr *ha;
@@ -953,11 +987,12 @@ static void au1000_multicast_list(struct net_device *dev)
 		netdev_for_each_mc_addr(ha, dev)
 			set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
 					(long *)mc_filter);
-		aup->mac->multi_hash_high = mc_filter[1];
-		aup->mac->multi_hash_low = mc_filter[0];
-		aup->mac->control &= ~MAC_PROMISCUOUS;
-		aup->mac->control |= MAC_HASH_MODE;
+		writel(mc_filter[1], &aup->mac->multi_hash_high);
+		writel(mc_filter[0], &aup->mac->multi_hash_low);
+		reg &= ~MAC_PROMISCUOUS;
+		reg |= MAC_HASH_MODE;
 	}
+	writel(reg, &aup->mac->control);
 }
 
 static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -991,7 +1026,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	struct au1000_private *aup = NULL;
 	struct au1000_eth_platform_data *pd;
 	struct net_device *dev = NULL;
-	db_dest_t *pDB, *pDBfree;
+	struct db_dest *pDB, *pDBfree;
 	int irq, i, err = 0;
 	struct resource *base, *macen;
 
@@ -1016,13 +1051,15 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 		goto out;
 	}
 
-	if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
+	if (!request_mem_region(base->start, resource_size(base),
+							pdev->name)) {
 		dev_err(&pdev->dev, "failed to request memory region for base registers\n");
 		err = -ENXIO;
 		goto out;
 	}
 
-	if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
+	if (!request_mem_region(macen->start, resource_size(macen),
+							pdev->name)) {
 		dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
 		err = -ENXIO;
 		goto err_request;
@@ -1040,10 +1077,12 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	aup = netdev_priv(dev);
 
 	spin_lock_init(&aup->lock);
-	aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
+	aup->msg_enable = (au1000_debug < 4 ?
+				AU1000_DEF_MSG_ENABLE : au1000_debug);
 
-	/* Allocate the data buffers */
-	/* Snooping works fine with eth on all au1xxx */
+	/* Allocate the data buffers
+	 * Snooping works fine with eth on all au1xxx
+	 */
 	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
 						(NUM_TX_BUFFS + NUM_RX_BUFFS),
 						&aup->dma_addr, 0);
@@ -1054,15 +1093,17 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	}
 
 	/* aup->mac is the base address of the MAC's registers */
-	aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
+	aup->mac = (struct mac_reg *)
+			ioremap_nocache(base->start, resource_size(base));
 	if (!aup->mac) {
 		dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
 		err = -ENXIO;
 		goto err_remap1;
 	}
 
	/* Setup some variables for quick register address access */
-	aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
+	aup->enable = (u32 *)ioremap_nocache(macen->start,
+						resource_size(macen));
 	if (!aup->enable) {
 		dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
 		err = -ENXIO;
@@ -1078,12 +1119,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	/* set a random MAC now in case platform_data doesn't provide one */
 	random_ether_addr(dev->dev_addr);
 
-	*aup->enable = 0;
+	writel(0, &aup->enable);
 	aup->mac_enabled = 0;
 
 	pd = pdev->dev.platform_data;
 	if (!pd) {
-		dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
+		dev_info(&pdev->dev, "no platform_data passed,"
+					" PHY search on MAC0\n");
 		aup->phy1_search_mac0 = 1;
 	} else {
 		if (is_valid_ether_addr(pd->mac))
@@ -1098,8 +1140,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	}
 
 	if (aup->phy_busid && aup->phy_busid > 0) {
-		dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII"
-				"bus not supported yet\n");
+		dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
 		err = -ENODEV;
 		goto err_mdiobus_alloc;
 	}
@@ -1151,17 +1192,17 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 
 	for (i = 0; i < NUM_RX_DMA; i++) {
 		pDB = au1000_GetFreeDB(aup);
-		if (!pDB) {
+		if (!pDB)
 			goto err_out;
-		}
+
 		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
 		aup->rx_db_inuse[i] = pDB;
 	}
 	for (i = 0; i < NUM_TX_DMA; i++) {
 		pDB = au1000_GetFreeDB(aup);
-		if (!pDB) {
+		if (!pDB)
 			goto err_out;
-		}
+
 		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
 		aup->tx_dma_ring[i]->len = 0;
 		aup->tx_db_inuse[i] = pDB;
@@ -1188,7 +1229,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
 			(unsigned long)base->start, irq);
 	if (version_printed++ == 0)
-		printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+		pr_info("%s version %s %s\n",
+			DRV_NAME, DRV_VERSION, DRV_AUTHOR);
 
 	return 0;
 
@@ -1197,7 +1239,8 @@ err_out:
 		mdiobus_unregister(aup->mii_bus);
 
 	/* here we should have a valid dev plus aup-> register addresses
-	 * so we can reset the mac properly.*/
+	 * so we can reset the mac properly.
+	 */
 	au1000_reset_mac(dev);
 
 	for (i = 0; i < NUM_RX_DMA; i++) {
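
Most of the au1000_eth.c churn above replaces direct loads and stores through volatile register pointers with the readl()/writel() MMIO accessors, including at the read-modify-write sites. A minimal sketch of that idiom, with a hypothetical register bit (MAC_FOO_BIT) and base pointer that are not taken from the driver:

#include <linux/io.h>

#define MAC_FOO_BIT 0x1	/* hypothetical control bit */

/* Read-modify-write through the accessors instead of
 * '*reg |= bit' on a volatile pointer.
 */
static void foo_set_bit(void __iomem *ctrl)
{
	u32 reg;

	reg = readl(ctrl);	/* was: reg = *vol_ptr; */
	reg |= MAC_FOO_BIT;
	writel(reg, ctrl);	/* was: *vol_ptr = reg; */
}

(The converted driver still passes plain u32 * pointers to the accessors; the __iomem annotation used here is the usual idiom.)
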
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index d06ec008fbf1..6229c774552c 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -44,34 +44,34 @@
 * Data Buffer Descriptor. Data buffers must be aligned on 32 byte
 * boundary for both, receive and transmit.
 */
-typedef struct db_dest {
+struct db_dest {
 	struct db_dest *pnext;
-	volatile u32 *vaddr;
+	u32 *vaddr;
 	dma_addr_t dma_addr;
-} db_dest_t;
+};
 
 /*
 * The transmit and receive descriptors are memory
 * mapped registers.
 */
-typedef struct tx_dma {
+struct tx_dma {
 	u32 status;
 	u32 buff_stat;
 	u32 len;
 	u32 pad;
-} tx_dma_t;
+};
 
-typedef struct rx_dma {
+struct rx_dma {
 	u32 status;
 	u32 buff_stat;
 	u32 pad[2];
-} rx_dma_t;
 
 
 /*
 * MAC control registers, memory mapped.
 */
-typedef struct mac_reg {
+struct mac_reg {
 	u32 control;
 	u32 mac_addr_high;
 	u32 mac_addr_low;
@@ -82,16 +82,16 @@ typedef struct mac_reg {
 	u32 flow_control;
 	u32 vlan1_tag;
 	u32 vlan2_tag;
-} mac_reg_t;
+};
 
 
 struct au1000_private {
-	db_dest_t *pDBfree;
-	db_dest_t db[NUM_RX_BUFFS+NUM_TX_BUFFS];
-	volatile rx_dma_t *rx_dma_ring[NUM_RX_DMA];
-	volatile tx_dma_t *tx_dma_ring[NUM_TX_DMA];
-	db_dest_t *rx_db_inuse[NUM_RX_DMA];
-	db_dest_t *tx_db_inuse[NUM_TX_DMA];
+	struct db_dest *pDBfree;
+	struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS];
+	struct rx_dma *rx_dma_ring[NUM_RX_DMA];
+	struct tx_dma *tx_dma_ring[NUM_TX_DMA];
+	struct db_dest *rx_db_inuse[NUM_RX_DMA];
+	struct db_dest *tx_db_inuse[NUM_TX_DMA];
 	u32 rx_head;
 	u32 tx_head;
 	u32 tx_tail;
@@ -99,7 +99,9 @@ struct au1000_private {
 
 	int mac_id;
 
-	int mac_enabled;	/* whether MAC is currently enabled and running (req. for mdio) */
+	int mac_enabled;	/* whether MAC is currently enabled and running
+				 * (req. for mdio)
+				 */
 
 	int old_link;		/* used by au1000_adjust_link */
 	int old_speed;
@@ -117,9 +119,11 @@ struct au1000_private {
 	int phy_busid;
 	int phy_irq;
 
-	/* These variables are just for quick access to certain regs addresses. */
-	volatile mac_reg_t *mac;	/* mac registers */
-	volatile u32 *enable;		/* address of MAC Enable Register */
+	/* These variables are just for quick access
+	 * to certain regs addresses.
+	 */
+	struct mac_reg *mac;	/* mac registers */
+	u32 *enable;		/* address of MAC Enable Register */
 
 	u32 vaddr;		/* virtual address of rx/tx buffers */
 	dma_addr_t dma_addr;	/* dma address of rx/tx buffers */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 1e620e287ae0..8e7c8a8e61c7 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -818,7 +818,7 @@ static int b44_rx(struct b44 *bp, int budget)
 					 copy_skb->data, len);
 			skb = copy_skb;
 		}
-		skb->ip_summed = CHECKSUM_NONE;
+		skb_checksum_none_assert(skb);
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		netif_receive_skb(skb);
 		received++;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0d2c5da08937..ecfef240a303 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -293,22 +293,22 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		/* if the packet does not have start of packet _and_
 		 * end of packet flag set, then just recycle it */
 		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
-			priv->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			continue;
 		}
 
 		/* recycle packet if it's marked as bad */
 		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
-			priv->stats.rx_errors++;
+			dev->stats.rx_errors++;
 
 			if (len_stat & DMADESC_OVSIZE_MASK)
-				priv->stats.rx_length_errors++;
+				dev->stats.rx_length_errors++;
 			if (len_stat & DMADESC_CRC_MASK)
-				priv->stats.rx_crc_errors++;
+				dev->stats.rx_crc_errors++;
 			if (len_stat & DMADESC_UNDER_MASK)
-				priv->stats.rx_frame_errors++;
+				dev->stats.rx_frame_errors++;
 			if (len_stat & DMADESC_OV_MASK)
-				priv->stats.rx_fifo_errors++;
+				dev->stats.rx_fifo_errors++;
 			continue;
 		}
 
@@ -324,7 +324,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 			nskb = netdev_alloc_skb_ip_align(dev, len);
 			if (!nskb) {
 				/* forget packet, just rearm desc */
-				priv->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				continue;
 			}
 
@@ -342,8 +342,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 
 		skb_put(skb, len);
 		skb->protocol = eth_type_trans(skb, dev);
-		priv->stats.rx_packets++;
-		priv->stats.rx_bytes += len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
 		netif_receive_skb(skb);
 
 	} while (--budget > 0);
@@ -403,7 +403,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
 		spin_unlock(&priv->tx_lock);
 
 		if (desc->len_stat & DMADESC_UNDER_MASK)
-			priv->stats.tx_errors++;
+			dev->stats.tx_errors++;
 
 		dev_kfree_skb(skb);
 		released++;
@@ -563,8 +563,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!priv->tx_desc_count)
 		netif_stop_queue(dev);
 
-	priv->stats.tx_bytes += skb->len;
-	priv->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
 	ret = NETDEV_TX_OK;
 
 out_unlock:
@@ -798,7 +798,7 @@ static int bcm_enet_open(struct net_device *dev)
 	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
 		 priv->mac_id ? "1" : "0", priv->phy_id);
 
-	phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
+	phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
 			     PHY_INTERFACE_MODE_MII);
 
 	if (IS_ERR(phydev)) {
@@ -1141,17 +1141,6 @@ static int bcm_enet_stop(struct net_device *dev)
 }
 
 /*
- * core request to return device rx/tx stats
- */
-static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
-{
-	struct bcm_enet_priv *priv;
-
-	priv = netdev_priv(dev);
-	return &priv->stats;
-}
-
-/*
  * ethtool callbacks
 */
 struct bcm_enet_stats {
@@ -1163,16 +1152,18 @@ struct bcm_enet_stats {
 
 #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),	\
 		     offsetof(struct bcm_enet_priv, m)
+#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),	\
+		     offsetof(struct net_device_stats, m)
 
 static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
-	{ "rx_packets", GEN_STAT(stats.rx_packets), -1 },
-	{ "tx_packets", GEN_STAT(stats.tx_packets), -1 },
-	{ "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
-	{ "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
-	{ "rx_errors", GEN_STAT(stats.rx_errors), -1 },
-	{ "tx_errors", GEN_STAT(stats.tx_errors), -1 },
-	{ "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
-	{ "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },
+	{ "rx_packets", DEV_STAT(rx_packets), -1 },
+	{ "tx_packets", DEV_STAT(tx_packets), -1 },
+	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
+	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
+	{ "rx_errors", DEV_STAT(rx_errors), -1 },
+	{ "tx_errors", DEV_STAT(tx_errors), -1 },
+	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
+	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },
 
 	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
 	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
@@ -1328,7 +1319,11 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
 		char *p;
 
 		s = &bcm_enet_gstrings_stats[i];
-		p = (char *)priv + s->stat_offset;
+		if (s->mib_reg == -1)
+			p = (char *)&netdev->stats;
+		else
+			p = (char *)priv;
+		p += s->stat_offset;
 		data[i] = (s->sizeof_stat == sizeof(u64)) ?
 			*(u64 *)p : *(u32 *)p;
 	}
@@ -1605,7 +1600,6 @@ static const struct net_device_ops bcm_enet_ops = {
 	.ndo_open		= bcm_enet_open,
 	.ndo_stop		= bcm_enet_stop,
 	.ndo_start_xmit		= bcm_enet_start_xmit,
-	.ndo_get_stats		= bcm_enet_get_stats,
 	.ndo_set_mac_address	= bcm_enet_set_mac_address,
 	.ndo_set_multicast_list	= bcm_enet_set_multicast_list,
 	.ndo_do_ioctl		= bcm_enet_ioctl,
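
The DEV_STAT/GEN_STAT pair above works because every table entry records a field size and an offsetof() relative to some base structure, and bcm_enet_get_ethtool_stats() picks the base pointer (&netdev->stats when mib_reg == -1, otherwise priv) before adding the offset. A user-space sketch of the same offset-table technique, with made-up struct stats and struct entry types:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
};

struct entry {
	const char *name;
	size_t size;	/* sizeof() of the field */
	size_t off;	/* offsetof() within struct stats */
};

#define STAT(m) { #m, sizeof(((struct stats *)0)->m), \
		  offsetof(struct stats, m) }

int main(void)
{
	struct stats s = { .rx_packets = 7, .tx_packets = 9 };
	const struct entry tab[] = { STAT(rx_packets), STAT(tx_packets) };

	/* generic reader: base pointer plus recorded offset */
	uint64_t v = *(const uint64_t *)((const char *)&s + tab[1].off);

	assert(v == 9 && tab[1].size == sizeof(uint64_t));
	return 0;
}
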
diff --git a/drivers/net/bcm63xx_enet.h b/drivers/net/bcm63xx_enet.h
index bd3684d42d74..0e3048b788c2 100644
--- a/drivers/net/bcm63xx_enet.h
+++ b/drivers/net/bcm63xx_enet.h
@@ -274,7 +274,6 @@ struct bcm_enet_priv {
274 int pause_tx; 274 int pause_tx;
275 275
276 /* stats */ 276 /* stats */
277 struct net_device_stats stats;
278 struct bcm_enet_mib_counters mib; 277 struct bcm_enet_mib_counters mib;
279 278
280 /* after mib interrupt, mib registers update is done in this 279 /* after mib interrupt, mib registers update is done in this
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 53306bf3f401..4faf6961dcec 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -414,6 +414,20 @@ static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
414 adapter->is_virtfn = (data != 0xAA); 414 adapter->is_virtfn = (data != 0xAA);
415} 415}
416 416
417static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
418{
419 u32 addr;
420
421 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
422
423 mac[5] = (u8)(addr & 0xFF);
424 mac[4] = (u8)((addr >> 8) & 0xFF);
425 mac[3] = (u8)((addr >> 16) & 0xFF);
426 mac[2] = 0xC9;
427 mac[1] = 0x00;
428 mac[0] = 0x00;
429}
430
417extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 431extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
418 u16 num_popped); 432 u16 num_popped);
419extern void be_link_status_update(struct be_adapter *adapter, bool link_up); 433extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
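
be_vf_eth_addr_generate() hashes the PF MAC with jhash() to seed the VF addresses, pinning the top three octets to 00:00:C9; the be_main.c hunks further down then hand each VF the next increment of the final octet. A user-space sketch of the resulting address layout, with jhash() swapped for a stand-in mixer since only the shape of the addresses matters here:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's jhash(); any 32-bit mix illustrates it. */
static uint32_t toy_hash(const uint8_t *p, int len)
{
	uint32_t h = 0x9e3779b9;

	while (len--)
		h = (h ^ *p++) * 0x01000193;
	return h;
}

int main(void)
{
	const uint8_t pf_mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };
	uint32_t addr = toy_hash(pf_mac, 6);
	uint8_t mac[6] = { 0x00, 0x00, 0xc9,	/* fixed prefix, as in be.h */
			   addr >> 16, addr >> 8, addr };
	int vf;

	for (vf = 0; vf < 4; vf++) {	/* one address per VF */
		printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		mac[5] += 1;	/* next VF, next address */
	}
	return 0;
}
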
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 13f0abbc5205..d92063420c25 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -91,6 +91,9 @@ static const struct be_ethtool_stat et_stats[] = {
91 {PORTSTAT_INFO(rx_non_rss_packets)}, 91 {PORTSTAT_INFO(rx_non_rss_packets)},
92 {PORTSTAT_INFO(rx_ipv4_packets)}, 92 {PORTSTAT_INFO(rx_ipv4_packets)},
93 {PORTSTAT_INFO(rx_ipv6_packets)}, 93 {PORTSTAT_INFO(rx_ipv6_packets)},
94 {PORTSTAT_INFO(rx_switched_unicast_packets)},
95 {PORTSTAT_INFO(rx_switched_multicast_packets)},
96 {PORTSTAT_INFO(rx_switched_broadcast_packets)},
94 {PORTSTAT_INFO(tx_unicastframes)}, 97 {PORTSTAT_INFO(tx_unicastframes)},
95 {PORTSTAT_INFO(tx_multicastframes)}, 98 {PORTSTAT_INFO(tx_multicastframes)},
96 {PORTSTAT_INFO(tx_broadcastframes)}, 99 {PORTSTAT_INFO(tx_broadcastframes)},
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 6eda7a022256..43a3a574e2e0 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -365,11 +365,6 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
 	rx_eq->cur_eqd = eqd;
 }

-static struct net_device_stats *be_get_stats(struct net_device *dev)
-{
-	return &dev->stats;
-}
-
 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
 {
 	u64 rate = bytes;
@@ -1026,7 +1021,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 	skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

 	if (do_pkt_csum(rxcp, adapter->rx_csum))
-		skb->ip_summed = CHECKSUM_NONE;
+		skb_checksum_none_assert(skb);
 	else
 		skb->ip_summed = CHECKSUM_UNNECESSARY;

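Note: the skb_checksum_none_assert() conversion relies on freshly allocated skbs already having ip_summed == CHECKSUM_NONE; the helper only asserts that invariant on debug builds instead of storing the value again. Its definition in this kernel cycle is roughly (quoted from memory; see include/linux/skbuff.h in the same tree):

	static inline void skb_checksum_none_assert(struct sk_buff *skb)
	{
	#ifdef DEBUG
		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
	#endif
	}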
@@ -2084,6 +2079,47 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
 	return status;
 }

+/*
+ * Generate a seed MAC address from the PF MAC address using jhash.
+ * MAC addresses for the VFs are assigned incrementally, starting from the
+ * seed. These addresses are programmed into the ASIC by the PF, and the VF
+ * driver queries for its MAC address during probe.
+ */
+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
+{
+	u32 vf = 0;
+	int status;
+	u8 mac[ETH_ALEN];
+
+	be_vf_eth_addr_generate(adapter, mac);
+
+	for (vf = 0; vf < num_vfs; vf++) {
+		status = be_cmd_pmac_add(adapter, mac,
+					adapter->vf_cfg[vf].vf_if_handle,
+					&adapter->vf_cfg[vf].vf_pmac_id);
+		if (status)
+			dev_err(&adapter->pdev->dev,
+				"Mac address add failed for VF %d\n", vf);
+		else
+			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+		mac[5] += 1;
+	}
+	return status;
+}
+
+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
+{
+	u32 vf;
+
+	for (vf = 0; vf < num_vfs; vf++) {
+		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+			be_cmd_pmac_del(adapter,
+					adapter->vf_cfg[vf].vf_if_handle,
+					adapter->vf_cfg[vf].vf_pmac_id);
+	}
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -2143,10 +2179,20 @@ static int be_setup(struct be_adapter *adapter)
 	if (status != 0)
 		goto rx_qs_destroy;

+	if (be_physfn(adapter)) {
+		status = be_vf_eth_addr_config(adapter);
+		if (status)
+			goto mcc_q_destroy;
+	}
+
 	adapter->link_speed = -1;

 	return 0;

+mcc_q_destroy:
+	if (be_physfn(adapter))
+		be_vf_eth_addr_rem(adapter);
+	be_mcc_queues_destroy(adapter);
 rx_qs_destroy:
 	be_rx_queues_destroy(adapter);
 tx_qs_destroy:
@@ -2163,6 +2209,9 @@ do_none:

 static int be_clear(struct be_adapter *adapter)
 {
+	if (be_physfn(adapter))
+		be_vf_eth_addr_rem(adapter);
+
 	be_mcc_queues_destroy(adapter);
 	be_rx_queues_destroy(adapter);
 	be_tx_queues_destroy(adapter);
@@ -2390,7 +2439,6 @@ static struct net_device_ops be_netdev_ops = {
 	.ndo_open		= be_open,
 	.ndo_stop		= be_close,
 	.ndo_start_xmit		= be_xmit,
-	.ndo_get_stats		= be_get_stats,
 	.ndo_set_rx_mode	= be_set_multicast_list,
 	.ndo_set_mac_address	= be_mac_addr_set,
 	.ndo_change_mtu		= be_change_mtu,
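Note: the new mcc_q_destroy label follows the usual kernel unwind ladder, where each failure point jumps to the label that tears down everything set up so far, in reverse order. A generic sketch of the idiom (step_a/step_b/teardown_a are hypothetical helpers, not from this driver):

	static int setup_example(void)
	{
		int err;

		err = step_a();
		if (err)
			goto out;		/* nothing to undo yet */
		err = step_b();
		if (err)
			goto undo_a;		/* undo only what succeeded */
		return 0;

	undo_a:
		teardown_a();
	out:
		return err;
	}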
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 012613fde3f4..7a0e4156fade 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -803,15 +803,14 @@ static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompa
 static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
 {
 	struct bfin_mac_local *lp = netdev_priv(netdev);
-	union skb_shared_tx *shtx = skb_tx(skb);

-	if (shtx->hardware) {
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		int timeout_cnt = MAX_TIMEOUT_CNT;

 		/* When doing time stamping, keep the connection to the socket
 		 * a while longer
 		 */
-		shtx->in_progress = 1;
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

 		/*
 		 * The timestamping is done at the EMAC module's MII/RMII interface
@@ -991,7 +990,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
 	struct bfin_mac_local *lp = netdev_priv(dev);
 	u16 *data;
 	u32 data_align = (unsigned long)(skb->data) & 0x3;
-	union skb_shared_tx *shtx = skb_tx(skb);

 	current_tx_ptr->skb = skb;

@@ -1005,7 +1003,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
 	 * of this field are the length of the packet payload in bytes and the higher
 	 * 4 bits are the timestamping enable field.
 	 */
-	if (shtx->hardware)
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
 		*data |= 0x1000;

 	current_tx_ptr->desc_a.start_addr = (u32)data;
@@ -1015,7 +1013,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
 	} else {
 		*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
 		/* enable timestamping for the sent packet */
-		if (shtx->hardware)
+		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
 			*((u16 *)(current_tx_ptr->packet)) |= 0x1000;
 		memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
 			skb->len);
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 959add2410bf..9322699bb31c 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1233,15 +1233,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
 	}
 	spin_unlock_irqrestore(&bp->lock, flags);
 }
-static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
-	struct bmac_data *bp = netdev_priv(dev);
-	strcpy(info->driver, "bmac");
-	strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev));
-}

 static const struct ethtool_ops bmac_ethtool_ops = {
-	.get_drvinfo	= bmac_get_drvinfo,
 	.get_link	= ethtool_op_get_link,
 };

diff --git a/drivers/net/bna/Makefile b/drivers/net/bna/Makefile
new file mode 100644
index 000000000000..a5d604de7fea
--- /dev/null
+++ b/drivers/net/bna/Makefile
@@ -0,0 +1,11 @@
1#
2# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3# All rights reserved.
4#
5
6obj-$(CONFIG_BNA) += bna.o
7
8bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
9bna-objs += bfa_ioc.o bfa_ioc_ct.o bfa_cee.o cna_fwimg.o
10
11EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/bna/bfa_cee.c
new file mode 100644
index 000000000000..f7b789a3b217
--- /dev/null
+++ b/drivers/net/bna/bfa_cee.c
@@ -0,0 +1,291 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_defs_cna.h"
20#include "cna.h"
21#include "bfa_cee.h"
22#include "bfi_cna.h"
23#include "bfa_ioc.h"
24
25#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
26#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
27
28static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
29static void bfa_cee_format_cee_cfg(void *buffer);
30
31static void
32bfa_cee_format_cee_cfg(void *buffer)
33{
34 struct bfa_cee_attr *cee_cfg = buffer;
35 bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
36}
37
38static void
39bfa_cee_stats_swap(struct bfa_cee_stats *stats)
40{
41 u32 *buffer = (u32 *)stats;
42 int i;
43
44 for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32));
45 i++) {
46 buffer[i] = ntohl(buffer[i]);
47 }
48}
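Note: bfa_cee_stats_swap() may treat the whole struct as a flat u32 array only because every field of struct bfa_cee_stats is a u32 (see bfa_defs_cna.h later in this patch). A reduced userspace sketch of the same trick:

	#include <arpa/inet.h>
	#include <stddef.h>
	#include <stdint.h>

	struct two_counters { uint32_t a, b; };	/* all-u32, like bfa_cee_stats */

	static void swap_all(struct two_counters *s)
	{
		uint32_t *w = (uint32_t *)s;
		size_t i;

		for (i = 0; i < sizeof(*s) / sizeof(uint32_t); i++)
			w[i] = ntohl(w[i]);	/* firmware fills these big-endian */
	}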
49
50static void
51bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
52{
53 lldp_cfg->time_to_live =
54 ntohs(lldp_cfg->time_to_live);
55 lldp_cfg->enabled_system_cap =
56 ntohs(lldp_cfg->enabled_system_cap);
57}
58
59/**
60 * bfa_cee_attr_meminfo()
61 *
62 * @brief Returns the size of the DMA memory needed by CEE attributes
63 *
64 * @param[in] void
65 *
66 * @return Size of DMA region
67 */
68static u32
69bfa_cee_attr_meminfo(void)
70{
71 return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
72}
73/**
74 * bfa_cee_stats_meminfo()
75 *
76 * @brief Returns the size of the DMA memory needed by CEE stats
77 *
78 * @param[in] void
79 *
80 * @return Size of DMA region
81 */
82static u32
83bfa_cee_stats_meminfo(void)
84{
85 return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
86}
87
88/**
89 * bfa_cee_get_attr_isr()
90 *
91 * @brief CEE ISR for get-attributes responses from f/w
92 *
93 * @param[in] cee - Pointer to the CEE module
94 * status - Return status from the f/w
95 *
96 * @return void
97 */
98static void
99bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
100{
101 cee->get_attr_status = status;
102 if (status == BFA_STATUS_OK) {
103 memcpy(cee->attr, cee->attr_dma.kva,
104 sizeof(struct bfa_cee_attr));
105 bfa_cee_format_cee_cfg(cee->attr);
106 }
107 cee->get_attr_pending = false;
108 if (cee->cbfn.get_attr_cbfn)
109 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
110}
111
112/**
 113 * bfa_cee_get_stats_isr()
114 *
115 * @brief CEE ISR for get-stats responses from f/w
116 *
117 * @param[in] cee - Pointer to the CEE module
118 * status - Return status from the f/w
119 *
120 * @return void
121 */
122static void
123bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
124{
125 cee->get_stats_status = status;
126 if (status == BFA_STATUS_OK) {
127 memcpy(cee->stats, cee->stats_dma.kva,
128 sizeof(struct bfa_cee_stats));
129 bfa_cee_stats_swap(cee->stats);
130 }
131 cee->get_stats_pending = false;
132 if (cee->cbfn.get_stats_cbfn)
133 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
134}
135
136/**
 137 * bfa_cee_reset_stats_isr()
138 *
139 * @brief CEE ISR for reset-stats responses from f/w
140 *
141 * @param[in] cee - Pointer to the CEE module
142 * status - Return status from the f/w
143 *
144 * @return void
145 */
146static void
147bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
148{
149 cee->reset_stats_status = status;
150 cee->reset_stats_pending = false;
151 if (cee->cbfn.reset_stats_cbfn)
152 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
153}
154/**
155 * bfa_nw_cee_meminfo()
156 *
157 * @brief Returns the size of the DMA memory needed by CEE module
158 *
159 * @param[in] void
160 *
161 * @return Size of DMA region
162 */
163u32
164bfa_nw_cee_meminfo(void)
165{
166 return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
167}
168
169/**
170 * bfa_nw_cee_mem_claim()
171 *
 172 * @brief Initializes CEE DMA memory
173 *
174 * @param[in] cee CEE module pointer
175 * dma_kva Kernel Virtual Address of CEE DMA Memory
176 * dma_pa Physical Address of CEE DMA Memory
177 *
178 * @return void
179 */
180void
181bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
182{
183 cee->attr_dma.kva = dma_kva;
184 cee->attr_dma.pa = dma_pa;
185 cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
186 cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
187 cee->attr = (struct bfa_cee_attr *) dma_kva;
188 cee->stats = (struct bfa_cee_stats *)
189 (dma_kva + bfa_cee_attr_meminfo());
190}
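Note: mem_claim() carves a single DMA allocation into two regions, attributes at offset 0 and stats at bfa_cee_attr_meminfo(), so the stats region always starts on an aligned boundary. A minimal sketch of that carving, assuming a 256-byte alignment (the real BFA_DMA_ALIGN_SZ value is defined elsewhere):

	#include <stddef.h>
	#include <stdint.h>

	#define ALIGN_SZ 256	/* assumed stand-in for BFA_DMA_ALIGN_SZ */
	#define ROUNDUP(x, a) ((((x) + (a) - 1) / (a)) * (a))

	struct region { uint8_t *kva; uint64_t pa; };

	static void mem_claim(uint8_t *kva, uint64_t pa, size_t attr_sz,
			      struct region *attr, struct region *stats)
	{
		size_t off = ROUNDUP(attr_sz, ALIGN_SZ);

		attr->kva = kva;
		attr->pa = pa;
		stats->kva = kva + off;	/* next aligned boundary */
		stats->pa = pa + off;
	}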
191
192/**
193 * bfa_cee_isrs()
194 *
195 * @brief Handles Mail-box interrupts for CEE module.
196 *
 197 * @param[in] cbarg - Pointer to the CEE module data structure.
198 *
199 * @return void
200 */
201
202static void
203bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
204{
205 union bfi_cee_i2h_msg_u *msg;
206 struct bfi_cee_get_rsp *get_rsp;
207 struct bfa_cee *cee = (struct bfa_cee *) cbarg;
208 msg = (union bfi_cee_i2h_msg_u *) m;
209 get_rsp = (struct bfi_cee_get_rsp *) m;
210 switch (msg->mh.msg_id) {
211 case BFI_CEE_I2H_GET_CFG_RSP:
212 bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
213 break;
214 case BFI_CEE_I2H_GET_STATS_RSP:
215 bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
216 break;
217 case BFI_CEE_I2H_RESET_STATS_RSP:
218 bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
219 break;
220 default:
221 BUG_ON(1);
222 }
223}
224
225/**
226 * bfa_cee_hbfail()
227 *
228 * @brief CEE module heart-beat failure handler.
229 *
 230 * @param[in] arg - Pointer to the CEE module data structure.
231 *
232 * @return void
233 */
234
235static void
236bfa_cee_hbfail(void *arg)
237{
238 struct bfa_cee *cee;
239 cee = (struct bfa_cee *) arg;
240
241 if (cee->get_attr_pending == true) {
242 cee->get_attr_status = BFA_STATUS_FAILED;
243 cee->get_attr_pending = false;
244 if (cee->cbfn.get_attr_cbfn) {
245 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
246 BFA_STATUS_FAILED);
247 }
248 }
249 if (cee->get_stats_pending == true) {
250 cee->get_stats_status = BFA_STATUS_FAILED;
251 cee->get_stats_pending = false;
252 if (cee->cbfn.get_stats_cbfn) {
253 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
254 BFA_STATUS_FAILED);
255 }
256 }
257 if (cee->reset_stats_pending == true) {
258 cee->reset_stats_status = BFA_STATUS_FAILED;
259 cee->reset_stats_pending = false;
260 if (cee->cbfn.reset_stats_cbfn) {
261 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
262 BFA_STATUS_FAILED);
263 }
264 }
265}
266
267/**
268 * bfa_nw_cee_attach()
269 *
270 * @brief CEE module-attach API
271 *
272 * @param[in] cee - Pointer to the CEE module data structure
273 * ioc - Pointer to the ioc module data structure
274 * dev - Pointer to the device driver module data structure
275 * The device driver specific mbox ISR functions have
276 * this pointer as one of the parameters.
277 *
278 * @return void
279 */
280void
281bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
282 void *dev)
283{
 284	BUG_ON(cee == NULL);
285 cee->dev = dev;
286 cee->ioc = ioc;
287
288 bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
289 bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
290 bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
291}
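Note: the expected call order for this API is meminfo, then mem_claim, then attach. A hedged usage sketch (dma_alloc_coherent() is the standard kernel allocator; pdev, cee, ioc and bnad are assumed driver plumbing, not shown in this file):

	u32 len = bfa_nw_cee_meminfo();
	dma_addr_t pa;
	u8 *kva = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);

	if (kva) {
		bfa_nw_cee_mem_claim(cee, kva, pa);
		bfa_nw_cee_attach(cee, ioc, bnad);
	}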
diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/bna/bfa_cee.h
new file mode 100644
index 000000000000..20543d15b64f
--- /dev/null
+++ b/drivers/net/bna/bfa_cee.h
@@ -0,0 +1,64 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_CEE_H__
20#define __BFA_CEE_H__
21
22#include "bfa_defs_cna.h"
23#include "bfa_ioc.h"
24
25typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
26typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
27typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
28typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);
29
30struct bfa_cee_cbfn {
31 bfa_cee_get_attr_cbfn_t get_attr_cbfn;
32 void *get_attr_cbarg;
33 bfa_cee_get_stats_cbfn_t get_stats_cbfn;
34 void *get_stats_cbarg;
35 bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
36 void *reset_stats_cbarg;
37};
38
39struct bfa_cee {
40 void *dev;
41 bool get_attr_pending;
42 bool get_stats_pending;
43 bool reset_stats_pending;
44 enum bfa_status get_attr_status;
45 enum bfa_status get_stats_status;
46 enum bfa_status reset_stats_status;
47 struct bfa_cee_cbfn cbfn;
48 struct bfa_ioc_hbfail_notify hbfail;
49 struct bfa_cee_attr *attr;
50 struct bfa_cee_stats *stats;
51 struct bfa_dma attr_dma;
52 struct bfa_dma stats_dma;
53 struct bfa_ioc *ioc;
54 struct bfa_mbox_cmd get_cfg_mb;
55 struct bfa_mbox_cmd get_stats_mb;
56 struct bfa_mbox_cmd reset_stats_mb;
57};
58
59u32 bfa_nw_cee_meminfo(void);
60void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
61 u64 dma_pa);
62void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
63
64#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
new file mode 100644
index 000000000000..29c1b8de2c2d
--- /dev/null
+++ b/drivers/net/bna/bfa_defs.h
@@ -0,0 +1,243 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_DEFS_H__
20#define __BFA_DEFS_H__
21
22#include "cna.h"
23#include "bfa_defs_status.h"
24#include "bfa_defs_mfg_comm.h"
25
26#define BFA_STRING_32 32
27#define BFA_VERSION_LEN 64
28
29/**
30 * ---------------------- adapter definitions ------------
31 */
32
33/**
34 * BFA adapter level attributes.
35 */
36enum {
37 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
38 /*
39 *!< adapter serial num length
40 */
41 BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */
42 BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
43 BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */
44 BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */
45 BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */
46};
47
48struct bfa_adapter_attr {
49 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
50 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
51 u32 card_type;
52 char model[BFA_ADAPTER_MODEL_NAME_LEN];
53 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
54 u64 pwwn;
55 char node_symname[FC_SYMNAME_MAX];
56 char hw_ver[BFA_VERSION_LEN];
57 char fw_ver[BFA_VERSION_LEN];
58 char optrom_ver[BFA_VERSION_LEN];
59 char os_type[BFA_ADAPTER_OS_TYPE_LEN];
60 struct bfa_mfg_vpd vpd;
61 struct mac mac;
62
63 u8 nports;
64 u8 max_speed;
65 u8 prototype;
66 char asic_rev;
67
68 u8 pcie_gen;
69 u8 pcie_lanes_orig;
70 u8 pcie_lanes;
71 u8 cna_capable;
72
73 u8 is_mezz;
74 u8 trunk_capable;
75};
76
77/**
78 * ---------------------- IOC definitions ------------
79 */
80
81enum {
82 BFA_IOC_DRIVER_LEN = 16,
83 BFA_IOC_CHIP_REV_LEN = 8,
84};
85
86/**
87 * Driver and firmware versions.
88 */
89struct bfa_ioc_driver_attr {
90 char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
91 char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
92 char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */
93 char bios_ver[BFA_VERSION_LEN]; /*!< bios version */
94 char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */
95 char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
96};
97
98/**
99 * IOC PCI device attributes
100 */
101struct bfa_ioc_pci_attr {
102 u16 vendor_id; /*!< PCI vendor ID */
103 u16 device_id; /*!< PCI device ID */
104 u16 ssid; /*!< subsystem ID */
105 u16 ssvid; /*!< subsystem vendor ID */
106 u32 pcifn; /*!< PCI device function */
107 u32 rsvd; /* padding */
108 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
109};
110
111/**
112 * IOC states
113 */
114enum bfa_ioc_state {
115 BFA_IOC_RESET = 1, /*!< IOC is in reset state */
116 BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
117 BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */
118 BFA_IOC_GETATTR = 4, /*!< IOC is being configured */
119 BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */
120 BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */
121 BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */
122 BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */
123 BFA_IOC_DISABLED = 9, /*!< IOC is disabled */
124 BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */
125};
126
127/**
128 * IOC firmware stats
129 */
130struct bfa_fw_ioc_stats {
131 u32 enable_reqs;
132 u32 disable_reqs;
133 u32 get_attr_reqs;
134 u32 dbg_sync;
135 u32 dbg_dump;
136 u32 unknown_reqs;
137};
138
139/**
140 * IOC driver stats
141 */
142struct bfa_ioc_drv_stats {
143 u32 ioc_isrs;
144 u32 ioc_enables;
145 u32 ioc_disables;
146 u32 ioc_hbfails;
147 u32 ioc_boots;
148 u32 stats_tmos;
149 u32 hb_count;
150 u32 disable_reqs;
151 u32 enable_reqs;
152 u32 disable_replies;
153 u32 enable_replies;
154};
155
156/**
157 * IOC statistics
158 */
159struct bfa_ioc_stats {
160 struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
161 struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
162};
163
164enum bfa_ioc_type {
165 BFA_IOC_TYPE_FC = 1,
166 BFA_IOC_TYPE_FCoE = 2,
167 BFA_IOC_TYPE_LL = 3,
168};
169
170/**
171 * IOC attributes returned in queries
172 */
173struct bfa_ioc_attr {
174 enum bfa_ioc_type ioc_type;
175 enum bfa_ioc_state state; /*!< IOC state */
176 struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
177 struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
178 struct bfa_ioc_pci_attr pci_attr;
179 u8 port_id; /*!< port number */
180 u8 rsvd[7]; /*!< 64bit align */
181};
182
183/**
184 * ---------------------- mfg definitions ------------
185 */
186
187/**
188 * Checksum size
189 */
190#define BFA_MFG_CHKSUM_SIZE 16
191
192#define BFA_MFG_PARTNUM_SIZE 14
193#define BFA_MFG_SUPPLIER_ID_SIZE 10
194#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
195#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
196#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
197
198#pragma pack(1)
199
200/**
201 * @brief BFA adapter manufacturing block definition.
202 *
203 * All numerical fields are in big-endian format.
204 */
205struct bfa_mfg_block {
206 u8 version; /*!< manufacturing block version */
207 u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
208 u16 mfgsize; /*!< mfg block size */
209 u16 u16_chksum; /*!< old u16 checksum */
210 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
211 char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
212 u8 mfg_day; /*!< manufacturing day */
213 u8 mfg_month; /*!< manufacturing month */
214 u16 mfg_year; /*!< manufacturing year */
215 u64 mfg_wwn; /*!< wwn base for this adapter */
216 u8 num_wwn; /*!< number of wwns assigned */
217 u8 mfg_speeds; /*!< speeds allowed for this adapter */
218 u8 rsv[2];
219 char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
220 char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
221 char
222 supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
223 char
224 supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
225 mac_t mfg_mac; /*!< mac address */
226 u8 num_mac; /*!< number of mac addresses */
227 u8 rsv2;
228 u32 mfg_type; /*!< card type */
229 u8 rsv3[108];
230 u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
231};
232
233#pragma pack()
234
235/**
236 * ---------------------- pci definitions ------------
237 */
238
239#define bfa_asic_id_ct(devid) \
240 ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
241 (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
242
243#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/bna/bfa_defs_cna.h b/drivers/net/bna/bfa_defs_cna.h
new file mode 100644
index 000000000000..7e0a9187bdd5
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_cna.h
@@ -0,0 +1,223 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_DEFS_CNA_H__
20#define __BFA_DEFS_CNA_H__
21
22#include "bfa_defs.h"
23
24/**
25 * @brief
26 * FC physical port statistics.
27 */
28struct bfa_port_fc_stats {
 29	u64 secs_reset;	/*!< Seconds since stats were reset */
30 u64 tx_frames; /*!< Tx frames */
31 u64 tx_words; /*!< Tx words */
32 u64 tx_lip; /*!< Tx LIP */
33 u64 tx_nos; /*!< Tx NOS */
34 u64 tx_ols; /*!< Tx OLS */
35 u64 tx_lr; /*!< Tx LR */
36 u64 tx_lrr; /*!< Tx LRR */
37 u64 rx_frames; /*!< Rx frames */
38 u64 rx_words; /*!< Rx words */
39 u64 lip_count; /*!< Rx LIP */
40 u64 nos_count; /*!< Rx NOS */
41 u64 ols_count; /*!< Rx OLS */
42 u64 lr_count; /*!< Rx LR */
43 u64 lrr_count; /*!< Rx LRR */
44 u64 invalid_crcs; /*!< Rx CRC err frames */
45 u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
46 u64 undersized_frm; /*!< Rx undersized frames */
47 u64 oversized_frm; /*!< Rx oversized frames */
48 u64 bad_eof_frm; /*!< Rx frames with bad EOF */
49 u64 error_frames; /*!< Errored frames */
50 u64 dropped_frames; /*!< Dropped frames */
51 u64 link_failures; /*!< Link Failure (LF) count */
52 u64 loss_of_syncs; /*!< Loss of sync count */
53 u64 loss_of_signals; /*!< Loss of signal count */
54 u64 primseq_errs; /*!< Primitive sequence protocol err. */
55 u64 bad_os_count; /*!< Invalid ordered sets */
56 u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
57 u64 err_enc; /*!< Encoding err frame_8b10b */
58};
59
60/**
61 * @brief
62 * Eth Physical Port statistics.
63 */
64struct bfa_port_eth_stats {
 65	u64 secs_reset;	/*!< Seconds since stats were reset */
66 u64 frame_64; /*!< Frames 64 bytes */
67 u64 frame_65_127; /*!< Frames 65-127 bytes */
68 u64 frame_128_255; /*!< Frames 128-255 bytes */
69 u64 frame_256_511; /*!< Frames 256-511 bytes */
70 u64 frame_512_1023; /*!< Frames 512-1023 bytes */
71 u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */
72 u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */
73 u64 tx_bytes; /*!< Tx bytes */
74 u64 tx_packets; /*!< Tx packets */
75 u64 tx_mcast_packets; /*!< Tx multicast packets */
76 u64 tx_bcast_packets; /*!< Tx broadcast packets */
77 u64 tx_control_frame; /*!< Tx control frame */
78 u64 tx_drop; /*!< Tx drops */
79 u64 tx_jabber; /*!< Tx jabber */
80 u64 tx_fcs_error; /*!< Tx FCS errors */
81 u64 tx_fragments; /*!< Tx fragments */
82 u64 rx_bytes; /*!< Rx bytes */
83 u64 rx_packets; /*!< Rx packets */
84 u64 rx_mcast_packets; /*!< Rx multicast packets */
85 u64 rx_bcast_packets; /*!< Rx broadcast packets */
86 u64 rx_control_frames; /*!< Rx control frames */
87 u64 rx_unknown_opcode; /*!< Rx unknown opcode */
88 u64 rx_drop; /*!< Rx drops */
89 u64 rx_jabber; /*!< Rx jabber */
90 u64 rx_fcs_error; /*!< Rx FCS errors */
91 u64 rx_alignment_error; /*!< Rx alignment errors */
92 u64 rx_frame_length_error; /*!< Rx frame len errors */
93 u64 rx_code_error; /*!< Rx code errors */
94 u64 rx_fragments; /*!< Rx fragments */
95 u64 rx_pause; /*!< Rx pause */
96 u64 rx_zero_pause; /*!< Rx zero pause */
97 u64 tx_pause; /*!< Tx pause */
98 u64 tx_zero_pause; /*!< Tx zero pause */
99 u64 rx_fcoe_pause; /*!< Rx FCoE pause */
100 u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
101 u64 tx_fcoe_pause; /*!< Tx FCoE pause */
102 u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
103};
104
105/**
106 * @brief
107 * Port statistics.
108 */
109union bfa_port_stats_u {
110 struct bfa_port_fc_stats fc;
111 struct bfa_port_eth_stats eth;
112};
113
114#pragma pack(1)
115
116#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
117#define BFA_CEE_DCBX_MAX_PRIORITY (8)
118#define BFA_CEE_DCBX_MAX_PGID (8)
119
120#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
121#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
122#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
123#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
124#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
125#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
126#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
127#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
128#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
129#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
130#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
131
132/* LLDP string type */
133struct bfa_cee_lldp_str {
134 u8 sub_type;
135 u8 len;
136 u8 rsvd[2];
137 u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
138};
139
 140/* LLDP parameters */
141struct bfa_cee_lldp_cfg {
142 struct bfa_cee_lldp_str chassis_id;
143 struct bfa_cee_lldp_str port_id;
144 struct bfa_cee_lldp_str port_desc;
145 struct bfa_cee_lldp_str sys_name;
146 struct bfa_cee_lldp_str sys_desc;
147 struct bfa_cee_lldp_str mgmt_addr;
148 u16 time_to_live;
149 u16 enabled_system_cap;
150};
151
152enum bfa_cee_dcbx_version {
153 DCBX_PROTOCOL_PRECEE = 1,
154 DCBX_PROTOCOL_CEE = 2,
155};
156
157enum bfa_cee_lls {
 158	/* LLS is down because the TLV is not sent by the peer */
159 CEE_LLS_DOWN_NO_TLV = 0,
160 /* LLS is down as advertised by the peer */
161 CEE_LLS_DOWN = 1,
162 CEE_LLS_UP = 2,
163};
164
165/* CEE/DCBX parameters */
166struct bfa_cee_dcbx_cfg {
167 u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
168 u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
 169	u8 pfc_primap; /* bitmap of priorities with PFC enabled */
 170	u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
 171	u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
 172	u8 dcbx_version; /* operating version: CEE or preCEE */
173 u8 lls_fcoe; /* FCoE Logical Link Status */
174 u8 lls_lan; /* LAN Logical Link Status */
175 u8 rsvd[2];
176};
177
178/* CEE status */
 179/* Made tri-state for the benefit of the port list command */
180enum bfa_cee_status {
181 CEE_UP = 0,
182 CEE_PHY_UP = 1,
183 CEE_LOOPBACK = 2,
184 CEE_PHY_DOWN = 3,
185};
186
187/* CEE Query */
188struct bfa_cee_attr {
189 u8 cee_status;
190 u8 error_reason;
191 struct bfa_cee_lldp_cfg lldp_remote;
192 struct bfa_cee_dcbx_cfg dcbx_remote;
193 mac_t src_mac;
194 u8 link_speed;
195 u8 nw_priority;
196 u8 filler[2];
197};
198
199/* LLDP/DCBX/CEE Statistics */
200struct bfa_cee_stats {
201 u32 lldp_tx_frames; /*!< LLDP Tx Frames */
202 u32 lldp_rx_frames; /*!< LLDP Rx Frames */
203 u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */
204 u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */
205 u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */
206 u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */
207 u32 lldp_info_aged_out; /*!< LLDP remote info aged out */
208 u32 dcbx_phylink_ups; /*!< DCBX phy link ups */
209 u32 dcbx_phylink_downs; /*!< DCBX phy link downs */
210 u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */
211 u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */
212 u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */
213 u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */
214 u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */
215 u32 cee_status_down; /*!< CEE status down */
216 u32 cee_status_up; /*!< CEE status up */
217 u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
218 u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
219};
220
221#pragma pack()
222
223#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
new file mode 100644
index 000000000000..987978fcb3fe
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -0,0 +1,244 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFA_DEFS_MFG_COMM_H__
19#define __BFA_DEFS_MFG_COMM_H__
20
21#include "cna.h"
22
23/**
24 * Manufacturing block version
25 */
26#define BFA_MFG_VERSION 2
27#define BFA_MFG_VERSION_UNINIT 0xFF
28
29/**
30 * Manufacturing block encrypted version
31 */
32#define BFA_MFG_ENC_VER 2
33
34/**
35 * Manufacturing block version 1 length
36 */
37#define BFA_MFG_VER1_LEN 128
38
39/**
40 * Manufacturing block header length
41 */
42#define BFA_MFG_HDR_LEN 4
43
44#define BFA_MFG_SERIALNUM_SIZE 11
45#define STRSZ(_n) (((_n) + 4) & ~3)
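Note: STRSZ() reserves room for the string plus a NUL terminator, rounded up to a 4-byte boundary: STRSZ(11) = (11 + 4) & ~3 = 12, so an 11-character serial number plus its terminator fits exactly. A quick check in plain C:

	#define STRSZ(_n) (((_n) + 4) & ~3)
	/* STRSZ(11) == 12, STRSZ(12) == 16, STRSZ(14) == 16 */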
46
47/**
48 * Manufacturing card type
49 */
50enum {
51 BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
52 BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
53 BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
54 BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
55 BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
56 BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
57 BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
58 BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
59 BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
60 BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
61 BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
62 BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
63 BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
64};
65
66#pragma pack(1)
67
68/**
69 * Check if 1-port card
70 */
71#define bfa_mfg_is_1port(type) (( \
72 (type) == BFA_MFG_TYPE_FC8P1 || \
73 (type) == BFA_MFG_TYPE_FC4P1 || \
74 (type) == BFA_MFG_TYPE_CNA10P1))
75
76/**
77 * Check if Mezz card
78 */
79#define bfa_mfg_is_mezz(type) (( \
80 (type) == BFA_MFG_TYPE_JAYHAWK || \
81 (type) == BFA_MFG_TYPE_WANCHESE || \
82 (type) == BFA_MFG_TYPE_ASTRA || \
83 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
84 (type) == BFA_MFG_TYPE_LIGHTNING))
85
86/**
87 * Check if card type valid
88 */
89#define bfa_mfg_is_card_type_valid(type) (( \
90 (type) == BFA_MFG_TYPE_FC8P2 || \
91 (type) == BFA_MFG_TYPE_FC8P1 || \
92 (type) == BFA_MFG_TYPE_FC4P2 || \
93 (type) == BFA_MFG_TYPE_FC4P1 || \
94 (type) == BFA_MFG_TYPE_CNA10P2 || \
95 (type) == BFA_MFG_TYPE_CNA10P1 || \
96 bfa_mfg_is_mezz(type)))
97
98/**
99 * Check if the card having old wwn/mac handling
100 */
101#define bfa_mfg_is_old_wwn_mac_model(type) (( \
102 (type) == BFA_MFG_TYPE_FC8P2 || \
103 (type) == BFA_MFG_TYPE_FC8P1 || \
104 (type) == BFA_MFG_TYPE_FC4P2 || \
105 (type) == BFA_MFG_TYPE_FC4P1 || \
106 (type) == BFA_MFG_TYPE_CNA10P2 || \
107 (type) == BFA_MFG_TYPE_CNA10P1 || \
108 (type) == BFA_MFG_TYPE_JAYHAWK || \
109 (type) == BFA_MFG_TYPE_WANCHESE))
110
111#define bfa_mfg_increment_wwn_mac(m, i) \
112do { \
113 u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \
114 t += (i); \
115 (m)[0] = (t >> 16) & 0xFF; \
116 (m)[1] = (t >> 8) & 0xFF; \
117 (m)[2] = t & 0xFF; \
118} while (0)
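Note: unlike the be2net helper earlier in this patch, this macro treats the last three octets as one 24-bit counter, so a carry ripples across octets instead of wrapping a single byte. A standalone worked example (values chosen to show the carry):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t m[3] = { 0x00, 0x00, 0xFF };
		uint32_t t = (m[0] << 16) | (m[1] << 8) | m[2];

		t += 1;			/* 0x0000FF -> 0x000100 */
		m[0] = (t >> 16) & 0xFF;
		m[1] = (t >> 8) & 0xFF;
		m[2] = t & 0xFF;
		printf("%02x:%02x:%02x\n", m[0], m[1], m[2]);	/* 00:01:00 */
		return 0;
	}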
119
120#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
121do { \
122 switch ((card_type)) { \
123 case BFA_MFG_TYPE_FC8P2: \
124 case BFA_MFG_TYPE_JAYHAWK: \
125 case BFA_MFG_TYPE_ASTRA: \
126 (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
127 BFI_ADAPTER_SETP(SPEED, 8); \
128 break; \
129 case BFA_MFG_TYPE_FC8P1: \
130 (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
131 BFI_ADAPTER_SETP(SPEED, 8); \
132 break; \
133 case BFA_MFG_TYPE_FC4P2: \
134 (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
135 BFI_ADAPTER_SETP(SPEED, 4); \
136 break; \
137 case BFA_MFG_TYPE_FC4P1: \
138 (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
139 BFI_ADAPTER_SETP(SPEED, 4); \
140 break; \
141 case BFA_MFG_TYPE_CNA10P2: \
142 case BFA_MFG_TYPE_WANCHESE: \
143 case BFA_MFG_TYPE_LIGHTNING_P0: \
144 case BFA_MFG_TYPE_LIGHTNING: \
145 (prop) = BFI_ADAPTER_SETP(NPORTS, 2); \
146 (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
147 break; \
148 case BFA_MFG_TYPE_CNA10P1: \
149 (prop) = BFI_ADAPTER_SETP(NPORTS, 1); \
150 (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
151 break; \
152 default: \
153 (prop) = BFI_ADAPTER_UNSUPP; \
154 } \
155} while (0)
156
157enum {
158 CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
159 CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
160 CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
161 CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
162 CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
163 CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
164 CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
165};
166
167#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
168do { \
169 if ((gpio) & CB_GPIO_PROTO) { \
170 (prop) |= BFI_ADAPTER_PROTO; \
171 (gpio) &= ~CB_GPIO_PROTO; \
172 } \
173 switch ((gpio)) { \
174 case CB_GPIO_TTV: \
175 (prop) |= BFI_ADAPTER_TTV; \
176 case CB_GPIO_DFLY: \
177 case CB_GPIO_FC8P2: \
178 (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
179 (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
180 (card_type) = BFA_MFG_TYPE_FC8P2; \
181 break; \
182 case CB_GPIO_FC8P1: \
183 (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
184 (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
185 (card_type) = BFA_MFG_TYPE_FC8P1; \
186 break; \
187 case CB_GPIO_FC4P2: \
188 (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
189 (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
190 (card_type) = BFA_MFG_TYPE_FC4P2; \
191 break; \
192 case CB_GPIO_FC4P1: \
193 (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
194 (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
195 (card_type) = BFA_MFG_TYPE_FC4P1; \
196 break; \
197 default: \
198 (prop) |= BFI_ADAPTER_UNSUPP; \
199 (card_type) = BFA_MFG_TYPE_INVALID; \
200 } \
201} while (0)
202
203/**
204 * VPD data length
205 */
206#define BFA_MFG_VPD_LEN 512
207#define BFA_MFG_VPD_LEN_INVALID 0
208
209#define BFA_MFG_VPD_PCI_HDR_OFF 137
210#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
211#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
212
213/**
214 * VPD vendor tag
215 */
216enum {
217 BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
218 BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
219 BFA_MFG_VPD_HP = 2, /*!< vendor HP */
220 BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
221 BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
222 BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
223 BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
224 BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
225};
226
227/**
228 * @brief BFA adapter flash vpd data definition.
229 *
230 * All numerical fields are in big-endian format.
231 */
232struct bfa_mfg_vpd {
233 u8 version; /*!< vpd data version */
234 u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
235 u8 chksum; /*!< u8 checksum */
236 u8 vendor; /*!< vendor */
237 u8 len; /*!< vpd data length excluding header */
238 u8 rsv;
239 u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
240};
241
242#pragma pack()
243
 244#endif /* __BFA_DEFS_MFG_COMM_H__ */
diff --git a/drivers/net/bna/bfa_defs_status.h b/drivers/net/bna/bfa_defs_status.h
new file mode 100644
index 000000000000..af951126375c
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_status.h
@@ -0,0 +1,216 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFA_DEFS_STATUS_H__
19#define __BFA_DEFS_STATUS_H__
20
21/**
22 * API status return values
23 *
 24 * NOTE: The error msgs are auto-generated from the comments. Only single-line
 25 * comments are supported.
26 */
27enum bfa_status {
28 BFA_STATUS_OK = 0,
29 BFA_STATUS_FAILED = 1,
30 BFA_STATUS_EINVAL = 2,
31 BFA_STATUS_ENOMEM = 3,
32 BFA_STATUS_ENOSYS = 4,
33 BFA_STATUS_ETIMER = 5,
34 BFA_STATUS_EPROTOCOL = 6,
35 BFA_STATUS_ENOFCPORTS = 7,
36 BFA_STATUS_NOFLASH = 8,
37 BFA_STATUS_BADFLASH = 9,
38 BFA_STATUS_SFP_UNSUPP = 10,
39 BFA_STATUS_UNKNOWN_VFID = 11,
40 BFA_STATUS_DATACORRUPTED = 12,
41 BFA_STATUS_DEVBUSY = 13,
42 BFA_STATUS_ABORTED = 14,
43 BFA_STATUS_NODEV = 15,
44 BFA_STATUS_HDMA_FAILED = 16,
45 BFA_STATUS_FLASH_BAD_LEN = 17,
46 BFA_STATUS_UNKNOWN_LWWN = 18,
47 BFA_STATUS_UNKNOWN_RWWN = 19,
48 BFA_STATUS_FCPT_LS_RJT = 20,
49 BFA_STATUS_VPORT_EXISTS = 21,
50 BFA_STATUS_VPORT_MAX = 22,
51 BFA_STATUS_UNSUPP_SPEED = 23,
52 BFA_STATUS_INVLD_DFSZ = 24,
53 BFA_STATUS_CNFG_FAILED = 25,
54 BFA_STATUS_CMD_NOTSUPP = 26,
55 BFA_STATUS_NO_ADAPTER = 27,
56 BFA_STATUS_LINKDOWN = 28,
57 BFA_STATUS_FABRIC_RJT = 29,
58 BFA_STATUS_UNKNOWN_VWWN = 30,
59 BFA_STATUS_NSLOGIN_FAILED = 31,
60 BFA_STATUS_NO_RPORTS = 32,
61 BFA_STATUS_NSQUERY_FAILED = 33,
62 BFA_STATUS_PORT_OFFLINE = 34,
63 BFA_STATUS_RPORT_OFFLINE = 35,
64 BFA_STATUS_TGTOPEN_FAILED = 36,
65 BFA_STATUS_BAD_LUNS = 37,
66 BFA_STATUS_IO_FAILURE = 38,
67 BFA_STATUS_NO_FABRIC = 39,
68 BFA_STATUS_EBADF = 40,
69 BFA_STATUS_EINTR = 41,
70 BFA_STATUS_EIO = 42,
71 BFA_STATUS_ENOTTY = 43,
72 BFA_STATUS_ENXIO = 44,
73 BFA_STATUS_EFOPEN = 45,
74 BFA_STATUS_VPORT_WWN_BP = 46,
75 BFA_STATUS_PORT_NOT_DISABLED = 47,
76 BFA_STATUS_BADFRMHDR = 48,
77 BFA_STATUS_BADFRMSZ = 49,
78 BFA_STATUS_MISSINGFRM = 50,
79 BFA_STATUS_LINKTIMEOUT = 51,
80 BFA_STATUS_NO_FCPIM_NEXUS = 52,
81 BFA_STATUS_CHECKSUM_FAIL = 53,
82 BFA_STATUS_GZME_FAILED = 54,
83 BFA_STATUS_SCSISTART_REQD = 55,
84 BFA_STATUS_IOC_FAILURE = 56,
85 BFA_STATUS_INVALID_WWN = 57,
86 BFA_STATUS_MISMATCH = 58,
87 BFA_STATUS_IOC_ENABLED = 59,
88 BFA_STATUS_ADAPTER_ENABLED = 60,
89 BFA_STATUS_IOC_NON_OP = 61,
90 BFA_STATUS_ADDR_MAP_FAILURE = 62,
91 BFA_STATUS_SAME_NAME = 63,
92 BFA_STATUS_PENDING = 64,
93 BFA_STATUS_8G_SPD = 65,
94 BFA_STATUS_4G_SPD = 66,
95 BFA_STATUS_AD_IS_ENABLE = 67,
96 BFA_STATUS_EINVAL_TOV = 68,
97 BFA_STATUS_EINVAL_QDEPTH = 69,
98 BFA_STATUS_VERSION_FAIL = 70,
99 BFA_STATUS_DIAG_BUSY = 71,
100 BFA_STATUS_BEACON_ON = 72,
101 BFA_STATUS_BEACON_OFF = 73,
102 BFA_STATUS_LBEACON_ON = 74,
103 BFA_STATUS_LBEACON_OFF = 75,
104 BFA_STATUS_PORT_NOT_INITED = 76,
105 BFA_STATUS_RPSC_ENABLED = 77,
106 BFA_STATUS_ENOFSAVE = 78,
107 BFA_STATUS_BAD_FILE = 79,
108 BFA_STATUS_RLIM_EN = 80,
109 BFA_STATUS_RLIM_DIS = 81,
110 BFA_STATUS_IOC_DISABLED = 82,
111 BFA_STATUS_ADAPTER_DISABLED = 83,
112 BFA_STATUS_BIOS_DISABLED = 84,
113 BFA_STATUS_AUTH_ENABLED = 85,
114 BFA_STATUS_AUTH_DISABLED = 86,
115 BFA_STATUS_ERROR_TRL_ENABLED = 87,
116 BFA_STATUS_ERROR_QOS_ENABLED = 88,
117 BFA_STATUS_NO_SFP_DEV = 89,
118 BFA_STATUS_MEMTEST_FAILED = 90,
119 BFA_STATUS_INVALID_DEVID = 91,
120 BFA_STATUS_QOS_ENABLED = 92,
121 BFA_STATUS_QOS_DISABLED = 93,
122 BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
123 BFA_STATUS_REG_FAIL = 95,
124 BFA_STATUS_IM_INV_CODE = 96,
125 BFA_STATUS_IM_INV_VLAN = 97,
126 BFA_STATUS_IM_INV_ADAPT_NAME = 98,
127 BFA_STATUS_IM_LOW_RESOURCES = 99,
128 BFA_STATUS_IM_VLANID_IS_PVID = 100,
129 BFA_STATUS_IM_VLANID_EXISTS = 101,
130 BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
131 BFA_STATUS_PORTLOG_ENABLED = 103,
132 BFA_STATUS_PORTLOG_DISABLED = 104,
133 BFA_STATUS_FILE_NOT_FOUND = 105,
134 BFA_STATUS_QOS_FC_ONLY = 106,
135 BFA_STATUS_RLIM_FC_ONLY = 107,
136 BFA_STATUS_CT_SPD = 108,
137 BFA_STATUS_LEDTEST_OP = 109,
138 BFA_STATUS_CEE_NOT_DN = 110,
139 BFA_STATUS_10G_SPD = 111,
140 BFA_STATUS_IM_INV_TEAM_NAME = 112,
141 BFA_STATUS_IM_DUP_TEAM_NAME = 113,
142 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
143 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
144 BFA_STATUS_IM_PVID_MISMATCH = 116,
145 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
146 BFA_STATUS_IM_MTU_MISMATCH = 118,
147 BFA_STATUS_IM_RSS_MISMATCH = 119,
148 BFA_STATUS_IM_HDS_MISMATCH = 120,
149 BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
150 BFA_STATUS_IM_PORT_PARAMS = 122,
151 BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
152 BFA_STATUS_IM_CANNOT_REM_PRI = 124,
153 BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
154 BFA_STATUS_IM_LAST_PORT_DELETE = 126,
155 BFA_STATUS_IM_NO_DRIVER = 127,
156 BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
157 BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
158 BFA_STATUS_NO_MINPORT_DRIVER = 130,
159 BFA_STATUS_CARD_TYPE_MISMATCH = 131,
160 BFA_STATUS_BAD_ASICBLK = 132,
161 BFA_STATUS_NO_DRIVER = 133,
162 BFA_STATUS_INVALID_MAC = 134,
163 BFA_STATUS_IM_NO_VLAN = 135,
164 BFA_STATUS_IM_ETH_LB_FAILED = 136,
165 BFA_STATUS_IM_PVID_REMOVE = 137,
166 BFA_STATUS_IM_PVID_EDIT = 138,
167 BFA_STATUS_CNA_NO_BOOT = 139,
168 BFA_STATUS_IM_PVID_NON_ZERO = 140,
169 BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
170 BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
171 BFA_STATUS_IM_NOT_BOUND = 143,
172 BFA_STATUS_INSUFFICIENT_PERMS = 144,
173 BFA_STATUS_IM_INV_VLAN_NAME = 145,
174 BFA_STATUS_CMD_NOTSUPP_CNA = 146,
175 BFA_STATUS_IM_PASSTHRU_EDIT = 147,
176 BFA_STATUS_IM_BIND_FAILED = 148,
177 BFA_STATUS_IM_UNBIND_FAILED = 149,
178 BFA_STATUS_IM_PORT_IN_TEAM = 150,
179 BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
180 BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
181 BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
182 BFA_STATUS_PBC = 154,
183 BFA_STATUS_DEVID_MISSING = 155,
184 BFA_STATUS_BAD_FWCFG = 156,
185 BFA_STATUS_CREATE_FILE = 157,
186 BFA_STATUS_INVALID_VENDOR = 158,
187 BFA_STATUS_SFP_NOT_READY = 159,
188 BFA_STATUS_FLASH_UNINIT = 160,
189 BFA_STATUS_FLASH_EMPTY = 161,
190 BFA_STATUS_FLASH_CKFAIL = 162,
191 BFA_STATUS_TRUNK_UNSUPP = 163,
192 BFA_STATUS_TRUNK_ENABLED = 164,
193 BFA_STATUS_TRUNK_DISABLED = 165,
194 BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
195 BFA_STATUS_BOOT_CODE_UPDATED = 167,
196 BFA_STATUS_BOOT_VERSION = 168,
197 BFA_STATUS_CARDTYPE_MISSING = 169,
198 BFA_STATUS_INVALID_CARDTYPE = 170,
199 BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
200 BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
201 BFA_STATUS_ETHBOOT_ENABLED = 173,
202 BFA_STATUS_ETHBOOT_DISABLED = 174,
203 BFA_STATUS_IOPROFILE_OFF = 175,
204 BFA_STATUS_NO_PORT_INSTANCE = 176,
205 BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
206 BFA_STATUS_NO_VPORT_LOCK = 178,
207 BFA_STATUS_VPORT_NO_CNFG = 179,
208 BFA_STATUS_MAX_VAL
209};
210
211enum bfa_eproto_status {
212 BFA_EPROTO_BAD_ACCEPT = 0,
213 BFA_EPROTO_UNKNOWN_RSP = 1
214};
215
216#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
new file mode 100644
index 000000000000..caa45c2185e9
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc.c
@@ -0,0 +1,1738 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_ioc.h"
20#include "cna.h"
21#include "bfi.h"
22#include "bfi_ctreg.h"
23#include "bfa_defs.h"
24
25/**
26 * IOC local definitions
27 */
28
29#define bfa_ioc_timer_start(__ioc) \
30 mod_timer(&(__ioc)->ioc_timer, jiffies + \
31 msecs_to_jiffies(BFA_IOC_TOV))
32#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
33
34#define bfa_ioc_recovery_timer_start(__ioc) \
35 mod_timer(&(__ioc)->ioc_timer, jiffies + \
36 msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
37
38#define bfa_sem_timer_start(__ioc) \
39 mod_timer(&(__ioc)->sem_timer, jiffies + \
40 msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
41#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
42
43#define bfa_hb_timer_start(__ioc) \
44 mod_timer(&(__ioc)->hb_timer, jiffies + \
45 msecs_to_jiffies(BFA_IOC_HB_TOV))
46#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
47
48/**
49 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
50 */
51
52#define bfa_ioc_firmware_lock(__ioc) \
53 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
54#define bfa_ioc_firmware_unlock(__ioc) \
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58#define bfa_ioc_notify_hbfail(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
60
61#define bfa_ioc_is_optrom(__ioc) \
62 (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
63
64#define bfa_ioc_mbox_cmd_pending(__ioc) \
65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
66 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
67
68bool bfa_nw_auto_recover = true;
69
70/*
71 * forward declarations
72 */
73static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
74static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
75static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
76static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
77static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
78static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
79static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
80static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
81static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
82static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
83static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
84static void bfa_ioc_recover(struct bfa_ioc *ioc);
85static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
86static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
87static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
88static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
89 u32 boot_param);
90static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
91static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
92static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
93 char *serial_num);
94static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
95 char *fw_ver);
96static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
97 char *chip_rev);
98static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
99 char *optrom_ver);
100static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
101 char *manufacturer);
102static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
103static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
104static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
105
106/**
107 * IOC state machine events
108 */
109enum ioc_event {
110 IOC_E_ENABLE = 1, /*!< IOC enable request */
111 IOC_E_DISABLE = 2, /*!< IOC disable request */
112 IOC_E_TIMEOUT = 3, /*!< f/w response timeout */
113 IOC_E_FWREADY = 4, /*!< f/w initialization done */
114 IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */
115 IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */
116 IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */
117 IOC_E_HBFAIL = 8, /*!< heartbeat failure */
118 IOC_E_HWERROR = 9, /*!< hardware error interrupt */
119 IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
120 IOC_E_DETACH = 11, /*!< driver detach cleanup */
121};
122
123bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
124bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
125bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
126bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
127bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
128bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
129bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
130bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
131bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
132bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
133bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
134bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
135
136static struct bfa_sm_table ioc_sm_table[] = {
137 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
138 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
139 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
140 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
141 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
142 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
143 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
144 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
145 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
146 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
147 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
148 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
149};
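Note: ioc_sm_table pairs each state-handler function with the bfa_ioc_state value reported to management code; state queries presumably walk the table looking for the current handler. A hedged sketch of that lookup (struct bfa_sm_table and bfa_sm_t come from bfa_sm.h, a separate file in this patch; the field names sm and state are inferred from the initializers above):

	static enum bfa_ioc_state
	ioc_sm_to_state(struct bfa_sm_table *tbl, bfa_sm_t sm)
	{
		int i = 0;

		while (tbl[i].sm && tbl[i].sm != sm)
			i++;
		return tbl[i].state;	/* assumes the handler is in the table */
	}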
150
151/**
152 * Reset entry actions -- initialize state machine
153 */
154static void
155bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
156{
157 ioc->retry_count = 0;
158 ioc->auto_recover = bfa_nw_auto_recover;
159}
160
161/**
162 * Beginning state. IOC is in reset state.
163 */
164static void
165bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
166{
167 switch (event) {
168 case IOC_E_ENABLE:
169 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
170 break;
171
172 case IOC_E_DISABLE:
173 bfa_ioc_disable_comp(ioc);
174 break;
175
176 case IOC_E_DETACH:
177 break;
178
179 default:
180 bfa_sm_fault(ioc, event);
181 }
182}
183
184/**
185 * Semaphore should be acquired for version check.
186 */
187static void
188bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
189{
190 bfa_ioc_hw_sem_get(ioc);
191}
192
193/**
194 * Awaiting h/w semaphore to continue with version check.
195 */
196static void
197bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
198{
199 switch (event) {
200 case IOC_E_SEMLOCKED:
201 if (bfa_ioc_firmware_lock(ioc)) {
202 ioc->retry_count = 0;
203 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
204 } else {
205 bfa_nw_ioc_hw_sem_release(ioc);
206 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
207 }
208 break;
209
210 case IOC_E_DISABLE:
211 bfa_ioc_disable_comp(ioc);
212 /* fall through */
213
214 case IOC_E_DETACH:
215 bfa_ioc_hw_sem_get_cancel(ioc);
216 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
217 break;
218
219 case IOC_E_FWREADY:
220 break;
221
222 default:
223 bfa_sm_fault(ioc, event);
224 }
225}
226
227/**
228 * Notify enable completion callback and generate mismatch AEN.
229 */
230static void
231bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
232{
233 /**
234 * Provide enable completion callback and AEN notification only once.
235 */
236 if (ioc->retry_count == 0)
237 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
238 ioc->retry_count++;
239 bfa_ioc_timer_start(ioc);
240}
241
242/**
243 * Awaiting firmware version match.
244 */
245static void
246bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
247{
248 switch (event) {
249 case IOC_E_TIMEOUT:
250 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
251 break;
252
253 case IOC_E_DISABLE:
254 bfa_ioc_disable_comp(ioc);
255 /* fall through */
256
257 case IOC_E_DETACH:
258 bfa_ioc_timer_stop(ioc);
259 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
260 break;
261
262 case IOC_E_FWREADY:
263 break;
264
265 default:
266 bfa_sm_fault(ioc, event);
267 }
268}
269
270/**
271 * Request for semaphore.
272 */
273static void
274bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
275{
276 bfa_ioc_hw_sem_get(ioc);
277}
278
279/**
280 * Awaiting semaphore for h/w initialization.
281 */
282static void
283bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
284{
285 switch (event) {
286 case IOC_E_SEMLOCKED:
287 ioc->retry_count = 0;
288 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
289 break;
290
291 case IOC_E_DISABLE:
292 bfa_ioc_hw_sem_get_cancel(ioc);
293 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
294 break;
295
296 default:
297 bfa_sm_fault(ioc, event);
298 }
299}
300
301static void
302bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
303{
304 bfa_ioc_timer_start(ioc);
305 bfa_ioc_reset(ioc, false);
306}
307
308/**
309 * @brief
310 * Hardware is being initialized. Interrupts are enabled.
311 * Holding hardware semaphore lock.
312 */
313static void
314bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
315{
316 switch (event) {
317 case IOC_E_FWREADY:
318 bfa_ioc_timer_stop(ioc);
319 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
320 break;
321
322 case IOC_E_HWERROR:
323 bfa_ioc_timer_stop(ioc);
324 /* fall through */
325
326 case IOC_E_TIMEOUT:
327 ioc->retry_count++;
328 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
329 bfa_ioc_timer_start(ioc);
330 bfa_ioc_reset(ioc, true);
331 break;
332 }
333
334 bfa_nw_ioc_hw_sem_release(ioc);
335 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
336 break;
337
338 case IOC_E_DISABLE:
339 bfa_nw_ioc_hw_sem_release(ioc);
340 bfa_ioc_timer_stop(ioc);
341 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
342 break;
343
344 default:
345 bfa_sm_fault(ioc, event);
346 }
347}
348
349static void
350bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
351{
352 bfa_ioc_timer_start(ioc);
353 bfa_ioc_send_enable(ioc);
354}
355
356/**
357 * Host IOC function is being enabled, awaiting response from firmware.
358 * Semaphore is acquired.
359 */
360static void
361bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
362{
363 switch (event) {
364 case IOC_E_FWRSP_ENABLE:
365 bfa_ioc_timer_stop(ioc);
366 bfa_nw_ioc_hw_sem_release(ioc);
367 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
368 break;
369
370 case IOC_E_HWERROR:
371 bfa_ioc_timer_stop(ioc);
372 /* fall through */
373
374 case IOC_E_TIMEOUT:
375 ioc->retry_count++;
376 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
377 writel(BFI_IOC_UNINIT,
378 ioc->ioc_regs.ioc_fwstate);
379 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
380 break;
381 }
382
383 bfa_nw_ioc_hw_sem_release(ioc);
384 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
385 break;
386
387 case IOC_E_DISABLE:
388 bfa_ioc_timer_stop(ioc);
389 bfa_nw_ioc_hw_sem_release(ioc);
390 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
391 break;
392
393 case IOC_E_FWREADY:
394 bfa_ioc_send_enable(ioc);
395 break;
396
397 default:
398 bfa_sm_fault(ioc, event);
399 }
400}
401
402static void
403bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
404{
405 bfa_ioc_timer_start(ioc);
406 bfa_ioc_send_getattr(ioc);
407}
408
409/**
410 * @brief
411 * IOC configuration in progress. Timer is active.
412 */
413static void
414bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
415{
416 switch (event) {
417 case IOC_E_FWRSP_GETATTR:
418 bfa_ioc_timer_stop(ioc);
419 bfa_ioc_check_attr_wwns(ioc);
420 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
421 break;
422
423 case IOC_E_HWERROR:
424 bfa_ioc_timer_stop(ioc);
425 /* fall through */
426
427 case IOC_E_TIMEOUT:
428 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
429 break;
430
431 case IOC_E_DISABLE:
432 bfa_ioc_timer_stop(ioc);
433 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
434 break;
435
436 default:
437 bfa_sm_fault(ioc, event);
438 }
439}
440
441static void
442bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
443{
444 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
445 bfa_ioc_hb_monitor(ioc);
446}
447
448static void
449bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
450{
451 switch (event) {
452 case IOC_E_ENABLE:
453 break;
454
455 case IOC_E_DISABLE:
456 bfa_ioc_hb_stop(ioc);
457 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
458 break;
459
460 case IOC_E_HWERROR:
461 case IOC_E_FWREADY:
462 /**
463 * Hard error or IOC recovery by other function.
464 * Treat it same as heartbeat failure.
465 */
466 bfa_ioc_hb_stop(ioc);
467 /* !!! fall through !!! */
468
469 case IOC_E_HBFAIL:
470 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
471 break;
472
473 default:
474 bfa_sm_fault(ioc, event);
475 }
476}
477
478static void
479bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
480{
481 bfa_ioc_timer_start(ioc);
482 bfa_ioc_send_disable(ioc);
483}
484
485/**
486 * IOC is being disabled
487 */
488static void
489bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
490{
491 switch (event) {
492 case IOC_E_FWRSP_DISABLE:
493 bfa_ioc_timer_stop(ioc);
494 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
495 break;
496
497 case IOC_E_HWERROR:
498 bfa_ioc_timer_stop(ioc);
499 /*
500 * !!! fall through !!!
501 */
502
503 case IOC_E_TIMEOUT:
504 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
505 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
506 break;
507
508 default:
509 bfa_sm_fault(ioc, event);
510 }
511}
512
513/**
514 * IOC disable completion entry.
515 */
516static void
517bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
518{
519 bfa_ioc_disable_comp(ioc);
520}
521
522static void
523bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
524{
525 switch (event) {
526 case IOC_E_ENABLE:
527 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
528 break;
529
530 case IOC_E_DISABLE:
531 ioc->cbfn->disable_cbfn(ioc->bfa);
532 break;
533
534 case IOC_E_FWREADY:
535 break;
536
537 case IOC_E_DETACH:
538 bfa_ioc_firmware_unlock(ioc);
539 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
540 break;
541
542 default:
543 bfa_sm_fault(ioc, event);
544 }
545}
546
547static void
548bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
549{
550 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
551 bfa_ioc_timer_start(ioc);
552}
553
554/**
555 * @brief
556 * Hardware initialization failed.
557 */
558static void
559bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
560{
561 switch (event) {
562 case IOC_E_DISABLE:
563 bfa_ioc_timer_stop(ioc);
564 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
565 break;
566
567 case IOC_E_DETACH:
568 bfa_ioc_timer_stop(ioc);
569 bfa_ioc_firmware_unlock(ioc);
570 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
571 break;
572
573 case IOC_E_TIMEOUT:
574 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
575 break;
576
577 default:
578 bfa_sm_fault(ioc, event);
579 }
580}
581
582static void
583bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
584{
585 struct list_head *qe;
586 struct bfa_ioc_hbfail_notify *notify;
587
588 /**
589 * Mark IOC as failed in hardware and stop firmware.
590 */
591 bfa_ioc_lpu_stop(ioc);
592 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
593
594 /**
595 * Notify other functions on HB failure.
596 */
597 bfa_ioc_notify_hbfail(ioc);
598
599 /**
600 * Notify driver and common modules registered for notification.
601 */
602 ioc->cbfn->hbfail_cbfn(ioc->bfa);
603 list_for_each(qe, &ioc->hb_notify_q) {
604 notify = (struct bfa_ioc_hbfail_notify *) qe;
605 notify->cbfn(notify->cbarg);
606 }
607
608 /**
609 * Flush any queued up mailbox requests.
610 */
611 bfa_ioc_mbox_hbfail(ioc);
612
613 /**
614 * Trigger auto-recovery after a delay.
615 */
616 if (ioc->auto_recover)
617 mod_timer(&ioc->ioc_timer, jiffies +
618 msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
619}
620
621/**
622 * @brief
623 * IOC heartbeat failure.
624 */
625static void
626bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
627{
628 switch (event) {
629
630 case IOC_E_ENABLE:
631 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
632 break;
633
634 case IOC_E_DISABLE:
635 if (ioc->auto_recover)
636 bfa_ioc_timer_stop(ioc);
637 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
638 break;
639
640 case IOC_E_TIMEOUT:
641 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
642 break;
643
644 case IOC_E_FWREADY:
645 /**
646 * Recovery is already initiated by other function.
647 */
648 break;
649
650 case IOC_E_HWERROR:
651 /*
652 * HB failure notification, ignore.
653 */
654 break;
655 default:
656 bfa_sm_fault(ioc, event);
657 }
658}
659
660/**
661 * BFA IOC private functions
662 */
663
664static void
665bfa_ioc_disable_comp(struct bfa_ioc *ioc)
666{
667 struct list_head *qe;
668 struct bfa_ioc_hbfail_notify *notify;
669
670 ioc->cbfn->disable_cbfn(ioc->bfa);
671
672 /**
673 * Notify common modules registered for notification.
674 */
675 list_for_each(qe, &ioc->hb_notify_q) {
676 notify = (struct bfa_ioc_hbfail_notify *) qe;
677 notify->cbfn(notify->cbarg);
678 }
679}
680
681void
682bfa_nw_ioc_sem_timeout(void *ioc_arg)
683{
684 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
685
686 bfa_ioc_hw_sem_get(ioc);
687}
688
689bool
690bfa_nw_ioc_sem_get(void __iomem *sem_reg)
691{
692 u32 r32;
693 int cnt = 0;
694#define BFA_SEM_SPINCNT 3000
695
696 r32 = readl(sem_reg);
697
698 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
699 cnt++;
700 udelay(2);
701 r32 = readl(sem_reg);
702 }
703
704 if (r32 == 0)
705 return true;
706
707 BUG_ON(!(cnt < BFA_SEM_SPINCNT));
708 return false;
709}
710
711void
712bfa_nw_ioc_sem_release(void __iomem *sem_reg)
713{
714 writel(1, sem_reg);
715}
716
717static void
718bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
719{
720 u32 r32;
721
722 /**
723 * First read to the semaphore register will return 0, subsequent reads
724 * will return 1. Semaphore is released by writing 1 to the register
725 */
726 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
727 if (r32 == 0) {
728 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
729 return;
730 }
731
732 mod_timer(&ioc->sem_timer, jiffies +
733 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
734}
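
/*
 * A minimal sketch of the semaphore protocol described above (illustrative
 * only; the example_* names are hypothetical). A read returning 0 acquires
 * the lock, a read returning 1 means it is already held, and writing 1
 * releases it:
 */
static bool example_sem_try_lock(void __iomem *sem_reg)
{
	return readl(sem_reg) == 0;	/* 0 => this reader now owns it */
}

static void example_sem_unlock(void __iomem *sem_reg)
{
	writel(1, sem_reg);		/* release for the next contender */
}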
735
736void
737bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
738{
739 writel(1, ioc->ioc_regs.ioc_sem_reg);
740}
741
742static void
743bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
744{
745 del_timer(&ioc->sem_timer);
746}
747
748/**
749 * @brief
750 * Initialize LPU local memory (aka secondary memory / SRAM)
751 */
752static void
753bfa_ioc_lmem_init(struct bfa_ioc *ioc)
754{
755 u32 pss_ctl;
756 int i;
757#define PSS_LMEM_INIT_TIME 10000
758
759 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
760 pss_ctl &= ~__PSS_LMEM_RESET;
761 pss_ctl |= __PSS_LMEM_INIT_EN;
762
763 /*
764	 * I2C workaround: 12.5 kHz clock
765 */
766 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
767 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
768
769 /**
770 * wait for memory initialization to be complete
771 */
772 i = 0;
773 do {
774 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
775 i++;
776 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
777
778 /**
779 * If memory initialization is not successful, IOC timeout will catch
780 * such failures.
781 */
782 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
783
784 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
785 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
786}
787
788static void
789bfa_ioc_lpu_start(struct bfa_ioc *ioc)
790{
791 u32 pss_ctl;
792
793 /**
794 * Take processor out of reset.
795 */
796 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
797 pss_ctl &= ~__PSS_LPU0_RESET;
798
799 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
800}
801
802static void
803bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
804{
805 u32 pss_ctl;
806
807 /**
808 * Put processors in reset.
809 */
810 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
811 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
812
813 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
814}
815
816/**
817 * Read the running firmware's version header from IOC shared memory.
818 */
819void
820bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
821{
822 u32 pgnum, pgoff;
823 u32 loff = 0;
824 int i;
825 u32 *fwsig = (u32 *) fwhdr;
826
827 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
828 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
829 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
830
831 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
832 i++) {
833 fwsig[i] =
834 swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
835 loff += sizeof(u32);
836 }
837}
838
839/**
840 * Returns true if the firmware image checksums match.
841 */
842bool
843bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
844{
845 struct bfi_ioc_image_hdr *drv_fwhdr;
846 int i;
847
848 drv_fwhdr = (struct bfi_ioc_image_hdr *)
849 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
850
851 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
852 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
853 return false;
854 }
855
856 return true;
857}
858
859/**
860 * Return true if the currently running firmware version is valid: its
861 * signature and execution context (driver/bios) must match the driver's image.
862 */
863static bool
864bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
865{
866 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
867
868 /**
869 * If bios/efi boot (flash based) -- return true
870 */
871 if (bfa_ioc_is_optrom(ioc))
872 return true;
873
874 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
875 drv_fwhdr = (struct bfi_ioc_image_hdr *)
876 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
877
878 if (fwhdr.signature != drv_fwhdr->signature)
879 return false;
880
881 if (fwhdr.exec != drv_fwhdr->exec)
882 return false;
883
884 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
885}
886
887/**
888 * Conditionally flush any pending message from firmware at start.
889 */
890static void
891bfa_ioc_msgflush(struct bfa_ioc *ioc)
892{
893 u32 r32;
894
895 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
896 if (r32)
897 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
898}
899
900/**
901 * @img ioc_init_logic.jpg
902 */
903static void
904bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
905{
906 enum bfi_ioc_state ioc_fwstate;
907 bool fwvalid;
908
909 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
910
911 if (force)
912 ioc_fwstate = BFI_IOC_UNINIT;
913
914 /**
915 * check if firmware is valid
916 */
917 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
918 false : bfa_ioc_fwver_valid(ioc);
919
920 if (!fwvalid) {
921 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
922 return;
923 }
924
925 /**
926	 * If hardware initialization is in progress (initiated by the other IOC),
927 * just wait for an initialization completion interrupt.
928 */
929 if (ioc_fwstate == BFI_IOC_INITING) {
930 ioc->cbfn->reset_cbfn(ioc->bfa);
931 return;
932 }
933
934 /**
935 * If IOC function is disabled and firmware version is same,
936 * just re-enable IOC.
937 *
938 * If option rom, IOC must not be in operational state. With
939 * convergence, IOC will be in operational state when 2nd driver
940 * is loaded.
941 */
942 if (ioc_fwstate == BFI_IOC_DISABLED ||
943 (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
944 /**
945 * When using MSI-X any pending firmware ready event should
946 * be flushed. Otherwise MSI-X interrupts are not delivered.
947 */
948 bfa_ioc_msgflush(ioc);
949 ioc->cbfn->reset_cbfn(ioc->bfa);
950 bfa_fsm_send_event(ioc, IOC_E_FWREADY);
951 return;
952 }
953
954 /**
955 * Initialize the h/w for any other states.
956 */
957 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
958}
959
960void
961bfa_nw_ioc_timeout(void *ioc_arg)
962{
963 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
964
965 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
966}
967
968static void
969bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
970{
971 u32 *msgp = (u32 *) ioc_msg;
972 u32 i;
973
974 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
975
976 /*
977 * first write msg to mailbox registers
978 */
979 for (i = 0; i < len / sizeof(u32); i++)
980 writel(cpu_to_le32(msgp[i]),
981 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
982
983 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
984 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
985
986 /*
987 * write 1 to mailbox CMD to trigger LPU event
988 */
989 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
990 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
991}
992
993static void
994bfa_ioc_send_enable(struct bfa_ioc *ioc)
995{
996 struct bfi_ioc_ctrl_req enable_req;
997 struct timeval tv;
998
999 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1000 bfa_ioc_portid(ioc));
1001 enable_req.ioc_class = ioc->ioc_mc;
1002 do_gettimeofday(&tv);
1003	enable_req.tv_sec = htonl(tv.tv_sec);
1004 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1005}
1006
1007static void
1008bfa_ioc_send_disable(struct bfa_ioc *ioc)
1009{
1010 struct bfi_ioc_ctrl_req disable_req;
1011
1012 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1013 bfa_ioc_portid(ioc));
1014 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1015}
1016
1017static void
1018bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1019{
1020 struct bfi_ioc_getattr_req attr_req;
1021
1022 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1023 bfa_ioc_portid(ioc));
1024 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1025 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1026}
1027
1028void
1029bfa_nw_ioc_hb_check(void *cbarg)
1030{
1031 struct bfa_ioc *ioc = cbarg;
1032 u32 hb_count;
1033
1034 hb_count = readl(ioc->ioc_regs.heartbeat);
1035 if (ioc->hb_count == hb_count) {
1036		pr_crit("Firmware heartbeat failure at %d\n", hb_count);
1037 bfa_ioc_recover(ioc);
1038 return;
1039	}
1040
1041	ioc->hb_count = hb_count;
1042
1043 bfa_ioc_mbox_poll(ioc);
1044 mod_timer(&ioc->hb_timer, jiffies +
1045 msecs_to_jiffies(BFA_IOC_HB_TOV));
1046}
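
/*
 * Sketch of how a driver might arm the heartbeat timer so that
 * bfa_nw_ioc_hb_check() fires every BFA_IOC_HB_TOV ms (illustrative;
 * the example_* names are hypothetical, and the real driver arms these
 * timers elsewhere):
 */
static void example_hb_timeout(unsigned long data)
{
	bfa_nw_ioc_hb_check((void *)data);
}

static void example_hb_timer_setup(struct bfa_ioc *ioc)
{
	setup_timer(&ioc->hb_timer, example_hb_timeout, (unsigned long)ioc);
	mod_timer(&ioc->hb_timer, jiffies + msecs_to_jiffies(BFA_IOC_HB_TOV));
}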
1047
1048static void
1049bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1050{
1051 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1052 mod_timer(&ioc->hb_timer, jiffies +
1053 msecs_to_jiffies(BFA_IOC_HB_TOV));
1054}
1055
1056static void
1057bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1058{
1059 del_timer(&ioc->hb_timer);
1060}
1061
1062/**
1063 * @brief
1064 * Initiate a full firmware download.
1065 */
1066static void
1067bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1068 u32 boot_param)
1069{
1070 u32 *fwimg;
1071 u32 pgnum, pgoff;
1072 u32 loff = 0;
1073 u32 chunkno = 0;
1074 u32 i;
1075
1076 /**
1077 * Initialize LMEM first before code download
1078 */
1079 bfa_ioc_lmem_init(ioc);
1080
1081 /**
1082 * Flash based firmware boot
1083 */
1084 if (bfa_ioc_is_optrom(ioc))
1085 boot_type = BFI_BOOT_TYPE_FLASH;
1086 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1087
1088 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1089 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1090
1091 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1092
1093 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1094 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1095 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1096 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1097 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1098 }
1099
1100 /**
1101 * write smem
1102 */
1103 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1104 ((ioc->ioc_regs.smem_page_start) + (loff)));
1105
1106 loff += sizeof(u32);
1107
1108 /**
1109 * handle page offset wrap around
1110 */
1111 loff = PSS_SMEM_PGOFF(loff);
1112 if (loff == 0) {
1113 pgnum++;
1114 writel(pgnum,
1115 ioc->ioc_regs.host_page_num_fn);
1116 }
1117 }
1118
1119 writel(bfa_ioc_smem_pgnum(ioc, 0),
1120 ioc->ioc_regs.host_page_num_fn);
1121
1122 /*
1123 * Set boot type and boot param at the end.
1124 */
1125	writel(boot_type, (ioc->ioc_regs.smem_page_start
1126			+ BFI_BOOT_TYPE_OFF));
1127	writel(boot_param, (ioc->ioc_regs.smem_page_start
1128			+ BFI_BOOT_PARAM_OFF));
1129}
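
/*
 * The chunk arithmetic above, worked through (illustrative): with
 * BFI_FLASH_CHUNK_SZ_WORDS words per chunk, image word i lives in chunk
 * BFA_IOC_FLASH_CHUNK_NO(i) = i / BFI_FLASH_CHUNK_SZ_WORDS at offset
 * BFA_IOC_FLASH_OFFSET_IN_CHUNK(i) = i % BFI_FLASH_CHUNK_SZ_WORDS, and a
 * fresh chunk is fetched with bfa_cb_image_get_chunk() whenever the chunk
 * number changes. Independently, loff wraps within an smem page, bumping
 * the host page-number register each time it returns to 0.
 */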
1130
1131static void
1132bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1133{
1134 bfa_ioc_hwinit(ioc, force);
1135}
1136
1137/**
1138 * @brief
1139 * Update BFA configuration from firmware configuration.
1140 */
1141static void
1142bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1143{
1144 struct bfi_ioc_attr *attr = ioc->attr;
1145
1146 attr->adapter_prop = ntohl(attr->adapter_prop);
1147 attr->card_type = ntohl(attr->card_type);
1148 attr->maxfrsize = ntohs(attr->maxfrsize);
1149
1150 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1151}
1152
1153/**
1154 * Attach time initialization of mbox logic.
1155 */
1156static void
1157bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1158{
1159 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1160 int mc;
1161
1162 INIT_LIST_HEAD(&mod->cmd_q);
1163 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1164 mod->mbhdlr[mc].cbfn = NULL;
1165 mod->mbhdlr[mc].cbarg = ioc->bfa;
1166 }
1167}
1168
1169/**
1170 * Mbox poll timer -- restarts any pending mailbox requests.
1171 */
1172static void
1173bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1174{
1175 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1176 struct bfa_mbox_cmd *cmd;
1177 u32 stat;
1178
1179 /**
1180 * If no command pending, do nothing
1181 */
1182 if (list_empty(&mod->cmd_q))
1183 return;
1184
1185 /**
1186 * If previous command is not yet fetched by firmware, do nothing
1187 */
1188 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1189 if (stat)
1190 return;
1191
1192 /**
1193	 * Dequeue the next pending command and send it to firmware.
1194 */
1195 bfa_q_deq(&mod->cmd_q, &cmd);
1196 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1197}
1198
1199/**
1200 * Cleanup any pending requests.
1201 */
1202static void
1203bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
1204{
1205 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1206 struct bfa_mbox_cmd *cmd;
1207
1208 while (!list_empty(&mod->cmd_q))
1209 bfa_q_deq(&mod->cmd_q, &cmd);
1210}
1211
1212/**
1213 * IOC public
1214 */
1215static enum bfa_status
1216bfa_ioc_pll_init(struct bfa_ioc *ioc)
1217{
1218 /*
1219 * Hold semaphore so that nobody can access the chip during init.
1220 */
1221 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1222
1223 bfa_ioc_pll_init_asic(ioc);
1224
1225 ioc->pllinit = true;
1226 /*
1227 * release semaphore.
1228 */
1229 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1230
1231 return BFA_STATUS_OK;
1232}
1233
1234/**
1235 * Interface used by diag module to do firmware boot with memory test
1236 * as the entry vector.
1237 */
1238static void
1239bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
1240{
1241 void __iomem *rb;
1242
1243 bfa_ioc_stats(ioc, ioc_boots);
1244
1245 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1246 return;
1247
1248 /**
1249 * Initialize IOC state of all functions on a chip reset.
1250 */
1251 rb = ioc->pcidev.pci_bar_kva;
1252 if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
1253 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1254 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1255 } else {
1256 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1257 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1258 }
1259
1260 bfa_ioc_msgflush(ioc);
1261 bfa_ioc_download_fw(ioc, boot_type, boot_param);
1262
1263 /**
1264 * Enable interrupts just before starting LPU
1265 */
1266 ioc->cbfn->reset_cbfn(ioc->bfa);
1267 bfa_ioc_lpu_start(ioc);
1268}
1269
1270/**
1271 * Enable/disable IOC failure auto recovery.
1272 */
1273void
1274bfa_nw_ioc_auto_recover(bool auto_recover)
1275{
1276 bfa_nw_auto_recover = auto_recover;
1277}
1278
1279bool
1280bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
1281{
1282 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1283}
1284
1285static void
1286bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1287{
1288 u32 *msgp = mbmsg;
1289 u32 r32;
1290 int i;
1291
1292 /**
1293 * read the MBOX msg
1294 */
1295 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1296 i++) {
1297 r32 = readl(ioc->ioc_regs.lpu_mbox +
1298 i * sizeof(u32));
1299 msgp[i] = htonl(r32);
1300 }
1301
1302 /**
1303 * turn off mailbox interrupt by clearing mailbox status
1304 */
1305 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1306 readl(ioc->ioc_regs.lpu_mbox_cmd);
1307}
1308
1309static void
1310bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1311{
1312 union bfi_ioc_i2h_msg_u *msg;
1313
1314 msg = (union bfi_ioc_i2h_msg_u *) m;
1315
1316 bfa_ioc_stats(ioc, ioc_isrs);
1317
1318 switch (msg->mh.msg_id) {
1319 case BFI_IOC_I2H_HBEAT:
1320 break;
1321
1322 case BFI_IOC_I2H_READY_EVENT:
1323 bfa_fsm_send_event(ioc, IOC_E_FWREADY);
1324 break;
1325
1326 case BFI_IOC_I2H_ENABLE_REPLY:
1327 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
1328 break;
1329
1330 case BFI_IOC_I2H_DISABLE_REPLY:
1331 bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
1332 break;
1333
1334 case BFI_IOC_I2H_GETATTR_REPLY:
1335 bfa_ioc_getattr_reply(ioc);
1336 break;
1337
1338 default:
1339 BUG_ON(1);
1340 }
1341}
1342
1343/**
1344 * IOC attach time initialization and setup.
1345 *
1346 * @param[in] ioc memory for IOC
1347 * @param[in] bfa driver instance structure
1348 */
1349void
1350bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1351{
1352 ioc->bfa = bfa;
1353 ioc->cbfn = cbfn;
1354 ioc->fcmode = false;
1355 ioc->pllinit = false;
1356 ioc->dbg_fwsave_once = true;
1357
1358 bfa_ioc_mbox_attach(ioc);
1359 INIT_LIST_HEAD(&ioc->hb_notify_q);
1360
1361 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
1362}
1363
1364/**
1365 * Driver detach time IOC cleanup.
1366 */
1367void
1368bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1369{
1370 bfa_fsm_send_event(ioc, IOC_E_DETACH);
1371}
1372
1373/**
1374 * Setup IOC PCI properties.
1375 *
1376 * @param[in] pcidev PCI device information for this IOC
1377 */
1378void
1379bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1380 enum bfi_mclass mc)
1381{
1382 ioc->ioc_mc = mc;
1383 ioc->pcidev = *pcidev;
1384 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1385 ioc->cna = ioc->ctdev && !ioc->fcmode;
1386
1387 bfa_nw_ioc_set_ct_hwif(ioc);
1388
1389 bfa_ioc_map_port(ioc);
1390 bfa_ioc_reg_init(ioc);
1391}
1392
1393/**
1394 * Initialize IOC dma memory
1395 *
1396 * @param[in] dm_kva kernel virtual address of IOC dma memory
1397 * @param[in] dm_pa physical address of IOC dma memory
1398 */
1399void
1400bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
1401{
1402 /**
1403 * dma memory for firmware attribute
1404 */
1405 ioc->attr_dma.kva = dm_kva;
1406 ioc->attr_dma.pa = dm_pa;
1407 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
1408}
1409
1410/**
1411 * Return size of dma memory required.
1412 */
1413u32
1414bfa_nw_ioc_meminfo(void)
1415{
1416 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
1417}
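
/*
 * Sketch of attach-time DMA setup built on the two calls above
 * (illustrative; example_ioc_dma_setup is a hypothetical helper):
 */
static int example_ioc_dma_setup(struct bfa_ioc *ioc, struct pci_dev *pdev)
{
	u32 len = bfa_nw_ioc_meminfo();	/* rounded to BFA_DMA_ALIGN_SZ */
	dma_addr_t pa;
	u8 *kva;

	kva = pci_alloc_consistent(pdev, len, &pa);
	if (!kva)
		return -ENOMEM;

	bfa_nw_ioc_mem_claim(ioc, kva, pa);
	return 0;
}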
1418
1419void
1420bfa_nw_ioc_enable(struct bfa_ioc *ioc)
1421{
1422 bfa_ioc_stats(ioc, ioc_enables);
1423 ioc->dbg_fwsave_once = true;
1424
1425 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1426}
1427
1428void
1429bfa_nw_ioc_disable(struct bfa_ioc *ioc)
1430{
1431 bfa_ioc_stats(ioc, ioc_disables);
1432 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1433}
1434
1435static u32
1436bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1437{
1438 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1439}
1440
1441static u32
1442bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
1443{
1444 return PSS_SMEM_PGOFF(fmaddr);
1445}
1446
1447/**
1448 * Register mailbox message handler function, to be called by common modules
1449 */
1450void
1451bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
1452 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1453{
1454 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1455
1456 mod->mbhdlr[mc].cbfn = cbfn;
1457 mod->mbhdlr[mc].cbarg = cbarg;
1458}
1459
1460/**
1461 * Queue a mailbox command request to firmware. The request is queued
1462 * internally if the mailbox is busy; the caller must serialize access.
1463 *
1464 * @param[in] ioc IOC instance
1465 * @param[in]	cmd	Mailbox command
1466 */
1467void
1468bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
1469{
1470 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1471 u32 stat;
1472
1473 /**
1474 * If a previous command is pending, queue new command
1475 */
1476 if (!list_empty(&mod->cmd_q)) {
1477 list_add_tail(&cmd->qe, &mod->cmd_q);
1478 return;
1479 }
1480
1481 /**
1482 * If mailbox is busy, queue command for poll timer
1483 */
1484 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1485 if (stat) {
1486 list_add_tail(&cmd->qe, &mod->cmd_q);
1487 return;
1488 }
1489
1490 /**
1491 * mailbox is free -- queue command to firmware
1492 */
1493 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1494}
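
/*
 * Sketch of a caller using the interface above (illustrative; the
 * example_* names are hypothetical). The bfa_mbox_cmd must stay valid
 * while queued, and callers serialize access themselves, per the comment
 * above:
 */
static void example_post_cmd(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
			     void *msg, int len)
{
	memcpy(&cmd->msg[0], msg, len);	/* len <= BFI_IOC_MSGLEN_MAX */
	bfa_nw_ioc_mbox_queue(ioc, cmd);
}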
1495
1496/**
1497 * Handle mailbox interrupts
1498 */
1499void
1500bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
1501{
1502 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1503 struct bfi_mbmsg m;
1504 int mc;
1505
1506 bfa_ioc_msgget(ioc, &m);
1507
1508 /**
1509 * Treat IOC message class as special.
1510 */
1511 mc = m.mh.msg_class;
1512 if (mc == BFI_MC_IOC) {
1513 bfa_ioc_isr(ioc, &m);
1514 return;
1515 }
1516
1517	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1518 return;
1519
1520 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
1521}
1522
1523void
1524bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
1525{
1526 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
1527}
1528
1529/**
1530 * Add to IOC heartbeat failure notification queue. To be used by common
1531 * modules such as cee, port, diag.
1532 */
1533void
1534bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
1535 struct bfa_ioc_hbfail_notify *notify)
1536{
1537 list_add_tail(&notify->qe, &ioc->hb_notify_q);
1538}
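
/*
 * Sketch of a common module registering for heartbeat failure with the
 * bfa_ioc_hbfail_init() macro from bfa_ioc.h (illustrative; the example_*
 * names are hypothetical):
 */
static void example_hbfail_cb(void *cbarg)
{
	/* react to IOC heartbeat failure, e.g. fail pending requests */
}

static void example_hbfail_setup(struct bfa_ioc *ioc,
				 struct bfa_ioc_hbfail_notify *notify,
				 void *cbarg)
{
	bfa_ioc_hbfail_init(notify, example_hbfail_cb, cbarg);
	bfa_nw_ioc_hbfail_register(ioc, notify);
}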
1539
1540#define BFA_MFG_NAME "Brocade"
1541static void
1542bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
1543 struct bfa_adapter_attr *ad_attr)
1544{
1545 struct bfi_ioc_attr *ioc_attr;
1546
1547 ioc_attr = ioc->attr;
1548
1549 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
1550 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
1551 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
1552 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
1553 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
1554 sizeof(struct bfa_mfg_vpd));
1555
1556 ad_attr->nports = bfa_ioc_get_nports(ioc);
1557 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
1558
1559 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
1560 /* For now, model descr uses same model string */
1561 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
1562
1563 ad_attr->card_type = ioc_attr->card_type;
1564 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
1565
1566 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
1567 ad_attr->prototype = 1;
1568 else
1569 ad_attr->prototype = 0;
1570
1571 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
1572 ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
1573
1574 ad_attr->pcie_gen = ioc_attr->pcie_gen;
1575 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
1576 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
1577 ad_attr->asic_rev = ioc_attr->asic_rev;
1578
1579 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
1580
1581 ad_attr->cna_capable = ioc->cna;
1582 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
1583}
1584
1585static enum bfa_ioc_type
1586bfa_ioc_get_type(struct bfa_ioc *ioc)
1587{
1588 if (!ioc->ctdev || ioc->fcmode)
1589 return BFA_IOC_TYPE_FC;
1590 else if (ioc->ioc_mc == BFI_MC_IOCFC)
1591 return BFA_IOC_TYPE_FCoE;
1592 else if (ioc->ioc_mc == BFI_MC_LL)
1593 return BFA_IOC_TYPE_LL;
1594 else {
1595 BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
1596 return BFA_IOC_TYPE_LL;
1597 }
1598}
1599
1600static void
1601bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
1602{
1603 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
1604 memcpy(serial_num,
1605 (void *)ioc->attr->brcd_serialnum,
1606 BFA_ADAPTER_SERIAL_NUM_LEN);
1607}
1608
1609static void
1610bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
1611{
1612 memset(fw_ver, 0, BFA_VERSION_LEN);
1613 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
1614}
1615
1616static void
1617bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
1618{
1619 BUG_ON(!(chip_rev));
1620
1621 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
1622
1623 chip_rev[0] = 'R';
1624 chip_rev[1] = 'e';
1625 chip_rev[2] = 'v';
1626 chip_rev[3] = '-';
1627 chip_rev[4] = ioc->attr->asic_rev;
1628 chip_rev[5] = '\0';
1629}
1630
1631static void
1632bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
1633{
1634 memset(optrom_ver, 0, BFA_VERSION_LEN);
1635 memcpy(optrom_ver, ioc->attr->optrom_version,
1636 BFA_VERSION_LEN);
1637}
1638
1639static void
1640bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
1641{
1642 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
1643 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
1644}
1645
1646static void
1647bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
1648{
1649 struct bfi_ioc_attr *ioc_attr;
1650
1651 BUG_ON(!(model));
1652 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
1653
1654 ioc_attr = ioc->attr;
1655
1656 /**
1657 * model name
1658 */
1659 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
1660 BFA_MFG_NAME, ioc_attr->card_type);
1661}
1662
1663static enum bfa_ioc_state
1664bfa_ioc_get_state(struct bfa_ioc *ioc)
1665{
1666 return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
1667}
1668
1669void
1670bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
1671{
1672 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
1673
1674 ioc_attr->state = bfa_ioc_get_state(ioc);
1675 ioc_attr->port_id = ioc->port_id;
1676
1677 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
1678
1679 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
1680
1681 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
1682 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
1683 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
1684}
1685
1686/**
1687 * WWN public
1688 */
1689static u64
1690bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
1691{
1692 return ioc->attr->pwwn;
1693}
1694
1695mac_t
1696bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
1697{
1698 /*
1699 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
1700 */
1701 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1702 return bfa_ioc_get_mfg_mac(ioc);
1703 else
1704 return ioc->attr->mac;
1705}
1706
1707static mac_t
1708bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
1709{
1710 mac_t m;
1711
1712 m = ioc->attr->mfg_mac;
1713 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
1714 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
1715 else
1716 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
1717 bfa_ioc_pcifn(ioc));
1718
1719 return m;
1720}
1721
1722/**
1723 * Firmware failure detected. Start recovery actions.
1724 */
1725static void
1726bfa_ioc_recover(struct bfa_ioc *ioc)
1727{
1728 bfa_ioc_stats(ioc, ioc_hbfails);
1729 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
1730}
1731
1732static void
1733bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
1734{
1735 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
1736 return;
1737
1738}
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
new file mode 100644
index 000000000000..7f0719e17efc
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc.h
@@ -0,0 +1,301 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_IOC_H__
20#define __BFA_IOC_H__
21
22#include "bfa_sm.h"
23#include "bfi.h"
24#include "cna.h"
25
26#define BFA_IOC_TOV 3000 /* msecs */
27#define BFA_IOC_HWSEM_TOV 500 /* msecs */
28#define BFA_IOC_HB_TOV 500 /* msecs */
29#define BFA_IOC_HWINIT_MAX 2
30#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
31
32/**
33 * Generic Scatter Gather Element used by driver
34 */
35struct bfa_sge {
36 u32 sg_len;
37 void *sg_addr;
38};
39
40/**
41 * PCI device information required by IOC
42 */
43struct bfa_pcidev {
44 int pci_slot;
45 u8 pci_func;
46 u16 device_id;
47 void __iomem *pci_bar_kva;
48};
49
50/**
51 * Structure used to remember the DMA-able memory block's KVA and Physical
52 * Address
53 */
54struct bfa_dma {
55 void *kva; /* ! Kernel virtual address */
56 u64 pa; /* ! Physical address */
57};
58
59#define BFA_DMA_ALIGN_SZ 256
60
61/**
62 * smem size for Crossbow and Catapult
63 */
64#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
65#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
66
67/**
68 * @brief BFA dma address assignment macro
69 */
70#define bfa_dma_addr_set(dma_addr, pa) \
71 __bfa_dma_addr_set(&dma_addr, (u64)pa)
72
73static inline void
74__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
75{
76 dma_addr->a32.addr_lo = (u32) pa;
77 dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
78}
79
80/**
81 * @brief BFA dma address assignment macro. (big endian format)
82 */
83#define bfa_dma_be_addr_set(dma_addr, pa) \
84 __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
85static inline void
86__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
87{
88 dma_addr->a32.addr_lo = (u32) htonl(pa);
89 dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
90}
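
/*
 * Worked example for the macro above (illustrative): for
 * pa = 0x123456780ULL it stores htonl(0x23456780) in addr_lo and
 * htonl(0x00000001) in addr_hi, which is how bfa_ioc_send_getattr()
 * passes the attribute DMA address to firmware.
 */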
91
92struct bfa_ioc_regs {
93 void __iomem *hfn_mbox_cmd;
94 void __iomem *hfn_mbox;
95 void __iomem *lpu_mbox_cmd;
96 void __iomem *lpu_mbox;
97 void __iomem *pss_ctl_reg;
98 void __iomem *pss_err_status_reg;
99 void __iomem *app_pll_fast_ctl_reg;
100 void __iomem *app_pll_slow_ctl_reg;
101 void __iomem *ioc_sem_reg;
102 void __iomem *ioc_usage_sem_reg;
103 void __iomem *ioc_init_sem_reg;
104 void __iomem *ioc_usage_reg;
105 void __iomem *host_page_num_fn;
106 void __iomem *heartbeat;
107 void __iomem *ioc_fwstate;
108 void __iomem *ll_halt;
109 void __iomem *err_set;
110 void __iomem *shirq_isr_next;
111 void __iomem *shirq_msk_next;
112 void __iomem *smem_page_start;
113 u32 smem_pg0;
114};
115
116/**
117 * IOC Mailbox structures
118 */
119struct bfa_mbox_cmd {
120 struct list_head qe;
121 u32 msg[BFI_IOC_MSGSZ];
122};
123
124/**
125 * IOC mailbox module
126 */
127typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
128struct bfa_ioc_mbox_mod {
129 struct list_head cmd_q; /*!< pending mbox queue */
130 int nmclass; /*!< number of handlers */
131 struct {
132 bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */
133 void *cbarg;
134 } mbhdlr[BFI_MC_MAX];
135};
136
137/**
138 * IOC callback function interfaces
139 */
140typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
141typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
142typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
143typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
144struct bfa_ioc_cbfn {
145 bfa_ioc_enable_cbfn_t enable_cbfn;
146 bfa_ioc_disable_cbfn_t disable_cbfn;
147 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
148 bfa_ioc_reset_cbfn_t reset_cbfn;
149};
150
151/**
152 * Heartbeat failure notification queue element.
153 */
154struct bfa_ioc_hbfail_notify {
155 struct list_head qe;
156 bfa_ioc_hbfail_cbfn_t cbfn;
157 void *cbarg;
158};
159
160/**
161 * Initialize a heartbeat failure notification structure
162 */
163#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
164 (__notify)->cbfn = (__cbfn); \
165 (__notify)->cbarg = (__cbarg); \
166} while (0)
167
168struct bfa_ioc {
169 bfa_fsm_t fsm;
170 struct bfa *bfa;
171 struct bfa_pcidev pcidev;
172 struct bfa_timer_mod *timer_mod;
173 struct timer_list ioc_timer;
174 struct timer_list sem_timer;
175 struct timer_list hb_timer;
176 u32 hb_count;
177 u32 retry_count;
178 struct list_head hb_notify_q;
179 void *dbg_fwsave;
180 int dbg_fwsave_len;
181 bool dbg_fwsave_once;
182 enum bfi_mclass ioc_mc;
183 struct bfa_ioc_regs ioc_regs;
184 struct bfa_ioc_drv_stats stats;
185 bool auto_recover;
186 bool fcmode;
187 bool ctdev;
188 bool cna;
189 bool pllinit;
190 bool stats_busy; /*!< outstanding stats */
191 u8 port_id;
192
193 struct bfa_dma attr_dma;
194 struct bfi_ioc_attr *attr;
195 struct bfa_ioc_cbfn *cbfn;
196 struct bfa_ioc_mbox_mod mbox_mod;
197 struct bfa_ioc_hwif *ioc_hwif;
198};
199
200struct bfa_ioc_hwif {
201 enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
202 bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
203 void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
204 void (*ioc_reg_init) (struct bfa_ioc *ioc);
205 void (*ioc_map_port) (struct bfa_ioc *ioc);
206 void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
207 bool msix);
208 void (*ioc_notify_hbfail) (struct bfa_ioc *ioc);
209 void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
210};
211
212#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
213#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
214#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
215#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
216#define bfa_ioc_fetch_stats(__ioc, __stats) \
217 (((__stats)->drv_stats) = (__ioc)->stats)
218#define bfa_ioc_clr_stats(__ioc) \
219 memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
220#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
221#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
222#define bfa_ioc_speed_sup(__ioc) \
223 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
224#define bfa_ioc_get_nports(__ioc) \
225 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
226
227#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
228#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
229#define BFA_IOC_FWIMG_TYPE(__ioc) \
230 (((__ioc)->ctdev) ? \
231 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
232 BFI_IMAGE_CB_FC)
233#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
234 (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
235#define BFA_IOC_FLASH_CHUNK_NO(off)	((off) / BFI_FLASH_CHUNK_SZ_WORDS)
236#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) ((off) % BFI_FLASH_CHUNK_SZ_WORDS)
237#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) ((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
238
239/**
240 * IOC mailbox interface
241 */
242void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
243void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
244void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
245 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
246
247/**
248 * IOC interfaces
249 */
250
251#define bfa_ioc_pll_init_asic(__ioc) \
252 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
253 (__ioc)->fcmode))
254
255#define bfa_ioc_isr_mode_set(__ioc, __msix) \
256 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
257#define bfa_ioc_ownership_reset(__ioc) \
258 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
259
260void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
261
262void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
263 struct bfa_ioc_cbfn *cbfn);
264void bfa_nw_ioc_auto_recover(bool auto_recover);
265void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
266void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
267 enum bfi_mclass mc);
268u32 bfa_nw_ioc_meminfo(void);
269void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
270void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
271void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
272
273void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
274bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
275
276void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
277void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
278 struct bfa_ioc_hbfail_notify *notify);
279bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
280void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
281void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
282void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
283 struct bfi_ioc_image_hdr *fwhdr);
284bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
285 struct bfi_ioc_image_hdr *fwhdr);
286mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
287
288/*
289 * Timeout APIs
290 */
291void bfa_nw_ioc_timeout(void *ioc);
292void bfa_nw_ioc_hb_check(void *ioc);
293void bfa_nw_ioc_sem_timeout(void *ioc);
294
295/*
296 * F/W Image Size & Chunk
297 */
298u32 *bfa_cb_image_get_chunk(int type, u32 off);
299u32 bfa_cb_image_get_size(int type);
300
301#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
new file mode 100644
index 000000000000..462857cbab9b
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -0,0 +1,392 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_ioc.h"
20#include "cna.h"
21#include "bfi.h"
22#include "bfi_ctreg.h"
23#include "bfa_defs.h"
24
25/*
26 * forward declarations
27 */
28static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
29static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
30static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
31static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
32static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
33static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
34static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
35static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
36
37struct bfa_ioc_hwif nw_hwif_ct;
38
39/**
40 * Called from bfa_ioc_attach() to map asic specific calls.
41 */
42void
43bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
44{
45 nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
46 nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
47 nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
48 nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
49 nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
50 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
51 nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
52 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
53
54 ioc->ioc_hwif = &nw_hwif_ct;
55}
56
57/**
58 * Return true if firmware of current driver matches the running firmware.
59 */
60static bool
61bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
62{
63 enum bfi_ioc_state ioc_fwstate;
64 u32 usecnt;
65 struct bfi_ioc_image_hdr fwhdr;
66
67 /**
68 * Firmware match check is relevant only for CNA.
69 */
70 if (!ioc->cna)
71 return true;
72
73 /**
74 * If bios boot (flash based) -- do not increment usage count
75 */
76 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
77 BFA_IOC_FWIMG_MINSZ)
78 return true;
79
80 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
81 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
82
83 /**
84 * If usage count is 0, always return TRUE.
85 */
86 if (usecnt == 0) {
87 writel(1, ioc->ioc_regs.ioc_usage_reg);
88 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
89 return true;
90 }
91
92 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
93
94 /**
95	 * The use count cannot be non-zero while the chip is in the uninitialized state.
96 */
97 BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
98
99 /**
100 * Check if another driver with a different firmware is active
101 */
102 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
103 if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
104 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
105 return false;
106 }
107
108 /**
109 * Same firmware version. Increment the reference count.
110 */
111 usecnt++;
112 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
113 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
114 return true;
115}
116
117static void
118bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
119{
120 u32 usecnt;
121
122 /**
123 * Firmware lock is relevant only for CNA.
124 */
125 if (!ioc->cna)
126 return;
127
128 /**
129 * If bios boot (flash based) -- do not decrement usage count
130 */
131 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
132 BFA_IOC_FWIMG_MINSZ)
133 return;
134
135 /**
136 * decrement usage count
137 */
138 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
139 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
140 BUG_ON(!(usecnt > 0));
141
142 usecnt--;
143 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
144
145 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
146}
147
148/**
149 * Notify other functions on HB failure.
150 */
151static void
152bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
153{
154 if (ioc->cna) {
155 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
156 /* Wait for halt to take effect */
157 readl(ioc->ioc_regs.ll_halt);
158 } else {
159 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
160 readl(ioc->ioc_regs.err_set);
161 }
162}
163
164/**
165 * Host to LPU mailbox message addresses
166 */
167static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
168 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
169 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
170 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
171 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
172};
173
174/**
175 * Host <-> LPU mailbox command/status registers - port 0
176 */
177static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
178 { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
179 { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
180 { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
181 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
182};
183
184/**
185 * Host <-> LPU mailbox command/status registers - port 1
186 */
187static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
188 { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
189 { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
190 { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
191 { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
192};
193
194static void
195bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
196{
197 void __iomem *rb;
198 int pcifn = bfa_ioc_pcifn(ioc);
199
200 rb = bfa_ioc_bar0(ioc);
201
202 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
203 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
204 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
205
206 if (ioc->port_id == 0) {
207 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
208 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
209 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
210 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
211 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
212 } else {
213 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
214 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
215 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
216 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
217 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
218 }
219
220 /*
221 * PSS control registers
222 */
223 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
224 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
225 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
226 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
227
228 /*
229 * IOC semaphore registers and serialization
230 */
231 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
232 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
233 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
234 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
235
236 /**
237 * sram memory access
238 */
239 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
240 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
241
242 /*
243 * err set reg : for notification of hb failure in fcmode
244 */
245 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
246}
247
248/**
249 * Initialize IOC to port mapping.
250 */
251
252#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
253static void
254bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
255{
256 void __iomem *rb = ioc->pcidev.pci_bar_kva;
257 u32 r32;
258
259 /**
260 * For catapult, base port id on personality register and IOC type
261 */
262 r32 = readl(rb + FNC_PERS_REG);
263 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
264 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
265
266}
267
268/**
269 * Set interrupt mode for a function: INTX or MSIX
270 */
271static void
272bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
273{
274 void __iomem *rb = ioc->pcidev.pci_bar_kva;
275 u32 r32, mode;
276
277 r32 = readl(rb + FNC_PERS_REG);
278
279 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
280 __F0_INTX_STATUS;
281
282 /**
283 * If already in desired mode, do not change anything
284 */
285 if (!msix && mode)
286 return;
287
288 if (msix)
289 mode = __F0_INTX_STATUS_MSIX;
290 else
291 mode = __F0_INTX_STATUS_INTA;
292
293 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
294 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
295
296 writel(r32, rb + FNC_PERS_REG);
297}
298
299/**
300 * Cleanup hw semaphore and usecnt registers
301 */
302static void
303bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
304{
305 if (ioc->cna) {
306 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
307 writel(0, ioc->ioc_regs.ioc_usage_reg);
308 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
309 }
310
311 /*
312 * Read the hw sem reg to make sure that it is locked
313 * before we clear it. If it is not locked, writing 1
314 * will lock it instead of clearing it.
315 */
316 readl(ioc->ioc_regs.ioc_sem_reg);
317 bfa_nw_ioc_hw_sem_release(ioc);
318}
319
320static enum bfa_status
321bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
322{
323 u32 pll_sclk, pll_fclk, r32;
324
325 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
326 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
327 __APP_PLL_312_JITLMT0_1(3U) |
328 __APP_PLL_312_CNTLMT0_1(1U);
329 pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
330 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
331 __APP_PLL_425_JITLMT0_1(3U) |
332 __APP_PLL_425_CNTLMT0_1(1U);
333 if (fcmode) {
334 writel(0, (rb + OP_MODE));
335 writel(__APP_EMS_CMLCKSEL |
336 __APP_EMS_REFCKBUFEN2 |
337 __APP_EMS_CHANNEL_SEL,
338 (rb + ETH_MAC_SER_REG));
339 } else {
340 writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
341 writel(__APP_EMS_REFCKBUFEN1,
342 (rb + ETH_MAC_SER_REG));
343 }
344 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
345 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
346 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
347 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
348 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
349 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
350 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
351 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
352 writel(pll_sclk |
353 __APP_PLL_312_LOGIC_SOFT_RESET,
354 rb + APP_PLL_312_CTL_REG);
355 writel(pll_fclk |
356 __APP_PLL_425_LOGIC_SOFT_RESET,
357 rb + APP_PLL_425_CTL_REG);
358 writel(pll_sclk |
359 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
360 rb + APP_PLL_312_CTL_REG);
361 writel(pll_fclk |
362 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
363 rb + APP_PLL_425_CTL_REG);
364 readl(rb + HOSTFN0_INT_MSK);
365 udelay(2000);
366 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
367 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
368 writel(pll_sclk |
369 __APP_PLL_312_ENABLE,
370 rb + APP_PLL_312_CTL_REG);
371 writel(pll_fclk |
372 __APP_PLL_425_ENABLE,
373 rb + APP_PLL_425_CTL_REG);
374 if (!fcmode) {
375 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
376 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
377 }
378 r32 = readl((rb + PSS_CTL_REG));
379 r32 &= ~__PSS_LMEM_RESET;
380 writel(r32, (rb + PSS_CTL_REG));
381 udelay(1000);
382 if (!fcmode) {
383 writel(0, (rb + PMM_1T_RESET_REG_P0));
384 writel(0, (rb + PMM_1T_RESET_REG_P1));
385 }
386
387 writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
388 udelay(1000);
389 r32 = readl((rb + MBIST_STAT_REG));
390 writel(0, (rb + MBIST_CTL_REG));
391 return BFA_STATUS_OK;
392}
diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_sm.h
new file mode 100644
index 000000000000..1d3d975d6f68
--- /dev/null
+++ b/drivers/net/bna/bfa_sm.h
@@ -0,0 +1,88 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/**
20 * @file bfa_sm.h State machine defines
21 */
22
23#ifndef __BFA_SM_H__
24#define __BFA_SM_H__
25
26#include "cna.h"
27
28typedef void (*bfa_sm_t)(void *sm, int event);
29
30/**
31 * oc - object class, e.g. bfa_ioc
32 * st - state, e.g. reset
33 * otype - object type, e.g. struct bfa_ioc
34 * etype - event type, e.g. enum ioc_event
35 */
36#define bfa_sm_state_decl(oc, st, otype, etype) \
37 static void oc ## _sm_ ## st(otype * fsm, etype event)
38
39#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
40#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
41#define bfa_sm_get_state(_sm) ((_sm)->sm)
42#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
43
44/**
45 * For converting from state machine function to state encoding.
46 */
47struct bfa_sm_table {
48 bfa_sm_t sm; /*!< state machine function */
49 int state; /*!< state machine encoding */
50 char *name; /*!< state name for display */
51};
52#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
53
54/**
55 * State machine with entry actions.
56 */
57typedef void (*bfa_fsm_t)(void *fsm, int event);
58
59/**
60 * oc - object class, e.g. bfa_ioc
61 * st - state, e.g. reset
62 * otype - object type, e.g. struct bfa_ioc
63 * etype - event type, e.g. enum ioc_event
64 */
65#define bfa_fsm_state_decl(oc, st, otype, etype) \
66 static void oc ## _sm_ ## st(otype * fsm, etype event); \
67 static void oc ## _sm_ ## st ## _entry(otype * fsm)
68
69#define bfa_fsm_set_state(_fsm, _state) do { \
70 (_fsm)->fsm = (bfa_fsm_t)(_state); \
71 _state ## _entry(_fsm); \
72} while (0)
73
74#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
75#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
76#define bfa_fsm_cmp_state(_fsm, _state) \
77 ((_fsm)->fsm == (bfa_fsm_t)(_state))
78
79static inline int
80bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
81{
82 int i = 0;
83
84 while (smt[i].sm && smt[i].sm != sm)
85 i++;
86 return smt[i].state;
87}
88#endif
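To make the macro layer concrete, here is a minimal usage sketch; the widget object, its states and its events are invented for illustration and are not part of the driver:

	#include "bfa_sm.h"

	/* Invented object and events, for illustration only */
	enum widget_event { WIDGET_E_START = 1, WIDGET_E_STOP = 2 };

	struct widget {
		bfa_sm_t sm;	/* current state, driven by the macros above */
	};

	bfa_sm_state_decl(widget, stopped, struct widget, enum widget_event);
	bfa_sm_state_decl(widget, running, struct widget, enum widget_event);

	static void
	widget_sm_stopped(struct widget *w, enum widget_event event)
	{
		if (event == WIDGET_E_START)
			bfa_sm_set_state(w, widget_sm_running);
	}

	static void
	widget_sm_running(struct widget *w, enum widget_event event)
	{
		if (event == WIDGET_E_STOP)
			bfa_sm_set_state(w, widget_sm_stopped);
	}

	/*
	 * Callers never switch on state; they initialize once and
	 * then only post events:
	 *
	 *	bfa_sm_set_state(w, widget_sm_stopped);
	 *	bfa_sm_send_event(w, WIDGET_E_START);
	 */

The bfa_fsm_* variants work the same way, except that bfa_fsm_set_state() additionally invokes the destination state's _entry() action.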
diff --git a/drivers/net/bna/bfa_wc.h b/drivers/net/bna/bfa_wc.h
new file mode 100644
index 000000000000..d0e4caee67b0
--- /dev/null
+++ b/drivers/net/bna/bfa_wc.h
@@ -0,0 +1,69 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/**
20 * @file bfa_wc.h Generic wait counter.
21 */
22
23#ifndef __BFA_WC_H__
24#define __BFA_WC_H__
25
26typedef void (*bfa_wc_resume_t) (void *cbarg);
27
28struct bfa_wc {
29 bfa_wc_resume_t wc_resume;
30 void *wc_cbarg;
31 int wc_count;
32};
33
34static inline void
35bfa_wc_up(struct bfa_wc *wc)
36{
37 wc->wc_count++;
38}
39
40static inline void
41bfa_wc_down(struct bfa_wc *wc)
42{
43 wc->wc_count--;
44 if (wc->wc_count == 0)
45 wc->wc_resume(wc->wc_cbarg);
46}
47
48/**
49 * Initialize a waiting counter.
50 */
51static inline void
52bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
53{
54 wc->wc_resume = wc_resume;
55 wc->wc_cbarg = wc_cbarg;
56 wc->wc_count = 0;
57 bfa_wc_up(wc);
58}
59
60/**
61 * Wait for counter to reach zero
62 */
63static inline void
64bfa_wc_wait(struct bfa_wc *wc)
65{
66 bfa_wc_down(wc);
67}
68
69#endif
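The bias taken in bfa_wc_init() (it starts the count at 1) is what makes the pattern race-free: the resume callback cannot fire while sub-operations are still being started, and bfa_wc_wait() drops the bias once everything is in flight. A sketch of the intended usage, with invented names:

	static void cleanup_done(void *cbarg)
	{
		/* runs exactly once, after the last pending op completes */
	}

	static void start_cleanup(struct bfa_wc *wc, int nr_ops)
	{
		int i;

		bfa_wc_init(wc, cleanup_done, NULL);	/* count = 1 (bias) */

		for (i = 0; i < nr_ops; i++) {
			bfa_wc_up(wc);			/* one per pending op */
			/* kick off asynchronous op i; its completion
			 * handler must call bfa_wc_down(wc)
			 */
		}

		bfa_wc_wait(wc);	/* drop the bias; cleanup_done()
					 * fires when the count hits zero */
	}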
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
new file mode 100644
index 000000000000..a97396811050
--- /dev/null
+++ b/drivers/net/bna/bfi.h
@@ -0,0 +1,392 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFI_H__
20#define __BFI_H__
21
22#include "bfa_defs.h"
23
24#pragma pack(1)
25
26/**
27 * BFI FW image type
28 */
29#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
30#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
31enum {
32 BFI_IMAGE_CB_FC,
33 BFI_IMAGE_CT_FC,
34 BFI_IMAGE_CT_CNA,
35 BFI_IMAGE_MAX,
36};
37
38/**
39 * Msg header common to all msgs
40 */
41struct bfi_mhdr {
42 u8 msg_class; /*!< @ref enum bfi_mclass */
43	u8		msg_id;		/*!< msg opcode within the class */
44 union {
45 struct {
46 u8 rsvd;
47 u8 lpu_id; /*!< msg destination */
48 } h2i;
49 u16 i2htok; /*!< token in msgs to host */
50 } mtag;
51};
52
53#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
54 (_mh).msg_class = (_mc); \
55 (_mh).msg_id = (_op); \
56 (_mh).mtag.h2i.lpu_id = (_lpuid); \
57} while (0)
58
59#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
60 (_mh).msg_class = (_mc); \
61 (_mh).msg_id = (_op); \
62 (_mh).mtag.i2htok = (_i2htok); \
63} while (0)
64
65/*
66 * Message opcodes: 0-127 to firmware, 128-255 to host
67 */
68#define BFI_I2H_OPCODE_BASE 128
69#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
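So a request opcode N sent with bfi_h2i_set() is answered with opcode BFA_I2HM(N); for example BFI_IOC_H2I_ENABLE_REQ (1, defined later in this header) pairs with BFI_IOC_I2H_ENABLE_REPLY == BFA_I2HM(1) == 129. A sketch of stamping a request header, using the class and opcode enums that follow below:

	struct bfi_mhdr mh;

	/* Stamp an IOC enable request destined for LPU 0 */
	bfi_h2i_set(mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 0);

	/* The firmware's reply header will then carry
	 * mh.msg_id == BFI_IOC_I2H_ENABLE_REPLY, i.e. BFA_I2HM(1).
	 */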
70
71/**
72 ****************************************************************************
73 *
74 * Scatter Gather Element and Page definition
75 *
76 ****************************************************************************
77 */
78
79#define BFI_SGE_INLINE 1
80#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
81
82/**
83 * SG Flags
84 */
85enum {
86 BFI_SGE_DATA = 0, /*!< data address, not last */
87 BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
88 BFI_SGE_DATA_LAST = 3, /*!< data address, last */
89 BFI_SGE_LINK = 2, /*!< link address */
90 BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
91};
92
93/**
94 * DMA addresses
95 */
96union bfi_addr_u {
97 struct {
98 u32 addr_lo;
99 u32 addr_hi;
100 } a32;
101};
102
103/**
104 * Scatter Gather Element
105 */
106struct bfi_sge {
107#ifdef __BIGENDIAN
108 u32 flags:2,
109 rsvd:2,
110 sg_len:28;
111#else
112 u32 sg_len:28,
113 rsvd:2,
114 flags:2;
115#endif
116 union bfi_addr_u sga;
117};
118
119/**
120 * Scatter Gather Page
121 */
122#define BFI_SGPG_DATA_SGES 7
123#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
124#define BFI_SGPG_RSVD_WD_LEN 8
125struct bfi_sgpg {
126 struct bfi_sge sges[BFI_SGPG_SGES_MAX];
127 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
128};
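Per the flags enum above, the last data element of a request carries BFI_SGE_DATA_LAST, while the final slot of a full page holds a BFI_SGE_LINK pointing at the next page. A sketch of filling one terminating data element (the helper and its dma/len arguments are hypothetical):

	/* Fill a single, final data SGE (values are illustrative) */
	static void sge_fill_last(struct bfi_sge *sge, u64 dma, u32 len)
	{
		sge->flags = BFI_SGE_DATA_LAST;	/* data element, end of list */
		sge->rsvd = 0;
		sge->sg_len = len;		/* 28-bit length field */
		sge->sga.a32.addr_lo = (u32)dma;
		sge->sga.a32.addr_hi = (u32)(dma >> 32);
	}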
129
130/*
131 * Large Message structure - 128 Bytes size Msgs
132 */
133#define BFI_LMSG_SZ 128
134#define BFI_LMSG_PL_WSZ \
135 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
136
137struct bfi_msg {
138 struct bfi_mhdr mhdr;
139 u32 pl[BFI_LMSG_PL_WSZ];
140};
141
142/**
143 * Mailbox message structure
144 */
145#define BFI_MBMSG_SZ 7
146struct bfi_mbmsg {
147 struct bfi_mhdr mh;
148 u32 pl[BFI_MBMSG_SZ];
149};
150
151/**
152 * Message Classes
153 */
154enum bfi_mclass {
155 BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
156 BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
157 BFI_MC_FLASH = 3, /*!< Flash message class */
158 BFI_MC_CEE = 4, /*!< CEE */
159 BFI_MC_FCPORT = 5, /*!< FC port */
160 BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
161 BFI_MC_LL = 7, /*!< Link Layer */
162 BFI_MC_UF = 8, /*!< Unsolicited frame receive */
163 BFI_MC_FCXP = 9, /*!< FC Transport */
164 BFI_MC_LPS = 10, /*!< lport fc login services */
165 BFI_MC_RPORT = 11, /*!< Remote port */
166 BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
167 BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
168 BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
169 BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
170 BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
171 BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
172 BFI_MC_TSKIM = 18, /*!< Initiator Task management */
173 BFI_MC_SBOOT = 19, /*!< SAN boot services */
174 BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
175 BFI_MC_PORT = 21, /*!< Physical port */
176 BFI_MC_SFP = 22, /*!< SFP module */
177 BFI_MC_MSGQ = 23, /*!< MSGQ */
178 BFI_MC_ENET = 24, /*!< ENET commands/responses */
179 BFI_MC_MAX = 32
180};
181
182#define BFI_IOC_MAX_CQS 4
183#define BFI_IOC_MAX_CQS_ASIC 8
184#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
185
186#define BFI_BOOT_TYPE_OFF 8
187#define BFI_BOOT_PARAM_OFF 12
188
189#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
190#define BFI_BOOT_TYPE_FLASH 1
191#define BFI_BOOT_TYPE_MEMTEST 2
192
193#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
194#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
195
196/**
197 *----------------------------------------------------------------------
198 * IOC
199 *----------------------------------------------------------------------
200 */
201
202enum bfi_ioc_h2i_msgs {
203 BFI_IOC_H2I_ENABLE_REQ = 1,
204 BFI_IOC_H2I_DISABLE_REQ = 2,
205 BFI_IOC_H2I_GETATTR_REQ = 3,
206 BFI_IOC_H2I_DBG_SYNC = 4,
207 BFI_IOC_H2I_DBG_DUMP = 5,
208};
209
210enum bfi_ioc_i2h_msgs {
211 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
212 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
213 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
214 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
215 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
216};
217
218/**
219 * BFI_IOC_H2I_GETATTR_REQ message
220 */
221struct bfi_ioc_getattr_req {
222 struct bfi_mhdr mh;
223 union bfi_addr_u attr_addr;
224};
225
226struct bfi_ioc_attr {
227 u64 mfg_pwwn; /*!< Mfg port wwn */
228 u64 mfg_nwwn; /*!< Mfg node wwn */
229 mac_t mfg_mac; /*!< Mfg mac */
230 u16 rsvd_a;
231 u64 pwwn;
232 u64 nwwn;
233 mac_t mac; /*!< PBC or Mfg mac */
234 u16 rsvd_b;
235 mac_t fcoe_mac;
236 u16 rsvd_c;
237 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
238 u8 pcie_gen;
239 u8 pcie_lanes_orig;
240 u8 pcie_lanes;
241 u8 rx_bbcredit; /*!< receive buffer credits */
242 u32 adapter_prop; /*!< adapter properties */
243 u16 maxfrsize; /*!< max receive frame size */
244 char asic_rev;
245 u8 rsvd_d;
246 char fw_version[BFA_VERSION_LEN];
247 char optrom_version[BFA_VERSION_LEN];
248 struct bfa_mfg_vpd vpd;
249 u32 card_type; /*!< card type */
250};
251
252/**
253 * BFI_IOC_I2H_GETATTR_REPLY message
254 */
255struct bfi_ioc_getattr_reply {
256 struct bfi_mhdr mh; /*!< Common msg header */
257 u8 status; /*!< cfg reply status */
258 u8 rsvd[3];
259};
260
261/**
262 * Firmware memory page offsets
263 */
264#define BFI_IOC_SMEM_PG0_CB (0x40)
265#define BFI_IOC_SMEM_PG0_CT (0x180)
266
267/**
268 * Firmware statistic offset
269 */
270#define BFI_IOC_FWSTATS_OFF (0x6B40)
271#define BFI_IOC_FWSTATS_SZ (4096)
272
273/**
274 * Firmware trace offset
275 */
276#define BFI_IOC_TRC_OFF (0x4b00)
277#define BFI_IOC_TRC_ENTS 256
278
279#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
280#define BFI_IOC_MD5SUM_SZ 4
281struct bfi_ioc_image_hdr {
282 u32 signature; /*!< constant signature */
283 u32 rsvd_a;
284 u32 exec; /*!< exec vector */
285 u32 param; /*!< parameters */
286 u32 rsvd_b[4];
287 u32 md5sum[BFI_IOC_MD5SUM_SZ];
288};
289
290/**
291 * BFI_IOC_I2H_READY_EVENT message
292 */
293struct bfi_ioc_rdy_event {
294 struct bfi_mhdr mh; /*!< common msg header */
295 u8 init_status; /*!< init event status */
296 u8 rsvd[3];
297};
298
299struct bfi_ioc_hbeat {
300 struct bfi_mhdr mh; /*!< common msg header */
301 u32 hb_count; /*!< current heart beat count */
302};
303
304/**
305 * IOC hardware/firmware state
306 */
307enum bfi_ioc_state {
308 BFI_IOC_UNINIT = 0, /*!< not initialized */
309 BFI_IOC_INITING = 1, /*!< h/w is being initialized */
310 BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
311 BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
312 BFI_IOC_OP = 4, /*!< IOC is operational */
313 BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
314 BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
315	BFI_IOC_CFG_DISABLED	= 7,	/*!< IOC is being disabled; transient */
316 BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
317 BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
318};
319
320#define BFI_IOC_ENDIAN_SIG 0x12345678
321
322enum {
323 BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
324 BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
325 BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
326 BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
327 BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
328 BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
329 BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
330	BFI_ADAPTER_PROTO	= 0x100000,	/*!< prototype adapters */
331 BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
332 BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
333};
334
335#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
336 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
337 BFI_ADAPTER_ ## __prop ## _SH)
338#define BFI_ADAPTER_SETP(__prop, __val) \
339 ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
340#define BFI_ADAPTER_IS_PROTO(__adap_type) \
341 ((__adap_type) & BFI_ADAPTER_PROTO)
342#define BFI_ADAPTER_IS_TTV(__adap_type) \
343 ((__adap_type) & BFI_ADAPTER_TTV)
344#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
345 ((__adap_type) & BFI_ADAPTER_UNSUPP)
346#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
347 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
348 BFI_ADAPTER_UNSUPP))
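The GETP/SETP helpers work only because every property follows the _MK/_SH naming convention above, letting token pasting build the mask and shift names. A small worked example with made-up property values:

	/* Compose and pick apart an adapter_prop word (values invented) */
	u32 adapter_prop = BFI_ADAPTER_SETP(TYPE, 0x2) |	/* 0x00020000 */
			   BFI_ADAPTER_SETP(NPORTS, 2) |	/* 0x00000200 */
			   BFI_ADAPTER_SETP(SPEED, 10);		/* 0x0000000a */

	/* BFI_ADAPTER_GETP(NPORTS, adapter_prop) expands to
	 * ((adapter_prop & BFI_ADAPTER_NPORTS_MK) >> BFI_ADAPTER_NPORTS_SH)
	 * and yields 2.
	 */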
349
350/**
351 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
352 */
353struct bfi_ioc_ctrl_req {
354 struct bfi_mhdr mh;
355 u8 ioc_class;
356 u8 rsvd[3];
357 u32 tv_sec;
358};
359
360/**
361 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
362 */
363struct bfi_ioc_ctrl_reply {
364 struct bfi_mhdr mh; /*!< Common msg header */
365 u8 status; /*!< enable/disable status */
366 u8 rsvd[3];
367};
368
369#define BFI_IOC_MSGSZ 8
370/**
371 * H2I Messages
372 */
373union bfi_ioc_h2i_msg_u {
374 struct bfi_mhdr mh;
375 struct bfi_ioc_ctrl_req enable_req;
376 struct bfi_ioc_ctrl_req disable_req;
377 struct bfi_ioc_getattr_req getattr_req;
378 u32 mboxmsg[BFI_IOC_MSGSZ];
379};
380
381/**
382 * I2H Messages
383 */
384union bfi_ioc_i2h_msg_u {
385 struct bfi_mhdr mh;
386 struct bfi_ioc_rdy_event rdy_event;
387 u32 mboxmsg[BFI_IOC_MSGSZ];
388};
389
390#pragma pack()
391
392#endif /* __BFI_H__ */
diff --git a/drivers/net/bna/bfi_cna.h b/drivers/net/bna/bfi_cna.h
new file mode 100644
index 000000000000..4eecabea397b
--- /dev/null
+++ b/drivers/net/bna/bfi_cna.h
@@ -0,0 +1,199 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFI_CNA_H__
19#define __BFI_CNA_H__
20
21#include "bfi.h"
22#include "bfa_defs_cna.h"
23
24#pragma pack(1)
25
26enum bfi_port_h2i {
27 BFI_PORT_H2I_ENABLE_REQ = (1),
28 BFI_PORT_H2I_DISABLE_REQ = (2),
29 BFI_PORT_H2I_GET_STATS_REQ = (3),
30 BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
31};
32
33enum bfi_port_i2h {
34 BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
35 BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
36 BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
37 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
38};
39
40/**
41 * Generic REQ type
42 */
43struct bfi_port_generic_req {
44 struct bfi_mhdr mh; /*!< msg header */
45 u32 msgtag; /*!< msgtag for reply */
46 u32 rsvd;
47};
48
49/**
50 * Generic RSP type
51 */
52struct bfi_port_generic_rsp {
53 struct bfi_mhdr mh; /*!< common msg header */
54 u8 status; /*!< port enable status */
55 u8 rsvd[3];
56 u32 msgtag; /*!< msgtag for reply */
57};
58
59/**
60 * @todo
61 * BFI_PORT_H2I_ENABLE_REQ
62 */
63
64/**
65 * @todo
66 * BFI_PORT_I2H_ENABLE_RSP
67 */
68
69/**
70 * BFI_PORT_H2I_DISABLE_REQ
71 */
72
73/**
74 * BFI_PORT_I2H_DISABLE_RSP
75 */
76
77/**
78 * BFI_PORT_H2I_GET_STATS_REQ
79 */
80struct bfi_port_get_stats_req {
81 struct bfi_mhdr mh; /*!< common msg header */
82 union bfi_addr_u dma_addr;
83};
84
85/**
86 * BFI_PORT_I2H_GET_STATS_RSP
87 */
88
89/**
90 * BFI_PORT_H2I_CLEAR_STATS_REQ
91 */
92
93/**
94 * BFI_PORT_I2H_CLEAR_STATS_RSP
95 */
96
97union bfi_port_h2i_msg_u {
98 struct bfi_mhdr mh;
99 struct bfi_port_generic_req enable_req;
100 struct bfi_port_generic_req disable_req;
101 struct bfi_port_get_stats_req getstats_req;
102 struct bfi_port_generic_req clearstats_req;
103};
104
105union bfi_port_i2h_msg_u {
106 struct bfi_mhdr mh;
107 struct bfi_port_generic_rsp enable_rsp;
108 struct bfi_port_generic_rsp disable_rsp;
109 struct bfi_port_generic_rsp getstats_rsp;
110 struct bfi_port_generic_rsp clearstats_rsp;
111};
112
113/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
114enum bfi_cee_h2i_msgs {
115 BFI_CEE_H2I_GET_CFG_REQ = 1,
116 BFI_CEE_H2I_RESET_STATS = 2,
117 BFI_CEE_H2I_GET_STATS_REQ = 3,
118};
119
120/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
121enum bfi_cee_i2h_msgs {
122 BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
123 BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
124 BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
125};
126
127/* Data structures */
128
129/*
130 * @brief H2I command structure for resetting the stats.
131 * BFI_CEE_H2I_RESET_STATS
132 */
133struct bfi_lldp_reset_stats {
134 struct bfi_mhdr mh;
135};
136
137/*
138 * @brief H2I command structure for resetting the stats.
139 * BFI_CEE_H2I_RESET_STATS
140 */
141struct bfi_cee_reset_stats {
142 struct bfi_mhdr mh;
143};
144
145/*
146 * @brief get configuration command from host
147 * BFI_CEE_H2I_GET_CFG_REQ
148 */
149struct bfi_cee_get_req {
150 struct bfi_mhdr mh;
151 union bfi_addr_u dma_addr;
152};
153
154/*
155 * @brief reply message from firmware
156 * BFI_CEE_I2H_GET_CFG_RSP
157 */
158struct bfi_cee_get_rsp {
159 struct bfi_mhdr mh;
160 u8 cmd_status;
161 u8 rsvd[3];
162};
163
164/*
165 * @brief get configuration command from host
166 * BFI_CEE_H2I_GET_STATS_REQ
167 */
168struct bfi_cee_stats_req {
169 struct bfi_mhdr mh;
170 union bfi_addr_u dma_addr;
171};
172
173/*
174 * @brief reply message from firmware
175 * BFI_CEE_I2H_GET_STATS_RSP
176 */
177struct bfi_cee_stats_rsp {
178 struct bfi_mhdr mh;
179 u8 cmd_status;
180 u8 rsvd[3];
181};
182
183/* @brief mailbox command structures from host to firmware */
184union bfi_cee_h2i_msg_u {
185 struct bfi_mhdr mh;
186 struct bfi_cee_get_req get_req;
187 struct bfi_cee_stats_req stats_req;
188};
189
190/* @brief mailbox message structures from firmware to host */
191union bfi_cee_i2h_msg_u {
192 struct bfi_mhdr mh;
193 struct bfi_cee_get_rsp get_rsp;
194 struct bfi_cee_stats_rsp stats_rsp;
195};
196
197#pragma pack()
198
199#endif /* __BFI_CNA_H__ */
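The CEE requests above share one shape: the host passes a DMA address in the request, the firmware writes the config or stats block into that buffer, and the mailbox reply carries only a status byte. A sketch of building the get-config request, assuming the DMA buffer was mapped elsewhere:

	/* Build a CEE get-config request (dma handle assumed mapped) */
	static void cee_fill_get_req(struct bfi_cee_get_req *req, u64 dma)
	{
		bfi_h2i_set(req->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, 0);
		req->dma_addr.a32.addr_lo = (u32)dma;
		req->dma_addr.a32.addr_hi = (u32)(dma >> 32);
		/* post req on the IOC mailbox; the BFI_CEE_I2H_GET_CFG_RSP
		 * reply's cmd_status then says whether the buffer was filled
		 */
	}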
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
new file mode 100644
index 000000000000..404ea351d4a1
--- /dev/null
+++ b/drivers/net/bna/bfi_ctreg.h
@@ -0,0 +1,637 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/*
20 * bfi_ctreg.h catapult host block register definitions
21 *
22 * !!! Do not edit. Auto generated. !!!
23 */
24
25#ifndef __BFI_CTREG_H__
26#define __BFI_CTREG_H__
27
28#define HOSTFN0_LPU_MBOX0_0 0x00019200
29#define HOSTFN1_LPU_MBOX0_8 0x00019260
30#define LPU_HOSTFN0_MBOX0_0 0x00019280
31#define LPU_HOSTFN1_MBOX0_8 0x000192e0
32#define HOSTFN2_LPU_MBOX0_0 0x00019400
33#define HOSTFN3_LPU_MBOX0_8 0x00019460
34#define LPU_HOSTFN2_MBOX0_0 0x00019480
35#define LPU_HOSTFN3_MBOX0_8 0x000194e0
36#define HOSTFN0_INT_STATUS 0x00014000
37#define __HOSTFN0_HALT_OCCURRED 0x01000000
38#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
39#define __HOSTFN0_INT_STATUS_LVL_SH 20
40#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
41#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
42#define __HOSTFN0_INT_STATUS_P_SH 16
43#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
44#define __HOSTFN0_INT_STATUS_F 0x0000ffff
45#define HOSTFN0_INT_MSK 0x00014004
46#define HOST_PAGE_NUM_FN0 0x00014008
47#define __HOST_PAGE_NUM_FN 0x000001ff
48#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
49#define __MSIX_ERR_INDEX_FN 0x000001ff
50#define HOSTFN1_INT_STATUS 0x00014100
51#define __HOSTFN1_HALT_OCCURRED 0x01000000
52#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
53#define __HOSTFN1_INT_STATUS_LVL_SH 20
54#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
55#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
56#define __HOSTFN1_INT_STATUS_P_SH 16
57#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
58#define __HOSTFN1_INT_STATUS_F 0x0000ffff
59#define HOSTFN1_INT_MSK 0x00014104
60#define HOST_PAGE_NUM_FN1 0x00014108
61#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
62#define APP_PLL_425_CTL_REG 0x00014204
63#define __P_425_PLL_LOCK 0x80000000
64#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
65#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
66#define __APP_PLL_425_RESET_TIMER_SH 17
67#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
68#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
69#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
70#define __APP_PLL_425_CNTLMT0_1_SH 14
71#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
72#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
73#define __APP_PLL_425_JITLMT0_1_SH 12
74#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
75#define __APP_PLL_425_HREF 0x00000800
76#define __APP_PLL_425_HDIV 0x00000400
77#define __APP_PLL_425_P0_1_MK 0x00000300
78#define __APP_PLL_425_P0_1_SH 8
79#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
80#define __APP_PLL_425_Z0_2_MK 0x000000e0
81#define __APP_PLL_425_Z0_2_SH 5
82#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
83#define __APP_PLL_425_RSEL200500 0x00000010
84#define __APP_PLL_425_ENARST 0x00000008
85#define __APP_PLL_425_BYPASS 0x00000004
86#define __APP_PLL_425_LRESETN 0x00000002
87#define __APP_PLL_425_ENABLE 0x00000001
88#define APP_PLL_312_CTL_REG 0x00014208
89#define __P_312_PLL_LOCK 0x80000000
90#define __ENABLE_MAC_AHB_1 0x00800000
91#define __ENABLE_MAC_AHB_0 0x00400000
92#define __ENABLE_MAC_1 0x00200000
93#define __ENABLE_MAC_0 0x00100000
94#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
95#define __APP_PLL_312_RESET_TIMER_SH 17
96#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
97#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
98#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
99#define __APP_PLL_312_CNTLMT0_1_SH 14
100#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
101#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
102#define __APP_PLL_312_JITLMT0_1_SH 12
103#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
104#define __APP_PLL_312_HREF 0x00000800
105#define __APP_PLL_312_HDIV 0x00000400
106#define __APP_PLL_312_P0_1_MK 0x00000300
107#define __APP_PLL_312_P0_1_SH 8
108#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
109#define __APP_PLL_312_Z0_2_MK 0x000000e0
110#define __APP_PLL_312_Z0_2_SH 5
111#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
112#define __APP_PLL_312_RSEL200500 0x00000010
113#define __APP_PLL_312_ENARST 0x00000008
114#define __APP_PLL_312_BYPASS 0x00000004
115#define __APP_PLL_312_LRESETN 0x00000002
116#define __APP_PLL_312_ENABLE 0x00000001
117#define MBIST_CTL_REG 0x00014220
118#define __EDRAM_BISTR_START 0x00000004
119#define __MBIST_RESET 0x00000002
120#define __MBIST_START 0x00000001
121#define MBIST_STAT_REG 0x00014224
122#define __EDRAM_BISTR_STATUS 0x00000008
123#define __EDRAM_BISTR_DONE 0x00000004
124#define __MEM_BIT_STATUS 0x00000002
125#define __MBIST_DONE 0x00000001
126#define HOST_SEM0_REG 0x00014230
127#define __HOST_SEMAPHORE 0x00000001
128#define HOST_SEM1_REG 0x00014234
129#define HOST_SEM2_REG 0x00014238
130#define HOST_SEM3_REG 0x0001423c
131#define HOST_SEM0_INFO_REG 0x00014240
132#define HOST_SEM1_INFO_REG 0x00014244
133#define HOST_SEM2_INFO_REG 0x00014248
134#define HOST_SEM3_INFO_REG 0x0001424c
135#define ETH_MAC_SER_REG 0x00014288
136#define __APP_EMS_CKBUFAMPIN 0x00000020
137#define __APP_EMS_REFCLKSEL 0x00000010
138#define __APP_EMS_CMLCKSEL 0x00000008
139#define __APP_EMS_REFCKBUFEN2 0x00000004
140#define __APP_EMS_REFCKBUFEN1 0x00000002
141#define __APP_EMS_CHANNEL_SEL 0x00000001
142#define HOSTFN2_INT_STATUS 0x00014300
143#define __HOSTFN2_HALT_OCCURRED 0x01000000
144#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
145#define __HOSTFN2_INT_STATUS_LVL_SH 20
146#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
147#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
148#define __HOSTFN2_INT_STATUS_P_SH 16
149#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
150#define __HOSTFN2_INT_STATUS_F 0x0000ffff
151#define HOSTFN2_INT_MSK 0x00014304
152#define HOST_PAGE_NUM_FN2 0x00014308
153#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
154#define HOSTFN3_INT_STATUS 0x00014400
155#define __HALT_OCCURRED 0x01000000
156#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
157#define __HOSTFN3_INT_STATUS_LVL_SH 20
158#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
159#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
160#define __HOSTFN3_INT_STATUS_P_SH 16
161#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
162#define __HOSTFN3_INT_STATUS_F 0x0000ffff
163#define HOSTFN3_INT_MSK 0x00014404
164#define HOST_PAGE_NUM_FN3 0x00014408
165#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
166#define FNC_ID_REG 0x00014600
167#define __FUNCTION_NUMBER 0x00000007
168#define FNC_PERS_REG 0x00014604
169#define __F3_FUNCTION_ACTIVE 0x80000000
170#define __F3_FUNCTION_MODE 0x40000000
171#define __F3_PORT_MAP_MK 0x30000000
172#define __F3_PORT_MAP_SH 28
173#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
174#define __F3_VM_MODE 0x08000000
175#define __F3_INTX_STATUS_MK 0x07000000
176#define __F3_INTX_STATUS_SH 24
177#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
178#define __F2_FUNCTION_ACTIVE 0x00800000
179#define __F2_FUNCTION_MODE 0x00400000
180#define __F2_PORT_MAP_MK 0x00300000
181#define __F2_PORT_MAP_SH 20
182#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
183#define __F2_VM_MODE 0x00080000
184#define __F2_INTX_STATUS_MK 0x00070000
185#define __F2_INTX_STATUS_SH 16
186#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
187#define __F1_FUNCTION_ACTIVE 0x00008000
188#define __F1_FUNCTION_MODE 0x00004000
189#define __F1_PORT_MAP_MK 0x00003000
190#define __F1_PORT_MAP_SH 12
191#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
192#define __F1_VM_MODE 0x00000800
193#define __F1_INTX_STATUS_MK 0x00000700
194#define __F1_INTX_STATUS_SH 8
195#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
196#define __F0_FUNCTION_ACTIVE 0x00000080
197#define __F0_FUNCTION_MODE 0x00000040
198#define __F0_PORT_MAP_MK 0x00000030
199#define __F0_PORT_MAP_SH 4
200#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
201#define __F0_VM_MODE 0x00000008
202#define __F0_INTX_STATUS 0x00000007
203enum {
204 __F0_INTX_STATUS_MSIX = 0x0,
205 __F0_INTX_STATUS_INTA = 0x1,
206 __F0_INTX_STATUS_INTB = 0x2,
207 __F0_INTX_STATUS_INTC = 0x3,
208 __F0_INTX_STATUS_INTD = 0x4,
209};
210#define OP_MODE 0x0001460c
211#define __APP_ETH_CLK_LOWSPEED 0x00000004
212#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
213#define __GLOBAL_FCOE_MODE 0x00000001
214#define HOST_SEM4_REG 0x00014610
215#define HOST_SEM5_REG 0x00014614
216#define HOST_SEM6_REG 0x00014618
217#define HOST_SEM7_REG 0x0001461c
218#define HOST_SEM4_INFO_REG 0x00014620
219#define HOST_SEM5_INFO_REG 0x00014624
220#define HOST_SEM6_INFO_REG 0x00014628
221#define HOST_SEM7_INFO_REG 0x0001462c
222#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
223#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
224#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
225#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
226#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
227#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
228#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
229#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
230#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
231#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
232#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
233#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
234#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
235#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
236#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
237#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
238#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
239#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
240#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
241#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
242#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
243#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
244#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
245#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
246#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
247#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
248#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
249#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
250#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
251#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
252#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
253#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
254#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
255#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
256#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
257#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
258#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
259#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
260#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
261#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
262#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
263#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
264#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
265#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
266#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
267#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
268#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
269#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
270#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
271#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
272#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
273#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
274#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
275#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
276#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
277#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
278#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
279#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
280#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
281#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
282#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
283#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
284#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
285#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
286#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
287#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
288#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
289#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
290#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
291#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
292#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
293#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
294#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
295#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
296#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
297#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
298#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
299#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
300#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
301#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
302#define FW_INIT_HALT_P0 0x000191ac
303#define __FW_INIT_HALT_P 0x00000001
304#define FW_INIT_HALT_P1 0x000191bc
305#define CPE_PI_PTR_Q0 0x00038000
306#define __CPE_PI_UNUSED_MK 0xffff0000
307#define __CPE_PI_UNUSED_SH 16
308#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
309#define __CPE_PI_PTR 0x0000ffff
310#define CPE_PI_PTR_Q1 0x00038040
311#define CPE_CI_PTR_Q0 0x00038004
312#define __CPE_CI_UNUSED_MK 0xffff0000
313#define __CPE_CI_UNUSED_SH 16
314#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
315#define __CPE_CI_PTR 0x0000ffff
316#define CPE_CI_PTR_Q1 0x00038044
317#define CPE_DEPTH_Q0 0x00038008
318#define __CPE_DEPTH_UNUSED_MK 0xf8000000
319#define __CPE_DEPTH_UNUSED_SH 27
320#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
321#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
322#define __CPE_MSIX_VEC_INDEX_SH 16
323#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
324#define __CPE_DEPTH 0x0000ffff
325#define CPE_DEPTH_Q1 0x00038048
326#define CPE_QCTRL_Q0 0x0003800c
327#define __CPE_CTRL_UNUSED30_MK 0xfc000000
328#define __CPE_CTRL_UNUSED30_SH 26
329#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
330#define __CPE_FUNC_INT_CTRL_MK 0x03000000
331#define __CPE_FUNC_INT_CTRL_SH 24
332#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
333enum {
334 __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
335 __CPE_FUNC_INT_CTRL_F2NF = 0x1,
336 __CPE_FUNC_INT_CTRL_3QUART = 0x2,
337 __CPE_FUNC_INT_CTRL_HALF = 0x3,
338};
339#define __CPE_CTRL_UNUSED20_MK 0x00f00000
340#define __CPE_CTRL_UNUSED20_SH 20
341#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
342#define __CPE_SCI_TH_MK 0x000f0000
343#define __CPE_SCI_TH_SH 16
344#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
345#define __CPE_CTRL_UNUSED10_MK 0x0000c000
346#define __CPE_CTRL_UNUSED10_SH 14
347#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
348#define __CPE_ACK_PENDING 0x00002000
349#define __CPE_CTRL_UNUSED40_MK 0x00001c00
350#define __CPE_CTRL_UNUSED40_SH 10
351#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
352#define __CPE_PCIEID_MK 0x00000300
353#define __CPE_PCIEID_SH 8
354#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
355#define __CPE_CTRL_UNUSED00_MK 0x000000fe
356#define __CPE_CTRL_UNUSED00_SH 1
357#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
358#define __CPE_ESIZE 0x00000001
359#define CPE_QCTRL_Q1 0x0003804c
360#define __CPE_CTRL_UNUSED31_MK 0xfc000000
361#define __CPE_CTRL_UNUSED31_SH 26
362#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
363#define __CPE_CTRL_UNUSED21_MK 0x00f00000
364#define __CPE_CTRL_UNUSED21_SH 20
365#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
366#define __CPE_CTRL_UNUSED11_MK 0x0000c000
367#define __CPE_CTRL_UNUSED11_SH 14
368#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
369#define __CPE_CTRL_UNUSED41_MK 0x00001c00
370#define __CPE_CTRL_UNUSED41_SH 10
371#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
372#define __CPE_CTRL_UNUSED01_MK 0x000000fe
373#define __CPE_CTRL_UNUSED01_SH 1
374#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
375#define RME_PI_PTR_Q0 0x00038020
376#define __LATENCY_TIME_STAMP_MK 0xffff0000
377#define __LATENCY_TIME_STAMP_SH 16
378#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
379#define __RME_PI_PTR 0x0000ffff
380#define RME_PI_PTR_Q1 0x00038060
381#define RME_CI_PTR_Q0 0x00038024
382#define __DELAY_TIME_STAMP_MK 0xffff0000
383#define __DELAY_TIME_STAMP_SH 16
384#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
385#define __RME_CI_PTR 0x0000ffff
386#define RME_CI_PTR_Q1 0x00038064
387#define RME_DEPTH_Q0 0x00038028
388#define __RME_DEPTH_UNUSED_MK 0xf8000000
389#define __RME_DEPTH_UNUSED_SH 27
390#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
391#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
392#define __RME_MSIX_VEC_INDEX_SH 16
393#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
394#define __RME_DEPTH 0x0000ffff
395#define RME_DEPTH_Q1 0x00038068
396#define RME_QCTRL_Q0 0x0003802c
397#define __RME_INT_LATENCY_TIMER_MK 0xff000000
398#define __RME_INT_LATENCY_TIMER_SH 24
399#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
400#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
401#define __RME_INT_DELAY_TIMER_SH 16
402#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
403#define __RME_INT_DELAY_DISABLE 0x00008000
404#define __RME_DLY_DELAY_DISABLE 0x00004000
405#define __RME_ACK_PENDING 0x00002000
406#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
407#define __RME_CTRL_UNUSED10_MK 0x00000c00
408#define __RME_CTRL_UNUSED10_SH 10
409#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
410#define __RME_PCIEID_MK 0x00000300
411#define __RME_PCIEID_SH 8
412#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
413#define __RME_CTRL_UNUSED00_MK 0x000000fe
414#define __RME_CTRL_UNUSED00_SH 1
415#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
416#define __RME_ESIZE 0x00000001
417#define RME_QCTRL_Q1 0x0003806c
418#define __RME_CTRL_UNUSED11_MK 0x00000c00
419#define __RME_CTRL_UNUSED11_SH 10
420#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
421#define __RME_CTRL_UNUSED01_MK 0x000000fe
422#define __RME_CTRL_UNUSED01_SH 1
423#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
424#define PSS_CTL_REG 0x00018800
425#define __PSS_I2C_CLK_DIV_MK 0x007f0000
426#define __PSS_I2C_CLK_DIV_SH 16
427#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
428#define __PSS_LMEM_INIT_DONE 0x00001000
429#define __PSS_LMEM_RESET 0x00000200
430#define __PSS_LMEM_INIT_EN 0x00000100
431#define __PSS_LPU1_RESET 0x00000002
432#define __PSS_LPU0_RESET 0x00000001
433#define PSS_ERR_STATUS_REG 0x00018810
434#define __PSS_LPU1_TCM_READ_ERR 0x00200000
435#define __PSS_LPU0_TCM_READ_ERR 0x00100000
436#define __PSS_LMEM5_CORR_ERR 0x00080000
437#define __PSS_LMEM4_CORR_ERR 0x00040000
438#define __PSS_LMEM3_CORR_ERR 0x00020000
439#define __PSS_LMEM2_CORR_ERR 0x00010000
440#define __PSS_LMEM1_CORR_ERR 0x00008000
441#define __PSS_LMEM0_CORR_ERR 0x00004000
442#define __PSS_LMEM5_UNCORR_ERR 0x00002000
443#define __PSS_LMEM4_UNCORR_ERR 0x00001000
444#define __PSS_LMEM3_UNCORR_ERR 0x00000800
445#define __PSS_LMEM2_UNCORR_ERR 0x00000400
446#define __PSS_LMEM1_UNCORR_ERR 0x00000200
447#define __PSS_LMEM0_UNCORR_ERR 0x00000100
448#define __PSS_BAL_PERR 0x00000080
449#define __PSS_DIP_IF_ERR 0x00000040
450#define __PSS_IOH_IF_ERR 0x00000020
451#define __PSS_TDS_IF_ERR 0x00000010
452#define __PSS_RDS_IF_ERR 0x00000008
453#define __PSS_SGM_IF_ERR 0x00000004
454#define __PSS_LPU1_RAM_ERR 0x00000002
455#define __PSS_LPU0_RAM_ERR 0x00000001
456#define ERR_SET_REG 0x00018818
457#define __PSS_ERR_STATUS_SET 0x003fffff
458#define PMM_1T_RESET_REG_P0 0x0002381c
459#define __PMM_1T_RESET_P 0x00000001
460#define PMM_1T_RESET_REG_P1 0x00023c1c
461#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
462#define __RXQ0_ADD_VECTORS_P 0x80000000
463#define __RXQ0_STOP_P 0x40000000
464#define __RXQ0_PRD_PTR_P 0x0000ffff
465#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
466#define __RXQ1_ADD_VECTORS_P 0x80000000
467#define __RXQ1_STOP_P 0x40000000
468#define __RXQ1_PRD_PTR_P 0x0000ffff
469#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
470#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
471#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
472#define __TXQ0_ADD_VECTORS_P 0x80000000
473#define __TXQ0_STOP_P 0x40000000
474#define __TXQ0_PRD_PTR_P 0x0000ffff
475#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
476#define __TXQ1_ADD_VECTORS_P 0x80000000
477#define __TXQ1_STOP_P 0x40000000
478#define __TXQ1_PRD_PTR_P 0x0000ffff
479#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
480#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
481#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
482#define __IB1_0_ACK_P 0x80000000
483#define __IB1_0_DISABLE_P 0x40000000
484#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
485#define __IB1_0_COALESCING_CFG_P_SH 16
486#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
487#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
488#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
489#define __IB1_1_ACK_P 0x80000000
490#define __IB1_1_DISABLE_P 0x40000000
491#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
492#define __IB1_1_COALESCING_CFG_P_SH 16
493#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
494#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
495#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
496#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
497#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
498#define __IB2_0_ACK_P 0x80000000
499#define __IB2_0_DISABLE_P 0x40000000
500#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
501#define __IB2_0_COALESCING_CFG_P_SH 16
502#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
503#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
504#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
505#define __IB2_1_ACK_P 0x80000000
506#define __IB2_1_DISABLE_P 0x40000000
507#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
508#define __IB2_1_COALESCING_CFG_P_SH 16
509#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
510#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
511#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
512#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
513
514/*
515 * These definitions are either in error or missing in the spec. They are
516 * auto-generated from hard-coded values in regparse.pl.
517 */
518#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
519#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
520#define __EMPHPRE_AT_4G_FIX 0x00000003
521#define __SFP_TXRATE_EN_FIX 0x00000100
522#define __SFP_RXRATE_EN_FIX 0x00000080
523
524/*
525 * These register definitions are auto-generated from hard coded values
526 * in regparse.pl.
527 */
528
529/*
530 * These register mapping definitions are auto-generated from mapping tables
531 * in regparse.pl.
532 */
533#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
534#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
535#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
536#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
537#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
538
539#define CPE_DEPTH_Q(__n) \
540 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
541#define CPE_QCTRL_Q(__n) \
542 (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
543#define CPE_PI_PTR_Q(__n) \
544 (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
545#define CPE_CI_PTR_Q(__n) \
546 (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
547#define RME_DEPTH_Q(__n) \
548 (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
549#define RME_QCTRL_Q(__n) \
550 (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
551#define RME_PI_PTR_Q(__n) \
552 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
553#define RME_CI_PTR_Q(__n) \
554 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
555#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
556 * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
557#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
558 * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
559#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
560 * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
561#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
562 * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
563#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
564 * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
565#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
566 * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
567#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
568 * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
569#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
570 * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
571
572#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
573#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
574#define CPE_Q_MASK(__q) ((__q) & 0x3)
575#define RME_Q_MASK(__q) ((__q) & 0x3)
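The per-queue macros above exploit the regular layout: queue n's register is queue 0's address plus n strides, where the stride is just the Q1-Q0 delta, so only two absolute addresses per register family need to be spelled out. Worked through with the values above:

	/*
	 * CPE_DEPTH_Q(2)
	 *   == CPE_DEPTH_Q0 + 2 * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)
	 *   == 0x00038008 + 2 * 0x40 == 0x00038088
	 *
	 * CPE_Q_NUM() packs (PCI function, local queue) into a global
	 * queue number; CPE_Q_MASK() recovers the local index:
	 */
	u32 q = CPE_Q_NUM(1, 3);	/* (1 << 2) + 3 == 7 */
	u32 local = CPE_Q_MASK(q);	/* 7 & 0x3 == 3 */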
576
577/*
578 * PCI MSI-X vector defines
579 */
580enum {
581 BFA_MSIX_CPE_Q0 = 0,
582 BFA_MSIX_CPE_Q1 = 1,
583 BFA_MSIX_CPE_Q2 = 2,
584 BFA_MSIX_CPE_Q3 = 3,
585 BFA_MSIX_RME_Q0 = 4,
586 BFA_MSIX_RME_Q1 = 5,
587 BFA_MSIX_RME_Q2 = 6,
588 BFA_MSIX_RME_Q3 = 7,
589 BFA_MSIX_LPU_ERR = 8,
590 BFA_MSIX_CT_MAX = 9,
591};
592
593/*
594 * And corresponding host interrupt status bit field defines
595 */
596#define __HFN_INT_CPE_Q0 0x00000001U
597#define __HFN_INT_CPE_Q1 0x00000002U
598#define __HFN_INT_CPE_Q2 0x00000004U
599#define __HFN_INT_CPE_Q3 0x00000008U
600#define __HFN_INT_CPE_Q4 0x00000010U
601#define __HFN_INT_CPE_Q5 0x00000020U
602#define __HFN_INT_CPE_Q6 0x00000040U
603#define __HFN_INT_CPE_Q7 0x00000080U
604#define __HFN_INT_RME_Q0 0x00000100U
605#define __HFN_INT_RME_Q1 0x00000200U
606#define __HFN_INT_RME_Q2 0x00000400U
607#define __HFN_INT_RME_Q3 0x00000800U
608#define __HFN_INT_RME_Q4 0x00001000U
609#define __HFN_INT_RME_Q5 0x00002000U
610#define __HFN_INT_RME_Q6 0x00004000U
611#define __HFN_INT_RME_Q7 0x00008000U
612#define __HFN_INT_ERR_EMC 0x00010000U
613#define __HFN_INT_ERR_LPU0 0x00020000U
614#define __HFN_INT_ERR_LPU1 0x00040000U
615#define __HFN_INT_ERR_PSS 0x00080000U
616#define __HFN_INT_MBOX_LPU0 0x00100000U
617#define __HFN_INT_MBOX_LPU1 0x00200000U
618#define __HFN_INT_MBOX1_LPU0 0x00400000U
619#define __HFN_INT_MBOX1_LPU1 0x00800000U
620#define __HFN_INT_LL_HALT 0x01000000U
621#define __HFN_INT_CPE_MASK 0x000000ffU
622#define __HFN_INT_RME_MASK 0x0000ff00U
623
624/*
625 * catapult memory map.
626 */
627#define LL_PGN_HQM0 0x0096
628#define LL_PGN_HQM1 0x0097
629#define PSS_SMEM_PAGE_START 0x8000
630#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
631#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
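The two macros split a linear shared-memory address into a 32 KB page number and an offset within it. A worked example:

	/*
	 * To reach smem byte offset 0x9000 given a function's page-0
	 * number pg0:
	 *
	 *   PSS_SMEM_PGNUM(pg0, 0x9000) == pg0 + (0x9000 >> 15) == pg0 + 1
	 *   PSS_SMEM_PGOFF(0x9000)      == 0x9000 & 0x7fff      == 0x1000
	 *
	 * i.e. the access lands 0x1000 bytes into the second 32 KB page.
	 */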
632
633/*
634 * End of catapult memory map
635 */
636
637#endif /* __BFI_CTREG_H__ */
diff --git a/drivers/net/bna/bfi_ll.h b/drivers/net/bna/bfi_ll.h
new file mode 100644
index 000000000000..bee4d054066a
--- /dev/null
+++ b/drivers/net/bna/bfi_ll.h
@@ -0,0 +1,438 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFI_LL_H__
19#define __BFI_LL_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
25/**
26 * @brief
27 * "enums" for all LL mailbox messages other than IOC
28 */
29enum {
30 BFI_LL_H2I_MAC_UCAST_SET_REQ = 1,
31 BFI_LL_H2I_MAC_UCAST_ADD_REQ = 2,
32 BFI_LL_H2I_MAC_UCAST_DEL_REQ = 3,
33
34 BFI_LL_H2I_MAC_MCAST_ADD_REQ = 4,
35 BFI_LL_H2I_MAC_MCAST_DEL_REQ = 5,
36 BFI_LL_H2I_MAC_MCAST_FILTER_REQ = 6,
37 BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ = 7,
38
39 BFI_LL_H2I_PORT_ADMIN_REQ = 8,
40 BFI_LL_H2I_STATS_GET_REQ = 9,
41 BFI_LL_H2I_STATS_CLEAR_REQ = 10,
42
43 BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ = 11,
44 BFI_LL_H2I_RXF_DEFAULT_SET_REQ = 12,
45
46 BFI_LL_H2I_TXQ_STOP_REQ = 13,
47 BFI_LL_H2I_RXQ_STOP_REQ = 14,
48
49 BFI_LL_H2I_DIAG_LOOPBACK_REQ = 15,
50
51 BFI_LL_H2I_SET_PAUSE_REQ = 16,
52 BFI_LL_H2I_MTU_INFO_REQ = 17,
53
54 BFI_LL_H2I_RX_REQ = 18,
55};
56
57enum {
58 BFI_LL_I2H_MAC_UCAST_SET_RSP = BFA_I2HM(1),
59 BFI_LL_I2H_MAC_UCAST_ADD_RSP = BFA_I2HM(2),
60 BFI_LL_I2H_MAC_UCAST_DEL_RSP = BFA_I2HM(3),
61
62 BFI_LL_I2H_MAC_MCAST_ADD_RSP = BFA_I2HM(4),
63 BFI_LL_I2H_MAC_MCAST_DEL_RSP = BFA_I2HM(5),
64 BFI_LL_I2H_MAC_MCAST_FILTER_RSP = BFA_I2HM(6),
65 BFI_LL_I2H_MAC_MCAST_DEL_ALL_RSP = BFA_I2HM(7),
66
67 BFI_LL_I2H_PORT_ADMIN_RSP = BFA_I2HM(8),
68 BFI_LL_I2H_STATS_GET_RSP = BFA_I2HM(9),
69 BFI_LL_I2H_STATS_CLEAR_RSP = BFA_I2HM(10),
70
71 BFI_LL_I2H_RXF_PROMISCUOUS_SET_RSP = BFA_I2HM(11),
72 BFI_LL_I2H_RXF_DEFAULT_SET_RSP = BFA_I2HM(12),
73
74 BFI_LL_I2H_TXQ_STOP_RSP = BFA_I2HM(13),
75 BFI_LL_I2H_RXQ_STOP_RSP = BFA_I2HM(14),
76
77 BFI_LL_I2H_DIAG_LOOPBACK_RSP = BFA_I2HM(15),
78
79 BFI_LL_I2H_SET_PAUSE_RSP = BFA_I2HM(16),
80
81 BFI_LL_I2H_MTU_INFO_RSP = BFA_I2HM(17),
82 BFI_LL_I2H_RX_RSP = BFA_I2HM(18),
83
84 BFI_LL_I2H_LINK_DOWN_AEN = BFA_I2HM(19),
85 BFI_LL_I2H_LINK_UP_AEN = BFA_I2HM(20),
86
87 BFI_LL_I2H_PORT_ENABLE_AEN = BFA_I2HM(21),
88 BFI_LL_I2H_PORT_DISABLE_AEN = BFA_I2HM(22),
89};
90
91/**
92 * @brief bfi_ll_mac_addr_req is used by:
93 * BFI_LL_H2I_MAC_UCAST_SET_REQ
94 * BFI_LL_H2I_MAC_UCAST_ADD_REQ
95 * BFI_LL_H2I_MAC_UCAST_DEL_REQ
96 * BFI_LL_H2I_MAC_MCAST_ADD_REQ
97 * BFI_LL_H2I_MAC_MCAST_DEL_REQ
98 */
99struct bfi_ll_mac_addr_req {
100 struct bfi_mhdr mh; /*!< common msg header */
101 u8 rxf_id;
102 u8 rsvd1[3];
103 mac_t mac_addr;
104 u8 rsvd2[2];
105};
106
107/**
108 * @brief bfi_ll_mcast_filter_req is used by:
109 * BFI_LL_H2I_MAC_MCAST_FILTER_REQ
110 */
111struct bfi_ll_mcast_filter_req {
112 struct bfi_mhdr mh; /*!< common msg header */
113 u8 rxf_id;
114 u8 enable;
115 u8 rsvd[2];
116};
117
118/**
119 * @brief bfi_ll_mcast_del_all_req is used by:
120 * BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ
121 */
122struct bfi_ll_mcast_del_all_req {
123 struct bfi_mhdr mh; /*!< common msg header */
124 u8 rxf_id;
125 u8 rsvd[3];
126};
127
128/**
129 * @brief bfi_ll_q_stop_req is used by:
130 * BFI_LL_H2I_TXQ_STOP_REQ
131 * BFI_LL_H2I_RXQ_STOP_REQ
132 */
133struct bfi_ll_q_stop_req {
134 struct bfi_mhdr mh; /*!< common msg header */
135 u32 q_id_mask[2]; /* !< bit-mask for queue ids */
136};
137
138/**
139 * @brief bfi_ll_stats_req is used by:
140 *	BFI_LL_H2I_STATS_GET_REQ
141 *	BFI_LL_H2I_STATS_CLEAR_REQ
142 */
143struct bfi_ll_stats_req {
144 struct bfi_mhdr mh; /*!< common msg header */
145 u16 stats_mask; /* !< bit-mask for non-function statistics */
146 u8 rsvd[2];
147 u32 rxf_id_mask[2]; /* !< bit-mask for RxF Statistics */
148 u32 txf_id_mask[2]; /* !< bit-mask for TxF Statistics */
149 union bfi_addr_u host_buffer; /* !< where statistics are returned */
150};
151
152/**
153 * @brief defines for "stats_mask" above.
154 */
155#define BFI_LL_STATS_MAC (1 << 0) /* !< MAC Statistics */
156#define BFI_LL_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
157#define BFI_LL_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
158#define BFI_LL_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
159#define BFI_LL_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
160
161#define BFI_LL_STATS_ALL 0x1f
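Putting the request together: stats_mask picks the non-function blocks, the id masks pick individual Rx/Tx functions, and host_buffer must point at a DMA-able region large enough for struct bfi_ll_stats (defined at the end of this header). A sketch, with the DMA mapping assumed done elsewhere:

	/* Request MAC + RAD stats plus RxF 0 (buffer dma assumed mapped) */
	static void ll_fill_stats_req(struct bfi_ll_stats_req *req, u64 dma)
	{
		bfi_h2i_set(req->mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
		req->stats_mask = BFI_LL_STATS_MAC | BFI_LL_STATS_RAD;
		req->rxf_id_mask[0] = 1 << 0;	/* RxF 0 only */
		req->rxf_id_mask[1] = 0;
		req->txf_id_mask[0] = 0;
		req->txf_id_mask[1] = 0;
		req->host_buffer.a32.addr_lo = (u32)dma;
		req->host_buffer.a32.addr_hi = (u32)(dma >> 32);
	}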
162
163/**
164 * @brief bfi_ll_port_admin_req
165 */
166struct bfi_ll_port_admin_req {
167 struct bfi_mhdr mh; /*!< common msg header */
168 u8 up;
169 u8 rsvd[3];
170};
171
172/**
173 * @brief bfi_ll_rxf_req is used by:
174 * BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
175 * BFI_LL_H2I_RXF_DEFAULT_SET_REQ
176 */
177struct bfi_ll_rxf_req {
178 struct bfi_mhdr mh; /*!< common msg header */
179 u8 rxf_id;
180 u8 enable;
181 u8 rsvd[2];
182};
183
184/**
185 * @brief bfi_ll_rxf_multi_req is used by:
186 * BFI_LL_H2I_RX_REQ
187 */
188struct bfi_ll_rxf_multi_req {
189 struct bfi_mhdr mh; /*!< common msg header */
190 u32 rxf_id_mask[2];
191 u8 enable;
192 u8 rsvd[3];
193};
194
195/**
196 * @brief enum for Loopback opmodes
197 */
198enum {
199 BFI_LL_DIAG_LB_OPMODE_EXT = 0,
200 BFI_LL_DIAG_LB_OPMODE_CBL = 1,
201};
202
203/**
204 * @brief bfi_ll_set_pause_req is used by:
205 * BFI_LL_H2I_SET_PAUSE_REQ
206 */
207struct bfi_ll_set_pause_req {
208 struct bfi_mhdr mh;
209 u8 tx_pause; /* 1 = enable, 0 = disable */
210 u8 rx_pause; /* 1 = enable, 0 = disable */
211 u8 rsvd[2];
212};
213
214/**
215 * @brief bfi_ll_mtu_info_req is used by:
216 * BFI_LL_H2I_MTU_INFO_REQ
217 */
218struct bfi_ll_mtu_info_req {
219 struct bfi_mhdr mh;
220 u16 mtu;
221 u8 rsvd[2];
222};
223
224/**
225 * @brief
226 * Response header format used by all responses
227 * For both responses and asynchronous notifications
228 */
229struct bfi_ll_rsp {
230 struct bfi_mhdr mh; /*!< common msg header */
231 u8 error;
232 u8 rsvd[3];
233};
234
235/**
236 * @brief bfi_ll_aen is used by:
237 * BFI_LL_I2H_LINK_DOWN_AEN
238 * BFI_LL_I2H_LINK_UP_AEN
239 */
240struct bfi_ll_aen {
241 struct bfi_mhdr mh; /*!< common msg header */
242 u32 reason;
243 u8 cee_linkup;
244 u8 prio_map; /*!< LL priority bit-map */
245 u8 rsvd[2];
246};
247
248/**
249 * @brief
250 * The following error codes can be returned
251 * by the mbox commands
252 */
253enum {
254 BFI_LL_CMD_OK = 0,
255 BFI_LL_CMD_FAIL = 1,
256 BFI_LL_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */
257 BFI_LL_CMD_CAM_FULL = 3, /* !< CAM is full */
258	BFI_LL_CMD_NOT_OWNER	 	= 4,	/* !< Not permitted because not owner */
259 BFI_LL_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */
260 BFI_LL_CMD_WAITING = 6, /* !< Waiting for completion (VMware) */
261 BFI_LL_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
262};
263
264/* Statistics */
265#define BFI_LL_TXF_ID_MAX 64
266#define BFI_LL_RXF_ID_MAX 64
267
268/* TxF Frame Statistics */
269struct bfi_ll_stats_txf {
270 u64 ucast_octets;
271 u64 ucast;
272 u64 ucast_vlan;
273
274 u64 mcast_octets;
275 u64 mcast;
276 u64 mcast_vlan;
277
278 u64 bcast_octets;
279 u64 bcast;
280 u64 bcast_vlan;
281
282 u64 errors;
283 u64 filter_vlan; /* frames filtered due to VLAN */
284 u64 filter_mac_sa; /* frames filtered due to SA check */
285};
286
287/* RxF Frame Statistics */
288struct bfi_ll_stats_rxf {
289 u64 ucast_octets;
290 u64 ucast;
291 u64 ucast_vlan;
292
293 u64 mcast_octets;
294 u64 mcast;
295 u64 mcast_vlan;
296
297 u64 bcast_octets;
298 u64 bcast;
299 u64 bcast_vlan;
300 u64 frame_drops;
301};
302
303/* FC Tx Frame Statistics */
304struct bfi_ll_stats_fc_tx {
305 u64 txf_ucast_octets;
306 u64 txf_ucast;
307 u64 txf_ucast_vlan;
308
309 u64 txf_mcast_octets;
310 u64 txf_mcast;
311 u64 txf_mcast_vlan;
312
313 u64 txf_bcast_octets;
314 u64 txf_bcast;
315 u64 txf_bcast_vlan;
316
317 u64 txf_parity_errors;
318 u64 txf_timeout;
319 u64 txf_fid_parity_errors;
320};
321
322/* FC Rx Frame Statistics */
323struct bfi_ll_stats_fc_rx {
324 u64 rxf_ucast_octets;
325 u64 rxf_ucast;
326 u64 rxf_ucast_vlan;
327
328 u64 rxf_mcast_octets;
329 u64 rxf_mcast;
330 u64 rxf_mcast_vlan;
331
332 u64 rxf_bcast_octets;
333 u64 rxf_bcast;
334 u64 rxf_bcast_vlan;
335};
336
337/* RAD Frame Statistics */
338struct bfi_ll_stats_rad {
339 u64 rx_frames;
340 u64 rx_octets;
341 u64 rx_vlan_frames;
342
343 u64 rx_ucast;
344 u64 rx_ucast_octets;
345 u64 rx_ucast_vlan;
346
347 u64 rx_mcast;
348 u64 rx_mcast_octets;
349 u64 rx_mcast_vlan;
350
351 u64 rx_bcast;
352 u64 rx_bcast_octets;
353 u64 rx_bcast_vlan;
354
355 u64 rx_drops;
356};
357
358/* BPC Tx Registers */
359struct bfi_ll_stats_bpc {
360 /* transmit stats */
361 u64 tx_pause[8];
362 u64 tx_zero_pause[8]; /*!< Pause cancellation */
363			/*!< Pause initiation rather than retention */
364 u64 tx_first_pause[8];
365
366 /* receive stats */
367 u64 rx_pause[8];
368 u64 rx_zero_pause[8]; /*!< Pause cancellation */
369 /*!<Pause initiation rather than retention */
370 u64 rx_first_pause[8];
371};
372
373/* MAC Statistics */
374struct bfi_ll_stats_mac {
375 u64 frame_64; /* both rx and tx counter */
376 u64 frame_65_127; /* both rx and tx counter */
377 u64 frame_128_255; /* both rx and tx counter */
378 u64 frame_256_511; /* both rx and tx counter */
379 u64 frame_512_1023; /* both rx and tx counter */
380 u64 frame_1024_1518; /* both rx and tx counter */
381 u64 frame_1519_1522; /* both rx and tx counter */
382
383 /* receive stats */
384 u64 rx_bytes;
385 u64 rx_packets;
386 u64 rx_fcs_error;
387 u64 rx_multicast;
388 u64 rx_broadcast;
389 u64 rx_control_frames;
390 u64 rx_pause;
391 u64 rx_unknown_opcode;
392 u64 rx_alignment_error;
393 u64 rx_frame_length_error;
394 u64 rx_code_error;
395 u64 rx_carrier_sense_error;
396 u64 rx_undersize;
397 u64 rx_oversize;
398 u64 rx_fragments;
399 u64 rx_jabber;
400 u64 rx_drop;
401
402 /* transmit stats */
403 u64 tx_bytes;
404 u64 tx_packets;
405 u64 tx_multicast;
406 u64 tx_broadcast;
407 u64 tx_pause;
408 u64 tx_deferral;
409 u64 tx_excessive_deferral;
410 u64 tx_single_collision;
411	u64 tx_multiple_collision;
412 u64 tx_late_collision;
413 u64 tx_excessive_collision;
414 u64 tx_total_collision;
415 u64 tx_pause_honored;
416 u64 tx_drop;
417 u64 tx_jabber;
418 u64 tx_fcs_error;
419 u64 tx_control_frame;
420 u64 tx_oversize;
421 u64 tx_undersize;
422 u64 tx_fragments;
423};
424
425/* Complete statistics */
426struct bfi_ll_stats {
427 struct bfi_ll_stats_mac mac_stats;
428 struct bfi_ll_stats_bpc bpc_stats;
429 struct bfi_ll_stats_rad rad_stats;
430 struct bfi_ll_stats_fc_rx fc_rx_stats;
431 struct bfi_ll_stats_fc_tx fc_tx_stats;
432 struct bfi_ll_stats_rxf rxf_stats[BFI_LL_RXF_ID_MAX];
433 struct bfi_ll_stats_txf txf_stats[BFI_LL_TXF_ID_MAX];
434};
435
436#pragma pack()
437
438#endif /* __BFI_LL_H__ */
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
new file mode 100644
index 000000000000..6a2b3291c190
--- /dev/null
+++ b/drivers/net/bna/bna.h
@@ -0,0 +1,654 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#ifndef __BNA_H__
14#define __BNA_H__
15
16#include "bfa_wc.h"
17#include "bfa_ioc.h"
18#include "cna.h"
19#include "bfi_ll.h"
20#include "bna_types.h"
21
22extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
23extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
24
25/**
26 *
27 * Macros and constants
28 *
29 */
30
31#define BNA_IOC_TIMER_FREQ 200
32
33/* Log string size */
34#define BNA_MESSAGE_SIZE 256
35
36#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod))
37
38/* MBOX API for PORT, TX, RX */
39#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
40do { \
41 memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len)); \
42 (_qe)->cbfn = (_cbfn); \
43 (_qe)->cbarg = (_cbarg); \
44} while (0)
45
46#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
47
48#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
49 (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
50
51#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
52
53#define BNA_TO_POWER_OF_2(x) \
54do { \
55 int _shift = 0; \
56 while ((x) && (x) != 1) { \
57 (x) >>= 1; \
58 _shift++; \
59 } \
60 (x) <<= _shift; \
61} while (0)
62
63#define BNA_TO_POWER_OF_2_HIGH(x) \
64do { \
65 int n = 1; \
66 while (n < (x)) \
67 n <<= 1; \
68 (x) = n; \
69} while (0)
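
Both macros modify their argument in place; the first rounds down to a
power of two, the second rounds up. A short worked example (editor's
sketch, not part of the patch):

	int a = 100, b = 100;

	BNA_TO_POWER_OF_2(a);		/* a == 64:  highest power of 2 <= 100 */
	BNA_TO_POWER_OF_2_HIGH(b);	/* b == 128: lowest power of 2 >= 100 */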
70
71/*
72 * input : _addr-> os dma addr in host endian format,
73 * output : _bna_dma_addr-> pointer to hw dma addr
74 */
75#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
76do { \
77 u64 tmp_addr = \
78 cpu_to_be64((u64)(_addr)); \
79 (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
80 (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
81} while (0)
82
83/*
84 * input : _bna_dma_addr-> pointer to hw dma addr
85 * output : _addr-> os dma addr in host endian format
86 */
87#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
88do { \
89 (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
90 | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
91} while (0)
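
BNA_SET_DMA_ADDR() converts a host-endian 64-bit DMA address into the
big-endian msb/lsb pair the hardware expects, and BNA_GET_DMA_ADDR()
reverses it. A round-trip sketch, assuming a little-endian host:

	u64 addr = 0x0000001122334455ULL, back;
	struct bna_dma_addr hw;

	BNA_SET_DMA_ADDR(addr, &hw);	/* hw.msb = be32 0x00000011 */
					/* hw.lsb = be32 0x22334455 */
	BNA_GET_DMA_ADDR(&hw, back);	/* back == addr again */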
92
93#define containing_rec(addr, type, field) \
94 ((type *)((unsigned char *)(addr) - \
95 (unsigned char *)(&((type *)0)->field)))
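
containing_rec() is the classic container_of pattern: recover the
enclosing record from a pointer to one of its embedded members (the
kernel's own container_of() in linux/kernel.h does the same job). An
illustrative sketch with a hypothetical record type:

	struct demo_rec {
		int key;
		struct list_head qe;	/* embedded queue element */
	};

	static struct demo_rec *demo_rec_from_qe(struct list_head *node)
	{
		return containing_rec(node, struct demo_rec, qe);
	}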
96
97#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
98
99/* TxQ element is 64 bytes */
100#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
101#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
102
103#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
104{ \
105 unsigned int page_index; /* index within a page */ \
106 void *page_addr; \
107 page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
108 (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
109 page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
110 (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
111}
112
113/* RxQ element is 8 bytes */
114#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
115#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
116
117#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
118{ \
119 unsigned int page_index; /* index within a page */ \
120 void *page_addr; \
121 page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
122 (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
123 page_addr = (_qpt_ptr)[((_qe_idx) >> \
124 BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
125 (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
126}
127
128/* CQ element is 16 bytes */
129#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
130#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
131
132#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
133{ \
134 unsigned int page_index; /* index within a page */ \
135 void *page_addr; \
136 \
137 page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
138 (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
139 page_addr = (_qpt_ptr)[((_qe_idx) >> \
140 BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
141 (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
142}
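
All three *_QPGE_PTR_GET macros split a queue-element index into a page
number (high bits, indexing the queue page table) and a slot within that
page (low bits); _qe_ptr_range reports how many contiguous elements are
left on that page. A worked example for the TxQ variant, assuming
PAGE_SIZE == 4096 (64 entries of 64 bytes per page) and that qpt_ptr is
the page-pointer array:

	/* For _qe_idx == 130:
	 *	slot in page	= 130 & 63	= 2
	 *	page number	= 130 >> 6	= 2
	 *	_qe_ptr_range	= 64 - 2	= 62
	 */
	struct bna_txq_entry *qe;
	int range;

	BNA_TXQ_QPGE_PTR_GET(130, qpt_ptr, qe, range);	/* qe -> page 2, slot 2 */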
143
144#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
145 (&((_cast *)(_q_base))[(_qe_idx)])
146
147#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
148
149#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
150 ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
151
152#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
153 (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
154
155#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
156 (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
157 ((_q_depth) - 1))
158
159#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
160 ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
161 (_q_depth - 1))
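
The ring arithmetic assumes _q_depth is a power of two and keeps one
slot permanently empty so that "full" and "empty" stay distinguishable.
A worked example (editor's note) with depth 64:

	/* producer_index = 10, consumer_index = 5:
	 *	in use = (10 - 5)     & 63 = 5
	 *	free   = (5 - 10 - 1) & 63 = 58	(one slot reserved)
	 *
	 * producer_index == consumer_index:
	 *	in use = 0, free = 63; never 64, so full != empty
	 */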
162
163#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
164
165#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
166
167#define BNA_Q_PI_ADD(_q_ptr, _num) \
168 (_q_ptr)->q.producer_index = \
169 (((_q_ptr)->q.producer_index + (_num)) & \
170 ((_q_ptr)->q.q_depth - 1))
171
172#define BNA_Q_CI_ADD(_q_ptr, _num) \
173 (_q_ptr)->q.consumer_index = \
174 (((_q_ptr)->q.consumer_index + (_num)) \
175 & ((_q_ptr)->q.q_depth - 1))
176
177#define BNA_Q_FREE_COUNT(_q_ptr) \
178 (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
179
180#define BNA_Q_IN_USE_COUNT(_q_ptr) \
181 (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
182
183/* These macros build the data portion of the TxQ/RxQ doorbell */
184#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
185#define BNA_DOORBELL_Q_STOP (0x40000000)
186
187/* These macros build the data portion of the IB doorbell */
188#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
189 (0x80000000 | ((_timeout) << 16) | (_events))
190#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
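
Each doorbell write is a single 32-bit word: bit 31 marks a valid
update/ack, bit 30 a stop/disable request, and the IB ack variant packs
the coalescing timeout above the 16-bit acked-event count. For example
(editor's note):

	/* BNA_DOORBELL_IB_INT_ACK(5, 3)
	 *	= 0x80000000 | (5 << 16) | 3
	 *	= 0x80050003	(ack 3 events, coalescing timeout 5)
	 */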
191
192/* Set the coalescing timer for the given ib */
193#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer)		\
194	((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))
195
196/* Acks 'events' # of events for a given ib */
197#define bna_ib_ack(_i_dbell, _events)					\
198	(writel(((_i_dbell)->doorbell_ack | (_events)),			\
199		(_i_dbell)->doorbell_addr))
200
201#define bna_txq_prod_indx_doorbell(_tcb)				\
202	(writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index),	\
203		(_tcb)->q_dbell))
204
205#define bna_rxq_prod_indx_doorbell(_rcb)				\
206	(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index),	\
207		(_rcb)->q_dbell))
208
209#define BNA_LARGE_PKT_SIZE 1000
210
211#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
212do { \
213 if ((_len) > BNA_LARGE_PKT_SIZE) { \
214 (_pkt)->large_pkt_cnt++; \
215 } else { \
216 (_pkt)->small_pkt_cnt++; \
217 } \
218} while (0)
219
220#define call_rxf_stop_cbfn(rxf, status) \
221 if ((rxf)->stop_cbfn) { \
222 (*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status)); \
223 (rxf)->stop_cbfn = NULL; \
224 (rxf)->stop_cbarg = NULL; \
225 }
226
227#define call_rxf_start_cbfn(rxf, status) \
228 if ((rxf)->start_cbfn) { \
229 (*(rxf)->start_cbfn)((rxf)->start_cbarg, (status)); \
230 (rxf)->start_cbfn = NULL; \
231 (rxf)->start_cbarg = NULL; \
232 }
233
234#define call_rxf_cam_fltr_cbfn(rxf, status) \
235 if ((rxf)->cam_fltr_cbfn) { \
236 (*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx, \
237 (status)); \
238 (rxf)->cam_fltr_cbfn = NULL; \
239 (rxf)->cam_fltr_cbarg = NULL; \
240 }
241
242#define call_rxf_pause_cbfn(rxf, status) \
243 if ((rxf)->oper_state_cbfn) { \
244 (*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
245 (status)); \
246 (rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED; \
247 (rxf)->oper_state_cbfn = NULL; \
248 (rxf)->oper_state_cbarg = NULL; \
249 }
250
251#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
252
253#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
254
255#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
256
257#define xxx_enable(mode, bitmask, xxx) \
258do { \
259 bitmask |= xxx; \
260 mode |= xxx; \
261} while (0)
262
263#define xxx_disable(mode, bitmask, xxx) \
264do { \
265 bitmask |= xxx; \
266 mode &= ~xxx; \
267} while (0)
268
269#define xxx_inactive(mode, bitmask, xxx) \
270do { \
271 bitmask &= ~xxx; \
272 mode &= ~xxx; \
273} while (0)
274
275#define is_promisc_enable(mode, bitmask) \
276 is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
277
278#define is_promisc_disable(mode, bitmask) \
279 is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
280
281#define promisc_enable(mode, bitmask) \
282 xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
283
284#define promisc_disable(mode, bitmask) \
285 xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
286
287#define promisc_inactive(mode, bitmask) \
288 xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
289
290#define is_default_enable(mode, bitmask) \
291 is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
292
293#define is_default_disable(mode, bitmask) \
294 is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
295
296#define default_enable(mode, bitmask) \
297 xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
298
299#define default_disable(mode, bitmask) \
300 xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
301
302#define default_inactive(mode, bitmask) \
303 xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
304
305#define is_allmulti_enable(mode, bitmask) \
306 is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
307
308#define is_allmulti_disable(mode, bitmask) \
309 is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
310
311#define allmulti_enable(mode, bitmask) \
312 xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
313
314#define allmulti_disable(mode, bitmask) \
315 xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
316
317#define allmulti_inactive(mode, bitmask) \
318 xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
319
320#define GET_RXQS(rxp, q0, q1) do { \
321 switch ((rxp)->type) { \
322 case BNA_RXP_SINGLE: \
323 (q0) = rxp->rxq.single.only; \
324 (q1) = NULL; \
325 break; \
326 case BNA_RXP_SLR: \
327 (q0) = rxp->rxq.slr.large; \
328 (q1) = rxp->rxq.slr.small; \
329 break; \
330 case BNA_RXP_HDS: \
331 (q0) = rxp->rxq.hds.data; \
332 (q1) = rxp->rxq.hds.hdr; \
333 break; \
334 } \
335} while (0)
336
337/**
338 *
339 * Function prototypes
340 *
341 */
342
343/**
344 * BNA
345 */
346
347/* Internal APIs */
348void bna_adv_res_req(struct bna_res_info *res_info);
349
350/* APIs for BNAD */
351void bna_res_req(struct bna_res_info *res_info);
352void bna_init(struct bna *bna, struct bnad *bnad,
353 struct bfa_pcidev *pcidev,
354 struct bna_res_info *res_info);
355void bna_uninit(struct bna *bna);
356void bna_stats_get(struct bna *bna);
357void bna_stats_clr(struct bna *bna);
358void bna_get_perm_mac(struct bna *bna, u8 *mac);
359
360/* APIs for Rx */
361int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
362
363/* APIs for RxF */
364struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
365void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
366 struct bna_mac *mac);
367struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
368void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
369 struct bna_mac *mac);
370struct bna_rit_segment *
371bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
372void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
373 struct bna_rit_segment *seg);
374
375/**
376 * DEVICE
377 */
378
379/* Internal APIs */
380void bna_adv_device_init(struct bna_device *device, struct bna *bna,
381 struct bna_res_info *res_info);
382
383/* APIs for BNA */
384void bna_device_init(struct bna_device *device, struct bna *bna,
385 struct bna_res_info *res_info);
386void bna_device_uninit(struct bna_device *device);
387void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
388int bna_device_status_get(struct bna_device *device);
389int bna_device_state_get(struct bna_device *device);
390
391/* APIs for BNAD */
392void bna_device_enable(struct bna_device *device);
393void bna_device_disable(struct bna_device *device,
394 enum bna_cleanup_type type);
395
396/**
397 * MBOX
398 */
399
400/* APIs for DEVICE */
401void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
402void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
403void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
404void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
405
406/* APIs for PORT, TX, RX */
407void bna_mbox_handler(struct bna *bna, u32 intr_status);
408void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
409
410/**
411 * PORT
412 */
413
414/* APIs for BNA */
415void bna_port_init(struct bna_port *port, struct bna *bna);
416void bna_port_uninit(struct bna_port *port);
417int bna_port_state_get(struct bna_port *port);
418int bna_llport_state_get(struct bna_llport *llport);
419
420/* APIs for DEVICE */
421void bna_port_start(struct bna_port *port);
422void bna_port_stop(struct bna_port *port);
423void bna_port_fail(struct bna_port *port);
424
425/* API for RX */
426int bna_port_mtu_get(struct bna_port *port);
427void bna_llport_admin_up(struct bna_llport *llport);
428void bna_llport_admin_down(struct bna_llport *llport);
429
430/* API for BNAD */
431void bna_port_enable(struct bna_port *port);
432void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
433 void (*cbfn)(void *, enum bna_cb_status));
434void bna_port_pause_config(struct bna_port *port,
435 struct bna_pause_config *pause_config,
436 void (*cbfn)(struct bnad *, enum bna_cb_status));
437void bna_port_mtu_set(struct bna_port *port, int mtu,
438 void (*cbfn)(struct bnad *, enum bna_cb_status));
439void bna_port_mac_get(struct bna_port *port, mac_t *mac);
440void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
441void bna_port_linkcbfn_set(struct bna_port *port,
442 void (*linkcbfn)(struct bnad *,
443 enum bna_link_status));
444void bna_port_admin_up(struct bna_port *port);
445void bna_port_admin_down(struct bna_port *port);
446
447/* Callbacks for TX, RX */
448void bna_port_cb_tx_stopped(struct bna_port *port,
449 enum bna_cb_status status);
450void bna_port_cb_rx_stopped(struct bna_port *port,
451 enum bna_cb_status status);
452
453/* Callbacks for MBOX */
454void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
455 int status);
456void bna_port_cb_link_down(struct bna_port *port, int status);
457
458/**
459 * IB
460 */
461
462/* APIs for BNA */
463void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
464 struct bna_res_info *res_info);
465void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
466
467/* APIs for TX, RX */
468struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
469 enum bna_intr_type intr_type, int vector);
470void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
471int bna_ib_reserve_idx(struct bna_ib *ib);
472void bna_ib_release_idx(struct bna_ib *ib, int idx);
473int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
474void bna_ib_start(struct bna_ib *ib);
475void bna_ib_stop(struct bna_ib *ib);
476void bna_ib_fail(struct bna_ib *ib);
477void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
478
479/**
480 * TX MODULE AND TX
481 */
482
483/* Internal APIs */
484void bna_tx_prio_changed(struct bna_tx *tx, int prio);
485
486/* APIs for BNA */
487void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
488 struct bna_res_info *res_info);
489void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
490int bna_tx_state_get(struct bna_tx *tx);
491
492/* APIs for PORT */
493void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
494void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
495void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
496void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
497void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);
498
499/* APIs for BNAD */
500void bna_tx_res_req(int num_txq, int txq_depth,
501 struct bna_res_info *res_info);
502struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
503 struct bna_tx_config *tx_cfg,
504 struct bna_tx_event_cbfn *tx_cbfn,
505 struct bna_res_info *res_info, void *priv);
506void bna_tx_destroy(struct bna_tx *tx);
507void bna_tx_enable(struct bna_tx *tx);
508void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
509 void (*cbfn)(void *, struct bna_tx *,
510 enum bna_cb_status));
511enum bna_cb_status
512bna_tx_prio_set(struct bna_tx *tx, int prio,
513 void (*cbfn)(struct bnad *, struct bna_tx *,
514 enum bna_cb_status));
515void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
516
517/**
518 * RX MODULE, RX, RXF
519 */
520
521/* Internal APIs */
522void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
523void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
524 const struct bna_mac *mac_addr);
525void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
526void bna_rxf_adv_init(struct bna_rxf *rxf,
527 struct bna_rx *rx,
528 struct bna_rx_config *q_config);
529int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
530int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
531int rxf_process_packet_filter_default(struct bna_rxf *rxf);
532int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
533int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
534int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
535int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
536int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
537void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
538void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
539void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
540void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
541
542/* APIs for BNA */
543void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
544 struct bna_res_info *res_info);
545void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
546int bna_rx_state_get(struct bna_rx *rx);
547int bna_rxf_state_get(struct bna_rxf *rxf);
548
549/* APIs for PORT */
550void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
551void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
552void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
553
554/* APIs for BNAD */
555void bna_rx_res_req(struct bna_rx_config *rx_config,
556 struct bna_res_info *res_info);
557struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
558 struct bna_rx_config *rx_cfg,
559 struct bna_rx_event_cbfn *rx_cbfn,
560 struct bna_res_info *res_info, void *priv);
561void bna_rx_destroy(struct bna_rx *rx);
562void bna_rx_enable(struct bna_rx *rx);
563void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
564 void (*cbfn)(void *, struct bna_rx *,
565 enum bna_cb_status));
566void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
567void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
568void bna_rx_dim_update(struct bna_ccb *ccb);
569enum bna_cb_status
570bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
571 void (*cbfn)(struct bnad *, struct bna_rx *,
572 enum bna_cb_status));
573enum bna_cb_status
574bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
575 void (*cbfn)(struct bnad *, struct bna_rx *,
576 enum bna_cb_status));
577enum bna_cb_status
578bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
579 void (*cbfn)(struct bnad *, struct bna_rx *,
580 enum bna_cb_status));
581enum bna_cb_status
582bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
583 void (*cbfn)(struct bnad *, struct bna_rx *,
584 enum bna_cb_status));
585enum bna_cb_status
586bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
587 void (*cbfn)(struct bnad *, struct bna_rx *,
588 enum bna_cb_status));
589enum bna_cb_status
590bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
591 void (*cbfn)(struct bnad *, struct bna_rx *,
592 enum bna_cb_status));
593void bna_rx_mcast_delall(struct bna_rx *rx,
594 void (*cbfn)(struct bnad *, struct bna_rx *,
595 enum bna_cb_status));
596enum bna_cb_status
597bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
598 enum bna_rxmode bitmask,
599 void (*cbfn)(struct bnad *, struct bna_rx *,
600 enum bna_cb_status));
601void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
602void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
603void bna_rx_vlanfilter_enable(struct bna_rx *rx);
604void bna_rx_vlanfilter_disable(struct bna_rx *rx);
605void bna_rx_rss_enable(struct bna_rx *rx);
606void bna_rx_rss_disable(struct bna_rx *rx);
607void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
608void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
609 int nvectors);
610void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
611 void (*cbfn)(struct bnad *, struct bna_rx *,
612 enum bna_cb_status));
613void bna_rx_hds_disable(struct bna_rx *rx,
614 void (*cbfn)(struct bnad *, struct bna_rx *,
615 enum bna_cb_status));
616void bna_rx_receive_pause(struct bna_rx *rx,
617 void (*cbfn)(struct bnad *, struct bna_rx *,
618 enum bna_cb_status));
619void bna_rx_receive_resume(struct bna_rx *rx,
620 void (*cbfn)(struct bnad *, struct bna_rx *,
621 enum bna_cb_status));
622
623/* RxF APIs for RX */
624void bna_rxf_start(struct bna_rxf *rxf);
625void bna_rxf_stop(struct bna_rxf *rxf);
626void bna_rxf_fail(struct bna_rxf *rxf);
627void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
628 struct bna_rx_config *q_config);
629void bna_rxf_uninit(struct bna_rxf *rxf);
630
631/* Callback from RXF to RX */
632void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status);
633void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
634
635/**
636 * BNAD
637 */
638
639/* Callbacks for BNA */
640void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
641 struct bna_stats *stats);
642void bnad_cb_stats_clr(struct bnad *bnad);
643
644/* Callbacks for DEVICE */
645void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
646void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
647void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
648void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);
649
650/* Callbacks for PORT */
651void bnad_cb_port_link_status(struct bnad *bnad,
652 enum bna_link_status status);
653
654#endif /* __BNA_H__ */
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
new file mode 100644
index 000000000000..f3034d6bda58
--- /dev/null
+++ b/drivers/net/bna/bna_ctrl.c
@@ -0,0 +1,3624 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19#include "bfa_sm.h"
20#include "bfa_wc.h"
21
22/**
23 * MBOX
24 */
25static int
26bna_is_aen(u8 msg_id)
27{
28 return (msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
29 msg_id == BFI_LL_I2H_LINK_UP_AEN);
30}
31
32static void
33bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
34{
35 struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
36
37 switch (aen->mh.msg_id) {
38 case BFI_LL_I2H_LINK_UP_AEN:
39 bna_port_cb_link_up(&bna->port, aen, aen->reason);
40 break;
41 case BFI_LL_I2H_LINK_DOWN_AEN:
42 bna_port_cb_link_down(&bna->port, aen->reason);
43 break;
44 default:
45 break;
46 }
47}
48
49static void
50bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
51{
52 struct bna *bna = (struct bna *)(llarg);
53 struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
54 struct bfi_mhdr *cmd_h, *rsp_h;
55 struct bna_mbox_qe *mb_qe = NULL;
56 int to_post = 0;
57 u8 aen = 0;
58 char message[BNA_MESSAGE_SIZE];
59
60 aen = bna_is_aen(mb_rsp->mh.msg_id);
61
62 if (!aen) {
63 mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
64 cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
65 rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
66
67 if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
68 (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
69 /* Remove the request from posted_q, update state */
70 list_del(&mb_qe->qe);
71 bna->mbox_mod.msg_pending--;
72 if (list_empty(&bna->mbox_mod.posted_q))
73 bna->mbox_mod.state = BNA_MBOX_FREE;
74 else
75 to_post = 1;
76
77 /* Dispatch the cbfn */
78 if (mb_qe->cbfn)
79 mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
80
81 /* Post the next entry, if needed */
82 if (to_post) {
83 mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
84 bfa_nw_ioc_mbox_queue(&bna->device.ioc,
85 &mb_qe->cmd);
86 }
87 } else {
88 snprintf(message, BNA_MESSAGE_SIZE,
89 "No matching rsp for [%d:%d:%d]\n",
90 mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
91 mb_rsp->mh.mtag.i2htok);
92 pr_info("%s", message);
93 }
94
95 } else
96 bna_mbox_aen_callback(bna, msg);
97}
98
99void
100bna_err_handler(struct bna *bna, u32 intr_status)
101{
102 u32 init_halt;
103
104 if (intr_status & __HALT_STATUS_BITS) {
105 init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
106 init_halt &= ~__FW_INIT_HALT_P;
107 writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
108 }
109
110 bfa_nw_ioc_error_isr(&bna->device.ioc);
111}
112
113void
114bna_mbox_handler(struct bna *bna, u32 intr_status)
115{
116 if (BNA_IS_ERR_INTR(intr_status)) {
117 bna_err_handler(bna, intr_status);
118 return;
119 }
120 if (BNA_IS_MBOX_INTR(intr_status))
121 bfa_nw_ioc_mbox_isr(&bna->device.ioc);
122}
123
124void
125bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
126{
127 struct bfi_mhdr *mh;
128
129 mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
130
131 mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
132 bna->mbox_mod.msg_ctr++;
133 bna->mbox_mod.msg_pending++;
134 if (bna->mbox_mod.state == BNA_MBOX_FREE) {
135 list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
136 bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
137 bna->mbox_mod.state = BNA_MBOX_POSTED;
138 } else {
139 list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
140 }
141}
142
143void
144bna_mbox_flush_q(struct bna *bna, struct list_head *q)
145{
146 struct bna_mbox_qe *mb_qe = NULL;
147 struct bfi_mhdr *cmd_h;
148 struct list_head *mb_q;
149 void (*cbfn)(void *arg, int status);
150 void *cbarg;
151
152 mb_q = &bna->mbox_mod.posted_q;
153
154 while (!list_empty(mb_q)) {
155 bfa_q_deq(mb_q, &mb_qe);
156 cbfn = mb_qe->cbfn;
157 cbarg = mb_qe->cbarg;
158 bfa_q_qe_init(mb_qe);
159 bna->mbox_mod.msg_pending--;
160
161 cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
162 if (cbfn)
163 cbfn(cbarg, BNA_CB_NOT_EXEC);
164 }
165
166 bna->mbox_mod.state = BNA_MBOX_FREE;
167}
168
169void
170bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
171{
172}
173
174void
175bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
176{
177 bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
178}
179
180void
181bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
182{
183 bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
184 mbox_mod->state = BNA_MBOX_FREE;
185 mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
186 INIT_LIST_HEAD(&mbox_mod->posted_q);
187 mbox_mod->bna = bna;
188}
189
190void
191bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
192{
193 mbox_mod->bna = NULL;
194}
195
196/**
197 * LLPORT
198 */
199#define call_llport_stop_cbfn(llport, status)\
200do {\
201 if ((llport)->stop_cbfn)\
202 (llport)->stop_cbfn(&(llport)->bna->port, status);\
203 (llport)->stop_cbfn = NULL;\
204} while (0)
205
206static void bna_fw_llport_up(struct bna_llport *llport);
207static void bna_fw_cb_llport_up(void *arg, int status);
208static void bna_fw_llport_down(struct bna_llport *llport);
209static void bna_fw_cb_llport_down(void *arg, int status);
210static void bna_llport_start(struct bna_llport *llport);
211static void bna_llport_stop(struct bna_llport *llport);
212static void bna_llport_fail(struct bna_llport *llport);
213
214enum bna_llport_event {
215 LLPORT_E_START = 1,
216 LLPORT_E_STOP = 2,
217 LLPORT_E_FAIL = 3,
218 LLPORT_E_UP = 4,
219 LLPORT_E_DOWN = 5,
220 LLPORT_E_FWRESP_UP = 6,
221 LLPORT_E_FWRESP_DOWN = 7
222};
223
224enum bna_llport_state {
225 BNA_LLPORT_STOPPED = 1,
226 BNA_LLPORT_DOWN = 2,
227 BNA_LLPORT_UP_RESP_WAIT = 3,
228 BNA_LLPORT_DOWN_RESP_WAIT = 4,
229 BNA_LLPORT_UP = 5,
230 BNA_LLPORT_LAST_RESP_WAIT = 6
231};
232
233bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
234 enum bna_llport_event);
235bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
236 enum bna_llport_event);
237bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
238 enum bna_llport_event);
239bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
240 enum bna_llport_event);
241bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
242 enum bna_llport_event);
243bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
244 enum bna_llport_event);
245
246static struct bfa_sm_table llport_sm_table[] = {
247 {BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
248 {BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
249 {BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
250 {BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
251 {BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
252 {BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
253};
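
Each row pairs a state handler with its enum value so that
bna_llport_state_get() below can translate the FSM's current function
pointer back into a BNA_LLPORT_* state. A minimal sketch of the table
walk one would expect bfa_sm_to_state() (defined in bfa_sm.h, not shown
here) to perform; treat this as an assumption, not the actual helper:

	static int demo_sm_to_state(const struct bfa_sm_table *smt, int n,
				    bfa_sm_t sm)
	{
		int i;

		for (i = 0; i < n - 1; i++)	/* falls back to last row */
			if (smt[i].sm == sm)
				break;
		return smt[i].state;
	}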
254
255static void
256bna_llport_sm_stopped_entry(struct bna_llport *llport)
257{
258 llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
259 call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
260}
261
262static void
263bna_llport_sm_stopped(struct bna_llport *llport,
264 enum bna_llport_event event)
265{
266 switch (event) {
267 case LLPORT_E_START:
268 bfa_fsm_set_state(llport, bna_llport_sm_down);
269 break;
270
271 case LLPORT_E_STOP:
272 call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
273 break;
274
275 case LLPORT_E_FAIL:
276 break;
277
278 case LLPORT_E_DOWN:
279 /* This event is received due to Rx objects failing */
280 /* No-op */
281 break;
282
283 case LLPORT_E_FWRESP_UP:
284 case LLPORT_E_FWRESP_DOWN:
285 /**
286 * These events are received due to flushing of mbox when
287 * device fails
288 */
289 /* No-op */
290 break;
291
292 default:
293 bfa_sm_fault(llport->bna, event);
294 }
295}
296
297static void
298bna_llport_sm_down_entry(struct bna_llport *llport)
299{
300 bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
301}
302
303static void
304bna_llport_sm_down(struct bna_llport *llport,
305 enum bna_llport_event event)
306{
307 switch (event) {
308 case LLPORT_E_STOP:
309 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
310 break;
311
312 case LLPORT_E_FAIL:
313 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
314 break;
315
316 case LLPORT_E_UP:
317 bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
318 bna_fw_llport_up(llport);
319 break;
320
321 default:
322 bfa_sm_fault(llport->bna, event);
323 }
324}
325
326static void
327bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
328{
329 /**
330	 * NOTE: Do not call bna_fw_llport_up() here. That would overrun the
331	 * mbox on a down_resp_wait -> up_resp_wait transition on event
332 * LLPORT_E_UP
333 */
334}
335
336static void
337bna_llport_sm_up_resp_wait(struct bna_llport *llport,
338 enum bna_llport_event event)
339{
340 switch (event) {
341 case LLPORT_E_STOP:
342 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
343 break;
344
345 case LLPORT_E_FAIL:
346 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
347 break;
348
349 case LLPORT_E_DOWN:
350 bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
351 break;
352
353 case LLPORT_E_FWRESP_UP:
354 bfa_fsm_set_state(llport, bna_llport_sm_up);
355 break;
356
357 case LLPORT_E_FWRESP_DOWN:
358 /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
359 bna_fw_llport_up(llport);
360 break;
361
362 default:
363 bfa_sm_fault(llport->bna, event);
364 }
365}
366
367static void
368bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
369{
370 /**
371	 * NOTE: Do not call bna_fw_llport_down() here. That would overrun the
372	 * mbox on an up_resp_wait -> down_resp_wait transition on event
373 * LLPORT_E_DOWN
374 */
375}
376
377static void
378bna_llport_sm_down_resp_wait(struct bna_llport *llport,
379 enum bna_llport_event event)
380{
381 switch (event) {
382 case LLPORT_E_STOP:
383 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
384 break;
385
386 case LLPORT_E_FAIL:
387 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
388 break;
389
390 case LLPORT_E_UP:
391 bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
392 break;
393
394 case LLPORT_E_FWRESP_UP:
395 /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
396 bna_fw_llport_down(llport);
397 break;
398
399 case LLPORT_E_FWRESP_DOWN:
400 bfa_fsm_set_state(llport, bna_llport_sm_down);
401 break;
402
403 default:
404 bfa_sm_fault(llport->bna, event);
405 }
406}
407
408static void
409bna_llport_sm_up_entry(struct bna_llport *llport)
410{
411}
412
413static void
414bna_llport_sm_up(struct bna_llport *llport,
415 enum bna_llport_event event)
416{
417 switch (event) {
418 case LLPORT_E_STOP:
419 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
420 bna_fw_llport_down(llport);
421 break;
422
423 case LLPORT_E_FAIL:
424 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
425 break;
426
427 case LLPORT_E_DOWN:
428 bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
429 bna_fw_llport_down(llport);
430 break;
431
432 default:
433 bfa_sm_fault(llport->bna, event);
434 }
435}
436
437static void
438bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
439{
440}
441
442static void
443bna_llport_sm_last_resp_wait(struct bna_llport *llport,
444 enum bna_llport_event event)
445{
446 switch (event) {
447 case LLPORT_E_FAIL:
448 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
449 break;
450
451 case LLPORT_E_DOWN:
452 /**
453 * This event is received due to Rx objects stopping in
454 * parallel to llport
455 */
456 /* No-op */
457 break;
458
459 case LLPORT_E_FWRESP_UP:
460		/* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
461 bna_fw_llport_down(llport);
462 break;
463
464 case LLPORT_E_FWRESP_DOWN:
465 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
466 break;
467
468 default:
469 bfa_sm_fault(llport->bna, event);
470 }
471}
472
473static void
474bna_fw_llport_admin_up(struct bna_llport *llport)
475{
476 struct bfi_ll_port_admin_req ll_req;
477
478 memset(&ll_req, 0, sizeof(ll_req));
479 ll_req.mh.msg_class = BFI_MC_LL;
480 ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
481 ll_req.mh.mtag.h2i.lpu_id = 0;
482
483 ll_req.up = BNA_STATUS_T_ENABLED;
484
485 bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
486 bna_fw_cb_llport_up, llport);
487
488 bna_mbox_send(llport->bna, &llport->mbox_qe);
489}
490
491static void
492bna_fw_llport_up(struct bna_llport *llport)
493{
494 if (llport->type == BNA_PORT_T_REGULAR)
495 bna_fw_llport_admin_up(llport);
496}
497
498static void
499bna_fw_cb_llport_up(void *arg, int status)
500{
501 struct bna_llport *llport = (struct bna_llport *)arg;
502
503 bfa_q_qe_init(&llport->mbox_qe.qe);
504 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
505}
506
507static void
508bna_fw_llport_admin_down(struct bna_llport *llport)
509{
510 struct bfi_ll_port_admin_req ll_req;
511
512 memset(&ll_req, 0, sizeof(ll_req));
513 ll_req.mh.msg_class = BFI_MC_LL;
514 ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
515 ll_req.mh.mtag.h2i.lpu_id = 0;
516
517 ll_req.up = BNA_STATUS_T_DISABLED;
518
519 bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
520 bna_fw_cb_llport_down, llport);
521
522 bna_mbox_send(llport->bna, &llport->mbox_qe);
523}
524
525static void
526bna_fw_llport_down(struct bna_llport *llport)
527{
528 if (llport->type == BNA_PORT_T_REGULAR)
529 bna_fw_llport_admin_down(llport);
530}
531
532static void
533bna_fw_cb_llport_down(void *arg, int status)
534{
535 struct bna_llport *llport = (struct bna_llport *)arg;
536
537 bfa_q_qe_init(&llport->mbox_qe.qe);
538 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
539}
540
541void
542bna_port_cb_llport_stopped(struct bna_port *port,
543 enum bna_cb_status status)
544{
545 bfa_wc_down(&port->chld_stop_wc);
546}
547
548static void
549bna_llport_init(struct bna_llport *llport, struct bna *bna)
550{
551 llport->flags |= BNA_LLPORT_F_ENABLED;
552 llport->type = BNA_PORT_T_REGULAR;
553 llport->bna = bna;
554
555 llport->link_status = BNA_LINK_DOWN;
556
557 llport->admin_up_count = 0;
558
559 llport->stop_cbfn = NULL;
560
561 bfa_q_qe_init(&llport->mbox_qe.qe);
562
563 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
564}
565
566static void
567bna_llport_uninit(struct bna_llport *llport)
568{
569 llport->flags &= ~BNA_LLPORT_F_ENABLED;
570
571 llport->bna = NULL;
572}
573
574static void
575bna_llport_start(struct bna_llport *llport)
576{
577 bfa_fsm_send_event(llport, LLPORT_E_START);
578}
579
580static void
581bna_llport_stop(struct bna_llport *llport)
582{
583 llport->stop_cbfn = bna_port_cb_llport_stopped;
584
585 bfa_fsm_send_event(llport, LLPORT_E_STOP);
586}
587
588static void
589bna_llport_fail(struct bna_llport *llport)
590{
591 bfa_fsm_send_event(llport, LLPORT_E_FAIL);
592}
593
594int
595bna_llport_state_get(struct bna_llport *llport)
596{
597 return bfa_sm_to_state(llport_sm_table, llport->fsm);
598}
599
600void
601bna_llport_admin_up(struct bna_llport *llport)
602{
603 llport->admin_up_count++;
604
605 if (llport->admin_up_count == 1) {
606 llport->flags |= BNA_LLPORT_F_RX_ENABLED;
607 if (llport->flags & BNA_LLPORT_F_ENABLED)
608 bfa_fsm_send_event(llport, LLPORT_E_UP);
609 }
610}
611
612void
613bna_llport_admin_down(struct bna_llport *llport)
614{
615 llport->admin_up_count--;
616
617 if (llport->admin_up_count == 0) {
618 llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
619 if (llport->flags & BNA_LLPORT_F_ENABLED)
620 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
621 }
622}
623
624/**
625 * PORT
626 */
627#define bna_port_chld_start(port)\
628do {\
629 enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
630 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
631 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
632 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
633 bna_llport_start(&(port)->llport);\
634 bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
635 bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
636} while (0)
637
638#define bna_port_chld_stop(port)\
639do {\
640 enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
641 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
642 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
643 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
644 bfa_wc_up(&(port)->chld_stop_wc);\
645 bfa_wc_up(&(port)->chld_stop_wc);\
646 bfa_wc_up(&(port)->chld_stop_wc);\
647 bna_llport_stop(&(port)->llport);\
648 bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
649 bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
650} while (0)
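
The three bfa_wc_up() calls raise the port's wait counter once per child
(llport, tx_mod, rx_mod); each child's stop-complete callback
(bna_port_cb_llport_stopped(), bna_port_cb_tx_stopped(),
bna_port_cb_rx_stopped()) calls bfa_wc_down(), and when the count hits
zero the wc_resume hook fires PORT_E_CHLD_STOPPED. A stripped-down
sketch of the wait-counter idea (not the bfa_wc.h implementation):

	struct demo_wc {
		int count;
		void (*resume)(void *arg);
		void *arg;
	};

	static void demo_wc_up(struct demo_wc *wc)
	{
		wc->count++;
	}

	static void demo_wc_down(struct demo_wc *wc)
	{
		if (--wc->count == 0)
			wc->resume(wc->arg);	/* last child stopped */
	}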
651
652#define bna_port_chld_fail(port)\
653do {\
654 bna_llport_fail(&(port)->llport);\
655 bna_tx_mod_fail(&(port)->bna->tx_mod);\
656 bna_rx_mod_fail(&(port)->bna->rx_mod);\
657} while (0)
658
659#define bna_port_rx_start(port)\
660do {\
661 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
662 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
663 bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
664} while (0)
665
666#define bna_port_rx_stop(port)\
667do {\
668 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
669 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
670 bfa_wc_up(&(port)->chld_stop_wc);\
671 bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
672} while (0)
673
674#define call_port_stop_cbfn(port, status)\
675do {\
676 if ((port)->stop_cbfn)\
677 (port)->stop_cbfn((port)->stop_cbarg, status);\
678 (port)->stop_cbfn = NULL;\
679 (port)->stop_cbarg = NULL;\
680} while (0)
681
682#define call_port_pause_cbfn(port, status)\
683do {\
684 if ((port)->pause_cbfn)\
685 (port)->pause_cbfn((port)->bna->bnad, status);\
686 (port)->pause_cbfn = NULL;\
687} while (0)
688
689#define call_port_mtu_cbfn(port, status)\
690do {\
691 if ((port)->mtu_cbfn)\
692 (port)->mtu_cbfn((port)->bna->bnad, status);\
693 (port)->mtu_cbfn = NULL;\
694} while (0)
695
696static void bna_fw_pause_set(struct bna_port *port);
697static void bna_fw_cb_pause_set(void *arg, int status);
698static void bna_fw_mtu_set(struct bna_port *port);
699static void bna_fw_cb_mtu_set(void *arg, int status);
700
701enum bna_port_event {
702 PORT_E_START = 1,
703 PORT_E_STOP = 2,
704 PORT_E_FAIL = 3,
705 PORT_E_PAUSE_CFG = 4,
706 PORT_E_MTU_CFG = 5,
707 PORT_E_CHLD_STOPPED = 6,
708 PORT_E_FWRESP_PAUSE = 7,
709 PORT_E_FWRESP_MTU = 8
710};
711
712enum bna_port_state {
713 BNA_PORT_STOPPED = 1,
714 BNA_PORT_MTU_INIT_WAIT = 2,
715 BNA_PORT_PAUSE_INIT_WAIT = 3,
716 BNA_PORT_LAST_RESP_WAIT = 4,
717 BNA_PORT_STARTED = 5,
718 BNA_PORT_PAUSE_CFG_WAIT = 6,
719 BNA_PORT_RX_STOP_WAIT = 7,
720 BNA_PORT_MTU_CFG_WAIT = 8,
721 BNA_PORT_CHLD_STOP_WAIT = 9
722};
723
724bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
725 enum bna_port_event);
726bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
727 enum bna_port_event);
728bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
729 enum bna_port_event);
730bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
731 enum bna_port_event);
732bfa_fsm_state_decl(bna_port, started, struct bna_port,
733 enum bna_port_event);
734bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
735 enum bna_port_event);
736bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
737 enum bna_port_event);
738bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
739 enum bna_port_event);
740bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
741 enum bna_port_event);
742
743static struct bfa_sm_table port_sm_table[] = {
744 {BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
745 {BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
746 {BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
747 {BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
748 {BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
749 {BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
750 {BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
751 {BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
752 {BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
753};
754
755static void
756bna_port_sm_stopped_entry(struct bna_port *port)
757{
758 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
759 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
760 call_port_stop_cbfn(port, BNA_CB_SUCCESS);
761}
762
763static void
764bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
765{
766 switch (event) {
767 case PORT_E_START:
768 bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
769 break;
770
771 case PORT_E_STOP:
772 call_port_stop_cbfn(port, BNA_CB_SUCCESS);
773 break;
774
775 case PORT_E_FAIL:
776 /* No-op */
777 break;
778
779 case PORT_E_PAUSE_CFG:
780 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
781 break;
782
783 case PORT_E_MTU_CFG:
784 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
785 break;
786
787 case PORT_E_CHLD_STOPPED:
788 /**
789 * This event is received due to LLPort, Tx and Rx objects
790 * failing
791 */
792 /* No-op */
793 break;
794
795 case PORT_E_FWRESP_PAUSE:
796 case PORT_E_FWRESP_MTU:
797 /**
798 * These events are received due to flushing of mbox when
799 * device fails
800 */
801 /* No-op */
802 break;
803
804 default:
805 bfa_sm_fault(port->bna, event);
806 }
807}
808
809static void
810bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
811{
812 bna_fw_mtu_set(port);
813}
814
815static void
816bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
817{
818 switch (event) {
819 case PORT_E_STOP:
820 bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
821 break;
822
823 case PORT_E_FAIL:
824 bfa_fsm_set_state(port, bna_port_sm_stopped);
825 break;
826
827 case PORT_E_PAUSE_CFG:
828 /* No-op */
829 break;
830
831 case PORT_E_MTU_CFG:
832 port->flags |= BNA_PORT_F_MTU_CHANGED;
833 break;
834
835 case PORT_E_FWRESP_MTU:
836 if (port->flags & BNA_PORT_F_MTU_CHANGED) {
837 port->flags &= ~BNA_PORT_F_MTU_CHANGED;
838 bna_fw_mtu_set(port);
839 } else {
840 bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
841 }
842 break;
843
844 default:
845 bfa_sm_fault(port->bna, event);
846 }
847}
848
849static void
850bna_port_sm_pause_init_wait_entry(struct bna_port *port)
851{
852 bna_fw_pause_set(port);
853}
854
855static void
856bna_port_sm_pause_init_wait(struct bna_port *port,
857 enum bna_port_event event)
858{
859 switch (event) {
860 case PORT_E_STOP:
861 bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
862 break;
863
864 case PORT_E_FAIL:
865 bfa_fsm_set_state(port, bna_port_sm_stopped);
866 break;
867
868 case PORT_E_PAUSE_CFG:
869 port->flags |= BNA_PORT_F_PAUSE_CHANGED;
870 break;
871
872 case PORT_E_MTU_CFG:
873 port->flags |= BNA_PORT_F_MTU_CHANGED;
874 break;
875
876 case PORT_E_FWRESP_PAUSE:
877 if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
878 port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
879 bna_fw_pause_set(port);
880 } else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
881 port->flags &= ~BNA_PORT_F_MTU_CHANGED;
882 bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
883 } else {
884 bfa_fsm_set_state(port, bna_port_sm_started);
885 bna_port_chld_start(port);
886 }
887 break;
888
889 default:
890 bfa_sm_fault(port->bna, event);
891 }
892}
893
894static void
895bna_port_sm_last_resp_wait_entry(struct bna_port *port)
896{
897}
898
899static void
900bna_port_sm_last_resp_wait(struct bna_port *port,
901 enum bna_port_event event)
902{
903 switch (event) {
904 case PORT_E_FAIL:
905 case PORT_E_FWRESP_PAUSE:
906 case PORT_E_FWRESP_MTU:
907 bfa_fsm_set_state(port, bna_port_sm_stopped);
908 break;
909
910 default:
911 bfa_sm_fault(port->bna, event);
912 }
913}
914
915static void
916bna_port_sm_started_entry(struct bna_port *port)
917{
918 /**
919 * NOTE: Do not call bna_port_chld_start() here, since it will be
920 * inadvertently called during pause_cfg_wait->started transition
921 * as well
922 */
923 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
924 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
925}
926
927static void
928bna_port_sm_started(struct bna_port *port,
929 enum bna_port_event event)
930{
931 switch (event) {
932 case PORT_E_STOP:
933 bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
934 break;
935
936 case PORT_E_FAIL:
937 bfa_fsm_set_state(port, bna_port_sm_stopped);
938 bna_port_chld_fail(port);
939 break;
940
941 case PORT_E_PAUSE_CFG:
942 bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
943 break;
944
945 case PORT_E_MTU_CFG:
946 bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
947 break;
948
949 default:
950 bfa_sm_fault(port->bna, event);
951 }
952}
953
954static void
955bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
956{
957 bna_fw_pause_set(port);
958}
959
960static void
961bna_port_sm_pause_cfg_wait(struct bna_port *port,
962 enum bna_port_event event)
963{
964 switch (event) {
965 case PORT_E_FAIL:
966 bfa_fsm_set_state(port, bna_port_sm_stopped);
967 bna_port_chld_fail(port);
968 break;
969
970 case PORT_E_FWRESP_PAUSE:
971 bfa_fsm_set_state(port, bna_port_sm_started);
972 break;
973
974 default:
975 bfa_sm_fault(port->bna, event);
976 }
977}
978
979static void
980bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
981{
982 bna_port_rx_stop(port);
983}
984
985static void
986bna_port_sm_rx_stop_wait(struct bna_port *port,
987 enum bna_port_event event)
988{
989 switch (event) {
990 case PORT_E_FAIL:
991 bfa_fsm_set_state(port, bna_port_sm_stopped);
992 bna_port_chld_fail(port);
993 break;
994
995 case PORT_E_CHLD_STOPPED:
996 bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
997 break;
998
999 default:
1000 bfa_sm_fault(port->bna, event);
1001 }
1002}
1003
1004static void
1005bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
1006{
1007 bna_fw_mtu_set(port);
1008}
1009
1010static void
1011bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
1012{
1013 switch (event) {
1014 case PORT_E_FAIL:
1015 bfa_fsm_set_state(port, bna_port_sm_stopped);
1016 bna_port_chld_fail(port);
1017 break;
1018
1019 case PORT_E_FWRESP_MTU:
1020 bfa_fsm_set_state(port, bna_port_sm_started);
1021 bna_port_rx_start(port);
1022 break;
1023
1024 default:
1025 bfa_sm_fault(port->bna, event);
1026 }
1027}
1028
1029static void
1030bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
1031{
1032 bna_port_chld_stop(port);
1033}
1034
1035static void
1036bna_port_sm_chld_stop_wait(struct bna_port *port,
1037 enum bna_port_event event)
1038{
1039 switch (event) {
1040 case PORT_E_FAIL:
1041 bfa_fsm_set_state(port, bna_port_sm_stopped);
1042 bna_port_chld_fail(port);
1043 break;
1044
1045 case PORT_E_CHLD_STOPPED:
1046 bfa_fsm_set_state(port, bna_port_sm_stopped);
1047 break;
1048
1049 default:
1050 bfa_sm_fault(port->bna, event);
1051 }
1052}
1053
1054static void
1055bna_fw_pause_set(struct bna_port *port)
1056{
1057 struct bfi_ll_set_pause_req ll_req;
1058
1059 memset(&ll_req, 0, sizeof(ll_req));
1060 ll_req.mh.msg_class = BFI_MC_LL;
1061 ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
1062 ll_req.mh.mtag.h2i.lpu_id = 0;
1063
1064 ll_req.tx_pause = port->pause_config.tx_pause;
1065 ll_req.rx_pause = port->pause_config.rx_pause;
1066
1067 bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1068 bna_fw_cb_pause_set, port);
1069
1070 bna_mbox_send(port->bna, &port->mbox_qe);
1071}
1072
1073static void
1074bna_fw_cb_pause_set(void *arg, int status)
1075{
1076 struct bna_port *port = (struct bna_port *)arg;
1077
1078 bfa_q_qe_init(&port->mbox_qe.qe);
1079 bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
1080}
1081
1082static void
1083bna_fw_mtu_set(struct bna_port *port)
1084{
1085 struct bfi_ll_mtu_info_req ll_req;
1086
1087 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
1088 ll_req.mtu = htons((u16)port->mtu);
1089
1090 bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1091 bna_fw_cb_mtu_set, port);
1092 bna_mbox_send(port->bna, &port->mbox_qe);
1093}
1094
1095static void
1096bna_fw_cb_mtu_set(void *arg, int status)
1097{
1098 struct bna_port *port = (struct bna_port *)arg;
1099
1100 bfa_q_qe_init(&port->mbox_qe.qe);
1101 bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
1102}
1103
1104static void
1105bna_port_cb_chld_stopped(void *arg)
1106{
1107 struct bna_port *port = (struct bna_port *)arg;
1108
1109 bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
1110}
1111
1112void
1113bna_port_init(struct bna_port *port, struct bna *bna)
1114{
1115 port->bna = bna;
1116 port->flags = 0;
1117 port->mtu = 0;
1118 port->type = BNA_PORT_T_REGULAR;
1119
1120 port->link_cbfn = bnad_cb_port_link_status;
1121
1122 port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
1123 port->chld_stop_wc.wc_cbarg = port;
1124 port->chld_stop_wc.wc_count = 0;
1125
1126 port->stop_cbfn = NULL;
1127 port->stop_cbarg = NULL;
1128
1129 port->pause_cbfn = NULL;
1130
1131 port->mtu_cbfn = NULL;
1132
1133 bfa_q_qe_init(&port->mbox_qe.qe);
1134
1135 bfa_fsm_set_state(port, bna_port_sm_stopped);
1136
1137 bna_llport_init(&port->llport, bna);
1138}
1139
1140void
1141bna_port_uninit(struct bna_port *port)
1142{
1143 bna_llport_uninit(&port->llport);
1144
1145 port->flags = 0;
1146
1147 port->bna = NULL;
1148}
1149
1150int
1151bna_port_state_get(struct bna_port *port)
1152{
1153 return bfa_sm_to_state(port_sm_table, port->fsm);
1154}
1155
1156void
1157bna_port_start(struct bna_port *port)
1158{
1159 port->flags |= BNA_PORT_F_DEVICE_READY;
1160 if (port->flags & BNA_PORT_F_ENABLED)
1161 bfa_fsm_send_event(port, PORT_E_START);
1162}
1163
1164void
1165bna_port_stop(struct bna_port *port)
1166{
1167 port->stop_cbfn = bna_device_cb_port_stopped;
1168 port->stop_cbarg = &port->bna->device;
1169
1170 port->flags &= ~BNA_PORT_F_DEVICE_READY;
1171 bfa_fsm_send_event(port, PORT_E_STOP);
1172}
1173
1174void
1175bna_port_fail(struct bna_port *port)
1176{
1177 port->flags &= ~BNA_PORT_F_DEVICE_READY;
1178 bfa_fsm_send_event(port, PORT_E_FAIL);
1179}
1180
1181void
1182bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
1183{
1184 bfa_wc_down(&port->chld_stop_wc);
1185}
1186
1187void
1188bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
1189{
1190 bfa_wc_down(&port->chld_stop_wc);
1191}
1192
1193void
1194bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
1195 int status)
1196{
1197 int i;
1198 u8 prio_map;
1199
1200 port->llport.link_status = BNA_LINK_UP;
1201 if (aen->cee_linkup)
1202 port->llport.link_status = BNA_CEE_UP;
1203
1204 /* Compute the priority */
1205 prio_map = aen->prio_map;
1206 if (prio_map) {
1207 for (i = 0; i < 8; i++) {
1208 if ((prio_map >> i) & 0x1)
1209 break;
1210 }
1211 port->priority = i;
1212 } else
1213 port->priority = 0;
1214
1215 /* Dispatch events */
1216 bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
1217 bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
1218 port->link_cbfn(port->bna->bnad, port->llport.link_status);
1219}
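
The priority scan above selects the lowest-numbered priority bit set in
the CEE priority map. An equivalent formulation using the kernel's
1-based ffs() (editor's note, not how the patch writes it):

	port->priority = prio_map ? ffs(prio_map) - 1 : 0;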
1220
1221void
1222bna_port_cb_link_down(struct bna_port *port, int status)
1223{
1224 port->llport.link_status = BNA_LINK_DOWN;
1225
1226 /* Dispatch events */
1227 bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
1228 port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
1229}
1230
1231int
1232bna_port_mtu_get(struct bna_port *port)
1233{
1234 return port->mtu;
1235}
1236
1237void
1238bna_port_enable(struct bna_port *port)
1239{
1240 if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
1241 return;
1242
1243 port->flags |= BNA_PORT_F_ENABLED;
1244
1245 if (port->flags & BNA_PORT_F_DEVICE_READY)
1246 bfa_fsm_send_event(port, PORT_E_START);
1247}
1248
1249void
1250bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
1251 void (*cbfn)(void *, enum bna_cb_status))
1252{
1253 if (type == BNA_SOFT_CLEANUP) {
1254 (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
1255 return;
1256 }
1257
1258 port->stop_cbfn = cbfn;
1259 port->stop_cbarg = port->bna->bnad;
1260
1261 port->flags &= ~BNA_PORT_F_ENABLED;
1262
1263 bfa_fsm_send_event(port, PORT_E_STOP);
1264}
1265
1266void
1267bna_port_pause_config(struct bna_port *port,
1268 struct bna_pause_config *pause_config,
1269 void (*cbfn)(struct bnad *, enum bna_cb_status))
1270{
1271 port->pause_config = *pause_config;
1272
1273 port->pause_cbfn = cbfn;
1274
1275 bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
1276}
1277
1278void
1279bna_port_mtu_set(struct bna_port *port, int mtu,
1280 void (*cbfn)(struct bnad *, enum bna_cb_status))
1281{
1282 port->mtu = mtu;
1283
1284 port->mtu_cbfn = cbfn;
1285
1286 bfa_fsm_send_event(port, PORT_E_MTU_CFG);
1287}
1288
1289void
1290bna_port_mac_get(struct bna_port *port, mac_t *mac)
1291{
1292 *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
1293}
1294
1295/**
1296 * Should be called only when port is disabled
1297 */
1298void
1299bna_port_type_set(struct bna_port *port, enum bna_port_type type)
1300{
1301 port->type = type;
1302 port->llport.type = type;
1303}
1304
1305/**
1306 * Should be called only when port is disabled
1307 */
1308void
1309bna_port_linkcbfn_set(struct bna_port *port,
1310 void (*linkcbfn)(struct bnad *, enum bna_link_status))
1311{
1312 port->link_cbfn = linkcbfn;
1313}
1314
1315void
1316bna_port_admin_up(struct bna_port *port)
1317{
1318 struct bna_llport *llport = &port->llport;
1319
1320 if (llport->flags & BNA_LLPORT_F_ENABLED)
1321 return;
1322
1323 llport->flags |= BNA_LLPORT_F_ENABLED;
1324
1325 if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
1326 bfa_fsm_send_event(llport, LLPORT_E_UP);
1327}
1328
1329void
1330bna_port_admin_down(struct bna_port *port)
1331{
1332 struct bna_llport *llport = &port->llport;
1333
1334 if (!(llport->flags & BNA_LLPORT_F_ENABLED))
1335 return;
1336
1337 llport->flags &= ~BNA_LLPORT_F_ENABLED;
1338
1339 if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
1340 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
1341}
1342
/**
 * DEVICE
 */
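/*
 * Mailbox interrupt enable/disable helpers. Note that enable_mbox_intr()
 * reads the interrupt status first; presumably this consumes any stale
 * status bits before the mailbox interrupt is unmasked.
 */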
#define enable_mbox_intr(_device)\
do {\
	u32 intr_status;\
	bna_intr_status_get((_device)->bna, intr_status);\
	bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
	bna_mbox_intr_enable((_device)->bna);\
} while (0)

#define disable_mbox_intr(_device)\
do {\
	bna_mbox_intr_disable((_device)->bna);\
	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
} while (0)

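/* Per-PCI-function register offsets, indexed by PCI function number */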
const struct bna_chip_regs_offset reg_offset[] =
{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
	HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
	HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
	HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
};

enum bna_device_event {
	DEVICE_E_ENABLE = 1,
	DEVICE_E_DISABLE = 2,
	DEVICE_E_IOC_READY = 3,
	DEVICE_E_IOC_FAILED = 4,
	DEVICE_E_IOC_DISABLED = 5,
	DEVICE_E_IOC_RESET = 6,
	DEVICE_E_PORT_STOPPED = 7,
};

enum bna_device_state {
	BNA_DEVICE_STOPPED = 1,
	BNA_DEVICE_IOC_READY_WAIT = 2,
	BNA_DEVICE_READY = 3,
	BNA_DEVICE_PORT_STOP_WAIT = 4,
	BNA_DEVICE_IOC_DISABLE_WAIT = 5,
	BNA_DEVICE_FAILED = 6
};

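/*
 * Device FSM transitions, summarized from the handlers below:
 *
 *   stopped --ENABLE--> ioc_ready_wait --IOC_READY--> ready
 *   ready --DISABLE--> port_stop_wait --PORT_STOPPED--> ioc_disable_wait
 *   ioc_disable_wait --IOC_DISABLED--> stopped
 *   stopped/ioc_ready_wait/ready --IOC_FAILED--> failed
 *   failed --IOC_RESET--> ioc_ready_wait, --DISABLE--> ioc_disable_wait
 */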
bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ready, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, failed, struct bna_device,
			enum bna_device_event);

static struct bfa_sm_table device_sm_table[] = {
	{BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
	{BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
	{BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
	{BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
	{BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
	{BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
};

static void
bna_device_sm_stopped_entry(struct bna_device *device)
{
	if (device->stop_cbfn)
		device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);

	device->stop_cbfn = NULL;
	device->stop_cbarg = NULL;
}

static void
bna_device_sm_stopped(struct bna_device *device,
			enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_ENABLE:
		if (device->intr_type == BNA_INTR_T_MSIX)
			bna_mbox_msix_idx_set(device);
		bfa_nw_ioc_enable(&device->ioc);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		break;

	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}

static void
bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
{
	/**
	 * Do not call bfa_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_device_sm_ioc_ready_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		if (device->ready_cbfn)
			device->ready_cbfn(device->ready_cbarg,
						BNA_CB_INTERRUPT);
		device->ready_cbfn = NULL;
		device->ready_cbarg = NULL;
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_READY:
		bfa_fsm_set_state(device, bna_device_sm_ready);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}

static void
bna_device_sm_ready_entry(struct bna_device *device)
{
	bna_mbox_mod_start(&device->bna->mbox_mod);
	bna_port_start(&device->bna->port);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
					BNA_CB_SUCCESS);
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
}

static void
bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}

static void
bna_device_sm_port_stop_wait_entry(struct bna_device *device)
{
	bna_port_stop(&device->bna->port);
}

static void
bna_device_sm_port_stop_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_PORT_STOPPED:
		bna_mbox_mod_stop(&device->bna->mbox_mod);
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_FAILED:
		disable_mbox_intr(device);
		bna_port_fail(&device->bna->port);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}

static void
bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
{
	bfa_nw_ioc_disable(&device->ioc);
}

static void
bna_device_sm_ioc_disable_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_IOC_DISABLED:
		disable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}

static void
bna_device_sm_failed_entry(struct bna_device *device)
{
	disable_mbox_intr(device);
	bna_port_fail(&device->bna->port);
	bna_mbox_mod_stop(&device->bna->mbox_mod);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
					BNA_CB_FAIL);
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
}

static void
bna_device_sm_failed(struct bna_device *device,
			enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}

/* IOC callback functions */

static void
bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
{
	struct bna_device *device = (struct bna_device *)dev;

	if (error)
		bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
	else
		bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
}

static void
bna_device_cb_iocll_disabled(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
}

static void
bna_device_cb_iocll_failed(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
}

static void
bna_device_cb_iocll_reset(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
}

static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
	bna_device_cb_iocll_ready,
	bna_device_cb_iocll_disabled,
	bna_device_cb_iocll_failed,
	bna_device_cb_iocll_reset
};

void
bna_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;

	device->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
	bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	bfa_nw_ioc_mem_claim(&device->ioc,
		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
		dma);

	bna_adv_device_init(device, bna, res_info);
	/*
	 * Initialize mbox_mod only after IOC, so that mbox handler
	 * registration goes through
	 */
	device->intr_type =
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
	device->vector =
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
	bna_mbox_mod_init(&bna->mbox_mod, bna);

	device->ready_cbfn = device->stop_cbfn = NULL;
	device->ready_cbarg = device->stop_cbarg = NULL;

	bfa_fsm_set_state(device, bna_device_sm_stopped);
}

void
bna_device_uninit(struct bna_device *device)
{
	bna_mbox_mod_uninit(&device->bna->mbox_mod);

	bfa_nw_ioc_detach(&device->ioc);

	device->bna = NULL;
}

void
bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
{
	struct bna_device *device = (struct bna_device *)arg;

	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
}

int
bna_device_status_get(struct bna_device *device)
{
	return (device->fsm == (bfa_fsm_t)bna_device_sm_ready);
}

void
bna_device_enable(struct bna_device *device)
{
	if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
		bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
		return;
	}

	device->ready_cbfn = bnad_cb_device_enabled;
	device->ready_cbarg = device->bna->bnad;

	bfa_fsm_send_event(device, DEVICE_E_ENABLE);
}

void
bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
		return;
	}

	device->stop_cbfn = bnad_cb_device_disabled;
	device->stop_cbarg = device->bna->bnad;

	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
}

int
bna_device_state_get(struct bna_device *device)
{
	return bfa_sm_to_state(device_sm_table, device->fsm);
}

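/*
 * Dynamic interrupt moderation (DIM) lookup tables, indexed as
 * [BNA_LOAD_T_* load level][bias]. bna_rx_dim_update() below derives
 * the indices from the observed packet rate and the small/large packet
 * mix; the entries are coalescing timeouts in the units consumed by
 * bna_ib_coalescing_timeo_set().
 */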
u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 20},
	{10, 18},
	{8, 16},
	{6, 12},
	{4, 8},
	{3, 6},
	{2, 4},
	{1, 2},
};

u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};

/* device */
void
bna_adv_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
{
	u8 *kva;
	u64 dma;

	device->bna = bna;

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;

	/**
	 * Attach common modules (currently only CEE) and claim the
	 * respective DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;

	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();
}

/* utils */

void
bna_adv_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				bfa_nw_cee_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);

	/* Virtual memory for soft stats */
	res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
		sizeof(struct bna_sw_stats);
}

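/*
 * Snapshot of driver-internal ("soft") state for diagnostics: current
 * FSM states, flags, and TxQ/RxQ/CQ membership bitmaps of every active
 * Tx and Rx object.
 */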
static void
bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
{
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	struct list_head *txq_qe;
	struct list_head *rxp_qe;
	struct list_head *mac_qe;
	int i;

	sw_stats->device_state = bna_device_state_get(&bna->device);
	sw_stats->port_state = bna_port_state_get(&bna->port);
	sw_stats->port_flags = bna->port.flags;
	sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
	sw_stats->priority = bna->port.priority;

	i = 0;
	list_for_each(qe, &bna->tx_mod.tx_active_q) {
		tx = (struct bna_tx *)qe;
		sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
		sw_stats->tx_stats[i].tx_flags = tx->flags;

		sw_stats->tx_stats[i].num_txqs = 0;
		sw_stats->tx_stats[i].txq_bmap[0] = 0;
		sw_stats->tx_stats[i].txq_bmap[1] = 0;
		list_for_each(txq_qe, &tx->txq_q) {
			txq = (struct bna_txq *)txq_qe;
			if (txq->txq_id < 32)
				sw_stats->tx_stats[i].txq_bmap[0] |=
						((u32)1 << txq->txq_id);
			else
				sw_stats->tx_stats[i].txq_bmap[1] |=
						((u32)1 << (txq->txq_id - 32));
			sw_stats->tx_stats[i].num_txqs++;
		}

		sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;

		i++;
	}
	sw_stats->num_active_tx = i;

	i = 0;
	list_for_each(qe, &bna->rx_mod.rx_active_q) {
		rx = (struct bna_rx *)qe;
		sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
		sw_stats->rx_stats[i].rx_flags = rx->rx_flags;

		sw_stats->rx_stats[i].num_rxps = 0;
		sw_stats->rx_stats[i].num_rxqs = 0;
		sw_stats->rx_stats[i].rxq_bmap[0] = 0;
		sw_stats->rx_stats[i].rxq_bmap[1] = 0;
		sw_stats->rx_stats[i].cq_bmap[0] = 0;
		sw_stats->rx_stats[i].cq_bmap[1] = 0;
		list_for_each(rxp_qe, &rx->rxp_q) {
			rxp = (struct bna_rxp *)rxp_qe;

			sw_stats->rx_stats[i].num_rxqs += 1;

			if (rxp->type == BNA_RXP_SINGLE) {
				if (rxp->rxq.single.only->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						((u32)1 <<
						rxp->rxq.single.only->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						((u32)1 <<
						(rxp->rxq.single.only->rxq_id - 32));
				}
			} else {
				if (rxp->rxq.slr.large->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						((u32)1 <<
						rxp->rxq.slr.large->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						((u32)1 <<
						(rxp->rxq.slr.large->rxq_id - 32));
				}

				if (rxp->rxq.slr.small->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						((u32)1 <<
						rxp->rxq.slr.small->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						((u32)1 <<
						(rxp->rxq.slr.small->rxq_id - 32));
				}
				sw_stats->rx_stats[i].num_rxqs += 1;
			}

			if (rxp->cq.cq_id < 32)
				sw_stats->rx_stats[i].cq_bmap[0] |=
					((u32)1 << rxp->cq.cq_id);
			else
				sw_stats->rx_stats[i].cq_bmap[1] |=
					((u32)1 << (rxp->cq.cq_id - 32));

			sw_stats->rx_stats[i].num_rxps++;
		}

		sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
		sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
		sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;

		sw_stats->rx_stats[i].num_active_ucast = 0;
		if (rx->rxf.ucast_active_mac)
			sw_stats->rx_stats[i].num_active_ucast++;
		list_for_each(mac_qe, &rx->rxf.ucast_active_q)
			sw_stats->rx_stats[i].num_active_ucast++;

		sw_stats->rx_stats[i].num_active_mcast = 0;
		list_for_each(mac_qe, &rx->rxf.mcast_active_q)
			sw_stats->rx_stats[i].num_active_mcast++;

		sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
		sw_stats->rx_stats[i].vlan_filter_status =
			rx->rxf.vlan_filter_status;
		memcpy(sw_stats->rx_stats[i].vlan_filter_table,
			rx->rxf.vlan_filter_table,
			sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));

		sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
		sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;

		i++;
	}
	sw_stats->num_active_rx = i;
}

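/*
 * Firmware statistics completion. The firmware DMAs a compacted buffer:
 * the fixed portion of struct bfi_ll_stats followed by per-TxF and
 * per-RxF blocks for only those functions set in the request bitmaps.
 * The handler endian-converts the buffer, then walks it backwards to
 * scatter the compacted TxF/RxF blocks into their fixed slots in
 * hw_stats (backwards, so the expanding copy never overwrites data it
 * has not read yet).
 */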
static void
bna_fw_cb_stats_get(void *arg, int status)
{
	struct bna *bna = (struct bna *)arg;
	u64 *p_stats;
	int i, count;
	int rxf_count, txf_count;
	u64 rxf_bmap, txf_bmap;

	bfa_q_qe_init(&bna->mbox_qe.qe);

	if (status == 0) {
		p_stats = (u64 *)bna->stats.hw_stats;
		count = sizeof(struct bfi_ll_stats) / sizeof(u64);
		for (i = 0; i < count; i++)
			p_stats[i] = cpu_to_be64(p_stats[i]);

		rxf_count = 0;
		rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
			((u64)bna->stats.rxf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
			if (rxf_bmap & ((u64)1 << i))
				rxf_count++;

		txf_count = 0;
		txf_bmap = (u64)bna->stats.txf_bmap[0] |
			((u64)bna->stats.txf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
			if (txf_bmap & ((u64)1 << i))
				txf_count++;

		p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
			((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
			txf_count * sizeof(struct bfi_ll_stats_txf)) /
			sizeof(u64));

		/* Populate the TXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
			if (txf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_txf) /
					sizeof(u64);
				memcpy(&bna->stats.hw_stats->txf_stats[i],
					p_stats,
					sizeof(struct bfi_ll_stats_txf));
			}

		/* Populate the RXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
			if (rxf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_rxf) /
					sizeof(u64);
				memcpy(&bna->stats.hw_stats->rxf_stats[i],
					p_stats,
					sizeof(struct bfi_ll_stats_rxf));
			}

		bna_sw_stats_get(bna, bna->stats.sw_stats);
		bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
	} else
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}

static void
bna_fw_stats_get(struct bna *bna)
{
	struct bfi_ll_stats_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);

	ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
	ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
	ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
	ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);

	ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
	ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;

	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_stats_get, bna);
	bna_mbox_send(bna, &bna->mbox_qe);

	bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
	bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
	bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
}

static void
bna_fw_cb_stats_clr(void *arg, int status)
{
	struct bna *bna = (struct bna *)arg;

	bfa_q_qe_init(&bna->mbox_qe.qe);

	memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
	memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));

	bnad_cb_stats_clr(bna->bnad);
}

static void
bna_fw_stats_clr(struct bna *bna)
{
	struct bfi_ll_stats_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
	ll_req.rxf_id_mask[0] = htonl(0xffffffff);
	ll_req.rxf_id_mask[1] = htonl(0xffffffff);
	ll_req.txf_id_mask[0] = htonl(0xffffffff);
	ll_req.txf_id_mask[1] = htonl(0xffffffff);

	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_stats_clr, bna);
	bna_mbox_send(bna, &bna->mbox_qe);
}

void
bna_stats_get(struct bna *bna)
{
	if (bna_device_status_get(&bna->device))
		bna_fw_stats_get(bna);
	else
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}

void
bna_stats_clr(struct bna *bna)
{
	if (bna_device_status_get(&bna->device))
		bna_fw_stats_clr(bna);
	else {
		/* sw_stats is a pointer; clear the structure it points
		 * to, as in bna_fw_cb_stats_clr() above */
		memset(bna->stats.sw_stats, 0,
			sizeof(struct bna_sw_stats));
		memset(bna->stats.hw_stats, 0,
			sizeof(struct bfi_ll_stats));
		bnad_cb_stats_clr(bna->bnad);
	}
}

/* IB */
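/*
 * Update the stored coalescing timeout; if the IB is already started,
 * also refresh the cached doorbell-ack value so that, presumably, the
 * new timeout takes effect on the next interrupt acknowledgement.
 */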
void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->ib_config.coalescing_timeo = coalescing_timeo;

	if (ib->start_count)
		ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->ib_config.coalescing_timeo, 0);
}

/* RxF */
void
bna_rxf_adv_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config)
{
	switch (q_config->rxp_type) {
	case BNA_RXP_SINGLE:
		/* No-op */
		break;
	case BNA_RXP_SLR:
		rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
		break;
	case BNA_RXP_HDS:
		rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
		rxf->hds_cfg.header_size =
				q_config->hds_config.header_size;
		rxf->forced_offset = 0;
		break;
	default:
		break;
	}

	if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
		rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
		rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
		memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
			&q_config->rss_config.toeplitz_hash_key[0],
			sizeof(rxf->rss_cfg.toeplitz_hash_key));
	}
}

static void
rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
{
	struct bfi_ll_rxf_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	req.enable = status;

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
			rxf_cb_cam_fltr_mbox_cmd, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

void
__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
{
	struct bna_rx_fndb_ram *rx_fndb_ram;
	u32 ctrl_flags;
	int i;

	rx_fndb_ram = (struct bna_rx_fndb_ram *)
			BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
			RX_FNDB_RAM_BASE_OFFSET);

	for (i = 0; i < BFI_MAX_RXF; i++) {
		if (status == BNA_STATUS_T_ENABLED) {
			if (i == rxf->rxf_id)
				continue;

			ctrl_flags =
				readl(&rx_fndb_ram[i].control_flags);
			ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
			writel(ctrl_flags,
				&rx_fndb_ram[i].control_flags);
		} else {
			ctrl_flags =
				readl(&rx_fndb_ram[i].control_flags);
			ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
			writel(ctrl_flags,
				&rx_fndb_ram[i].control_flags);
		}
	}
}

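/*
 * Each rxf_process_* / rxf_clear_* helper below posts at most one
 * mailbox command per invocation and returns 1 if a command was posted
 * (more work pending) or 0 if there was nothing to do, letting the RxF
 * state machine iterate until the filter configuration converges.
 */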
int
rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		return 1;
	}

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	return 0;
}

int
rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;

		/* Disable VLAN filter to allow all VLANs */
		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

int
rxf_process_packet_filter_default(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable default mode */
	if (is_default_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move default configuration from pending -> active */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_DEFAULT;

		/* Disable VLAN filter to allow all VLANs */
		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
		/* Redirect all other RxF vlan filtering to this one */
		__rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_default_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move default configuration from pending -> active */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
		bna->rxf_default_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		/* Stop RxF vlan filter table redirection */
		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

int
rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;

		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;

		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

int
rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* 1. delete pending ucast entries */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	/* 2. clear active ucast entries; move them to pending_add_q */
	if (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
		return 1;
	}

	return 0;
}

int
rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 6. Execute pending promisc mode disable command */
	if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 7. Clear active promisc mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* move promisc configuration from active -> pending */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

int
rxf_clear_packet_filter_default(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 8. Execute pending default mode disable command */
	if (is_default_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move default configuration from pending -> active */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
		bna->rxf_default_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		/* Stop RxF vlan filter table redirection */
		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 9. Clear active default mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		/* move default configuration from active -> pending */
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		/* Stop RxF vlan filter table redirection */
		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

int
rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* 10. Execute pending allmulti mode disable command */
	if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 11. Clear active allmulti mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* move allmulti configuration from active -> pending */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

void
rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 1. Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
	}

	/* 2. Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}
}

void
rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 6. Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;
	}

	/* 7. Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
	}
}

void
rxf_reset_packet_filter_default(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 8. Clear pending default mode disable */
	if (is_default_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
		bna->rxf_default_id = BFI_MAX_RXF;
	}

	/* 9. Move default mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
	}
}

void
rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* 10. Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}

	/* 11. Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}
}

/**
 * Should only be called by bna_rxf_mode_set.
 * Helps decide whether a h/w configuration is needed.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
int
rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There cannot be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Schedule enable */
	} else {
		/* Promisc mode should not be active in the system */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_promisc_id = rxf->rxf_id;
		ret = 1;
	}

	return ret;
}

/**
 * Should only be called by bna_rxf_mode_set.
 * Helps decide whether a h/w configuration is needed.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
int
rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There cannot be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Promisc mode should not be active */
		/* system promisc state should be pending */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the promisc state from the system */
		bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Promisc mode should be active in the system */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;

	/* Do nothing if already disabled */
	} else {
	}

	return ret;
}

/**
 * Should only be called by bna_rxf_mode_set.
 * Helps decide whether a h/w configuration is needed.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
int
rxf_default_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There cannot be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_default_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
		/* Schedule enable */
	} else {
		/* Default mode should not be active in the system */
		default_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_default_id = rxf->rxf_id;
		ret = 1;
	}

	return ret;
}

/**
 * Should only be called by bna_rxf_mode_set.
 * Helps decide whether a h/w configuration is needed.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
int
rxf_default_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There cannot be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_default_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Default mode should not be active */
		/* system default state should be pending */
		default_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the default state from the system */
		bna->rxf_default_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
		/* Default mode should be active in the system */
		default_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;

	/* Do nothing if already disabled */
	} else {
	}

	return ret;
}

/**
 * Should only be called by bna_rxf_mode_set.
 * Helps decide whether a h/w configuration is needed.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
int
rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There cannot be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Schedule enable */
	} else {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

/**
 * Should only be called by bna_rxf_mode_set.
 * Helps decide whether a h/w configuration is needed.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
int
rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There cannot be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Allmulti mode should not be active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

/* RxF <- bnad */
void
bna_rx_mcast_delall(struct bna_rx *rx,
		    void (*cbfn)(struct bnad *, struct bna_rx *,
				 enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
		need_hw_config = 1;
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
		return;
	}

	if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}

/* RxF <- Rx */
void
bna_rx_receive_resume(struct bna_rx *rx,
		      void (*cbfn)(struct bnad *, struct bna_rx *,
				   enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
		rxf->oper_state_cbfn = cbfn;
		rxf->oper_state_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_RESUME);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}

void
bna_rx_receive_pause(struct bna_rx *rx,
		     void (*cbfn)(struct bnad *, struct bna_rx *,
				  enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
		rxf->oper_state_cbfn = cbfn;
		rxf->oper_state_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_PAUSE);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
}

/* RxF <- bnad */
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	/* Check if already added */
	list_for_each(qe, &rxf->ucast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	/* Check if pending addition */
	list_for_each(qe, &rxf->ucast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
	if (mac == NULL)
		return BNA_CB_UCAST_CAM_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

/* RxF <- bnad */
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	list_for_each(qe, &rxf->ucast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			list_del(qe);
			bfa_q_qe_init(qe);
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	list_for_each(qe, &rxf->ucast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			list_del(qe);
			bfa_q_qe_init(qe);
			list_add_tail(qe, &rxf->ucast_pending_del_q);
			rxf->cam_fltr_cbfn = cbfn;
			rxf->cam_fltr_cbarg = rx->bna->bnad;
			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
			return BNA_CB_SUCCESS;
		}
	}

	return BNA_CB_INVALID_MAC;
}

/* RxF <- bnad */
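/*
 * Apply rx mode changes. Promiscuous and default modes are chip-wide
 * resources: at most one RxF in the system may own each, and the two
 * are mutually exclusive, hence the error checks below.
 */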
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_promisc_id != rxf->rxf_id))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->rxf_default_id != BFI_MAX_RXF)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_default_id != rxf->rxf_id)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_default_enable(new_mode, bitmask)) {
		if (rxf_default_enable(rxf))
			need_hw_config = 1;
	} else if (is_default_disable(new_mode, bitmask)) {
		if (rxf_default_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}

/* RxF <- bnad */
void
bna_rx_rss_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_ENABLED;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}

/* RxF <- bnad */
void
bna_rx_rss_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_DISABLED;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}

/* RxF <- bnad */
void
bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
{
	struct bna_rxf *rxf = &rx->rxf;

	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
	rxf->rss_status = BNA_STATUS_T_ENABLED;
	rxf->rss_cfg = *rss_config;
	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}

/* RxF <- bnad */
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

/* RxF <- bnad */
void
bna_rx_vlanfilter_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

/* Rx */

struct bna_rxp *
bna_rx_get_rxp(struct bna_rx *rx, int vector)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		if (rxp->vector == vector)
			return rxp;
	}
	return NULL;
}

/*
 * bna_rx_rss_rit_set()
 * Sets the Q ids for the specified MSI-X vectors in the RIT.
 * The maximum supported RIT size is 64, which should also be the
 * maximum size of the vectors array.
 */
void
bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
{
	int i;
	struct bna_rxp *rxp;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct bna *bna;
	struct bna_rxf *rxf;

	/* Build the RIT contents for this RX */
	bna = rx->bna;

	rxf = &rx->rxf;
	for (i = 0; i < nvectors; i++) {
		rxp = bna_rx_get_rxp(rx, vectors[i]);

		GET_RXQS(rxp, q0, q1);
		rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
		rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
	}

	rxf->rit_segment->rit_size = nvectors;

	/* A subsequent call to enable/reconfigure RSS updates the RIT in h/w */
}

/* Rx <- bnad */
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
	}
}

/* Rx <- bnad */
void
bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}

/* Rx <- bnad */
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
}

/* Tx */
/* TX <- bnad */
enum bna_cb_status
bna_tx_prio_set(struct bna_tx *tx, int prio,
		void (*cbfn)(struct bnad *, struct bna_tx *,
			     enum bna_cb_status))
{
	if (tx->flags & BNA_TX_F_PRIO_LOCK)
		return BNA_CB_FAIL;

	tx->prio_change_cbfn = cbfn;
	bna_tx_prio_changed(tx, prio);

	return BNA_CB_SUCCESS;
}

/* TX <- bnad */
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
	}
}

/*
 * Private data
 */

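/*
 * RIT segment pool configuration. init_ritseg_pool() is assumed to be
 * a macro from the bna headers that instantiates the static
 * ritseg_pool_cfg[BFI_RIT_SEG_TOTAL_POOLS] array consumed by
 * bna_rit_mod_init() below.
 */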
struct bna_ritseg_pool_cfg {
	u32 pool_size;
	u32 pool_entry_size;
};
init_ritseg_pool(ritseg_pool_cfg);

/*
 * Private functions
 */
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < BFI_MAX_UCMAC; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}

static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	/* Sanity walk: count the entries left on the free list */
	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}

static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < BFI_MAX_MCMAC; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->bna = bna;
}

static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i = 0;

	/* Sanity walk: count the entries left on the free list */
	list_for_each(qe, &mcam_mod->free_q)
		i++;

	mcam_mod->bna = NULL;
}

3269static void
3270bna_rit_mod_init(struct bna_rit_mod *rit_mod,
3271 struct bna_res_info *res_info)
3272{
3273 int i;
3274 int j;
3275 int count;
3276 int offset;
3277
3278 rit_mod->rit = (struct bna_rit_entry *)
3279 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
3280 rit_mod->rit_segment = (struct bna_rit_segment *)
3281 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
3282
3283 count = 0;
3284 offset = 0;
3285 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3286 INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
3287 for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
3288 bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
3289 rit_mod->rit_segment[count].max_rit_size =
3290 ritseg_pool_cfg[i].pool_entry_size;
3291 rit_mod->rit_segment[count].rit_offset = offset;
3292 rit_mod->rit_segment[count].rit =
3293 &rit_mod->rit[offset];
3294 list_add_tail(&rit_mod->rit_segment[count].qe,
3295 &rit_mod->rit_seg_pool[i]);
3296 count++;
3297 offset += ritseg_pool_cfg[i].pool_entry_size;
3298 }
3299 }
3300}
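
/*
 * Worked example (editorial): with the default ritseg_pool_cfg of
 * { 192, 1 } and { 1, 64 }, the loop above carves the flat rit[]
 * array into 192 one-entry segments at offsets 0..191, followed by
 * a single 64-entry RSS segment at offset 192, for a total of
 * BFI_RIT_TOTAL_SEGS (193) segments covering 256 RIT entries.
 */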
3301
3302static void
3303bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
3304{
3305 struct bna_rit_segment *rit_segment;
3306 struct list_head *qe;
3307 int i;
3308 int j;
3309
3310 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3311 j = 0;
3312 list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
3313 rit_segment = (struct bna_rit_segment *)qe;
3314 j++;
3315 }
3316 }
3317}
3318
3319/*
3320 * Public functions
3321 */
3322
3323/* Called during probe(), before calling bna_init() */
3324void
3325bna_res_req(struct bna_res_info *res_info)
3326{
3327 bna_adv_res_req(res_info);
3328
3329 /* DMA memory for retrieving IOC attributes */
3330 res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
3331 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
3332 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
3333 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
3334 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
3335
3336 /* DMA memory for index segment of an IB */
3337 res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3338 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
3339 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
3340 BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
3341 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
3342
3343 /* Virtual memory for IB objects - stored by IB module */
3344 res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
3345 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
3346 BNA_MEM_T_KVA;
3347 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
3348 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
3349 BFI_MAX_IB * sizeof(struct bna_ib);
3350
3351 /* Virtual memory for intr objects - stored by IB module */
3352 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
3353 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
3354 BNA_MEM_T_KVA;
3355 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
3356 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
3357 BFI_MAX_IB * sizeof(struct bna_intr);
3358
3359 /* Virtual memory for idx_seg objects - stored by IB module */
3360 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
3361 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
3362 BNA_MEM_T_KVA;
3363 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
3364 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
3365 BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
3366
3367 /* Virtual memory for Tx objects - stored by Tx module */
3368 res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
3369 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
3370 BNA_MEM_T_KVA;
3371 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
3372 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
3373 BFI_MAX_TXQ * sizeof(struct bna_tx);
3374
3375 /* Virtual memory for TxQ - stored by Tx module */
3376 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
3377 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
3378 BNA_MEM_T_KVA;
3379 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
3380 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
3381 BFI_MAX_TXQ * sizeof(struct bna_txq);
3382
3383 /* Virtual memory for Rx objects - stored by Rx module */
3384 res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
3385 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
3386 BNA_MEM_T_KVA;
3387 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
3388 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
3389 BFI_MAX_RXQ * sizeof(struct bna_rx);
3390
3391 /* Virtual memory for RxPath - stored by Rx module */
3392 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
3393 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
3394 BNA_MEM_T_KVA;
3395 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
3396 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
3397 BFI_MAX_RXQ * sizeof(struct bna_rxp);
3398
3399 /* Virtual memory for RxQ - stored by Rx module */
3400 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
3401 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
3402 BNA_MEM_T_KVA;
3403 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
3404 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
3405 BFI_MAX_RXQ * sizeof(struct bna_rxq);
3406
3407 /* Virtual memory for Unicast MAC address - stored by ucam module */
3408 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
3409 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
3410 BNA_MEM_T_KVA;
3411 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
3412 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
3413 BFI_MAX_UCMAC * sizeof(struct bna_mac);
3414
3415 /* Virtual memory for Multicast MAC address - stored by mcam module */
3416 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
3417 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
3418 BNA_MEM_T_KVA;
3419 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
3420 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
3421 BFI_MAX_MCMAC * sizeof(struct bna_mac);
3422
3423 /* Virtual memory for RIT entries */
3424 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
3425 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
3426 BNA_MEM_T_KVA;
3427 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
3428 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
3429 BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
3430
3431 /* Virtual memory for RIT segment table */
3432 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
3433 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
3434 BNA_MEM_T_KVA;
3435 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
3436 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
3437 BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
3438
3439 /* Interrupt resource for mailbox interrupt */
3440 res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
3441 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
3442 BNA_INTR_T_MSIX;
3443 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
3444}
3445
3446/* Called during probe() */
3447void
3448bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
3449 struct bna_res_info *res_info)
3450{
3451 bna->bnad = bnad;
3452 bna->pcidev = *pcidev;
3453
3454 bna->stats.hw_stats = (struct bfi_ll_stats *)
3455 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
3456 bna->hw_stats_dma.msb =
3457 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
3458 bna->hw_stats_dma.lsb =
3459 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
3460 bna->stats.sw_stats = (struct bna_sw_stats *)
3461 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
3462
3463 bna->regs.page_addr = bna->pcidev.pci_bar_kva +
3464 reg_offset[bna->pcidev.pci_func].page_addr;
3465 bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
3466 reg_offset[bna->pcidev.pci_func].fn_int_status;
3467 bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
3468 reg_offset[bna->pcidev.pci_func].fn_int_mask;
3469
3470 if (bna->pcidev.pci_func < 3)
3471 bna->port_num = 0;
3472 else
3473 bna->port_num = 1;
3474
3475 /* Also initializes diag, cee, sfp, phy_port and mbox_mod */
3476 bna_device_init(&bna->device, bna, res_info);
3477
3478 bna_port_init(&bna->port, bna);
3479
3480 bna_tx_mod_init(&bna->tx_mod, bna, res_info);
3481
3482 bna_rx_mod_init(&bna->rx_mod, bna, res_info);
3483
3484 bna_ib_mod_init(&bna->ib_mod, bna, res_info);
3485
3486 bna_rit_mod_init(&bna->rit_mod, res_info);
3487
3488 bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
3489
3490 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
3491
3492 bna->rxf_default_id = BFI_MAX_RXF;
3493 bna->rxf_promisc_id = BFI_MAX_RXF;
3494
3495 /* Mbox q element for posting stat request to f/w */
3496 bfa_q_qe_init(&bna->mbox_qe.qe);
3497}
3498
3499void
3500bna_uninit(struct bna *bna)
3501{
3502 bna_mcam_mod_uninit(&bna->mcam_mod);
3503
3504 bna_ucam_mod_uninit(&bna->ucam_mod);
3505
3506 bna_rit_mod_uninit(&bna->rit_mod);
3507
3508 bna_ib_mod_uninit(&bna->ib_mod);
3509
3510 bna_rx_mod_uninit(&bna->rx_mod);
3511
3512 bna_tx_mod_uninit(&bna->tx_mod);
3513
3514 bna_port_uninit(&bna->port);
3515
3516 bna_device_uninit(&bna->device);
3517
3518 bna->bnad = NULL;
3519}
3520
3521struct bna_mac *
3522bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
3523{
3524 struct list_head *qe;
3525
3526 if (list_empty(&ucam_mod->free_q))
3527 return NULL;
3528
3529 bfa_q_deq(&ucam_mod->free_q, &qe);
3530
3531 return (struct bna_mac *)qe;
3532}
3533
3534void
3535bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
3536{
3537 list_add_tail(&mac->qe, &ucam_mod->free_q);
3538}
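
/*
 * Usage sketch (editorial, not part of the driver): a caller would
 * typically allocate a CAM entry, fill it in, and return it to the
 * free list when done. "addr" is a hypothetical MAC address buffer,
 * and the sketch assumes struct bna_mac carries an addr[] field:
 *
 *	struct bna_mac *mac = bna_ucam_mod_mac_get(&bna->ucam_mod);
 *	if (mac == NULL)
 *		return;		// CAM exhausted
 *	memcpy(&mac->addr, addr, sizeof(mac->addr));
 *	...
 *	bna_ucam_mod_mac_put(&bna->ucam_mod, mac);
 */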
3539
3540struct bna_mac *
3541bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
3542{
3543 struct list_head *qe;
3544
3545 if (list_empty(&mcam_mod->free_q))
3546 return NULL;
3547
3548 bfa_q_deq(&mcam_mod->free_q, &qe);
3549
3550 return (struct bna_mac *)qe;
3551}
3552
3553void
3554bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
3555{
3556 list_add_tail(&mac->qe, &mcam_mod->free_q);
3557}
3558
3559/**
3560 * Note: This should be called in the same locking context as the call to
3561 * bna_rit_mod_seg_get()
3562 */
3563int
3564bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
3565{
3566 int i;
3567
3568 /* Select the pool for seg_size */
3569 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3570 if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
3571 break;
3572 }
3573
3574 if (i == BFI_RIT_SEG_TOTAL_POOLS)
3575 return 0;
3576
3577 if (list_empty(&rit_mod->rit_seg_pool[i]))
3578 return 0;
3579
3580 return 1;
3581}
3582
3583struct bna_rit_segment *
3584bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
3585{
3586 struct bna_rit_segment *seg;
3587 struct list_head *qe;
3588 int i;
3589
3590 /* Select the pool for seg_size */
3591 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3592 if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
3593 break;
3594 }
3595
3596 if (i == BFI_RIT_SEG_TOTAL_POOLS)
3597 return NULL;
3598
3599 if (list_empty(&rit_mod->rit_seg_pool[i]))
3600 return NULL;
3601
3602 bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
3603 seg = (struct bna_rit_segment *)qe;
3604 bfa_q_qe_init(&seg->qe);
3605 seg->rit_size = seg_size;
3606
3607 return seg;
3608}
3609
3610void
3611bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
3612 struct bna_rit_segment *seg)
3613{
3614 int i;
3615
3616 /* Select the pool for seg->max_rit_size */
3617 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3618 if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
3619 break;
3620 }
3621
3622 seg->rit_size = 0;
3623 list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
3624}
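
/*
 * Usage sketch (editorial): per the note above bna_rit_mod_can_satisfy(),
 * the check and the allocation must happen under the same lock. Note
 * also that bna_rit_mod_seg_put() trusts that seg->max_rit_size matches
 * one of the configured pool entry sizes.
 *
 *	// caller holds the appropriate bna lock
 *	if (!bna_rit_mod_can_satisfy(&bna->rit_mod, BFI_RSS_RIT_SIZE))
 *		return;		// no segment large enough is free
 *	seg = bna_rit_mod_seg_get(&bna->rit_mod, BFI_RSS_RIT_SIZE);
 *	...
 *	bna_rit_mod_seg_put(&bna->rit_mod, seg);
 */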
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
new file mode 100644
index 000000000000..67eb376c5c7e
--- /dev/null
+++ b/drivers/net/bna/bna_hw.h
@@ -0,0 +1,1491 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 *
18 * File for interrupt macros and functions
19 */
20
21#ifndef __BNA_HW_H__
22#define __BNA_HW_H__
23
24#include "bfi_ctreg.h"
25
26/**
27 *
28 * SW imposed limits
29 *
30 */
31
32#ifndef BNA_BIOS_BUILD
33
34#define BFI_MAX_TXQ 64
35#define BFI_MAX_RXQ 64
36#define BFI_MAX_RXF 64
37#define BFI_MAX_IB 128
38#define BFI_MAX_RIT_SIZE 256
39#define BFI_RSS_RIT_SIZE 64
40#define BFI_NONRSS_RIT_SIZE 1
41#define BFI_MAX_UCMAC 256
42#define BFI_MAX_MCMAC 512
43#define BFI_IBIDX_SIZE 4
44#define BFI_MAX_VLAN 4095
45
46/**
47 * There are 3 free IB index pools:
48 * pool1: 116 x 1-index segments, pool2: 2 x 2-index segments,
49 * pool8: 1 x 8-index segment (119 segments in all)
50 */
51#define BFI_IBIDX_POOL1_SIZE 116
52#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
53#define BFI_IBIDX_POOL2_SIZE 2
54#define BFI_IBIDX_POOL2_ENTRY_SIZE 2
55#define BFI_IBIDX_POOL8_SIZE 1
56#define BFI_IBIDX_POOL8_ENTRY_SIZE 8
57#define BFI_IBIDX_TOTAL_POOLS 3
58#define BFI_IBIDX_TOTAL_SEGS 119 /* (POOL1 + POOL2 + POOL8)_SIZE */
59#define BFI_IBIDX_MAX_SEGSIZE 8
60#define init_ibidx_pool(name) \
61static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
62{ \
63 { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE }, \
64 { BFI_IBIDX_POOL2_SIZE, BFI_IBIDX_POOL2_ENTRY_SIZE }, \
65 { BFI_IBIDX_POOL8_SIZE, BFI_IBIDX_POOL8_ENTRY_SIZE } \
66}
67
68/**
69 * There are 2 free RIT segment pools:
70 * Pool1: 192 segments of 1 RIT entry each
71 * PoolRSS: 1 segment of 64 RIT entries
72 */
73#define BFI_RIT_SEG_POOL1_SIZE 192
74#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
75#define BFI_RIT_SEG_POOLRSS_SIZE 1
76#define BFI_RIT_SEG_POOLRSS_ENTRY_SIZE 64
77#define BFI_RIT_SEG_TOTAL_POOLS 2
78#define BFI_RIT_TOTAL_SEGS 193 /* POOL1_SIZE + POOLRSS_SIZE */
79#define init_ritseg_pool(name) \
80static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
81{ \
82 { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE }, \
83 { BFI_RIT_SEG_POOLRSS_SIZE, BFI_RIT_SEG_POOLRSS_ENTRY_SIZE } \
84}
85
86#else /* BNA_BIOS_BUILD */
87
88#define BFI_MAX_TXQ 1
89#define BFI_MAX_RXQ 1
90#define BFI_MAX_RXF 1
91#define BFI_MAX_IB 2
92#define BFI_MAX_RIT_SIZE 2
93#define BFI_RSS_RIT_SIZE 64
94#define BFI_NONRSS_RIT_SIZE 1
95#define BFI_MAX_UCMAC 1
96#define BFI_MAX_MCMAC 8
97#define BFI_IBIDX_SIZE 4
98#define BFI_MAX_VLAN 4095
99/* There is one free pool: 2 segments of 1 index each */
100#define BFI_IBIDX_POOL1_SIZE 2
101#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
102#define BFI_IBIDX_TOTAL_POOLS 1
103#define BFI_IBIDX_TOTAL_SEGS 2 /* POOL1_SIZE */
104#define BFI_IBIDX_MAX_SEGSIZE 1
105#define init_ibidx_pool(name) \
106static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
107{ \
108 { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE } \
109}
110
111#define BFI_RIT_SEG_POOL1_SIZE 1
112#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
113#define BFI_RIT_SEG_TOTAL_POOLS 1
114#define BFI_RIT_TOTAL_SEGS 1 /* POOL1_SIZE */
115#define init_ritseg_pool(name) \
116static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
117{ \
118 { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE } \
119}
120
121#endif /* BNA_BIOS_BUILD */
122
123#define BFI_RSS_HASH_KEY_LEN 10
124
125#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
126#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
127#define BFI_MAX_INTERPKT_COUNT 0xFF
128#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
129#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
130#define BFI_TX_INTERPKT_COUNT 32
131#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
132#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
133#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
134
135#define BFI_TXQ_WI_SIZE 64 /* bytes */
136#define BFI_RXQ_WI_SIZE 8 /* bytes */
137#define BFI_CQ_WI_SIZE 16 /* bytes */
138#define BFI_TX_MAX_WRR_QUOTA 0xFFF
139
140#define BFI_TX_MAX_VECTORS_PER_WI 4
141#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
142#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
143#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
144
145/* Small Q buffer size */
146#define BFI_SMALL_RXBUF_SIZE 128
147
148/* Defined separately since BFA_FLASH_DMA_BUF_SZ is in bfa_flash.c */
149#define BFI_FLASH_DMA_BUF_SZ 0x010000 /* 64K DMA */
150#define BFI_HW_STATS_SIZE 0x4000 /* 16K DMA */
151
152/**
153 *
154 * HW register offsets, macros
155 *
156 */
157
158/* DMA Block Register Host Window Start Address */
159#define DMA_BLK_REG_ADDR 0x00013000
160
161/* DMA Block Internal Registers */
162#define DMA_CTRL_REG0 (DMA_BLK_REG_ADDR + 0x000)
163#define DMA_CTRL_REG1 (DMA_BLK_REG_ADDR + 0x004)
164#define DMA_ERR_INT_STATUS (DMA_BLK_REG_ADDR + 0x008)
165#define DMA_ERR_INT_ENABLE (DMA_BLK_REG_ADDR + 0x00c)
166#define DMA_ERR_INT_STATUS_SET (DMA_BLK_REG_ADDR + 0x010)
167
168/* APP Block Register Address Offset from BAR0 */
169#define APP_BLK_REG_ADDR 0x00014000
170
171/* Host Function Interrupt Mask Registers */
172#define HOSTFN0_INT_MASK (APP_BLK_REG_ADDR + 0x004)
173#define HOSTFN1_INT_MASK (APP_BLK_REG_ADDR + 0x104)
174#define HOSTFN2_INT_MASK (APP_BLK_REG_ADDR + 0x304)
175#define HOSTFN3_INT_MASK (APP_BLK_REG_ADDR + 0x404)
176
177/**
178 * Host Function PCIe Error Registers
179 * Duplicates "Correctable" & "Uncorrectable"
180 * registers in PCIe Config space.
181 */
182#define FN0_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x014)
183#define FN1_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x114)
184#define FN2_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x314)
185#define FN3_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x414)
186
187/* Host Function Error Type Status Registers */
188#define FN0_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x018)
189#define FN1_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x118)
190#define FN2_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x318)
191#define FN3_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x418)
192
193/* Host Function Error Type Mask Registers */
194#define FN0_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x01c)
195#define FN1_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x11c)
196#define FN2_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x31c)
197#define FN3_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x41c)
198
199/* Catapult Host Semaphore Status Registers (App block) */
200#define HOST_SEM_STS0_REG (APP_BLK_REG_ADDR + 0x630)
201#define HOST_SEM_STS1_REG (APP_BLK_REG_ADDR + 0x634)
202#define HOST_SEM_STS2_REG (APP_BLK_REG_ADDR + 0x638)
203#define HOST_SEM_STS3_REG (APP_BLK_REG_ADDR + 0x63c)
204#define HOST_SEM_STS4_REG (APP_BLK_REG_ADDR + 0x640)
205#define HOST_SEM_STS5_REG (APP_BLK_REG_ADDR + 0x644)
206#define HOST_SEM_STS6_REG (APP_BLK_REG_ADDR + 0x648)
207#define HOST_SEM_STS7_REG (APP_BLK_REG_ADDR + 0x64c)
208
209/* PCIe Misc Register */
210#define PCIE_MISC_REG (APP_BLK_REG_ADDR + 0x200)
211
212/* Temp Sensor Control Registers */
213#define TEMPSENSE_CNTL_REG (APP_BLK_REG_ADDR + 0x250)
214#define TEMPSENSE_STAT_REG (APP_BLK_REG_ADDR + 0x254)
215
216/* APP Block local error registers */
217#define APP_LOCAL_ERR_STAT (APP_BLK_REG_ADDR + 0x258)
218#define APP_LOCAL_ERR_MSK (APP_BLK_REG_ADDR + 0x25c)
219
220/* PCIe Link Error registers */
221#define PCIE_LNK_ERR_STAT (APP_BLK_REG_ADDR + 0x260)
222#define PCIE_LNK_ERR_MSK (APP_BLK_REG_ADDR + 0x264)
223
224/**
225 * FCoE/FIP Ethertype Register
226 * 31:16 -- Chip wide value for FIP type
227 * 15:0 -- Chip wide value for FCoE type
228 */
229#define FCOE_FIP_ETH_TYPE (APP_BLK_REG_ADDR + 0x280)
230
231/**
232 * Reserved Ethertype Register
233 * 31:16 -- Reserved
234 * 15:0 -- Other ethertype
235 */
236#define RESV_ETH_TYPE (APP_BLK_REG_ADDR + 0x284)
237
238/**
239 * Host Command Status Registers
240 * Each set consists of 3 registers :
241 * clear, set, cmd
242 * 16 such register sets in all
243 * See catapult_spec.pdf for detailed functionality
244 * Put each type in a single macro accessed by _num ?
245 */
246#define HOST_CMDSTS0_CLR_REG (APP_BLK_REG_ADDR + 0x500)
247#define HOST_CMDSTS0_SET_REG (APP_BLK_REG_ADDR + 0x504)
248#define HOST_CMDSTS0_REG (APP_BLK_REG_ADDR + 0x508)
249#define HOST_CMDSTS1_CLR_REG (APP_BLK_REG_ADDR + 0x510)
250#define HOST_CMDSTS1_SET_REG (APP_BLK_REG_ADDR + 0x514)
251#define HOST_CMDSTS1_REG (APP_BLK_REG_ADDR + 0x518)
252#define HOST_CMDSTS2_CLR_REG (APP_BLK_REG_ADDR + 0x520)
253#define HOST_CMDSTS2_SET_REG (APP_BLK_REG_ADDR + 0x524)
254#define HOST_CMDSTS2_REG (APP_BLK_REG_ADDR + 0x528)
255#define HOST_CMDSTS3_CLR_REG (APP_BLK_REG_ADDR + 0x530)
256#define HOST_CMDSTS3_SET_REG (APP_BLK_REG_ADDR + 0x534)
257#define HOST_CMDSTS3_REG (APP_BLK_REG_ADDR + 0x538)
258#define HOST_CMDSTS4_CLR_REG (APP_BLK_REG_ADDR + 0x540)
259#define HOST_CMDSTS4_SET_REG (APP_BLK_REG_ADDR + 0x544)
260#define HOST_CMDSTS4_REG (APP_BLK_REG_ADDR + 0x548)
261#define HOST_CMDSTS5_CLR_REG (APP_BLK_REG_ADDR + 0x550)
262#define HOST_CMDSTS5_SET_REG (APP_BLK_REG_ADDR + 0x554)
263#define HOST_CMDSTS5_REG (APP_BLK_REG_ADDR + 0x558)
264#define HOST_CMDSTS6_CLR_REG (APP_BLK_REG_ADDR + 0x560)
265#define HOST_CMDSTS6_SET_REG (APP_BLK_REG_ADDR + 0x564)
266#define HOST_CMDSTS6_REG (APP_BLK_REG_ADDR + 0x568)
267#define HOST_CMDSTS7_CLR_REG (APP_BLK_REG_ADDR + 0x570)
268#define HOST_CMDSTS7_SET_REG (APP_BLK_REG_ADDR + 0x574)
269#define HOST_CMDSTS7_REG (APP_BLK_REG_ADDR + 0x578)
270#define HOST_CMDSTS8_CLR_REG (APP_BLK_REG_ADDR + 0x580)
271#define HOST_CMDSTS8_SET_REG (APP_BLK_REG_ADDR + 0x584)
272#define HOST_CMDSTS8_REG (APP_BLK_REG_ADDR + 0x588)
273#define HOST_CMDSTS9_CLR_REG (APP_BLK_REG_ADDR + 0x590)
274#define HOST_CMDSTS9_SET_REG (APP_BLK_REG_ADDR + 0x594)
275#define HOST_CMDSTS9_REG (APP_BLK_REG_ADDR + 0x598)
276#define HOST_CMDSTS10_CLR_REG (APP_BLK_REG_ADDR + 0x5A0)
277#define HOST_CMDSTS10_SET_REG (APP_BLK_REG_ADDR + 0x5A4)
278#define HOST_CMDSTS10_REG (APP_BLK_REG_ADDR + 0x5A8)
279#define HOST_CMDSTS11_CLR_REG (APP_BLK_REG_ADDR + 0x5B0)
280#define HOST_CMDSTS11_SET_REG (APP_BLK_REG_ADDR + 0x5B4)
281#define HOST_CMDSTS11_REG (APP_BLK_REG_ADDR + 0x5B8)
282#define HOST_CMDSTS12_CLR_REG (APP_BLK_REG_ADDR + 0x5C0)
283#define HOST_CMDSTS12_SET_REG (APP_BLK_REG_ADDR + 0x5C4)
284#define HOST_CMDSTS12_REG (APP_BLK_REG_ADDR + 0x5C8)
285#define HOST_CMDSTS13_CLR_REG (APP_BLK_REG_ADDR + 0x5D0)
286#define HOST_CMDSTS13_SET_REG (APP_BLK_REG_ADDR + 0x5D4)
287#define HOST_CMDSTS13_REG (APP_BLK_REG_ADDR + 0x5D8)
288#define HOST_CMDSTS14_CLR_REG (APP_BLK_REG_ADDR + 0x5E0)
289#define HOST_CMDSTS14_SET_REG (APP_BLK_REG_ADDR + 0x5E4)
290#define HOST_CMDSTS14_REG (APP_BLK_REG_ADDR + 0x5E8)
291#define HOST_CMDSTS15_CLR_REG (APP_BLK_REG_ADDR + 0x5F0)
292#define HOST_CMDSTS15_SET_REG (APP_BLK_REG_ADDR + 0x5F4)
293#define HOST_CMDSTS15_REG (APP_BLK_REG_ADDR + 0x5F8)
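
/*
 * The sets above are spaced 0x10 apart with clear/set/cmd at +0/+4/+8,
 * so the parameterized form contemplated in the comment above would be
 * (editorial sketch, not used elsewhere in this file):
 *
 * #define HOST_CMDSTS_CLR_REG(_num) (APP_BLK_REG_ADDR + 0x500 + ((_num) << 4))
 * #define HOST_CMDSTS_SET_REG(_num) (APP_BLK_REG_ADDR + 0x504 + ((_num) << 4))
 * #define HOST_CMDSTS_REG(_num)     (APP_BLK_REG_ADDR + 0x508 + ((_num) << 4))
 */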
294
295/**
296 * LPU0 Block Register Address Offset from BAR0
297 * Range 0x18000 - 0x18033
298 */
299#define LPU0_BLK_REG_ADDR 0x00018000
300
301/**
302 * LPU0 Registers
303 * Should they be used directly from the host,
304 * except for diagnostics?
305 * CTL_REG : Control register
306 * CMD_REG : Triggers exec. of cmd. in
307 * Mailbox memory
308 */
309#define LPU0_MBOX_CTL_REG (LPU0_BLK_REG_ADDR + 0x000)
310#define LPU0_MBOX_CMD_REG (LPU0_BLK_REG_ADDR + 0x004)
311#define LPU0_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x008)
312#define LPU1_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x00c)
313#define LPU0_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x010)
314#define LPU1_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x014)
315#define LPU0_ERR_STATUS_REG (LPU0_BLK_REG_ADDR + 0x018)
316#define LPU0_ERR_SET_REG (LPU0_BLK_REG_ADDR + 0x020)
317
318/**
319 * LPU1 Block Register Address Offset from BAR0
320 * Range 0x18400 - 0x18433
321 */
322#define LPU1_BLK_REG_ADDR 0x00018400
323
324/**
325 * LPU1 Registers
326 * Same as LPU0 registers above
327 */
328#define LPU1_MBOX_CTL_REG (LPU1_BLK_REG_ADDR + 0x000)
329#define LPU1_MBOX_CMD_REG (LPU1_BLK_REG_ADDR + 0x004)
330#define LPU0_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x008)
331#define LPU1_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x00c)
332#define LPU0_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x010)
333#define LPU1_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x014)
334#define LPU1_ERR_STATUS_REG (LPU1_BLK_REG_ADDR + 0x018)
335#define LPU1_ERR_SET_REG (LPU1_BLK_REG_ADDR + 0x020)
336
337/**
338 * PSS Block Register Address Offset from BAR0
339 * Range 0x18800 - 0x188DB
340 */
341#define PSS_BLK_REG_ADDR 0x00018800
342
343/**
344 * PSS Registers
345 * For details, see catapult_spec.pdf
346 * ERR_STATUS_REG : Indicates error in PSS module
347 * RAM_ERR_STATUS_REG : Indicates RAM module that detected error
348 */
349#define ERR_STATUS_SET (PSS_BLK_REG_ADDR + 0x018)
350#define PSS_RAM_ERR_STATUS_REG (PSS_BLK_REG_ADDR + 0x01C)
351
352/**
353 * PSS Semaphore Lock Registers, total 16
354 * The first read when unlocked returns 0
355 * and atomically sets the register to 1.
356 * Subsequent reads return 1.
357 * To unlock, write the value 0.
358 * Range : 0x20 to 0x5c
359 */
360#define PSS_SEM_LOCK_REG(_num) \
361 (PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
362
363/**
364 * PSS Semaphore Status Registers,
365 * corresponding to the lock registers above
366 */
367#define PSS_SEM_STATUS_REG(_num) \
368 (PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
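
/*
 * Acquire/release sketch (editorial) following the semantics described
 * above; pss_sem_acquire/pss_sem_release are hypothetical helpers, not
 * driver functions:
 *
 * static void pss_sem_acquire(void __iomem *bar0, int num)
 * {
 *	while (readl(bar0 + PSS_SEM_LOCK_REG(num)) != 0)
 *		cpu_relax();	// a read of 0 means we now hold the lock
 * }
 *
 * static void pss_sem_release(void __iomem *bar0, int num)
 * {
 *	writel(0, bar0 + PSS_SEM_LOCK_REG(num));
 * }
 */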
369
370/**
371 * Catapult CPQ Registers
372 * Defines for Mailbox Registers
373 * Used to send mailbox commands to firmware from
374 * host. The data part is written to the MBox
375 * memory; the registers are used to indicate
376 * that a command is resident in memory.
377 *
378 * Note : LPU0<->LPU1 mailboxes are not listed here
379 */
380#define CPQ_BLK_REG_ADDR 0x00019000
381
382#define HOSTFN0_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x130)
383#define HOSTFN0_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x134)
384#define LPU0_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x138)
385#define LPU1_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x13C)
386
387#define HOSTFN1_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x140)
388#define HOSTFN1_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x144)
389#define LPU0_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x148)
390#define LPU1_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x14C)
391
392#define HOSTFN2_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x170)
393#define HOSTFN2_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x174)
394#define LPU0_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x178)
395#define LPU1_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x17C)
396
397#define HOSTFN3_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x180)
398#define HOSTFN3_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x184)
399#define LPU0_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x188)
400#define LPU1_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x18C)
401
402/* Host Function Force Parity Error Registers */
403#define HOSTFN0_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x120)
404#define HOSTFN1_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x124)
405#define HOSTFN2_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x128)
406#define HOSTFN3_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x12C)
407
408/* LL Port[0|1] Halt Mask Registers */
409#define LL_HALT_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1A0)
410#define LL_HALT_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1B0)
411
412/* LL Port[0|1] Error Mask Registers */
413#define LL_ERR_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1D0)
414#define LL_ERR_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1D4)
415
416/* EMC FLI (Flash Controller) Block Register Address Offset from BAR0 */
417#define FLI_BLK_REG_ADDR 0x0001D000
418
419/* EMC FLI Registers */
420#define FLI_CMD_REG (FLI_BLK_REG_ADDR + 0x000)
421#define FLI_ADDR_REG (FLI_BLK_REG_ADDR + 0x004)
422#define FLI_CTL_REG (FLI_BLK_REG_ADDR + 0x008)
423#define FLI_WRDATA_REG (FLI_BLK_REG_ADDR + 0x00C)
424#define FLI_RDDATA_REG (FLI_BLK_REG_ADDR + 0x010)
425#define FLI_DEV_STATUS_REG (FLI_BLK_REG_ADDR + 0x014)
426#define FLI_SIG_WD_REG (FLI_BLK_REG_ADDR + 0x018)
427
428/**
429 * RO register
430 * 31:16 -- Vendor Id
431 * 15:0 -- Device Id
432 */
433#define FLI_DEV_VENDOR_REG (FLI_BLK_REG_ADDR + 0x01C)
434#define FLI_ERR_STATUS_REG (FLI_BLK_REG_ADDR + 0x020)
435
436/**
437 * RAD (RxAdm) Block Register Address Offset from BAR0
438 * RAD0 Range : 0x20000 - 0x203FF
439 * RAD1 Range : 0x20400 - 0x207FF
440 */
441#define RAD0_BLK_REG_ADDR 0x00020000
442#define RAD1_BLK_REG_ADDR 0x00020400
443
444/* RAD0 Registers */
445#define RAD0_CTL_REG (RAD0_BLK_REG_ADDR + 0x000)
446#define RAD0_PE_PARM_REG (RAD0_BLK_REG_ADDR + 0x004)
447#define RAD0_BCN_REG (RAD0_BLK_REG_ADDR + 0x008)
448
449/* Default function ID register */
450#define RAD0_DEFAULT_REG (RAD0_BLK_REG_ADDR + 0x00C)
451
452/* Default promiscuous ID register */
453#define RAD0_PROMISC_REG (RAD0_BLK_REG_ADDR + 0x010)
454
455#define RAD0_BCNQ_REG (RAD0_BLK_REG_ADDR + 0x014)
456
457/*
458 * This register selects 1 of 8 PM Q's using
459 * VLAN pri, for non-BCN packets without a VLAN tag
460 */
461#define RAD0_DEFAULTQ_REG (RAD0_BLK_REG_ADDR + 0x018)
462
463#define RAD0_ERR_STS (RAD0_BLK_REG_ADDR + 0x01C)
464#define RAD0_SET_ERR_STS (RAD0_BLK_REG_ADDR + 0x020)
465#define RAD0_ERR_INT_EN (RAD0_BLK_REG_ADDR + 0x024)
466#define RAD0_FIRST_ERR (RAD0_BLK_REG_ADDR + 0x028)
467#define RAD0_FORCE_ERR (RAD0_BLK_REG_ADDR + 0x02C)
468
469#define RAD0_IF_RCVD (RAD0_BLK_REG_ADDR + 0x030)
470#define RAD0_IF_RCVD_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x034)
471#define RAD0_IF_RCVD_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x038)
472#define RAD0_IF_RCVD_VLAN (RAD0_BLK_REG_ADDR + 0x03C)
473#define RAD0_IF_RCVD_UCAST (RAD0_BLK_REG_ADDR + 0x040)
474#define RAD0_IF_RCVD_UCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x044)
475#define RAD0_IF_RCVD_UCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x048)
476#define RAD0_IF_RCVD_UCAST_VLAN (RAD0_BLK_REG_ADDR + 0x04C)
477#define RAD0_IF_RCVD_MCAST (RAD0_BLK_REG_ADDR + 0x050)
478#define RAD0_IF_RCVD_MCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x054)
479#define RAD0_IF_RCVD_MCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x058)
480#define RAD0_IF_RCVD_MCAST_VLAN (RAD0_BLK_REG_ADDR + 0x05C)
481#define RAD0_IF_RCVD_BCAST (RAD0_BLK_REG_ADDR + 0x060)
482#define RAD0_IF_RCVD_BCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x064)
483#define RAD0_IF_RCVD_BCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x068)
484#define RAD0_IF_RCVD_BCAST_VLAN (RAD0_BLK_REG_ADDR + 0x06C)
485#define RAD0_DROPPED_FRAMES (RAD0_BLK_REG_ADDR + 0x070)
486
487#define RAD0_MAC_MAN_1H (RAD0_BLK_REG_ADDR + 0x080)
488#define RAD0_MAC_MAN_1L (RAD0_BLK_REG_ADDR + 0x084)
489#define RAD0_MAC_MAN_2H (RAD0_BLK_REG_ADDR + 0x088)
490#define RAD0_MAC_MAN_2L (RAD0_BLK_REG_ADDR + 0x08C)
491#define RAD0_MAC_MAN_3H (RAD0_BLK_REG_ADDR + 0x090)
492#define RAD0_MAC_MAN_3L (RAD0_BLK_REG_ADDR + 0x094)
493#define RAD0_MAC_MAN_4H (RAD0_BLK_REG_ADDR + 0x098)
494#define RAD0_MAC_MAN_4L (RAD0_BLK_REG_ADDR + 0x09C)
495
496#define RAD0_LAST4_IP (RAD0_BLK_REG_ADDR + 0x100)
497
498/* RAD1 Registers */
499#define RAD1_CTL_REG (RAD1_BLK_REG_ADDR + 0x000)
500#define RAD1_PE_PARM_REG (RAD1_BLK_REG_ADDR + 0x004)
501#define RAD1_BCN_REG (RAD1_BLK_REG_ADDR + 0x008)
502
503/* Default function ID register */
504#define RAD1_DEFAULT_REG (RAD1_BLK_REG_ADDR + 0x00C)
505
506/* Promiscuous function ID register */
507#define RAD1_PROMISC_REG (RAD1_BLK_REG_ADDR + 0x010)
508
509#define RAD1_BCNQ_REG (RAD1_BLK_REG_ADDR + 0x014)
510
511/*
512 * This register selects 1 of 8 PM Q's using
513 * VLAN pri, for non-BCN packets without a VLAN tag
514 */
515#define RAD1_DEFAULTQ_REG (RAD1_BLK_REG_ADDR + 0x018)
516
517#define RAD1_ERR_STS (RAD1_BLK_REG_ADDR + 0x01C)
518#define RAD1_SET_ERR_STS (RAD1_BLK_REG_ADDR + 0x020)
519#define RAD1_ERR_INT_EN (RAD1_BLK_REG_ADDR + 0x024)
520
521/**
522 * TXA Block Register Address Offset from BAR0
523 * TXA0 Range : 0x21000 - 0x213FF
524 * TXA1 Range : 0x21400 - 0x217FF
525 */
526#define TXA0_BLK_REG_ADDR 0x00021000
527#define TXA1_BLK_REG_ADDR 0x00021400
528
529/* TXA Registers */
530#define TXA0_CTRL_REG (TXA0_BLK_REG_ADDR + 0x000)
531#define TXA1_CTRL_REG (TXA1_BLK_REG_ADDR + 0x000)
532
533/**
534 * TSO Sequence # Registers (RO)
535 * Total 8 (for 8 queues)
536 * Holds the last seq.# for TSO frames
537 * See catapult_spec.pdf for more details
538 */
539#define TXA0_TSO_TCP_SEQ_REG(_num) \
540 (TXA0_BLK_REG_ADDR + 0x020 + ((_num) << 2))
541
542#define TXA1_TSO_TCP_SEQ_REG(_num) \
543 (TXA1_BLK_REG_ADDR + 0x020 + ((_num) << 2))
544
545/**
546 * TSO IP ID # Registers (RO)
547 * Total 8 (for 8 queues)
548 * Holds the last IP ID for TSO frames
549 * See catapult_spec.pdf for more details
550 */
551#define TXA0_TSO_IP_INFO_REG(_num) \
552 (TXA0_BLK_REG_ADDR + 0x040 + ((_num) << 2))
553
554#define TXA1_TSO_IP_INFO_REG(_num) \
555 (TXA1_BLK_REG_ADDR + 0x040 + ((_num) << 2))
556
557/**
558 * RXA Block Register Address Offset from BAR0
559 * RXA0 Range : 0x21800 - 0x21BFF
560 * RXA1 Range : 0x21C00 - 0x21FFF
561 */
562#define RXA0_BLK_REG_ADDR 0x00021800
563#define RXA1_BLK_REG_ADDR 0x00021C00
564
565/* RXA Registers */
566#define RXA0_CTL_REG (RXA0_BLK_REG_ADDR + 0x040)
567#define RXA1_CTL_REG (RXA1_BLK_REG_ADDR + 0x040)
568
569/**
570 * PPLB Block Register Address Offset from BAR0
571 * PPLB0 Range : 0x22000 - 0x223FF
572 * PPLB1 Range : 0x22400 - 0x227FF
573 */
574#define PLB0_BLK_REG_ADDR 0x00022000
575#define PLB1_BLK_REG_ADDR 0x00022400
576
577/**
578 * PLB Registers
579 * Holds RL timer used time stamps in RLT tagged frames
580 */
581#define PLB0_ECM_TIMER_REG (PLB0_BLK_REG_ADDR + 0x05C)
582#define PLB1_ECM_TIMER_REG (PLB1_BLK_REG_ADDR + 0x05C)
583
584/* Controls the rate-limiter on each of the priority class */
585#define PLB0_RL_CTL (PLB0_BLK_REG_ADDR + 0x060)
586#define PLB1_RL_CTL (PLB1_BLK_REG_ADDR + 0x060)
587
588/**
589 * Max byte register, total 8, 0-7
590 * see catapult_spec.pdf for details
591 */
592#define PLB0_RL_MAX_BC(_num) \
593 (PLB0_BLK_REG_ADDR + 0x064 + ((_num) << 2))
594#define PLB1_RL_MAX_BC(_num) \
595 (PLB1_BLK_REG_ADDR + 0x064 + ((_num) << 2))
596
597/**
598 * RL Time Unit Register for priority 0-7
599 * 4 bits per priority
600 * (2^rl_unit)*1us is the actual time period
601 */
602#define PLB0_RL_TU_PRIO (PLB0_BLK_REG_ADDR + 0x084)
603#define PLB1_RL_TU_PRIO (PLB1_BLK_REG_ADDR + 0x084)
604
605/**
606 * RL byte count register,
607 * bytes transmitted in (rl_unit*1)us time period
608 * 1 per priority, 8 in all, 0-7.
609 */
610#define PLB0_RL_BYTE_CNT(_num) \
611 (PLB0_BLK_REG_ADDR + 0x088 + ((_num) << 2))
612#define PLB1_RL_BYTE_CNT(_num) \
613 (PLB1_BLK_REG_ADDR + 0x088 + ((_num) << 2))
614
615/**
616 * RL Min factor register
617 * 2 bits per priority,
618 * 4 factors possible: 1, 0.5, 0.25, 0
619 * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
620 */
621#define PLB0_RL_MIN_REG (PLB0_BLK_REG_ADDR + 0x0A8)
622#define PLB1_RL_MIN_REG (PLB1_BLK_REG_ADDR + 0x0A8)
623
624/**
625 * RL Max factor register
626 * 2 bits per priority,
627 * 4 factors possible: 1, 0.5, 0.25, 0
628 * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
629 */
630#define PLB0_RL_MAX_REG (PLB0_BLK_REG_ADDR + 0x0AC)
631#define PLB1_RL_MAX_REG (PLB1_BLK_REG_ADDR + 0x0AC)
632
633/* MAC SERDES Address Paging register */
634#define PLB0_EMS_ADD_REG (PLB0_BLK_REG_ADDR + 0xD0)
635#define PLB1_EMS_ADD_REG (PLB1_BLK_REG_ADDR + 0xD0)
636
637/* LL EMS Registers */
638#define LL_EMS0_BLK_REG_ADDR 0x00026800
639#define LL_EMS1_BLK_REG_ADDR 0x00026C00
640
641/**
642 * BPC Block Register Address Offset from BAR0
643 * BPC0 Range : 0x23000 - 0x233FF
644 * BPC1 Range : 0x23400 - 0x237FF
645 */
646#define BPC0_BLK_REG_ADDR 0x00023000
647#define BPC1_BLK_REG_ADDR 0x00023400
648
649/**
650 * PMM Block Register Address Offset from BAR0
651 * PMM0 Range : 0x23800 - 0x23BFF
652 * PMM1 Range : 0x23C00 - 0x23FFF
653 */
654#define PMM0_BLK_REG_ADDR 0x00023800
655#define PMM1_BLK_REG_ADDR 0x00023C00
656
657/**
658 * HQM Block Register Address Offset from BAR0
659 * HQM0 Range : 0x24000 - 0x243FF
660 * HQM1 Range : 0x24400 - 0x247FF
661 */
662#define HQM0_BLK_REG_ADDR 0x00024000
663#define HQM1_BLK_REG_ADDR 0x00024400
664
665/**
666 * HQM Control Register
667 * Controls some aspects of IB
668 * See catapult_spec.pdf for details
669 */
670#define HQM0_CTL_REG (HQM0_BLK_REG_ADDR + 0x000)
671#define HQM1_CTL_REG (HQM1_BLK_REG_ADDR + 0x000)
672
673/**
674 * HQM Stop Q Semaphore Registers.
675 * Only one Queue resource can be stopped at
676 * any given time. This register controls access
677 * to the single stop Q resource.
678 * See catapult_spec.pdf for details
679 */
680#define HQM0_RXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x028)
681#define HQM0_TXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x02C)
682#define HQM1_RXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x028)
683#define HQM1_TXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x02C)
684
685/**
686 * LUT Block Register Address Offset from BAR0
687 * LUT0 Range : 0x25800 - 0x25BFF
688 * LUT1 Range : 0x25C00 - 0x25FFF
689 */
690#define LUT0_BLK_REG_ADDR 0x00025800
691#define LUT1_BLK_REG_ADDR 0x00025C00
692
693/**
694 * LUT Registers
695 * See catapult_spec.pdf for details
696 */
697#define LUT0_ERR_STS (LUT0_BLK_REG_ADDR + 0x000)
698#define LUT1_ERR_STS (LUT1_BLK_REG_ADDR + 0x000)
699#define LUT0_SET_ERR_STS (LUT0_BLK_REG_ADDR + 0x004)
700#define LUT1_SET_ERR_STS (LUT1_BLK_REG_ADDR + 0x004)
701
702/**
703 * TRC (Debug/Trace) Register Offset from BAR0
704 * Range : 0x26000 -- 0x263FFF
705 */
706#define TRC_BLK_REG_ADDR 0x00026000
707
708/**
709 * TRC Registers
710 * See catapult_spec.pdf for details of each
711 */
712#define TRC_CTL_REG (TRC_BLK_REG_ADDR + 0x000)
713#define TRC_MODS_REG (TRC_BLK_REG_ADDR + 0x004)
714#define TRC_TRGC_REG (TRC_BLK_REG_ADDR + 0x008)
715#define TRC_CNT1_REG (TRC_BLK_REG_ADDR + 0x010)
716#define TRC_CNT2_REG (TRC_BLK_REG_ADDR + 0x014)
717#define TRC_NXTS_REG (TRC_BLK_REG_ADDR + 0x018)
718#define TRC_DIRR_REG (TRC_BLK_REG_ADDR + 0x01C)
719
720/**
721 * TRC Trigger match filters, total 10
722 * Determines the trigger condition
723 */
724#define TRC_TRGM_REG(_num) \
725 (TRC_BLK_REG_ADDR + 0x040 + ((_num) << 2))
726
727/**
728 * TRC Next State filters, total 10
729 * Determines the next state conditions
730 */
731#define TRC_NXTM_REG(_num) \
732 (TRC_BLK_REG_ADDR + 0x080 + ((_num) << 2))
733
734/**
735 * TRC Store Match filters, total 10
736 * Determines the store conditions
737 */
738#define TRC_STRM_REG(_num) \
739 (TRC_BLK_REG_ADDR + 0x0C0 + ((_num) << 2))
740
741/* DOORBELLS ACCESS */
742
743/**
744 * Catapult doorbells
745 * Each doorbell-queue set has
746 * 1 RxQ, 1 TxQ, 2 IBs in that order
747 * The size of each entry is 32 bytes, even though only 1 word
748 * is used. For the non-VM case each doorbell-q set is
749 * separated by 128 bytes; for the VM case it is separated
750 * by 4K bytes
751 * Non VM case Range : 0x38000 - 0x39FFF
752 * VM case Range : 0x100000 - 0x11FFFF
753 * The range applies to both HQMs
754 */
755#define HQM_DOORBELL_BLK_BASE_ADDR 0x00038000
756#define HQM_DOORBELL_VM_BLK_BASE_ADDR 0x00100000
757
758/* MEMORY ACCESS */
759
760/**
761 * Catapult H/W Block Memory Access Address
762 * To the host a memory space of 32K (page) is visible
763 * at a time. The address range is from 0x08000 to 0x0FFFF
764 */
765#define HW_BLK_HOST_MEM_ADDR 0x08000
766
767/**
768 * Catapult LUT Memory Access Page Numbers
769 * Range : LUT0 0xa0-0xa1
770 * LUT1 0xa2-0xa3
771 */
772#define LUT0_MEM_BLK_BASE_PG_NUM 0x000000A0
773#define LUT1_MEM_BLK_BASE_PG_NUM 0x000000A2
774
775/**
776 * Catapult RxFn Database Memory Block Base Offset
777 *
778 * The Rx function database exists in LUT block.
779 * In PCIe space this is accessible as a 256x32
780 * bit block. Each entry in this database is 4
781 * (4 byte) words. Max. entries is 64.
782 * Address of an entry corresponding to a function
783 * = base_addr + (function_no. * 16)
784 */
785#define RX_FNDB_RAM_BASE_OFFSET 0x0000B400
786
787/**
788 * Catapult TxFn Database Memory Block Base Offset Address
789 *
790 * The Tx function database exists in LUT block.
791 * In PCIe space this is accessible as a 64x32
792 * bit block. Each entry in this database is 1
793 * (4 byte) word. Max. entries is 64.
794 * Address of an entry corresponding to a function
795 * = base_addr + (function_no. * 4)
796 */
797#define TX_FNDB_RAM_BASE_OFFSET 0x0000B800
798
799/**
800 * Catapult Unicast CAM Base Offset Address
801 *
802 * Exists in LUT memory space.
803 * Shared by both the LL & FCoE driver.
804 * Size is 256x48 bits; mapped to PCIe space
805 * 512x32 bit blocks. For each address, bits
806 * are written in the order : [47:32] and then
807 * [31:0].
808 */
809#define UCAST_CAM_BASE_OFFSET 0x0000A800
810
811/**
812 * Catapult Unicast RAM Base Offset Address
813 *
814 * Exists in LUT memory space.
815 * Shared by both the LL & FCoE driver.
816 * Size is 256x9 bits.
817 */
818#define UCAST_RAM_BASE_OFFSET 0x0000B000
819
820/**
821 * Catapult Multicast CAM Base Offset Address
822 *
823 * Exists in LUT memory space.
824 * Shared by both the LL & FCoE driver.
825 * Size is 256x48 bits; mapped to PCIe space
826 * 512x32 bit blocks. For each address, bits
827 * are written in the order : [47:32] and then
828 * [31:0].
829 */
830#define MCAST_CAM_BASE_OFFSET 0x0000A000
831
832/**
833 * Catapult VLAN RAM Base Offset Address
834 *
835 * Exists in LUT memory space.
836 * Size is 4096x66 bits; mapped to PCIe space as
837 * 8192x32 bit blocks.
838 * All the 4K entries are within the address range
839 * 0x0000 to 0x8000, so in the first LUT page.
840 */
841#define VLAN_RAM_BASE_OFFSET 0x00000000
842
843/**
844 * Catapult Tx Stats RAM Base Offset Address
845 *
846 * Exists in LUT memory space.
847 * Size is 1024x33 bits;
848 * Each Tx function has 64 bytes of space
849 */
850#define TX_STATS_RAM_BASE_OFFSET 0x00009000
851
852/**
853 * Catapult Rx Stats RAM Base Offset Address
854 *
855 * Exists in LUT memory space.
856 * Size is 1024x33 bits;
857 * Each Rx function has 64 bytes of space
858 */
859#define RX_STATS_RAM_BASE_OFFSET 0x00008000
860
861/* Catapult RXA Memory Access Page Numbers */
862#define RXA0_MEM_BLK_BASE_PG_NUM 0x0000008C
863#define RXA1_MEM_BLK_BASE_PG_NUM 0x0000008D
864
865/**
866 * Catapult Multicast Vector Table Base Offset Address
867 *
868 * Exists in RxA memory space.
869 * Organized as 512x65 bit block.
870 * However, 16 bytes (a power of 2) are allocated per entry.
871 * Total size 512*16 bytes.
872 * There are two logical divisions, 256 entries each :
873 * a) Entries 0x00 to 0xff (256) -- Approx. MVT
874 * Offset 0x000 to 0xFFF
875 * b) Entries 0x100 to 0x1ff (256) -- Exact MVT
876 * Offsets 0x1000 to 0x1FFF
877 */
878#define MCAST_APPROX_MVT_BASE_OFFSET 0x00000000
879#define MCAST_EXACT_MVT_BASE_OFFSET 0x00001000
880
881/**
882 * Catapult RxQ Translate Table (RIT) Base Offset Address
883 *
884 * Exists in RxA memory space
885 * Total no. of entries 64
886 * Each entry is 1 (4 byte) word.
887 * 31:12 -- Reserved
888 * 11:0 -- Two 6 bit RxQ Ids
889 */
890#define FUNCTION_TO_RXQ_TRANSLATE 0x00002000
891
892/* Catapult RxAdm (RAD) Memory Access Page Numbers */
893#define RAD0_MEM_BLK_BASE_PG_NUM 0x00000086
894#define RAD1_MEM_BLK_BASE_PG_NUM 0x00000087
895
896/**
897 * Catapult RSS Table Base Offset Address
898 *
899 * Exists in RAD memory space.
900 * Each entry is 352 bits, but aligned on a
901 * 64 byte (512 bit) boundary. Accessed as
902 * 4 byte words, the whole entry can be
903 * broken into 11 word accesses.
904 */
905#define RSS_TABLE_BASE_OFFSET 0x00000800
906
907/**
908 * Catapult CPQ Block Page Number
909 * This value is written to the page number registers
910 * to access the memory associated with the mailboxes.
911 */
912#define CPQ_BLK_PG_NUM 0x00000005
913
914/**
915 * Clarification :
916 * LL functions are 2 & 3; can HostFn0/HostFn1
917 * <-> LPU0/LPU1 memories be used ?
918 */
919/**
920 * Catapult HostFn0/HostFn1 to LPU0/LPU1 Mbox memory
921 * Per catapult_spec.pdf, the offset of the mbox
922 * memory is in the register space at an offset of 0x200
923 */
924#define CPQ_BLK_REG_MBOX_ADDR (CPQ_BLK_REG_ADDR + 0x200)
925
926#define HOSTFN_LPU_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x000)
927
928/* Catapult LPU0/LPU1 to HostFn0/HostFn1 Mbox memory */
929#define LPU_HOSTFN_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x080)
930
931/**
932 * Catapult HQM Block Page Number
933 * This is written to the page number register for
934 * the appropriate function to access the memory
935 * associated with HQM
936 */
937#define HQM0_BLK_PG_NUM 0x00000096
938#define HQM1_BLK_PG_NUM 0x00000097
939
940/**
941 * Note that TxQ and RxQ entries are interleaved in
942 * the HQM memory, i.e. RXQ0, TXQ0, RXQ1, TXQ1, etc.
943 */
944
945#define HQM_RXTX_Q_RAM_BASE_OFFSET 0x00004000
946
947/**
948 * CQ Memory
949 * Exists in HQM Memory space
950 * Each entry is 16 (4 byte) words of which
951 * only 12 words are used for configuration
952 * Total 64 entries per HQM memory space
953 */
954#define HQM_CQ_RAM_BASE_OFFSET 0x00006000
955
956/**
957 * Interrupt Block (IB) Memory
958 * Exists in HQM Memory space
959 * Each entry is 8 (4 byte) words of which
960 * only 5 words are used for configuration
961 * Total 128 entries per HQM memory space
962 */
963#define HQM_IB_RAM_BASE_OFFSET 0x00001000
964
965/**
966 * Index Table (IT) Memory
967 * Exists in HQM Memory space
968 * Each entry is 1 (4 byte) word which
969 * is used for configuration
970 * Total 128 entries per HQM memory space
971 */
972#define HQM_INDX_TBL_RAM_BASE_OFFSET 0x00002000
973
974/**
975 * PSS Block Memory Page Number
976 * This is written to the appropriate page number
977 * register to access the CPU memory.
978 * Also known as the PSS secondary memory (SMEM).
979 * Range : 0x180 to 0x1CF
980 * See catapult_spec.pdf for details
981 */
982#define PSS_BLK_PG_NUM 0x00000180
983
984/**
985 * Offsets of different instances of PSS SMEM
986 * 2.5M of contiguous 1T memory space : 2 blocks
987 * of 1M each (32 pages each, page=32KB) and 4 smaller
988 * blocks of 128K each (4 pages each, page=32KB)
989 * PSS_LMEM_INST0 is used for firmware download
990 */
991#define PSS_LMEM_INST0 0x00000000
992#define PSS_LMEM_INST1 0x00100000
993#define PSS_LMEM_INST2 0x00200000
994#define PSS_LMEM_INST3 0x00220000
995#define PSS_LMEM_INST4 0x00240000
996#define PSS_LMEM_INST5 0x00260000
997
998#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
999
1000#define BNA_GET_PAGE_NUM(_base_page, _offset) \
1001 ((_base_page) + ((_offset) >> 15))
1002
1003#define BNA_GET_PAGE_OFFSET(_offset) \
1004 ((_offset) & 0x7fff)
1005
1006#define BNA_GET_MEM_BASE_ADDR(_bar0, _base_offset) \
1007 ((_bar0) + HW_BLK_HOST_MEM_ADDR \
1008 + BNA_GET_PAGE_OFFSET((_base_offset)))
1009
1010#define BNA_GET_VLAN_MEM_ENTRY_ADDR(_bar0, _fn_id, _vlan_id)\
1011 (_bar0 + (HW_BLK_HOST_MEM_ADDR) \
1012 + (BNA_GET_PAGE_OFFSET(VLAN_RAM_BASE_OFFSET)) \
1013 + (((_fn_id) & 0x3f) << 9) \
1014 + (((_vlan_id) & 0xfe0) >> 3))
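
/*
 * Worked example (editorial): host memory access is windowed through a
 * 32KB page, so an offset is split into a page number (offset >> 15)
 * and a page offset (offset & 0x7fff). E.g. for the Tx function
 * database at TX_FNDB_RAM_BASE_OFFSET (0xB800) in LUT0:
 *
 *	page = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM, 0xB800);
 *				// 0xA0 + (0xB800 >> 15) = 0xA1
 *	kva  = BNA_GET_MEM_BASE_ADDR(bar0, 0xB800);
 *				// bar0 + 0x8000 + 0x3800
 *
 * The page number is written to the page_addr register before the
 * windowed address is dereferenced.
 */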
1015
1016/**
1017 *
1018 * Interrupt related bits, flags and macros
1019 *
1020 */
1021
1022#define __LPU02HOST_MBOX0_STATUS_BITS 0x00100000
1023#define __LPU12HOST_MBOX0_STATUS_BITS 0x00200000
1024#define __LPU02HOST_MBOX1_STATUS_BITS 0x00400000
1025#define __LPU12HOST_MBOX1_STATUS_BITS 0x00800000
1026
1027#define __LPU02HOST_MBOX0_MASK_BITS 0x00100000
1028#define __LPU12HOST_MBOX0_MASK_BITS 0x00200000
1029#define __LPU02HOST_MBOX1_MASK_BITS 0x00400000
1030#define __LPU12HOST_MBOX1_MASK_BITS 0x00800000
1031
1032#define __LPU2HOST_MBOX_MASK_BITS \
1033 (__LPU02HOST_MBOX0_MASK_BITS | __LPU02HOST_MBOX1_MASK_BITS | \
1034 __LPU12HOST_MBOX0_MASK_BITS | __LPU12HOST_MBOX1_MASK_BITS)
1035
1036#define __LPU2HOST_IB_STATUS_BITS 0x0000ffff
1037
1038#define BNA_IS_LPU0_MBOX_INTR(_intr_status) \
1039 ((_intr_status) & (__LPU02HOST_MBOX0_STATUS_BITS | \
1040 __LPU02HOST_MBOX1_STATUS_BITS))
1041
1042#define BNA_IS_LPU1_MBOX_INTR(_intr_status) \
1043 ((_intr_status) & (__LPU12HOST_MBOX0_STATUS_BITS | \
1044 __LPU12HOST_MBOX1_STATUS_BITS))
1045
1046#define BNA_IS_MBOX_INTR(_intr_status) \
1047 ((_intr_status) & \
1048 (__LPU02HOST_MBOX0_STATUS_BITS | \
1049 __LPU02HOST_MBOX1_STATUS_BITS | \
1050 __LPU12HOST_MBOX0_STATUS_BITS | \
1051 __LPU12HOST_MBOX1_STATUS_BITS))
1052
1053#define __EMC_ERROR_STATUS_BITS 0x00010000
1054#define __LPU0_ERROR_STATUS_BITS 0x00020000
1055#define __LPU1_ERROR_STATUS_BITS 0x00040000
1056#define __PSS_ERROR_STATUS_BITS 0x00080000
1057
1058#define __HALT_STATUS_BITS 0x01000000
1059
1060#define __EMC_ERROR_MASK_BITS 0x00010000
1061#define __LPU0_ERROR_MASK_BITS 0x00020000
1062#define __LPU1_ERROR_MASK_BITS 0x00040000
1063#define __PSS_ERROR_MASK_BITS 0x00080000
1064
1065#define __HALT_MASK_BITS 0x01000000
1066
1067#define __ERROR_MASK_BITS \
1068 (__EMC_ERROR_MASK_BITS | __LPU0_ERROR_MASK_BITS | \
1069 __LPU1_ERROR_MASK_BITS | __PSS_ERROR_MASK_BITS | \
1070 __HALT_MASK_BITS)
1071
1072#define BNA_IS_ERR_INTR(_intr_status) \
1073 ((_intr_status) & \
1074 (__EMC_ERROR_STATUS_BITS | \
1075 __LPU0_ERROR_STATUS_BITS | \
1076 __LPU1_ERROR_STATUS_BITS | \
1077 __PSS_ERROR_STATUS_BITS | \
1078 __HALT_STATUS_BITS))
1079
1080#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
1081 (BNA_IS_MBOX_INTR((_intr_status)) || \
1082 BNA_IS_ERR_INTR((_intr_status)))
1083
1084#define BNA_IS_INTX_DATA_INTR(_intr_status) \
1085 ((_intr_status) & __LPU2HOST_IB_STATUS_BITS)
1086
1087#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
1088do { \
1089 (_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
1090 __LPU02HOST_MBOX1_STATUS_BITS | \
1091 __LPU12HOST_MBOX0_STATUS_BITS | \
1092 __LPU12HOST_MBOX1_STATUS_BITS); \
1093} while (0)
1094
1095#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
1096do { \
1097 (_intr_status) &= ~(__EMC_ERROR_STATUS_BITS | \
1098 __LPU0_ERROR_STATUS_BITS | \
1099 __LPU1_ERROR_STATUS_BITS | \
1100 __PSS_ERROR_STATUS_BITS | \
1101 __HALT_STATUS_BITS); \
1102} while (0)
1103
1104#define bna_intx_disable(_bna, _cur_mask) \
1105{ \
1106 (_cur_mask) = readl((_bna)->regs.fn_int_mask);\
1107 writel(0xffffffff, (_bna)->regs.fn_int_mask);\
1108}
1109
1110#define bna_intx_enable(bna, new_mask) \
1111 writel((new_mask), (bna)->regs.fn_int_mask)
1112
1113#define bna_mbox_intr_disable(bna) \
1114 writel((readl((bna)->regs.fn_int_mask) | \
1115 (__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
1116 (bna)->regs.fn_int_mask)
1117
1118#define bna_mbox_intr_enable(bna) \
1119 writel((readl((bna)->regs.fn_int_mask) & \
1120 ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
1121 (bna)->regs.fn_int_mask)
1122
1123#define bna_intr_status_get(_bna, _status) \
1124{ \
1125 (_status) = readl((_bna)->regs.fn_int_status); \
1126 if ((_status)) { \
1127 writel((_status) & ~(__LPU02HOST_MBOX0_STATUS_BITS |\
1128 __LPU02HOST_MBOX1_STATUS_BITS |\
1129 __LPU12HOST_MBOX0_STATUS_BITS |\
1130 __LPU12HOST_MBOX1_STATUS_BITS), \
1131 (_bna)->regs.fn_int_status);\
1132 } \
1133}
1134
1135#define bna_intr_status_get_no_clr(_bna, _status) \
1136 (_status) = readl((_bna)->regs.fn_int_status)
1137
1138#define bna_intr_mask_get(bna, mask) \
1139 (*mask) = readl((bna)->regs.fn_int_mask)
1140
1141#define bna_intr_ack(bna, intr_bmap) \
1142 writel((intr_bmap), (bna)->regs.fn_int_status)
1143
1144#define bna_ib_intx_disable(bna, ib_id) \
1145 writel(readl((bna)->regs.fn_int_mask) | \
1146 (1 << (ib_id)), \
1147 (bna)->regs.fn_int_mask)
1148
1149#define bna_ib_intx_enable(bna, ib_id) \
1150 writel(readl((bna)->regs.fn_int_mask) & \
1151 ~(1 << (ib_id)), \
1152 (bna)->regs.fn_int_mask)
1153
1154#define bna_mbox_msix_idx_set(_device) \
1155do {\
1156 writel(((_device)->vector & 0x000001FF), \
1157 (_device)->bna->pcidev.pci_bar_kva + \
1158 reg_offset[(_device)->bna->pcidev.pci_func].msix_idx);\
1159} while (0)
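
/*
 * INTx ISR sketch (editorial) tying the macros above together; error
 * handling and NAPI scheduling are elided:
 *
 *	u32 intr_status;
 *
 *	bna_intr_status_get(bna, intr_status);	// reads and acks mbox bits
 *	if (!intr_status)
 *		return IRQ_NONE;		// not our interrupt
 *	if (BNA_IS_MBOX_ERR_INTR(intr_status))
 *		;				// handle mailbox/error events
 *	if (BNA_IS_INTX_DATA_INTR(intr_status))
 *		;				// service the IBs flagged in
 *						// the low 16 status bits
 */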
1160
1161/**
1162 *
1163 * TxQ, RxQ, CQ related bits, offsets, macros
1164 *
1165 */
1166
1167#define BNA_Q_IDLE_STATE 0x00008001
1168
1169#define BNA_GET_DOORBELL_BASE_ADDR(_bar0) \
1170 ((_bar0) + HQM_DOORBELL_BLK_BASE_ADDR)
1171
1172#define BNA_GET_DOORBELL_ENTRY_OFFSET(_entry) \
1173 ((HQM_DOORBELL_BLK_BASE_ADDR) \
1174 + (_entry << 7))
1175
1176#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
1177 (0x80000000 | ((_timeout) << 16) | (_events))
1178
1179#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
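
/*
 * Example (editorial): acknowledging 4 processed events on an IB's
 * doorbell while re-arming with a timeout of 8 units (8 * 5us,
 * assuming the field is in BFI_COALESCING_TIMER_UNIT units):
 *
 *	writel(BNA_DOORBELL_IB_INT_ACK(8, 4), ib_db_addr);
 *
 * ib_db_addr is a hypothetical __iomem pointer into the doorbell
 * q-set (BNA_GET_DOORBELL_BASE_ADDR() plus the entry offset).
 */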
1180
1181/* TxQ Entry Opcodes */
1182#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
1183#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
1184#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
1185
1186/* TxQ Entry Control Flags */
1187#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
1188#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
1189#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
1190#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
1191#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
1192#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
1193#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
1194
1195#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
1196 (((_hdr_size) << 10) | ((_offset) & 0x3FF))
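
/*
 * Worked example (editorial), assuming _hdr_size is in 4 byte words
 * and _offset is a byte offset: a 20 byte TCP header (5 words) at
 * transport offset 34 encodes as
 *	BNA_TXQ_WI_L4_HDR_N_OFFSET(5, 34) == (5 << 10) | 34 == 0x1422
 */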
1197
1198/*
1199 * Completion Q defines
1200 */
1201/* CQ Entry Flags */
1202#define BNA_CQ_EF_MAC_ERROR (1 << 0)
1203#define BNA_CQ_EF_FCS_ERROR (1 << 1)
1204#define BNA_CQ_EF_TOO_LONG (1 << 2)
1205#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
1206
1207#define BNA_CQ_EF_RSVD1 (1 << 4)
1208#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
1209#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
1210#define BNA_CQ_EF_HDS_HEADER (1 << 7)
1211
1212#define BNA_CQ_EF_UDP (1 << 8)
1213#define BNA_CQ_EF_TCP (1 << 9)
1214#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
1215#define BNA_CQ_EF_IPV6 (1 << 11)
1216
1217#define BNA_CQ_EF_IPV4 (1 << 12)
1218#define BNA_CQ_EF_VLAN (1 << 13)
1219#define BNA_CQ_EF_RSS (1 << 14)
1220#define BNA_CQ_EF_RSVD2 (1 << 15)
1221
1222#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
1223#define BNA_CQ_EF_MCAST (1 << 17)
1224#define BNA_CQ_EF_BCAST (1 << 18)
1225#define BNA_CQ_EF_REMOTE (1 << 19)
1226
1227#define BNA_CQ_EF_LOCAL (1 << 20)
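
/*
 * Sketch (editorial) of checksum validation on receive, as a consumer
 * of the completion flags above might do; "flags" is the CQ entry's
 * flag word:
 *
 *	if ((flags & BNA_CQ_EF_IPV4) &&
 *	    (flags & BNA_CQ_EF_L3_CKSUM_OK) &&
 *	    ((flags & BNA_CQ_EF_TCP) || (flags & BNA_CQ_EF_UDP)) &&
 *	    (flags & BNA_CQ_EF_L4_CKSUM_OK))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */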
1228
1229/**
1230 *
1231 * Data structures
1232 *
1233 */
1234
1235enum txf_flags {
1236 BFI_TXF_CF_ENABLE = 1 << 0,
1237 BFI_TXF_CF_VLAN_FILTER = 1 << 8,
1238 BFI_TXF_CF_VLAN_ADMIT = 1 << 9,
1239 BFI_TXF_CF_VLAN_INSERT = 1 << 10,
1240 BFI_TXF_CF_RSVD1 = 1 << 11,
1241 BFI_TXF_CF_MAC_SA_CHECK = 1 << 12,
1242 BFI_TXF_CF_VLAN_WI_BASED = 1 << 13,
1243 BFI_TXF_CF_VSWITCH_MCAST = 1 << 14,
1244 BFI_TXF_CF_VSWITCH_UCAST = 1 << 15,
1245 BFI_TXF_CF_RSVD2 = 0x7F << 1
1246};
1247
1248enum ib_flags {
1249 BFI_IB_CF_MASTER_ENABLE = (1 << 0),
1250 BFI_IB_CF_MSIX_MODE = (1 << 1),
1251 BFI_IB_CF_COALESCING_MODE = (1 << 2),
1252 BFI_IB_CF_INTER_PKT_ENABLE = (1 << 3),
1253 BFI_IB_CF_INT_ENABLE = (1 << 4),
1254 BFI_IB_CF_INTER_PKT_DMA = (1 << 5),
1255 BFI_IB_CF_ACK_PENDING = (1 << 6),
1256 BFI_IB_CF_RESERVED1 = (1 << 7)
1257};
1258
1259enum rss_hash_type {
1260 BFI_RSS_T_V4_TCP = (1 << 11),
1261 BFI_RSS_T_V4_IP = (1 << 10),
1262 BFI_RSS_T_V6_TCP = (1 << 9),
1263 BFI_RSS_T_V6_IP = (1 << 8)
1264};
1265enum hds_header_type {
1266 BNA_HDS_T_V4_TCP = (1 << 11),
1267 BNA_HDS_T_V4_UDP = (1 << 10),
1268 BNA_HDS_T_V6_TCP = (1 << 9),
1269 BNA_HDS_T_V6_UDP = (1 << 8),
1270 BNA_HDS_FORCED = (1 << 7),
1271};
1272enum rxf_flags {
1273 BNA_RXF_CF_SM_LG_RXQ = (1 << 15),
1274 BNA_RXF_CF_DEFAULT_VLAN = (1 << 14),
1275 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE = (1 << 13),
1276 BNA_RXF_CF_VLAN_STRIP = (1 << 12),
1277 BNA_RXF_CF_RSS_ENABLE = (1 << 8)
1278};
1279struct bna_chip_regs_offset {
1280 u32 page_addr;
1281 u32 fn_int_status;
1282 u32 fn_int_mask;
1283 u32 msix_idx;
1284};
1285extern const struct bna_chip_regs_offset reg_offset[];
1286
1287struct bna_chip_regs {
1288 void __iomem *page_addr;
1289 void __iomem *fn_int_status;
1290 void __iomem *fn_int_mask;
1291};
1292
1293struct bna_txq_mem {
1294 u32 pg_tbl_addr_lo;
1295 u32 pg_tbl_addr_hi;
1296 u32 cur_q_entry_lo;
1297 u32 cur_q_entry_hi;
1298 u32 reserved1;
1299 u32 reserved2;
1300 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1301 /* 15:0 ->producer pointer (index?) */
1302 u32 entry_n_pg_size; /* 31:16->entry size */
1303 /* 15:0 ->page size */
1304 u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
1305 /* 23:16->Int Blk Offset */
1306 /* 15:0 ->consumer pointer(index?) */
1307 u32 cns_ptr2_n_q_state; /* 31:16->cons. ptr 2; 15:0-> Q state */
1308 u32 nxt_qid_n_fid_n_pri; /* 17:10->next QId; */
1309 /* 9:3->FID; 2:0->Priority */
1310 u32 wvc_n_cquota_n_rquota; /* 31:24->WI Vector Count; */
1311 /* 23:12->Cfg Quota; */
1312 /* 11:0 ->Run Quota */
1313 u32 reserved3[4];
1314};
1315
1316struct bna_rxq_mem {
1317 u32 pg_tbl_addr_lo;
1318 u32 pg_tbl_addr_hi;
1319 u32 cur_q_entry_lo;
1320 u32 cur_q_entry_hi;
1321 u32 reserved1;
1322 u32 reserved2;
1323 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1324 /* 15:0 ->producer pointer (index?) */
1325 u32 entry_n_pg_size; /* 31:16->entry size */
1326 /* 15:0 ->page size */
1327 u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
1328 /* 23:16->CQ; */
1329 /* 15:0->consumer pointer(index?) */
1330 u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
1331 u32 next_qid; /* 17:10->next QId */
1332 u32 reserved3;
1333 u32 reserved4[4];
1334};
1335
1336struct bna_rxtx_q_mem {
1337 struct bna_rxq_mem rxq;
1338 struct bna_txq_mem txq;
1339};
1340
1341struct bna_cq_mem {
1342 u32 pg_tbl_addr_lo;
1343 u32 pg_tbl_addr_hi;
1344 u32 cur_q_entry_lo;
1345 u32 cur_q_entry_hi;
1346
1347 u32 reserved1;
1348 u32 reserved2;
1349 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1350 /* 15:0 ->producer pointer (index?) */
1351 u32 entry_n_pg_size; /* 31:16->entry size */
1352 /* 15:0 ->page size */
1353 u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
1354 /* 23:16->Int Blk Offset */
1355 /* 15:0 ->consumer pointer(index?) */
1356 u32 q_state; /* 31:16->reserved; 15:0-> Q state */
1357 u32 reserved3[2];
1358 u32 reserved4[4];
1359};
1360
1361struct bna_ib_blk_mem {
1362 u32 host_addr_lo;
1363 u32 host_addr_hi;
1364 u32 clsc_n_ctrl_n_msix; /* 31:24->coalescing; */
1365 /* 23:16->coalescing cfg; */
1366 /* 15:8 ->control; */
1367 /* 7:0 ->msix; */
1368 u32 ipkt_n_ent_n_idxof;
1369 u32 ipkt_cnt_cfg_n_unacked;
1370
1371 u32 reserved[3];
1372};
1373
1374struct bna_idx_tbl_mem {
1375 u32 idx; /* !< 31:16->res;15:0->idx; */
1376};
1377
1378struct bna_doorbell_qset {
1379 u32 rxq[0x20 >> 2];
1380 u32 txq[0x20 >> 2];
1381 u32 ib0[0x20 >> 2];
1382 u32 ib1[0x20 >> 2];
1383};
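
/*
 * 0x20 >> 2 is eight u32 words, so each doorbell register set above is
 * 32 bytes, with the RxQ, TxQ and two IB doorbells laid out back to
 * back.
 */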
1384
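/*
 * Rx function database entry; the four words below are programmed by
 * __rxf_config_set() in bna_txrx.c.
 */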
1385struct bna_rx_fndb_ram {
1386 u32 rss_prop;
1387 u32 size_routing_props;
1388 u32 rit_hds_mcastq;
1389 u32 control_flags;
1390};
1391
1392struct bna_tx_fndb_ram {
1393 u32 vlan_n_ctrl_flags;
1394};
1395
1396/**
1397 * @brief
1398 * Structure which maps to RxFn Indirection Table (RIT)
1399 * Size : 1 word
1400 * See catapult_spec.pdf, RxA for details
1401 */
1402struct bna_rit_mem {
1403 u32 rxq_ids; /* !< 31:12->res;11:0->two 6 bit RxQ Ids */
1404};
1405
1406/**
1407 * @brief
1408 * Structure which maps to RSS Table entry
1409 * Size : 16 words
1410 * See catapult_spec.pdf, RAD for details
1411 */
1412struct bna_rss_mem {
1413 /*
1414 * 31:12-> res
1415 * 11:8 -> protocol type
1416 * 7:0 -> hash index
1417 */
1418 u32 type_n_hash;
1419 u32 hash_key[10]; /* !< 40 byte Toeplitz hash key */
1420 u32 reserved[5];
1421};
1422
1423/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
1424struct bna_dma_addr {
1425 u32 msb;
1426 u32 lsb;
1427};
1428
1429struct bna_txq_wi_vector {
1430 u16 reserved;
1431 u16 length; /* Only 14 LSB are valid */
1432 struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
1433};
1434
1435typedef u16 bna_txq_wi_opcode_t;
1436
1437typedef u16 bna_txq_wi_ctrl_flag_t;
1438
1439/**
1440 * TxQ Entry Structure
1441 *
1442 * BEWARE: Load values into this structure with the correct endianness.
1443 */
1444struct bna_txq_entry {
1445 union {
1446 struct {
1447 u8 reserved;
1448 u8 num_vectors; /* number of vectors present */
1449 bna_txq_wi_opcode_t opcode; /* Either */
1450 /* BNA_TXQ_WI_SEND or */
1451 /* BNA_TXQ_WI_SEND_LSO */
1452 bna_txq_wi_ctrl_flag_t flags; /* OR of all the flags */
1453 u16 l4_hdr_size_n_offset;
1454 u16 vlan_tag;
1455 u16 lso_mss; /* Only 14 LSB are valid */
1456 u32 frame_length; /* Only 24 LSB are valid */
1457 } wi;
1458
1459 struct {
1460 u16 reserved;
1461 bna_txq_wi_opcode_t opcode; /* Must be */
1462 /* BNA_TXQ_WI_EXTENSION */
1463 u32 reserved2[3]; /* Place holder for */
1464 /* removed vector (12 bytes) */
1465 } wi_ext;
1466 } hdr;
1467 struct bna_txq_wi_vector vector[4];
1468};
1469#define wi_hdr hdr.wi
1470#define wi_ext_hdr hdr.wi_ext
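
/*
 * Illustrative use of the shorthand (not part of this patch), minding
 * the endianness warning above:
 *
 *	txqent->wi_hdr.opcode = htons(BNA_TXQ_WI_SEND);
 *	txqent->wi_hdr.num_vectors = 1;
 */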
1471
1472/* RxQ Entry Structure */
1473struct bna_rxq_entry { /* Rx-Buffer */
1474 struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
1475};
1476
1477typedef u32 bna_cq_e_flag_t;
1478
1479/* CQ Entry Structure */
1480struct bna_cq_entry {
1481 bna_cq_e_flag_t flags;
1482 u16 vlan_tag;
1483 u16 length;
1484 u32 rss_hash;
1485 u8 valid;
1486 u8 reserved1;
1487 u8 reserved2;
1488 u8 rxq_id;
1489};
1490
1491#endif /* __BNA_HW_H__ */
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
new file mode 100644
index 000000000000..890846d55502
--- /dev/null
+++ b/drivers/net/bna/bna_txrx.c
@@ -0,0 +1,4209 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19#include "bfa_sm.h"
20#include "bfi.h"
21
22/**
23 * IB
24 */
25#define bna_ib_find_free_ibidx(_mask, _pos)\
26do {\
27 (_pos) = 0;\
28 while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
29 ((1 << (_pos)) & (_mask)))\
30 (_pos)++;\
31} while (0)
32
33#define bna_ib_count_ibidx(_mask, _count)\
34do {\
35 int pos = 0;\
36 (_count) = 0;\
37 while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
38 if ((1 << pos) & (_mask))\
39 (_count) = pos + 1;\
40 pos++;\
41 } \
42} while (0)
43
44#define bna_ib_select_segpool(_count, _q_idx)\
45do {\
46 int i;\
47 (_q_idx) = -1;\
48 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
49 if ((_count <= ibidx_pool[i].pool_entry_size)) {\
50 (_q_idx) = i;\
51 break;\
52 } \
53 } \
54} while (0)
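
/*
 * Worked example for the three helpers above: with _mask == 0x7
 * (indexes 0-2 in use) bna_ib_find_free_ibidx yields 3,
 * bna_ib_count_ibidx on the new mask 0xf yields 4, and
 * bna_ib_select_segpool picks the first pool whose pool_entry_size
 * is >= 4.
 */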
55
56struct bna_ibidx_pool {
57 int pool_size;
58 int pool_entry_size;
59};
60init_ibidx_pool(ibidx_pool);
61
62static struct bna_intr *
63bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
64 int vector)
65{
66 struct bna_intr *intr;
67 struct list_head *qe;
68
69 list_for_each(qe, &ib_mod->intr_active_q) {
70 intr = (struct bna_intr *)qe;
71
72 if ((intr->intr_type == intr_type) &&
73 (intr->vector == vector)) {
74 intr->ref_count++;
75 return intr;
76 }
77 }
78
79 if (list_empty(&ib_mod->intr_free_q))
80 return NULL;
81
82 bfa_q_deq(&ib_mod->intr_free_q, &intr);
83 bfa_q_qe_init(&intr->qe);
84
85 intr->ref_count = 1;
86 intr->intr_type = intr_type;
87 intr->vector = vector;
88
89 list_add_tail(&intr->qe, &ib_mod->intr_active_q);
90
91 return intr;
92}
93
94static void
95bna_intr_put(struct bna_ib_mod *ib_mod,
96 struct bna_intr *intr)
97{
98 intr->ref_count--;
99
100 if (intr->ref_count == 0) {
101 intr->ib = NULL;
102 list_del(&intr->qe);
103 bfa_q_qe_init(&intr->qe);
104 list_add_tail(&intr->qe, &ib_mod->intr_free_q);
105 }
106}
107
108void
109bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
110 struct bna_res_info *res_info)
111{
112 int i;
113 int j;
114 int count;
115 u8 offset;
116 struct bna_doorbell_qset *qset;
117 unsigned long off;
118
119 ib_mod->bna = bna;
120
121 ib_mod->ib = (struct bna_ib *)
122 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
123 ib_mod->intr = (struct bna_intr *)
124 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
125 ib_mod->idx_seg = (struct bna_ibidx_seg *)
126 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
127
128 INIT_LIST_HEAD(&ib_mod->ib_free_q);
129 INIT_LIST_HEAD(&ib_mod->intr_free_q);
130 INIT_LIST_HEAD(&ib_mod->intr_active_q);
131
132 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
133 INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
134
135 for (i = 0; i < BFI_MAX_IB; i++) {
136 ib_mod->ib[i].ib_id = i;
137
138 ib_mod->ib[i].ib_seg_host_addr_kva =
139 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
140 ib_mod->ib[i].ib_seg_host_addr.lsb =
141 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
142 ib_mod->ib[i].ib_seg_host_addr.msb =
143 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
144
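		/*
		 * offsetof-style idiom: index a NULL bna_doorbell_qset
		 * pointer to compute the byte offset of this IB's
		 * doorbell within the doorbell page.
		 */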
145 qset = (struct bna_doorbell_qset *)0;
146 off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
147 * (0x20 >> 2)]);
148 ib_mod->ib[i].door_bell.doorbell_addr = off +
149 BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
150
151 bfa_q_qe_init(&ib_mod->ib[i].qe);
152 list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
153
154 bfa_q_qe_init(&ib_mod->intr[i].qe);
155 list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
156 }
157
158 count = 0;
159 offset = 0;
160 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
161 for (j = 0; j < ibidx_pool[i].pool_size; j++) {
162 bfa_q_qe_init(&ib_mod->idx_seg[count].qe);
163 ib_mod->idx_seg[count].ib_seg_size =
164 ibidx_pool[i].pool_entry_size;
165 ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
166 list_add_tail(&ib_mod->idx_seg[count].qe,
167 &ib_mod->ibidx_seg_pool[i]);
168 count++;
169 offset += ibidx_pool[i].pool_entry_size;
170 }
171 }
172}
173
174void
175bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
176{
177 int i;
178 int j;
179 struct list_head *qe;
180
181 i = 0;
182 list_for_each(qe, &ib_mod->ib_free_q)
183 i++;
184
185 i = 0;
186 list_for_each(qe, &ib_mod->intr_free_q)
187 i++;
188
189 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
190 j = 0;
191 list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
192 j++;
193 }
194
195 ib_mod->bna = NULL;
196}
197
198struct bna_ib *
199bna_ib_get(struct bna_ib_mod *ib_mod,
200 enum bna_intr_type intr_type,
201 int vector)
202{
203 struct bna_ib *ib;
204 struct bna_intr *intr;
205
206 if (intr_type == BNA_INTR_T_INTX)
207 vector = (1 << vector);
208
209 intr = bna_intr_get(ib_mod, intr_type, vector);
210 if (intr == NULL)
211 return NULL;
212
213 if (intr->ib) {
214 if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
215 bna_intr_put(ib_mod, intr);
216 return NULL;
217 }
218 intr->ib->ref_count++;
219 return intr->ib;
220 }
221
222 if (list_empty(&ib_mod->ib_free_q)) {
223 bna_intr_put(ib_mod, intr);
224 return NULL;
225 }
226
227 bfa_q_deq(&ib_mod->ib_free_q, &ib);
228 bfa_q_qe_init(&ib->qe);
229
230 ib->ref_count = 1;
231 ib->start_count = 0;
232 ib->idx_mask = 0;
233
234 ib->intr = intr;
235 ib->idx_seg = NULL;
236 intr->ib = ib;
237
238 ib->bna = ib_mod->bna;
239
240 return ib;
241}
242
243void
244bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
245{
246 bna_intr_put(ib_mod, ib->intr);
247
248 ib->ref_count--;
249
250 if (ib->ref_count == 0) {
251 ib->intr = NULL;
252 ib->bna = NULL;
253 list_add_tail(&ib->qe, &ib_mod->ib_free_q);
254 }
255}
256
257/* Returns index offset - starting from 0 */
258int
259bna_ib_reserve_idx(struct bna_ib *ib)
260{
261 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
262 struct bna_ibidx_seg *idx_seg;
263 int idx;
264 int num_idx;
265 int q_idx;
266
267 /* Find the first free index position */
268 bna_ib_find_free_ibidx(ib->idx_mask, idx);
269 if (idx == BFI_IBIDX_MAX_SEGSIZE)
270 return -1;
271
272 /*
273 * Calculate the total number of indexes held by this IB,
274 * including the index newly reserved above.
275 */
276 bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
277
278 /* See if there is a free space in the index segment held by this IB */
279 if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
280 ib->idx_mask |= (1 << idx);
281 return idx;
282 }
283
284 if (ib->start_count)
285 return -1;
286
287 /* Allocate a new segment */
288 bna_ib_select_segpool(num_idx, q_idx);
289 while (1) {
290 if (q_idx == BFI_IBIDX_TOTAL_POOLS)
291 return -1;
292 if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
293 break;
294 q_idx++;
295 }
296 bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
297 bfa_q_qe_init(&idx_seg->qe);
298
299 /* Free the old segment */
300 if (ib->idx_seg) {
301 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
302 list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
303 }
304
305 ib->idx_seg = idx_seg;
306
307 ib->idx_mask |= (1 << idx);
308
309 return idx;
310}
311
312void
313bna_ib_release_idx(struct bna_ib *ib, int idx)
314{
315 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
316 struct bna_ibidx_seg *idx_seg;
317 int num_idx;
318 int cur_q_idx;
319 int new_q_idx;
320
321 ib->idx_mask &= ~(1 << idx);
322
323 if (ib->start_count)
324 return;
325
326 bna_ib_count_ibidx(ib->idx_mask, num_idx);
327
328 /*
329 * Free the segment, if there are no more indexes in the segment
330 * held by this IB
331 */
332 if (!num_idx) {
333 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
334 list_add_tail(&ib->idx_seg->qe,
335 &ib_mod->ibidx_seg_pool[cur_q_idx]);
336 ib->idx_seg = NULL;
337 return;
338 }
339
340 /* See if we can move to a smaller segment */
341 bna_ib_select_segpool(num_idx, new_q_idx);
342 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
343 while (new_q_idx < cur_q_idx) {
344 if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
345 break;
346 new_q_idx++;
347 }
348 if (new_q_idx < cur_q_idx) {
349 /* Select the new smaller segment */
350 bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
351 bfa_q_qe_init(&idx_seg->qe);
352 /* Free the old segment */
353 list_add_tail(&ib->idx_seg->qe,
354 &ib_mod->ibidx_seg_pool[cur_q_idx]);
355 ib->idx_seg = idx_seg;
356 }
357}
358
359int
360bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
361{
362 if (ib->start_count)
363 return -1;
364
365 ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
366 ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
367 ib->ib_config.interpkt_count = ib_config->interpkt_count;
368 ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
369
370 ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
371 if (ib->intr->intr_type == BNA_INTR_T_MSIX)
372 ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
373
374 return 0;
375}
376
377void
378bna_ib_start(struct bna_ib *ib)
379{
380 struct bna_ib_blk_mem ib_cfg;
381 struct bna_ib_blk_mem *ib_mem;
382 u32 pg_num;
383 u32 intx_mask;
384 int i;
385 void __iomem *base_addr;
386 unsigned long off;
387
388 ib->start_count++;
389
390 if (ib->start_count > 1)
391 return;
392
393 ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
394 ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
395
396 ib_cfg.clsc_n_ctrl_n_msix = (((u32)
397 ib->ib_config.coalescing_timeo << 16) |
398 ((u32)ib->ib_config.ctrl_flags << 8) |
399 (ib->intr->vector));
400 ib_cfg.ipkt_n_ent_n_idxof =
401 ((u32)
402 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
403 ((u32)ib->idx_seg->ib_seg_size << 8) |
404 (ib->idx_seg->ib_idx_tbl_offset);
405 ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
406 ib->ib_config.interpkt_count << 24);
407
408 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
409 HQM_IB_RAM_BASE_OFFSET);
410 writel(pg_num, ib->bna->regs.page_addr);
411
412 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
413 HQM_IB_RAM_BASE_OFFSET);
414
415 ib_mem = (struct bna_ib_blk_mem *)0;
416 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
417 writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
418
419 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
420 writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
421
422 off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
423 writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
424
425 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
426 writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
427
428 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
429 writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
430
431 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
432 (u32)ib->ib_config.coalescing_timeo, 0);
433
434 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
435 HQM_INDX_TBL_RAM_BASE_OFFSET);
436 writel(pg_num, ib->bna->regs.page_addr);
437
438 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
439 HQM_INDX_TBL_RAM_BASE_OFFSET);
440 for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
441 off = (unsigned long)
442 ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
443 writel(0, base_addr + off);
444 }
445
446 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
447 bna_intx_disable(ib->bna, intx_mask);
448 intx_mask &= ~(ib->intr->vector);
449 bna_intx_enable(ib->bna, intx_mask);
450 }
451}
452
453void
454bna_ib_stop(struct bna_ib *ib)
455{
456 u32 intx_mask;
457
458 ib->start_count--;
459
460 if (ib->start_count == 0) {
461 writel(BNA_DOORBELL_IB_INT_DISABLE,
462 ib->door_bell.doorbell_addr);
463 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
464 bna_intx_disable(ib->bna, intx_mask);
465 intx_mask |= (ib->intr->vector);
466 bna_intx_enable(ib->bna, intx_mask);
467 }
468 }
469}
470
471void
472bna_ib_fail(struct bna_ib *ib)
473{
474 ib->start_count = 0;
475}
476
477/**
478 * RXF
479 */
480static void rxf_enable(struct bna_rxf *rxf);
481static void rxf_disable(struct bna_rxf *rxf);
482static void __rxf_config_set(struct bna_rxf *rxf);
483static void __rxf_rit_set(struct bna_rxf *rxf);
484static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
485static int rxf_process_packet_filter(struct bna_rxf *rxf);
486static int rxf_clear_packet_filter(struct bna_rxf *rxf);
487static void rxf_reset_packet_filter(struct bna_rxf *rxf);
488static void rxf_cb_enabled(void *arg, int status);
489static void rxf_cb_disabled(void *arg, int status);
490static void bna_rxf_cb_stats_cleared(void *arg, int status);
491static void __rxf_enable(struct bna_rxf *rxf);
492static void __rxf_disable(struct bna_rxf *rxf);
493
494bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
495 enum bna_rxf_event);
496bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
497 enum bna_rxf_event);
498bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
499 enum bna_rxf_event);
500bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
501 enum bna_rxf_event);
502bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
503 enum bna_rxf_event);
504bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
505 enum bna_rxf_event);
506bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
507 enum bna_rxf_event);
508bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
509 enum bna_rxf_event);
510bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
511 enum bna_rxf_event);
512
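/*
 * Maps each state handler to its BNA_RXF_* state code so that
 * bna_rxf_state_get() can translate the current handler back via
 * bfa_sm_to_state().
 */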
513static struct bfa_sm_table rxf_sm_table[] = {
514 {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
515 {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
516 {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
517 {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
518 {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
519 {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
520 {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
521 {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
522 {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
523};
524
525static void
526bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
527{
528 call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
529}
530
531static void
532bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
533{
534 switch (event) {
535 case RXF_E_START:
536 bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
537 break;
538
539 case RXF_E_STOP:
540 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
541 break;
542
543 case RXF_E_FAIL:
544 /* No-op */
545 break;
546
547 case RXF_E_CAM_FLTR_MOD:
548 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
549 break;
550
551 case RXF_E_STARTED:
552 case RXF_E_STOPPED:
553 case RXF_E_CAM_FLTR_RESP:
554 /**
555 * These events are received due to flushing of mbox
556 * when device fails
557 */
558 /* No-op */
559 break;
560
561 case RXF_E_PAUSE:
562 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
563 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
564 break;
565
566 case RXF_E_RESUME:
567 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
568 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
569 break;
570
571 default:
572 bfa_sm_fault(rxf->rx->bna, event);
573 }
574}
575
576static void
577bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
578{
579 __rxf_config_set(rxf);
580 __rxf_rit_set(rxf);
581 rxf_enable(rxf);
582}
583
584static void
585bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
586{
587 switch (event) {
588 case RXF_E_STOP:
589 /**
590 * STOP originates from bnad. When this happens, it
591 * cannot be waiting for a filter update
592 */
593 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
594 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
595 break;
596
597 case RXF_E_FAIL:
598 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
599 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
600 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
601 break;
602
603 case RXF_E_CAM_FLTR_MOD:
604 /* No-op */
605 break;
606
607 case RXF_E_STARTED:
608 /**
609 * Force rxf_process_filter() to go through initial
610 * config
611 */
612 if ((rxf->ucast_active_mac != NULL) &&
613 (rxf->ucast_pending_set == 0))
614 rxf->ucast_pending_set = 1;
615
616 if (rxf->rss_status == BNA_STATUS_T_ENABLED)
617 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
618
619 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
620
621 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
622 break;
623
624 case RXF_E_PAUSE:
625 case RXF_E_RESUME:
626 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
627 break;
628
629 default:
630 bfa_sm_fault(rxf->rx->bna, event);
631 }
632}
633
634static void
635bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
636{
637 if (!rxf_process_packet_filter(rxf)) {
638 /* No more pending CAM entries to update */
639 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
640 }
641}
642
643static void
644bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
645{
646 switch (event) {
647 case RXF_E_STOP:
648 /**
649 * STOP originates from bnad. When this happens, it
650 * cannot be waiting for a filter update
651 */
652 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
653 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
654 break;
655
656 case RXF_E_FAIL:
657 rxf_reset_packet_filter(rxf);
658 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
659 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
660 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
661 break;
662
663 case RXF_E_CAM_FLTR_MOD:
664 /* No-op */
665 break;
666
667 case RXF_E_CAM_FLTR_RESP:
668 if (!rxf_process_packet_filter(rxf)) {
669 /* No more pending CAM entries to update */
670 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
671 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
672 }
673 break;
674
675 case RXF_E_PAUSE:
676 case RXF_E_RESUME:
677 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
678 break;
679
680 default:
681 bfa_sm_fault(rxf->rx->bna, event);
682 }
683}
684
685static void
686bna_rxf_sm_started_entry(struct bna_rxf *rxf)
687{
688 call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
689
690 if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
691 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
692 bfa_fsm_send_event(rxf, RXF_E_PAUSE);
693 else
694 bfa_fsm_send_event(rxf, RXF_E_RESUME);
695 }
696
697}
698
699static void
700bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
701{
702 switch (event) {
703 case RXF_E_STOP:
704 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
705 /* Hack to get FSM start clearing CAM entries */
706 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
707 break;
708
709 case RXF_E_FAIL:
710 rxf_reset_packet_filter(rxf);
711 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
712 break;
713
714 case RXF_E_CAM_FLTR_MOD:
715 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
716 break;
717
718 case RXF_E_PAUSE:
719 bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
720 break;
721
722 case RXF_E_RESUME:
723 bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
724 break;
725
726 default:
727 bfa_sm_fault(rxf->rx->bna, event);
728 }
729}
730
731static void
732bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
733{
734 /**
735 * Note: Do not add rxf_clear_packet_filter here.
736 * It will overstep mbox when this transition happens:
737 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
738 */
739}
740
741static void
742bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
743{
744 switch (event) {
745 case RXF_E_FAIL:
746 /**
747 * FSM was in the process of stopping, initiated by
748 * bnad. When this happens, no one can be waiting for
749 * start or filter update
750 */
751 rxf_reset_packet_filter(rxf);
752 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
753 break;
754
755 case RXF_E_CAM_FLTR_RESP:
756 if (!rxf_clear_packet_filter(rxf)) {
757 /* No more pending CAM entries to clear */
758 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
759 rxf_disable(rxf);
760 }
761 break;
762
763 default:
764 bfa_sm_fault(rxf->rx->bna, event);
765 }
766}
767
768static void
769bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
770{
771 /**
772 * NOTE: Do not add rxf_disable here.
773 * It will overstep mbox when this transition happens:
774 * start_wait -> stop_wait on RXF_E_STOP event
775 */
776}
777
778static void
779bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
780{
781 switch (event) {
782 case RXF_E_FAIL:
783 /**
784 * FSM was in the process of stopping, initiated by
785 * bnad. When this happens, no one can be waiting for
786 * start or filter update
787 */
788 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
789 break;
790
791 case RXF_E_STARTED:
792 /**
793 * This event is received due to abrupt transition from
794 * bna_rxf_sm_start_wait state on receiving
795 * RXF_E_STOP event
796 */
797 rxf_disable(rxf);
798 break;
799
800 case RXF_E_STOPPED:
801 /**
802 * FSM was in the process of stopping, initiated by
803 * bnad. When this happens, no one can be waiting for
804 * start or filter update
805 */
806 bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
807 break;
808
809 case RXF_E_PAUSE:
810 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
811 break;
812
813 case RXF_E_RESUME:
814 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
815 break;
816
817 default:
818 bfa_sm_fault(rxf->rx->bna, event);
819 }
820}
821
822static void
823bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
824{
825 rxf->rxf_flags &=
826 ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
827 __rxf_disable(rxf);
828}
829
830static void
831bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
832{
833 switch (event) {
834 case RXF_E_FAIL:
835 /**
836 * FSM was in the process of disabling rxf, initiated by
837 * bnad.
838 */
839 call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
840 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
841 break;
842
843 case RXF_E_STOPPED:
844 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
845 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
846 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
847 break;
848
849 /*
850 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
851 * any other event during these states
852 */
853 default:
854 bfa_sm_fault(rxf->rx->bna, event);
855 }
856}
857
858static void
859bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
860{
861 rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
862 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
863 __rxf_enable(rxf);
864}
865
866static void
867bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
868{
869 switch (event) {
870 case RXF_E_FAIL:
871 /**
872 * FSM was in the process of disabling rxf, initiated by
873 * bnad.
874 */
875 call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
876 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
877 break;
878
879 case RXF_E_STARTED:
880 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
881 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
882 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
883 break;
884
885 /*
886 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
887 * any other event during these states
888 */
889 default:
890 bfa_sm_fault(rxf->rx->bna, event);
891 }
892}
893
894static void
895bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
896{
897 __bna_rxf_stat_clr(rxf);
898}
899
900static void
901bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
902{
903 switch (event) {
904 case RXF_E_FAIL:
905 case RXF_E_STAT_CLEARED:
906 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
907 break;
908
909 default:
910 bfa_sm_fault(rxf->rx->bna, event);
911 }
912}
913
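/*
 * RxF ids are passed to the firmware as bits in a pair of 32-bit mask
 * words, hence the split on rxf_id 32 in __rxf_enable(),
 * __rxf_disable() and __bna_rxf_stat_clr() below.
 */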
914static void
915__rxf_enable(struct bna_rxf *rxf)
916{
917 struct bfi_ll_rxf_multi_req ll_req;
918 u32 bm[2] = {0, 0};
919
920 if (rxf->rxf_id < 32)
921 bm[0] = 1 << rxf->rxf_id;
922 else
923 bm[1] = 1 << (rxf->rxf_id - 32);
924
925 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
926 ll_req.rxf_id_mask[0] = htonl(bm[0]);
927 ll_req.rxf_id_mask[1] = htonl(bm[1]);
928 ll_req.enable = 1;
929
930 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
931 rxf_cb_enabled, rxf);
932
933 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
934}
935
936static void
937__rxf_disable(struct bna_rxf *rxf)
938{
939 struct bfi_ll_rxf_multi_req ll_req;
940 u32 bm[2] = {0, 0};
941
942 if (rxf->rxf_id < 32)
943 bm[0] = 1 << rxf->rxf_id;
944 else
945 bm[1] = 1 << (rxf->rxf_id - 32);
946
947 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
948 ll_req.rxf_id_mask[0] = htonl(bm[0]);
949 ll_req.rxf_id_mask[1] = htonl(bm[1]);
950 ll_req.enable = 0;
951
952 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
953 rxf_cb_disabled, rxf);
954
955 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
956}
957
958static void
959__rxf_config_set(struct bna_rxf *rxf)
960{
961 u32 i;
962 struct bna_rss_mem *rss_mem;
963 struct bna_rx_fndb_ram *rx_fndb_ram;
964 struct bna *bna = rxf->rx->bna;
965 void __iomem *base_addr;
966 unsigned long off;
967
968 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
969 RSS_TABLE_BASE_OFFSET);
970
971 rss_mem = (struct bna_rss_mem *)0;
972
973 /* Configure RSS if required */
974 if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
975 /* configure RSS Table */
976 writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
977 bna->port_num, RSS_TABLE_BASE_OFFSET),
978 bna->regs.page_addr);
979
980 /* temporarily disable RSS, while hash value is written */
981 off = (unsigned long)&rss_mem[0].type_n_hash;
982 writel(0, base_addr + off);
983
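		/*
		 * The 40-byte Toeplitz key is written one word at a
		 * time, last word first - note the reversed index
		 * below.
		 */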
984 for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
985 off = (unsigned long)
986 &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
987 writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
988 base_addr + off);
989 }
990
991 off = (unsigned long)&rss_mem[0].type_n_hash;
992 writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
993 base_addr + off);
994 }
995
996 /* Configure RxF */
997 writel(BNA_GET_PAGE_NUM(
998 LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
999 RX_FNDB_RAM_BASE_OFFSET),
1000 bna->regs.page_addr);
1001
1002 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1003 RX_FNDB_RAM_BASE_OFFSET);
1004
1005 rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
1006
1007 /* We always use RSS table 0 */
1008 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
1009 writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
1010 base_addr + off);
1011
1012 /* small large buffer enable/disable */
1013 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
1014 writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
1015 base_addr + off);
1016
1017 /* RIT offset, HDS forced offset, multicast RxQ Id */
1018 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
1019 writel((rxf->rit_segment->rit_offset << 16) |
1020 (rxf->forced_offset << 8) |
1021 (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
1022 base_addr + off);
1023
1024 /*
1025 * default vlan tag, default function enable, strip vlan bytes,
1026 * HDS type, header size
1027 */
1028
1029 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
1030 writel(((u32)rxf->default_vlan_tag << 16) |
1031 (rxf->ctrl_flags &
1032 (BNA_RXF_CF_DEFAULT_VLAN |
1033 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
1034 BNA_RXF_CF_VLAN_STRIP)) |
1035 (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
1036 rxf->hds_cfg.header_size,
1037 base_addr + off);
1038}
1039
1040void
1041__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
1042{
1043 struct bna *bna = rxf->rx->bna;
1044 int i;
1045
1046 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
1047 (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
1048 bna->regs.page_addr);
1049
1050 if (status == BNA_STATUS_T_ENABLED) {
1051 /* enable VLAN filtering on this function */
1052 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1053 writel(rxf->vlan_filter_table[i],
1054 BNA_GET_VLAN_MEM_ENTRY_ADDR
1055 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1056 i * 32));
1057 }
1058 } else {
1059 /* disable VLAN filtering on this function */
1060 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1061 writel(0xffffffff,
1062 BNA_GET_VLAN_MEM_ENTRY_ADDR
1063 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1064 i * 32));
1065 }
1066 }
1067}
1068
1069static void
1070__rxf_rit_set(struct bna_rxf *rxf)
1071{
1072 struct bna *bna = rxf->rx->bna;
1073 struct bna_rit_mem *rit_mem;
1074 int i;
1075 void __iomem *base_addr;
1076 unsigned long off;
1077
1078 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1079 FUNCTION_TO_RXQ_TRANSLATE);
1080
1081 rit_mem = (struct bna_rit_mem *)0;
1082
1083 writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
1084 FUNCTION_TO_RXQ_TRANSLATE),
1085 bna->regs.page_addr);
1086
1087 for (i = 0; i < rxf->rit_segment->rit_size; i++) {
1088 off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
1089 writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
1090 rxf->rit_segment->rit[i].small_rxq_id,
1091 base_addr + off);
1092 }
1093}
1094
1095static void
1096__bna_rxf_stat_clr(struct bna_rxf *rxf)
1097{
1098 struct bfi_ll_stats_req ll_req;
1099 u32 bm[2] = {0, 0};
1100
1101 if (rxf->rxf_id < 32)
1102 bm[0] = 1 << rxf->rxf_id;
1103 else
1104 bm[1] = 1 << (rxf->rxf_id - 32);
1105
1106 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
1107 ll_req.stats_mask = 0;
1108 ll_req.txf_id_mask[0] = 0;
1109 ll_req.txf_id_mask[1] = 0;
1110
1111 ll_req.rxf_id_mask[0] = htonl(bm[0]);
1112 ll_req.rxf_id_mask[1] = htonl(bm[1]);
1113
1114 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
1115 bna_rxf_cb_stats_cleared, rxf);
1116 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1117}
1118
1119static void
1120rxf_enable(struct bna_rxf *rxf)
1121{
1122 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1123 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1124 else {
1125 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
1126 __rxf_enable(rxf);
1127 }
1128}
1129
1130static void
1131rxf_cb_enabled(void *arg, int status)
1132{
1133 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1134
1135 bfa_q_qe_init(&rxf->mbox_qe.qe);
1136 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1137}
1138
1139static void
1140rxf_disable(struct bna_rxf *rxf)
1141{
1142 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1143 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1144 else {
1145 rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
1146 __rxf_disable(rxf);
1147 }
}
1148
1149static void
1150rxf_cb_disabled(void *arg, int status)
1151{
1152 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1153
1154 bfa_q_qe_init(&rxf->mbox_qe.qe);
1155 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1156}
1157
1158void
1159rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
1160{
1161 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1162
1163 bfa_q_qe_init(&rxf->mbox_qe.qe);
1164
1165 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
1166}
1167
1168static void
1169bna_rxf_cb_stats_cleared(void *arg, int status)
1170{
1171 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1172
1173 bfa_q_qe_init(&rxf->mbox_qe.qe);
1174 bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
1175}
1176
1177void
1178rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
1179 const struct bna_mac *mac_addr)
1180{
1181 struct bfi_ll_mac_addr_req req;
1182
1183 bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
1184
1185 req.rxf_id = rxf->rxf_id;
1186 memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
1187
1188 bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
1189 rxf_cb_cam_fltr_mbox_cmd, rxf);
1190
1191 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1192}
1193
1194static int
1195rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
1196{
1197 struct bna_mac *mac = NULL;
1198 struct list_head *qe;
1199
1200 /* Add multicast entries */
1201 if (!list_empty(&rxf->mcast_pending_add_q)) {
1202 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1203 bfa_q_qe_init(qe);
1204 mac = (struct bna_mac *)qe;
1205 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
1206 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1207 return 1;
1208 }
1209
1210 /* Delete multicast entries previously added */
1211 if (!list_empty(&rxf->mcast_pending_del_q)) {
1212 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1213 bfa_q_qe_init(qe);
1214 mac = (struct bna_mac *)qe;
1215 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1216 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1217 return 1;
1218 }
1219
1220 return 0;
1221}
1222
1223static int
1224rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
1225{
1226 /* Apply the VLAN filter */
1227 if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
1228 rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
1229 if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
1230 !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
1231 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
1232 }
1233
1234 /* Apply RSS configuration */
1235 if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
1236 rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
1237 if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
1238 /* RSS is being disabled */
1239 rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
1240 __rxf_rit_set(rxf);
1241 __rxf_config_set(rxf);
1242 } else {
1243 /* RSS is being enabled or reconfigured */
1244 rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
1245 __rxf_rit_set(rxf);
1246 __rxf_config_set(rxf);
1247 }
1248 }
1249
1250 return 0;
1251}
1252
1253/**
1254 * Processes pending ucast/mcast entry addition/deletion and issues the
1255 * mailbox command. Also processes pending filter configuration - promiscuous
1256 * mode, default mode, allmulti mode - and issues the mailbox command or
1257 * applies it directly to h/w
1258 */
1259static int
1260rxf_process_packet_filter(struct bna_rxf *rxf)
1261{
1262 /* Set the default MAC first */
1263 if (rxf->ucast_pending_set > 0) {
1264 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
1265 rxf->ucast_active_mac);
1266 rxf->ucast_pending_set--;
1267 return 1;
1268 }
1269
1270 if (rxf_process_packet_filter_ucast(rxf))
1271 return 1;
1272
1273 if (rxf_process_packet_filter_mcast(rxf))
1274 return 1;
1275
1276 if (rxf_process_packet_filter_promisc(rxf))
1277 return 1;
1278
1279 if (rxf_process_packet_filter_default(rxf))
1280 return 1;
1281
1282 if (rxf_process_packet_filter_allmulti(rxf))
1283 return 1;
1284
1285 if (rxf_process_packet_filter_vlan(rxf))
1286 return 1;
1287
1288 return 0;
1289}
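
/*
 * Note the contract above: each helper posts at most one mailbox
 * command and returns 1; the cam_fltr_mod_wait state re-invokes
 * rxf_process_packet_filter() on every RXF_E_CAM_FLTR_RESP until it
 * returns 0.
 */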
1290
1291static int
1292rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
1293{
1294 struct bna_mac *mac = NULL;
1295 struct list_head *qe;
1296
1297 /* 3. delete pending mcast entries */
1298 if (!list_empty(&rxf->mcast_pending_del_q)) {
1299 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1300 bfa_q_qe_init(qe);
1301 mac = (struct bna_mac *)qe;
1302 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1303 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1304 return 1;
1305 }
1306
1307 /* 4. clear active mcast entries; move them to pending_add_q */
1308 if (!list_empty(&rxf->mcast_active_q)) {
1309 bfa_q_deq(&rxf->mcast_active_q, &qe);
1310 bfa_q_qe_init(qe);
1311 mac = (struct bna_mac *)qe;
1312 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1313 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1314 return 1;
1315 }
1316
1317 return 0;
1318}
1319
1320/**
1321 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
1322 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
1323 * so that they are added to CAM again in the rxf start path. Moves the current
1324 * filter settings - promiscuous, default, allmulti - to pending filter
1325 * configuration
1326 */
1327static int
1328rxf_clear_packet_filter(struct bna_rxf *rxf)
1329{
1330 if (rxf_clear_packet_filter_ucast(rxf))
1331 return 1;
1332
1333 if (rxf_clear_packet_filter_mcast(rxf))
1334 return 1;
1335
1336 /* 5. clear active default MAC in the CAM */
1337 if (rxf->ucast_pending_set > 0)
1338 rxf->ucast_pending_set = 0;
1339
1340 if (rxf_clear_packet_filter_promisc(rxf))
1341 return 1;
1342
1343 if (rxf_clear_packet_filter_default(rxf))
1344 return 1;
1345
1346 if (rxf_clear_packet_filter_allmulti(rxf))
1347 return 1;
1348
1349 return 0;
1350}
1351
1352static void
1353rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
1354{
1355 struct list_head *qe;
1356 struct bna_mac *mac;
1357
1358 /* 3. Move active mcast entries to pending_add_q */
1359 while (!list_empty(&rxf->mcast_active_q)) {
1360 bfa_q_deq(&rxf->mcast_active_q, &qe);
1361 bfa_q_qe_init(qe);
1362 list_add_tail(qe, &rxf->mcast_pending_add_q);
1363 }
1364
1365 /* 4. Throw away delete pending mcast entries */
1366 while (!list_empty(&rxf->mcast_pending_del_q)) {
1367 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1368 bfa_q_qe_init(qe);
1369 mac = (struct bna_mac *)qe;
1370 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1371 }
1372}
1373
1374/**
1375 * In the rxf fail path, throws away the ucast/mcast entries pending for
1376 * deletion, moves all active ucast/mcast entries to pending queue so that
1377 * they are added back to CAM in the rxf start path. Also moves the current
1378 * filter configuration to pending filter configuration.
1379 */
1380static void
1381rxf_reset_packet_filter(struct bna_rxf *rxf)
1382{
1383 rxf_reset_packet_filter_ucast(rxf);
1384
1385 rxf_reset_packet_filter_mcast(rxf);
1386
1387 /* 5. Turn off ucast set flag */
1388 rxf->ucast_pending_set = 0;
1389
1390 rxf_reset_packet_filter_promisc(rxf);
1391
1392 rxf_reset_packet_filter_default(rxf);
1393
1394 rxf_reset_packet_filter_allmulti(rxf);
1395}
1396
1397void
1398bna_rxf_init(struct bna_rxf *rxf,
1399 struct bna_rx *rx,
1400 struct bna_rx_config *q_config)
1401{
1402 struct list_head *qe;
1403 struct bna_rxp *rxp;
1404
1405 /* rxf_id is initialized during rx_mod init */
1406 rxf->rx = rx;
1407
1408 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
1409 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
1410 rxf->ucast_pending_set = 0;
1411 INIT_LIST_HEAD(&rxf->ucast_active_q);
1412 rxf->ucast_active_mac = NULL;
1413
1414 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
1415 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
1416 INIT_LIST_HEAD(&rxf->mcast_active_q);
1417
1418 bfa_q_qe_init(&rxf->mbox_qe.qe);
1419
1420 if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
1421 rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
1422
1423 rxf->rxf_oper_state = (q_config->paused) ?
1424 BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
1425
1426 bna_rxf_adv_init(rxf, rx, q_config);
1427
1428 rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
1429 q_config->num_paths);
1430
1431 list_for_each(qe, &rx->rxp_q) {
1432 rxp = (struct bna_rxp *)qe;
1433 if (q_config->rxp_type == BNA_RXP_SINGLE)
1434 rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
1435 else
1436 rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
1437 break;
1438 }
1439
1440 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
1441 memset(rxf->vlan_filter_table, 0,
1442 (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
1443
1444 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
1445}
1446
1447void
1448bna_rxf_uninit(struct bna_rxf *rxf)
1449{
1450 struct bna_mac *mac;
1451
1452 bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
1453 rxf->rit_segment = NULL;
1454
1455 rxf->ucast_pending_set = 0;
1456
1457 while (!list_empty(&rxf->ucast_pending_add_q)) {
1458 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
1459 bfa_q_qe_init(&mac->qe);
1460 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1461 }
1462
1463 if (rxf->ucast_active_mac) {
1464 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1465 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
1466 rxf->ucast_active_mac);
1467 rxf->ucast_active_mac = NULL;
1468 }
1469
1470 while (!list_empty(&rxf->mcast_pending_add_q)) {
1471 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
1472 bfa_q_qe_init(&mac->qe);
1473 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1474 }
1475
1476 rxf->rx = NULL;
1477}
1478
1479void
1480bna_rxf_start(struct bna_rxf *rxf)
1481{
1482 rxf->start_cbfn = bna_rx_cb_rxf_started;
1483 rxf->start_cbarg = rxf->rx;
1484 rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
1485 bfa_fsm_send_event(rxf, RXF_E_START);
1486}
1487
1488void
1489bna_rxf_stop(struct bna_rxf *rxf)
1490{
1491 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
1492 rxf->stop_cbarg = rxf->rx;
1493 bfa_fsm_send_event(rxf, RXF_E_STOP);
1494}
1495
1496void
1497bna_rxf_fail(struct bna_rxf *rxf)
1498{
1499 rxf->rxf_flags |= BNA_RXF_FL_FAILED;
1500 bfa_fsm_send_event(rxf, RXF_E_FAIL);
1501}
1502
1503int
1504bna_rxf_state_get(struct bna_rxf *rxf)
1505{
1506 return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
1507}
1508
1509enum bna_cb_status
1510bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
1511 void (*cbfn)(struct bnad *, struct bna_rx *,
1512 enum bna_cb_status))
1513{
1514 struct bna_rxf *rxf = &rx->rxf;
1515
1516 if (rxf->ucast_active_mac == NULL) {
1517 rxf->ucast_active_mac =
1518 bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
1519 if (rxf->ucast_active_mac == NULL)
1520 return BNA_CB_UCAST_CAM_FULL;
1521 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1522 }
1523
1524 memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
1525 rxf->ucast_pending_set++;
1526 rxf->cam_fltr_cbfn = cbfn;
1527 rxf->cam_fltr_cbarg = rx->bna->bnad;
1528
1529 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1530
1531 return BNA_CB_SUCCESS;
1532}
1533
1534enum bna_cb_status
1535bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
1536 void (*cbfn)(struct bnad *, struct bna_rx *,
1537 enum bna_cb_status))
1538{
1539 struct bna_rxf *rxf = &rx->rxf;
1540 struct list_head *qe;
1541 struct bna_mac *mac;
1542
1543 /* Check if already added */
1544 list_for_each(qe, &rxf->mcast_active_q) {
1545 mac = (struct bna_mac *)qe;
1546 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1547 if (cbfn)
1548 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1549 return BNA_CB_SUCCESS;
1550 }
1551 }
1552
1553 /* Check if pending addition */
1554 list_for_each(qe, &rxf->mcast_pending_add_q) {
1555 mac = (struct bna_mac *)qe;
1556 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1557 if (cbfn)
1558 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1559 return BNA_CB_SUCCESS;
1560 }
1561 }
1562
1563 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1564 if (mac == NULL)
1565 return BNA_CB_MCAST_LIST_FULL;
1566 bfa_q_qe_init(&mac->qe);
1567 memcpy(mac->addr, addr, ETH_ALEN);
1568 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1569
1570 rxf->cam_fltr_cbfn = cbfn;
1571 rxf->cam_fltr_cbarg = rx->bna->bnad;
1572
1573 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1574
1575 return BNA_CB_SUCCESS;
1576}
1577
1578enum bna_cb_status
1579bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
1580 void (*cbfn)(struct bnad *, struct bna_rx *,
1581 enum bna_cb_status))
1582{
1583 struct bna_rxf *rxf = &rx->rxf;
1584 struct list_head *qe;
1585 struct bna_mac *mac;
1586
1587 list_for_each(qe, &rxf->mcast_pending_add_q) {
1588 mac = (struct bna_mac *)qe;
1589 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1590 list_del(qe);
1591 bfa_q_qe_init(qe);
1592 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1593 if (cbfn)
1594 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1595 return BNA_CB_SUCCESS;
1596 }
1597 }
1598
1599 list_for_each(qe, &rxf->mcast_active_q) {
1600 mac = (struct bna_mac *)qe;
1601 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1602 list_del(qe);
1603 bfa_q_qe_init(qe);
1604 list_add_tail(qe, &rxf->mcast_pending_del_q);
1605 rxf->cam_fltr_cbfn = cbfn;
1606 rxf->cam_fltr_cbarg = rx->bna->bnad;
1607 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1608 return BNA_CB_SUCCESS;
1609 }
1610 }
1611
1612 return BNA_CB_INVALID_MAC;
1613}
1614
1615enum bna_cb_status
1616bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
1617 void (*cbfn)(struct bnad *, struct bna_rx *,
1618 enum bna_cb_status))
1619{
1620 struct bna_rxf *rxf = &rx->rxf;
1621 struct list_head list_head;
1622 struct list_head *qe;
1623 u8 *mcaddr;
1624 struct bna_mac *mac;
1625 struct bna_mac *mac1;
1626 int skip;
1627 int delete;
1628 int need_hw_config = 0;
1629 int i;
1630
1631 /* Allocate nodes */
1632 INIT_LIST_HEAD(&list_head);
1633 for (i = 0, mcaddr = mclist; i < count; i++) {
1634 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1635 if (mac == NULL)
1636 goto err_return;
1637 bfa_q_qe_init(&mac->qe);
1638 memcpy(mac->addr, mcaddr, ETH_ALEN);
1639 list_add_tail(&mac->qe, &list_head);
1640
1641 mcaddr += ETH_ALEN;
1642 }
1643
1644 /* Schedule for addition */
1645 while (!list_empty(&list_head)) {
1646 bfa_q_deq(&list_head, &qe);
1647 mac = (struct bna_mac *)qe;
1648 bfa_q_qe_init(&mac->qe);
1649
1650 skip = 0;
1651
1652 /* Skip if already added */
1653 list_for_each(qe, &rxf->mcast_active_q) {
1654 mac1 = (struct bna_mac *)qe;
1655 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1656 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1657 mac);
1658 skip = 1;
1659 break;
1660 }
1661 }
1662
1663 if (skip)
1664 continue;
1665
1666 /* Skip if pending addition */
1667 list_for_each(qe, &rxf->mcast_pending_add_q) {
1668 mac1 = (struct bna_mac *)qe;
1669 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1670 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1671 mac);
1672 skip = 1;
1673 break;
1674 }
1675 }
1676
1677 if (skip)
1678 continue;
1679
1680 need_hw_config = 1;
1681 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1682 }
1683
1684 /**
1685 * Delete the entries that are in the pending_add_q but not
1686 * in the new list
1687 */
1688 while (!list_empty(&rxf->mcast_pending_add_q)) {
1689 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1690 mac = (struct bna_mac *)qe;
1691 bfa_q_qe_init(&mac->qe);
1692 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1693 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1694 delete = 0;
1695 break;
1696 }
1697 mcaddr += ETH_ALEN;
1698 }
1699 if (delete)
1700 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1701 else
1702 list_add_tail(&mac->qe, &list_head);
1703 }
1704 while (!list_empty(&list_head)) {
1705 bfa_q_deq(&list_head, &qe);
1706 mac = (struct bna_mac *)qe;
1707 bfa_q_qe_init(&mac->qe);
1708 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1709 }
1710
1711 /**
1712 * Schedule entries for deletion that are in the active_q but not
1713 * in the new list
1714 */
1715 while (!list_empty(&rxf->mcast_active_q)) {
1716 bfa_q_deq(&rxf->mcast_active_q, &qe);
1717 mac = (struct bna_mac *)qe;
1718 bfa_q_qe_init(&mac->qe);
1719 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1720 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1721 delete = 0;
1722 break;
1723 }
1724 mcaddr += ETH_ALEN;
1725 }
1726 if (delete) {
1727 list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
1728 need_hw_config = 1;
1729 } else {
1730 list_add_tail(&mac->qe, &list_head);
1731 }
1732 }
1733 while (!list_empty(&list_head)) {
1734 bfa_q_deq(&list_head, &qe);
1735 mac = (struct bna_mac *)qe;
1736 bfa_q_qe_init(&mac->qe);
1737 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1738 }
1739
1740 if (need_hw_config) {
1741 rxf->cam_fltr_cbfn = cbfn;
1742 rxf->cam_fltr_cbarg = rx->bna->bnad;
1743 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1744 } else if (cbfn)
1745 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1746
1747 return BNA_CB_SUCCESS;
1748
1749err_return:
1750 while (!list_empty(&list_head)) {
1751 bfa_q_deq(&list_head, &qe);
1752 mac = (struct bna_mac *)qe;
1753 bfa_q_qe_init(&mac->qe);
1754 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1755 }
1756
1757 return BNA_CB_MCAST_LIST_FULL;
1758}
1759
1760void
1761bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1762{
1763 struct bna_rxf *rxf = &rx->rxf;
1764 int index = (vlan_id >> 5);
1765 int bit = (1 << (vlan_id & 0x1F));
1766
1767 rxf->vlan_filter_table[index] |= bit;
1768 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1769 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1770 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1771 }
1772}
1773
1774void
1775bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1776{
1777 struct bna_rxf *rxf = &rx->rxf;
1778 int index = (vlan_id >> 5);
1779 int bit = (1 << (vlan_id & 0x1F));
1780
1781 rxf->vlan_filter_table[index] &= ~bit;
1782 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1783 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1784 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1785 }
1786}
1787
1788/**
1789 * RX
1790 */
1791#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
1792 struct bna_doorbell_qset *_qset; \
1793 unsigned long off; \
1794 (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
1795 (q)->rcb->q_depth = (qdepth); \
1796 (q)->rcb->unmap_q = unmapq_mem; \
1797 (q)->rcb->rxq = (q); \
1798 (q)->rcb->cq = &(rxp)->cq; \
1799 (q)->rcb->bnad = (bna)->bnad; \
1800 _qset = (struct bna_doorbell_qset *)0; \
1801 off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
1802 (q)->rcb->q_dbell = off + \
1803 BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
1804 (q)->rcb->id = _id; \
1805} while (0)
1806
1807#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1808 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1809
1810#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1811 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
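
/*
 * SIZE_TO_PAGES rounds up to whole pages: with 4K pages, for example,
 * a size of 9000 bytes yields 2 + 1 = 3 pages.
 */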
1812
1813#define call_rx_stop_callback(rx, status) \
1814 if ((rx)->stop_cbfn) { \
1815 (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
1816 (rx)->stop_cbfn = NULL; \
1817 (rx)->stop_cbarg = NULL; \
1818 }
1819
1820/*
1821 * Since rx_enable is a synchronous callback, there is no start_cbfn required.
1822 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
1823 * for each rxpath.
1824 */
1825
1826#define call_rx_disable_cbfn(rx, status) \
1827 if ((rx)->disable_cbfn) { \
1828 (*(rx)->disable_cbfn)((rx)->disable_cbarg, \
1829 status); \
1830 (rx)->disable_cbfn = NULL; \
1831 (rx)->disable_cbarg = NULL; \
1832 } \
1833
1834#define rxqs_reqd(type, num_rxqs) \
1835 (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
1836
1837#define rx_ib_fail(rx) \
1838do { \
1839 struct bna_rxp *rxp; \
1840 struct list_head *qe; \
1841 list_for_each(qe, &(rx)->rxp_q) { \
1842 rxp = (struct bna_rxp *)qe; \
1843 bna_ib_fail(rxp->cq.ib); \
1844 } \
1845} while (0)
1846
1847static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
1848static void __bna_rxq_start(struct bna_rxq *rxq);
1849static void __bna_cq_start(struct bna_cq *cq);
1850static void bna_rit_create(struct bna_rx *rx);
1851static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
1852static void bna_rx_cb_rxq_stopped_all(void *arg);
1853
1854bfa_fsm_state_decl(bna_rx, stopped,
1855 struct bna_rx, enum bna_rx_event);
1856bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1857 struct bna_rx, enum bna_rx_event);
1858bfa_fsm_state_decl(bna_rx, started,
1859 struct bna_rx, enum bna_rx_event);
1860bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1861 struct bna_rx, enum bna_rx_event);
1862bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
1863 struct bna_rx, enum bna_rx_event);
1864
1865static struct bfa_sm_table rx_sm_table[] = {
1866 {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
1867 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
1868 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
1869 {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
1870 {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
1871};
1872
1873static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1874{
1875 struct bna_rxp *rxp;
1876 struct list_head *qe_rxp;
1877
1878 list_for_each(qe_rxp, &rx->rxp_q) {
1879 rxp = (struct bna_rxp *)qe_rxp;
1880 rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
1881 }
1882
1883 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1884}
1885
1886static void bna_rx_sm_stopped(struct bna_rx *rx,
1887 enum bna_rx_event event)
1888{
1889 switch (event) {
1890 case RX_E_START:
1891 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1892 break;
1893 case RX_E_STOP:
1894 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1895 break;
1896 case RX_E_FAIL:
1897 /* no-op */
1898 break;
1899 default:
1900 bfa_sm_fault(rx->bna, event);
1901 break;
1902 }
1903
1904}
1905
1906static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1907{
1908 struct bna_rxp *rxp;
1909 struct list_head *qe_rxp;
1910 struct bna_rxq *q0 = NULL, *q1 = NULL;
1911
1912 /* Setup the RIT */
1913 bna_rit_create(rx);
1914
1915 list_for_each(qe_rxp, &rx->rxp_q) {
1916 rxp = (struct bna_rxp *)qe_rxp;
1917 bna_ib_start(rxp->cq.ib);
1918 GET_RXQS(rxp, q0, q1);
1919 q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
1920 __bna_rxq_start(q0);
1921 rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
1922 if (q1) {
1923 __bna_rxq_start(q1);
1924 rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
1925 }
1926 __bna_cq_start(&rxp->cq);
1927 }
1928
1929 bna_rxf_start(&rx->rxf);
1930}
1931
1932static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1933 enum bna_rx_event event)
1934{
1935 switch (event) {
1936 case RX_E_STOP:
1937 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1938 break;
1939 case RX_E_FAIL:
1940 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1941 rx_ib_fail(rx);
1942 bna_rxf_fail(&rx->rxf);
1943 break;
1944 case RX_E_RXF_STARTED:
1945 bfa_fsm_set_state(rx, bna_rx_sm_started);
1946 break;
1947 default:
1948 bfa_sm_fault(rx->bna, event);
1949 break;
1950 }
1951}
1952
1953void
1954bna_rx_sm_started_entry(struct bna_rx *rx)
1955{
1956 struct bna_rxp *rxp;
1957 struct list_head *qe_rxp;
1958
1959 /* Start IB */
1960 list_for_each(qe_rxp, &rx->rxp_q) {
1961 rxp = (struct bna_rxp *)qe_rxp;
1962 bna_ib_ack(&rxp->cq.ib->door_bell, 0);
1963 }
1964
1965 bna_llport_admin_up(&rx->bna->port.llport);
1966}
1967
1968void
1969bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1970{
1971 switch (event) {
1972 case RX_E_FAIL:
1973 bna_llport_admin_down(&rx->bna->port.llport);
1974 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1975 rx_ib_fail(rx);
1976 bna_rxf_fail(&rx->rxf);
1977 break;
1978 case RX_E_STOP:
1979 bna_llport_admin_down(&rx->bna->port.llport);
1980 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1981 break;
1982 default:
1983 bfa_sm_fault(rx->bna, event);
1984 break;
1985 }
1986}
1987
1988void
1989bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1990{
1991 bna_rxf_stop(&rx->rxf);
1992}
1993
1994void
1995bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1996{
1997 switch (event) {
1998 case RX_E_RXF_STOPPED:
1999 bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
2000 break;
2001 case RX_E_RXF_STARTED:
2002 /**
2003 * RxF was in the process of starting up when
2004 * RXF_E_STOP was issued. Ignore this event
2005 */
2006 break;
2007 case RX_E_FAIL:
2008 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2009 rx_ib_fail(rx);
2010 bna_rxf_fail(&rx->rxf);
2011 break;
2012 default:
2013 bfa_sm_fault(rx->bna, event);
2014 break;
2015 }
}
2018
2019void
2020bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
2021{
2022 struct bna_rxp *rxp = NULL;
2023 struct bna_rxq *q0 = NULL;
2024 struct bna_rxq *q1 = NULL;
2025 struct list_head *qe;
2026 u32 rxq_mask[2] = {0, 0};
2027
2028 /* Only one call to multi-rxq-stop for all RXPs in this RX */
2029 bfa_wc_up(&rx->rxq_stop_wc);
2030 list_for_each(qe, &rx->rxp_q) {
2031 rxp = (struct bna_rxp *)qe;
2032 GET_RXQS(rxp, q0, q1);
2033 if (q0->rxq_id < 32)
2034 rxq_mask[0] |= ((u32)1 << q0->rxq_id);
2035 else
2036 rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
2037 if (q1) {
2038 if (q1->rxq_id < 32)
2039 rxq_mask[0] |= ((u32)1 << q1->rxq_id);
			else
				rxq_mask[1] |=
					((u32)1 << (q1->rxq_id - 32));
2043 }
2044 }
2045
2046 __bna_multi_rxq_stop(rxp, rxq_mask);
2047}
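/*
 * Worked example for the mask built above (illustrative): with two
 * rx-paths whose large RXQs have rxq_id 5 and 40, the loop yields
 *   rxq_mask[0] = 1 << 5         = 0x00000020
 *   rxq_mask[1] = 1 << (40 - 32) = 0x00000100
 * i.e. one 64-bit stop mask split across two 32-bit words, so a single
 * BFI_LL_H2I_RXQ_STOP_REQ covers every RXQ of this RX.
 */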
2048
2049void
2050bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
2051{
2052 struct bna_rxp *rxp = NULL;
2053 struct list_head *qe;
2054
2055 switch (event) {
2056 case RX_E_RXQ_STOPPED:
2057 list_for_each(qe, &rx->rxp_q) {
2058 rxp = (struct bna_rxp *)qe;
2059 bna_ib_stop(rxp->cq.ib);
2060 }
2061 /* Fall through */
2062 case RX_E_FAIL:
2063 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2064 break;
2065 default:
2066 bfa_sm_fault(rx->bna, event);
2067 break;
2068 }
2069}
2070
2071void
__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
2073{
2074 struct bfi_ll_q_stop_req ll_req;
2075
2076 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
2077 ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
2078 ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
2079 bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
2080 bna_rx_cb_multi_rxq_stopped, rxp);
2081 bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
2082}
2083
2084void
2085__bna_rxq_start(struct bna_rxq *rxq)
2086{
2087 struct bna_rxtx_q_mem *q_mem;
2088 struct bna_rxq_mem rxq_cfg, *rxq_mem;
2089 struct bna_dma_addr cur_q_addr;
2091 struct bna_qpt *qpt;
2092 u32 pg_num;
2093 struct bna *bna = rxq->rx->bna;
2094 void __iomem *base_addr;
2095 unsigned long off;
2096
2097 qpt = &rxq->qpt;
2098 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2099
2100 rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2101 rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2102 rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2103 rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2104
2105 rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
2106 rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
2107 (qpt->page_size >> 2);
2108 rxq_cfg.sg_n_cq_n_cns_ptr =
2109 ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
2110 rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
2111 BNA_Q_IDLE_STATE;
2112 rxq_cfg.next_qid = 0x0 | (0x3 << 8);
2113
2114 /* Write the page number register */
2115 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2116 HQM_RXTX_Q_RAM_BASE_OFFSET);
2117 writel(pg_num, bna->regs.page_addr);
2118
2119 /* Write to h/w */
2120 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2121 HQM_RXTX_Q_RAM_BASE_OFFSET);
2122
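	/*
	 * Address arithmetic on a NULL base pointer: &q_mem[rxq_id].rxq
	 * evaluates to the byte offset of this RXQ's block within the
	 * RAM window (the offsetof() idiom), which is then added to
	 * base_addr for each writel() below.
	 */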
2123 q_mem = (struct bna_rxtx_q_mem *)0;
2124 rxq_mem = &q_mem[rxq->rxq_id].rxq;
2125
2126 off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
2127 writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
2128
2129 off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
2130 writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
2131
2132 off = (unsigned long)&rxq_mem->cur_q_entry_lo;
2133 writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
2134
2135 off = (unsigned long)&rxq_mem->cur_q_entry_hi;
2136 writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
2137
2138 off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
2139 writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2140
2141 off = (unsigned long)&rxq_mem->entry_n_pg_size;
2142 writel(rxq_cfg.entry_n_pg_size, base_addr + off);
2143
2144 off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
2145 writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
2146
2147 off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
2148 writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
2149
2150 off = (unsigned long)&rxq_mem->next_qid;
2151 writel(rxq_cfg.next_qid, base_addr + off);
2152
2153 rxq->rcb->producer_index = 0;
2154 rxq->rcb->consumer_index = 0;
2155}
2156
2157void
2158__bna_cq_start(struct bna_cq *cq)
2159{
2160 struct bna_cq_mem cq_cfg, *cq_mem;
2161 const struct bna_qpt *qpt;
2162 struct bna_dma_addr cur_q_addr;
2163 u32 pg_num;
2164 struct bna *bna = cq->rx->bna;
2165 void __iomem *base_addr;
2166 unsigned long off;
2167
2168 qpt = &cq->qpt;
2169 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2170
2171 /*
2172 * Fill out structure, to be subsequently written
2173 * to hardware
2174 */
2175 cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2176 cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2177 cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2178 cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2179
2180 cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
2181 cq_cfg.entry_n_pg_size =
2182 ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
2183 cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
2184 ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
2185 cq_cfg.q_state = BNA_Q_IDLE_STATE;
2186
2187 /* Write the page number register */
2188 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2189 HQM_CQ_RAM_BASE_OFFSET);
2190
2191 writel(pg_num, bna->regs.page_addr);
2192
2193 /* H/W write */
2194 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2195 HQM_CQ_RAM_BASE_OFFSET);
2196
2197 cq_mem = (struct bna_cq_mem *)0;
2198
2199 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
2200 writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
2201
2202 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
2203 writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
2204
2205 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
2206 writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
2207
2208 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
2209 writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
2210
2211 off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
2212 writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2213
2214 off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
2215 writel(cq_cfg.entry_n_pg_size, base_addr + off);
2216
2217 off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
2218 writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
2219
2220 off = (unsigned long)&cq_mem[cq->cq_id].q_state;
2221 writel(cq_cfg.q_state, base_addr + off);
2222
2223 cq->ccb->producer_index = 0;
2224 *(cq->ccb->hw_producer_index) = 0;
2225}
2226
2227void
2228bna_rit_create(struct bna_rx *rx)
2229{
2230 struct list_head *qe_rxp;
2231 struct bna *bna;
2232 struct bna_rxp *rxp;
2233 struct bna_rxq *q0 = NULL;
2234 struct bna_rxq *q1 = NULL;
2235 int offset;
2236
2237 bna = rx->bna;
2238
2239 offset = 0;
2240 list_for_each(qe_rxp, &rx->rxp_q) {
2241 rxp = (struct bna_rxp *)qe_rxp;
2242 GET_RXQS(rxp, q0, q1);
2243 rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
2244 rx->rxf.rit_segment->rit[offset].small_rxq_id =
2245 (q1 ? q1->rxq_id : 0);
2246 offset++;
2247 }
2248}
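/*
 * Illustrative RIT layout (a sketch): for an SLR rx with two paths, where
 * path 0 owns RXQs (large 4, small 5) and path 1 owns (6, 7), the loop
 * above produces
 *   rit[0] = { .large_rxq_id = 4, .small_rxq_id = 5 }
 *   rit[1] = { .large_rxq_id = 6, .small_rxq_id = 7 }
 * For BNA_RXP_SINGLE there is no q1, so small_rxq_id defaults to 0.
 */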
2249
2250int
2251_rx_can_satisfy(struct bna_rx_mod *rx_mod,
2252 struct bna_rx_config *rx_cfg)
2253{
2254 if ((rx_mod->rx_free_count == 0) ||
2255 (rx_mod->rxp_free_count == 0) ||
2256 (rx_mod->rxq_free_count == 0))
2257 return 0;
2258
2259 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
2260 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2261 (rx_mod->rxq_free_count < rx_cfg->num_paths))
2262 return 0;
2263 } else {
2264 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2265 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
2266 return 0;
2267 }
2268
2269 if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
2270 return 0;
2271
2272 return 1;
2273}
2274
2275struct bna_rxq *
2276_get_free_rxq(struct bna_rx_mod *rx_mod)
2277{
2278 struct bna_rxq *rxq = NULL;
2279 struct list_head *qe = NULL;
2280
2281 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
2282 if (qe) {
2283 rx_mod->rxq_free_count--;
2284 rxq = (struct bna_rxq *)qe;
2285 }
2286 return rxq;
2287}
2288
2289void
2290_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
2291{
2292 bfa_q_qe_init(&rxq->qe);
2293 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
2294 rx_mod->rxq_free_count++;
2295}
2296
2297struct bna_rxp *
2298_get_free_rxp(struct bna_rx_mod *rx_mod)
2299{
2300 struct list_head *qe = NULL;
2301 struct bna_rxp *rxp = NULL;
2302
2303 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
2304 if (qe) {
2305 rx_mod->rxp_free_count--;
2306
2307 rxp = (struct bna_rxp *)qe;
2308 }
2309
2310 return rxp;
2311}
2312
2313void
2314_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
2315{
2316 bfa_q_qe_init(&rxp->qe);
2317 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
2318 rx_mod->rxp_free_count++;
2319}
2320
2321struct bna_rx *
2322_get_free_rx(struct bna_rx_mod *rx_mod)
2323{
2324 struct list_head *qe = NULL;
2325 struct bna_rx *rx = NULL;
2326
2327 bfa_q_deq(&rx_mod->rx_free_q, &qe);
2328 if (qe) {
2329 rx_mod->rx_free_count--;
2330
2331 rx = (struct bna_rx *)qe;
2332 bfa_q_qe_init(qe);
2333 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2334 }
2335
2336 return rx;
2337}
2338
2339void
2340_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2341{
2342 bfa_q_qe_init(&rx->qe);
2343 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2344 rx_mod->rx_free_count++;
2345}
2346
2347void
2348_rx_init(struct bna_rx *rx, struct bna *bna)
2349{
2350 rx->bna = bna;
2351 rx->rx_flags = 0;
2352
2353 INIT_LIST_HEAD(&rx->rxp_q);
2354
2355 rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
2356 rx->rxq_stop_wc.wc_cbarg = rx;
2357 rx->rxq_stop_wc.wc_count = 0;
2358
2359 rx->stop_cbfn = NULL;
2360 rx->stop_cbarg = NULL;
2361}
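/*
 * Note: the waiting counter is armed so that once every outstanding
 * RXQ-stop acknowledgement has called bfa_wc_down(), wc_resume fires
 * bna_rx_cb_rxq_stopped_all(), which posts RX_E_RXQ_STOPPED to the FSM.
 */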
2362
2363void
2364_rxp_add_rxqs(struct bna_rxp *rxp,
2365 struct bna_rxq *q0,
2366 struct bna_rxq *q1)
2367{
2368 switch (rxp->type) {
2369 case BNA_RXP_SINGLE:
2370 rxp->rxq.single.only = q0;
2371 rxp->rxq.single.reserved = NULL;
2372 break;
2373 case BNA_RXP_SLR:
2374 rxp->rxq.slr.large = q0;
2375 rxp->rxq.slr.small = q1;
2376 break;
2377 case BNA_RXP_HDS:
2378 rxp->rxq.hds.data = q0;
2379 rxp->rxq.hds.hdr = q1;
2380 break;
2381 default:
2382 break;
2383 }
2384}
2385
2386void
2387_rxq_qpt_init(struct bna_rxq *rxq,
2388 struct bna_rxp *rxp,
2389 u32 page_count,
2390 u32 page_size,
2391 struct bna_mem_descr *qpt_mem,
2392 struct bna_mem_descr *swqpt_mem,
2393 struct bna_mem_descr *page_mem)
2394{
2395 int i;
2396
2397 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2398 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2399 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2400 rxq->qpt.page_count = page_count;
2401 rxq->qpt.page_size = page_size;
2402
2403 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2404
2405 for (i = 0; i < rxq->qpt.page_count; i++) {
2406 rxq->rcb->sw_qpt[i] = page_mem[i].kva;
2407 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2408 page_mem[i].dma.lsb;
2409 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2410 page_mem[i].dma.msb;
2411
2412 }
2413}
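/*
 * The loop above fills two parallel views of the queue page table: the
 * h/w QPT (kv_qpt_ptr) holds the DMA addresses the adapter walks, while
 * the s/w QPT (rcb->sw_qpt) holds the matching kernel virtual addresses
 * the driver uses. Conceptually, for page i:
 *
 *	hw_qpt[i] = page_mem[i].dma;	(lsb/msb pair, device-visible)
 *	sw_qpt[i] = page_mem[i].kva;	(driver-visible)
 */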
2414
2415void
2416_rxp_cqpt_setup(struct bna_rxp *rxp,
2417 u32 page_count,
2418 u32 page_size,
2419 struct bna_mem_descr *qpt_mem,
2420 struct bna_mem_descr *swqpt_mem,
2421 struct bna_mem_descr *page_mem)
2422{
2423 int i;
2424
2425 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2426 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2427 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2428 rxp->cq.qpt.page_count = page_count;
2429 rxp->cq.qpt.page_size = page_size;
2430
2431 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2432
2433 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2434 rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
2435
2436 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2437 page_mem[i].dma.lsb;
2438 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2439 page_mem[i].dma.msb;
2440
2441 }
2442}
2443
2444void
2445_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
2446{
2447 list_add_tail(&rxp->qe, &rx->rxp_q);
2448}
2449
2450void
2451_init_rxmod_queues(struct bna_rx_mod *rx_mod)
2452{
2453 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2454 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2455 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2456 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2457
2458 rx_mod->rx_free_count = 0;
2459 rx_mod->rxq_free_count = 0;
2460 rx_mod->rxp_free_count = 0;
2461}
2462
2463void
2464_rx_ctor(struct bna_rx *rx, int id)
2465{
2466 bfa_q_qe_init(&rx->qe);
2467 INIT_LIST_HEAD(&rx->rxp_q);
2468 rx->bna = NULL;
2469
2470 rx->rxf.rxf_id = id;
2471
2472 /* FIXME: mbox_qe ctor()?? */
2473 bfa_q_qe_init(&rx->mbox_qe.qe);
2474
2475 rx->stop_cbfn = NULL;
2476 rx->stop_cbarg = NULL;
2477}
2478
2479void
2480bna_rx_cb_multi_rxq_stopped(void *arg, int status)
2481{
2482 struct bna_rxp *rxp = (struct bna_rxp *)arg;
2483
2484 bfa_wc_down(&rxp->rx->rxq_stop_wc);
2485}
2486
2487void
2488bna_rx_cb_rxq_stopped_all(void *arg)
2489{
2490 struct bna_rx *rx = (struct bna_rx *)arg;
2491
2492 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
2493}
2494
2495void
2496bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2497 enum bna_cb_status status)
2498{
2499 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2500
2501 bfa_wc_down(&rx_mod->rx_stop_wc);
2502}
2503
2504void
2505bna_rx_mod_cb_rx_stopped_all(void *arg)
2506{
2507 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2508
2509 if (rx_mod->stop_cbfn)
2510 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2511 rx_mod->stop_cbfn = NULL;
2512}
2513
2514void
2515bna_rx_start(struct bna_rx *rx)
2516{
2517 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2518 if (rx->rx_flags & BNA_RX_F_ENABLE)
2519 bfa_fsm_send_event(rx, RX_E_START);
2520}
2521
2522void
2523bna_rx_stop(struct bna_rx *rx)
2524{
2525 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2526 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2527 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
2528 else {
2529 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2530 rx->stop_cbarg = &rx->bna->rx_mod;
2531 bfa_fsm_send_event(rx, RX_E_STOP);
2532 }
2533}
2534
2535void
2536bna_rx_fail(struct bna_rx *rx)
2537{
2538 /* Indicate port is not enabled, and failed */
2539 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2540 rx->rx_flags |= BNA_RX_F_PORT_FAILED;
2541 bfa_fsm_send_event(rx, RX_E_FAIL);
2542}
2543
2544void
2545bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
2546{
2547 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
2548 if (rx->rxf.rxf_id < 32)
2549 rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] |=
			((u32)1 << (rx->rxf.rxf_id - 32));
2553}
2554
2555void
2556bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
2557{
2558 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] &=
			~((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] &=
			~((u32)1 << (rx->rxf.rxf_id - 32));
2564}
2565
2566void
2567bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2568{
2569 struct bna_rx *rx;
2570 struct list_head *qe;
2571
2572 rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
2573 if (type == BNA_RX_T_LOOPBACK)
2574 rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
2575
2576 list_for_each(qe, &rx_mod->rx_active_q) {
2577 rx = (struct bna_rx *)qe;
2578 if (rx->type == type)
2579 bna_rx_start(rx);
2580 }
2581}
2582
2583void
2584bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2585{
2586 struct bna_rx *rx;
2587 struct list_head *qe;
2588
2589 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2590 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2591
2592 rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
2593
2594 /**
2595 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
2596 * as we are going to call bna_rx_stop
2597 */
2598 list_for_each(qe, &rx_mod->rx_active_q) {
2599 rx = (struct bna_rx *)qe;
2600 if (rx->type == type)
2601 bfa_wc_up(&rx_mod->rx_stop_wc);
2602 }
2603
2604 if (rx_mod->rx_stop_wc.wc_count == 0) {
2605 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2606 rx_mod->stop_cbfn = NULL;
2607 return;
2608 }
2609
2610 list_for_each(qe, &rx_mod->rx_active_q) {
2611 rx = (struct bna_rx *)qe;
2612 if (rx->type == type)
2613 bna_rx_stop(rx);
2614 }
2615}
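/*
 * The two passes above avoid a premature resume: if the counter were
 * bumped and each rx stopped in a single pass, an rx that completes
 * synchronously could drive wc_count back to zero (firing wc_resume)
 * before the remaining rx objects were even counted. A minimal sketch of
 * the same pattern, with generic names, only to illustrate:
 *
 *	for_each_obj(obj)		// pass 1: account for everyone
 *		bfa_wc_up(&wc);
 *	for_each_obj(obj)		// pass 2: now start the stops
 *		obj_stop(obj);		// each completion -> bfa_wc_down()
 */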
2616
2617void
2618bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2619{
2620 struct bna_rx *rx;
2621 struct list_head *qe;
2622
2623 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2624 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2625
2626 list_for_each(qe, &rx_mod->rx_active_q) {
2627 rx = (struct bna_rx *)qe;
2628 bna_rx_fail(rx);
2629 }
2630}
2631
void
bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		struct bna_res_info *res_info)
2634{
2635 int index;
2636 struct bna_rx *rx_ptr;
2637 struct bna_rxp *rxp_ptr;
2638 struct bna_rxq *rxq_ptr;
2639
2640 rx_mod->bna = bna;
2641 rx_mod->flags = 0;
2642
2643 rx_mod->rx = (struct bna_rx *)
2644 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2645 rx_mod->rxp = (struct bna_rxp *)
2646 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2647 rx_mod->rxq = (struct bna_rxq *)
2648 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2649
2650 /* Initialize the queues */
2651 _init_rxmod_queues(rx_mod);
2652
2653 /* Build RX queues */
2654 for (index = 0; index < BFI_MAX_RXQ; index++) {
2655 rx_ptr = &rx_mod->rx[index];
2656 _rx_ctor(rx_ptr, index);
2657 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2658 rx_mod->rx_free_count++;
2659 }
2660
	/* Build RX-path queue */
2662 for (index = 0; index < BFI_MAX_RXQ; index++) {
2663 rxp_ptr = &rx_mod->rxp[index];
2664 rxp_ptr->cq.cq_id = index;
2665 bfa_q_qe_init(&rxp_ptr->qe);
2666 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2667 rx_mod->rxp_free_count++;
2668 }
2669
	/* Build RXQ queue */
2671 for (index = 0; index < BFI_MAX_RXQ; index++) {
2672 rxq_ptr = &rx_mod->rxq[index];
2673 rxq_ptr->rxq_id = index;
2674
2675 bfa_q_qe_init(&rxq_ptr->qe);
2676 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2677 rx_mod->rxq_free_count++;
2678 }
2679
2680 rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
2681 rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
2682 rx_mod->rx_stop_wc.wc_count = 0;
2683}
2684
2685void
2686bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2687{
2688 struct list_head *qe;
2689 int i;
2690
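	/*
	 * These loops only count the free lists; the totals look like
	 * leftovers from (or placeholders for) leak-check assertions
	 * and are otherwise unused.
	 */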
2691 i = 0;
2692 list_for_each(qe, &rx_mod->rx_free_q)
2693 i++;
2694
2695 i = 0;
2696 list_for_each(qe, &rx_mod->rxp_free_q)
2697 i++;
2698
2699 i = 0;
2700 list_for_each(qe, &rx_mod->rxq_free_q)
2701 i++;
2702
2703 rx_mod->bna = NULL;
2704}
2705
2706int
2707bna_rx_state_get(struct bna_rx *rx)
2708{
2709 return bfa_sm_to_state(rx_sm_table, rx->fsm);
2710}
2711
2712void
2713bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2714{
2715 u32 cq_size, hq_size, dq_size;
2716 u32 cpage_count, hpage_count, dpage_count;
2717 struct bna_mem_info *mem_info;
2718 u32 cq_depth;
2719 u32 hq_depth;
2720 u32 dq_depth;
2721
2722 dq_depth = q_cfg->q_depth;
2723 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2724 cq_depth = dq_depth + hq_depth;
2725
2726 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2727 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2728 cq_size = ALIGN(cq_size, PAGE_SIZE);
2729 cpage_count = SIZE_TO_PAGES(cq_size);
2730
2731 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2732 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2733 dq_size = ALIGN(dq_size, PAGE_SIZE);
2734 dpage_count = SIZE_TO_PAGES(dq_size);
2735
2736 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2737 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2738 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2739 hq_size = ALIGN(hq_size, PAGE_SIZE);
2740 hpage_count = SIZE_TO_PAGES(hq_size);
2741 } else {
2742 hpage_count = 0;
2743 }
2744
2745 /* CCB structures */
2746 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2747 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2748 mem_info->mem_type = BNA_MEM_T_KVA;
2749 mem_info->len = sizeof(struct bna_ccb);
2750 mem_info->num = q_cfg->num_paths;
2751
2752 /* RCB structures */
2753 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2754 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2755 mem_info->mem_type = BNA_MEM_T_KVA;
2756 mem_info->len = sizeof(struct bna_rcb);
2757 mem_info->num = BNA_GET_RXQS(q_cfg);
2758
2759 /* Completion QPT */
2760 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2761 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2762 mem_info->mem_type = BNA_MEM_T_DMA;
2763 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2764 mem_info->num = q_cfg->num_paths;
2765
2766 /* Completion s/w QPT */
2767 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2768 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2769 mem_info->mem_type = BNA_MEM_T_KVA;
2770 mem_info->len = cpage_count * sizeof(void *);
2771 mem_info->num = q_cfg->num_paths;
2772
2773 /* Completion QPT pages */
2774 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2775 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2776 mem_info->mem_type = BNA_MEM_T_DMA;
2777 mem_info->len = PAGE_SIZE;
2778 mem_info->num = cpage_count * q_cfg->num_paths;
2779
2780 /* Data QPTs */
2781 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2782 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2783 mem_info->mem_type = BNA_MEM_T_DMA;
2784 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2785 mem_info->num = q_cfg->num_paths;
2786
2787 /* Data s/w QPTs */
2788 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2789 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2790 mem_info->mem_type = BNA_MEM_T_KVA;
2791 mem_info->len = dpage_count * sizeof(void *);
2792 mem_info->num = q_cfg->num_paths;
2793
2794 /* Data QPT pages */
2795 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2796 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2797 mem_info->mem_type = BNA_MEM_T_DMA;
2798 mem_info->len = PAGE_SIZE;
2799 mem_info->num = dpage_count * q_cfg->num_paths;
2800
2801 /* Hdr QPTs */
2802 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2803 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2804 mem_info->mem_type = BNA_MEM_T_DMA;
2805 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2806 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2807
2808 /* Hdr s/w QPTs */
2809 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2810 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2811 mem_info->mem_type = BNA_MEM_T_KVA;
2812 mem_info->len = hpage_count * sizeof(void *);
2813 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2814
2815 /* Hdr QPT pages */
2816 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2817 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2818 mem_info->mem_type = BNA_MEM_T_DMA;
2819 mem_info->len = (hpage_count ? PAGE_SIZE : 0);
2820 mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
2821
2822 /* RX Interrupts */
2823 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2824 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2825 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2826}
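/*
 * Worked sizing example (illustrative; the WI sizes are assumptions):
 * for an SLR config with q_depth = 512, num_paths = 2, PAGE_SIZE = 4096
 * and, say, BFI_CQ_WI_SIZE = 16 and BFI_RXQ_WI_SIZE = 8:
 *   cq_depth = 512 + 512 = 1024 -> cq_size = 16384 -> cpage_count = 4
 *   dq_depth = 512              -> dq_size = 4096  -> dpage_count = 1
 *   hq_depth = 512              -> hq_size = 4096  -> hpage_count = 1
 * so e.g. BNA_RX_RES_MEM_T_CQPT_PAGE asks for 4 * 2 = 8 DMA pages.
 */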
2827
2828struct bna_rx *
2829bna_rx_create(struct bna *bna, struct bnad *bnad,
2830 struct bna_rx_config *rx_cfg,
2831 struct bna_rx_event_cbfn *rx_cbfn,
2832 struct bna_res_info *res_info,
2833 void *priv)
2834{
2835 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2836 struct bna_rx *rx;
2837 struct bna_rxp *rxp;
2838 struct bna_rxq *q0;
2839 struct bna_rxq *q1;
2840 struct bna_intr_info *intr_info;
2841 u32 page_count;
2842 struct bna_mem_descr *ccb_mem;
2843 struct bna_mem_descr *rcb_mem;
2844 struct bna_mem_descr *unmapq_mem;
2845 struct bna_mem_descr *cqpt_mem;
2846 struct bna_mem_descr *cswqpt_mem;
2847 struct bna_mem_descr *cpage_mem;
2848 struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
2849 struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
2850 struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
2851 struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
2852 struct bna_mem_descr *hpage_mem; /* hdr page mem */
2853 struct bna_mem_descr *dpage_mem; /* data page mem */
2854 int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
2855 int dpage_count, hpage_count, rcb_idx;
2856 struct bna_ib_config ibcfg;
2857 /* Fail if we don't have enough RXPs, RXQs */
2858 if (!_rx_can_satisfy(rx_mod, rx_cfg))
2859 return NULL;
2860
2861 /* Initialize resource pointers */
2862 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2863 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2864 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2865 unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2866 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2867 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2868 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2869 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2870 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2871 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2872 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2873 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2874 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2875
2876 /* Compute q depth & page count */
2877 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
2878 rx_cfg->num_paths;
2879
2880 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
2881 rx_cfg->num_paths;
2882
2883 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
2884 rx_cfg->num_paths;
2885 /* Get RX pointer */
2886 rx = _get_free_rx(rx_mod);
2887 _rx_init(rx, bna);
2888 rx->priv = priv;
2889 rx->type = rx_cfg->rx_type;
2890
2891 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2892 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2893 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2894 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2895 /* Following callbacks are mandatory */
2896 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2897 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2898
2899 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
2900 switch (rx->type) {
2901 case BNA_RX_T_REGULAR:
2902 if (!(rx->bna->rx_mod.flags &
2903 BNA_RX_MOD_F_PORT_LOOPBACK))
2904 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2905 break;
2906 case BNA_RX_T_LOOPBACK:
2907 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
2908 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2909 break;
2910 }
2911 }
2912
2913 for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
2914 rxp = _get_free_rxp(rx_mod);
2915 rxp->type = rx_cfg->rxp_type;
2916 rxp->rx = rx;
2917 rxp->cq.rx = rx;
2918
2919 /* Get required RXQs, and queue them to rx-path */
2920 q0 = _get_free_rxq(rx_mod);
2921 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2922 q1 = NULL;
2923 else
2924 q1 = _get_free_rxq(rx_mod);
2925
2926 /* Initialize IB */
2927 if (1 == intr_info->num) {
2928 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2929 intr_info->intr_type,
2930 intr_info->idl[0].vector);
2931 rxp->vector = intr_info->idl[0].vector;
2932 } else {
2933 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2934 intr_info->intr_type,
2935 intr_info->idl[i].vector);
2936
2937 /* Map the MSI-x vector used for this RXP */
2938 rxp->vector = intr_info->idl[i].vector;
2939 }
2940
2941 rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
2942
2943 ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2944 ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
2945 ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2946 ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
2947
2948 ret = bna_ib_config(rxp->cq.ib, &ibcfg);
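		/* FIXME: ret is ignored; an IB config failure is not
		 * handled here */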
2949
2950 /* Link rxqs to rxp */
2951 _rxp_add_rxqs(rxp, q0, q1);
2952
2953 /* Link rxp to rx */
2954 _rx_add_rxp(rx, rxp);
2955
2956 q0->rx = rx;
2957 q0->rxp = rxp;
2958
2959 /* Initialize RCB for the large / data q */
2960 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2961 RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
2962 (void *)unmapq_mem[rcb_idx].kva);
2963 rcb_idx++;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2966
2967 /* Initialize RXQs */
2968 _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
2969 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
2970 q0->rcb->page_idx = dpage_idx;
2971 q0->rcb->page_count = dpage_count;
2972 dpage_idx += dpage_count;
2973
2974 /* Call bnad to complete rcb setup */
2975 if (rx->rcb_setup_cbfn)
2976 rx->rcb_setup_cbfn(bnad, q0->rcb);
2977
2978 if (q1) {
2979 q1->rx = rx;
2980 q1->rxp = rxp;
2981
2982 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2983 RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
2984 (void *)unmapq_mem[rcb_idx].kva);
2985 rcb_idx++;
			q1->buffer_size = rx_cfg->small_buff_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error =
				q1->rxbuf_alloc_failed = 0;
2990
2991 _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
2992 &hqpt_mem[i], &hsqpt_mem[i],
2993 &hpage_mem[hpage_idx]);
2994 q1->rcb->page_idx = hpage_idx;
2995 q1->rcb->page_count = hpage_count;
2996 hpage_idx += hpage_count;
2997
2998 /* Call bnad to complete rcb setup */
2999 if (rx->rcb_setup_cbfn)
3000 rx->rcb_setup_cbfn(bnad, q1->rcb);
3001 }
3002 /* Setup RXP::CQ */
3003 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
3004 _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
3005 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
3006 rxp->cq.ccb->page_idx = cpage_idx;
3007 rxp->cq.ccb->page_count = page_count;
3008 cpage_idx += page_count;
3009
3010 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
3011 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
3012
3013 rxp->cq.ccb->producer_index = 0;
3014 rxp->cq.ccb->q_depth = rx_cfg->q_depth +
3015 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
3016 0 : rx_cfg->q_depth);
3017 rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
3018 rxp->cq.ccb->rcb[0] = q0->rcb;
3019 if (q1)
3020 rxp->cq.ccb->rcb[1] = q1->rcb;
3021 rxp->cq.ccb->cq = &rxp->cq;
3022 rxp->cq.ccb->bnad = bna->bnad;
3023 rxp->cq.ccb->hw_producer_index =
3024 ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
3025 (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
3026 *(rxp->cq.ccb->hw_producer_index) = 0;
3027 rxp->cq.ccb->intr_type = intr_info->intr_type;
3028 rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
3029 intr_info->idl[0].vector :
3030 intr_info->idl[i].vector;
3031 rxp->cq.ccb->rx_coalescing_timeo =
3032 rxp->cq.ib->ib_config.coalescing_timeo;
3033 rxp->cq.ccb->id = i;
3034
3035 /* Call bnad to complete CCB setup */
3036 if (rx->ccb_setup_cbfn)
3037 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
3038
3039 } /* for each rx-path */
3040
3041 bna_rxf_init(&rx->rxf, rx, rx_cfg);
3042
3043 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
3044
3045 return rx;
3046}
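/*
 * Illustrative call sequence (a sketch, not driver code): how a caller
 * would drive the functions above. bnad_alloc_res(), rx_cfg and rx_cbfn
 * are hypothetical stand-ins for the bnad glue, and BNA_RX_RES_T_MAX is
 * assumed as the resource-enum bound.
 */
#if 0
	struct bna_res_info res_info[BNA_RX_RES_T_MAX];
	struct bna_rx *rx;

	bna_rx_res_req(&rx_cfg, res_info);	/* 1. size the resources */
	if (bnad_alloc_res(bnad, res_info))	/* 2. allocate them (hypothetical) */
		return -ENOMEM;
	rx = bna_rx_create(bna, bnad, &rx_cfg, &rx_cbfn, res_info, priv);
	if (rx == NULL)				/* 3. build the rx object */
		return -ENOMEM;
	bna_rx_enable(rx);			/* 4. kick the FSM via RX_E_START */
#endif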
3047
3048void
3049bna_rx_destroy(struct bna_rx *rx)
3050{
3051 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
3052 struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
3053 struct bna_rxq *q0 = NULL;
3054 struct bna_rxq *q1 = NULL;
3055 struct bna_rxp *rxp;
3056 struct list_head *qe;
3057
3058 bna_rxf_uninit(&rx->rxf);
3059
3060 while (!list_empty(&rx->rxp_q)) {
3061 bfa_q_deq(&rx->rxp_q, &rxp);
3062 GET_RXQS(rxp, q0, q1);
3063 /* Callback to bnad for destroying RCB */
3064 if (rx->rcb_destroy_cbfn)
3065 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
3066 q0->rcb = NULL;
3067 q0->rxp = NULL;
3068 q0->rx = NULL;
3069 _put_free_rxq(rx_mod, q0);
3070 if (q1) {
3071 /* Callback to bnad for destroying RCB */
3072 if (rx->rcb_destroy_cbfn)
3073 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
3074 q1->rcb = NULL;
3075 q1->rxp = NULL;
3076 q1->rx = NULL;
3077 _put_free_rxq(rx_mod, q1);
3078 }
3079 rxp->rxq.slr.large = NULL;
3080 rxp->rxq.slr.small = NULL;
3081 if (rxp->cq.ib) {
3082 if (rxp->cq.ib_seg_offset != 0xff)
3083 bna_ib_release_idx(rxp->cq.ib,
3084 rxp->cq.ib_seg_offset);
3085 bna_ib_put(ib_mod, rxp->cq.ib);
3086 rxp->cq.ib = NULL;
3087 }
3088 /* Callback to bnad for destroying CCB */
3089 if (rx->ccb_destroy_cbfn)
3090 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
3091 rxp->cq.ccb = NULL;
3092 rxp->rx = NULL;
3093 _put_free_rxp(rx_mod, rxp);
3094 }
3095
3096 list_for_each(qe, &rx_mod->rx_active_q) {
3097 if (qe == &rx->qe) {
3098 list_del(&rx->qe);
3099 bfa_q_qe_init(&rx->qe);
3100 break;
3101 }
3102 }
3103
3104 rx->bna = NULL;
3105 rx->priv = NULL;
3106 _put_free_rx(rx_mod, rx);
3107}
3108
3109void
3110bna_rx_enable(struct bna_rx *rx)
3111{
3112 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
3113 return;
3114
3115 rx->rx_flags |= BNA_RX_F_ENABLE;
3116 if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
3117 bfa_fsm_send_event(rx, RX_E_START);
3118}
3119
3120void
3121bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
3122 void (*cbfn)(void *, struct bna_rx *,
3123 enum bna_cb_status))
3124{
3125 if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as if we're stopped */
3127 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
3128 } else {
3129 rx->stop_cbfn = cbfn;
3130 rx->stop_cbarg = rx->bna->bnad;
3131
3132 rx->rx_flags &= ~BNA_RX_F_ENABLE;
3133
3134 bfa_fsm_send_event(rx, RX_E_STOP);
3135 }
3136}
3137
3138/**
3139 * TX
3140 */
3141#define call_tx_stop_cbfn(tx, status)\
3142do {\
3143 if ((tx)->stop_cbfn)\
3144 (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
3145 (tx)->stop_cbfn = NULL;\
3146 (tx)->stop_cbarg = NULL;\
3147} while (0)
3148
3149#define call_tx_prio_change_cbfn(tx, status)\
3150do {\
3151 if ((tx)->prio_change_cbfn)\
3152 (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
3153 (tx)->prio_change_cbfn = NULL;\
3154} while (0)
3155
3156static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
3157 enum bna_cb_status status);
3158static void bna_tx_cb_txq_stopped(void *arg, int status);
3159static void bna_tx_cb_stats_cleared(void *arg, int status);
3160static void __bna_tx_stop(struct bna_tx *tx);
3161static void __bna_tx_start(struct bna_tx *tx);
3162static void __bna_txf_stat_clr(struct bna_tx *tx);
3163
3164enum bna_tx_event {
3165 TX_E_START = 1,
3166 TX_E_STOP = 2,
3167 TX_E_FAIL = 3,
3168 TX_E_TXQ_STOPPED = 4,
3169 TX_E_PRIO_CHANGE = 5,
3170 TX_E_STAT_CLEARED = 6,
3171};
3172
3173enum bna_tx_state {
3174 BNA_TX_STOPPED = 1,
3175 BNA_TX_STARTED = 2,
3176 BNA_TX_TXQ_STOP_WAIT = 3,
3177 BNA_TX_PRIO_STOP_WAIT = 4,
3178 BNA_TX_STAT_CLR_WAIT = 5,
3179};
3180
3181bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
3182 enum bna_tx_event);
3183bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
3184 enum bna_tx_event);
3185bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
3186 enum bna_tx_event);
3187bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3188 enum bna_tx_event);
3189bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
3190 enum bna_tx_event);
3191
3192static struct bfa_sm_table tx_sm_table[] = {
3193 {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
3194 {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
3195 {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
3196 {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
3197 {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
3198};
3199
3200static void
3201bna_tx_sm_stopped_entry(struct bna_tx *tx)
3202{
3203 struct bna_txq *txq;
3204 struct list_head *qe;
3205
3206 list_for_each(qe, &tx->txq_q) {
3207 txq = (struct bna_txq *)qe;
3208 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3209 }
3210
3211 call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
3212}
3213
3214static void
3215bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3216{
3217 switch (event) {
3218 case TX_E_START:
3219 bfa_fsm_set_state(tx, bna_tx_sm_started);
3220 break;
3221
3222 case TX_E_STOP:
3223 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3224 break;
3225
3226 case TX_E_FAIL:
3227 /* No-op */
3228 break;
3229
3230 case TX_E_PRIO_CHANGE:
3231 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3232 break;
3233
3234 case TX_E_TXQ_STOPPED:
3235 /**
3236 * This event is received due to flushing of mbox when
3237 * device fails
3238 */
3239 /* No-op */
3240 break;
3241
3242 default:
3243 bfa_sm_fault(tx->bna, event);
3244 }
3245}
3246
3247static void
3248bna_tx_sm_started_entry(struct bna_tx *tx)
3249{
3250 struct bna_txq *txq;
3251 struct list_head *qe;
3252
3253 __bna_tx_start(tx);
3254
3255 /* Start IB */
3256 list_for_each(qe, &tx->txq_q) {
3257 txq = (struct bna_txq *)qe;
3258 bna_ib_ack(&txq->ib->door_bell, 0);
3259 }
3260}
3261
3262static void
3263bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3264{
3265 struct bna_txq *txq;
3266 struct list_head *qe;
3267
3268 switch (event) {
3269 case TX_E_STOP:
3270 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3271 __bna_tx_stop(tx);
3272 break;
3273
3274 case TX_E_FAIL:
3275 list_for_each(qe, &tx->txq_q) {
3276 txq = (struct bna_txq *)qe;
3277 bna_ib_fail(txq->ib);
3278 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3279 }
3280 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3281 break;
3282
3283 case TX_E_PRIO_CHANGE:
3284 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3285 break;
3286
3287 default:
3288 bfa_sm_fault(tx->bna, event);
3289 }
3290}
3291
3292static void
3293bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
3294{
3295}
3296
3297static void
3298bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3299{
3300 struct bna_txq *txq;
3301 struct list_head *qe;
3302
3303 switch (event) {
3304 case TX_E_FAIL:
3305 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3306 break;
3307
3308 case TX_E_TXQ_STOPPED:
3309 list_for_each(qe, &tx->txq_q) {
3310 txq = (struct bna_txq *)qe;
3311 bna_ib_stop(txq->ib);
3312 }
3313 bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
3314 break;
3315
3316 case TX_E_PRIO_CHANGE:
3317 /* No-op */
3318 break;
3319
3320 default:
3321 bfa_sm_fault(tx->bna, event);
3322 }
3323}
3324
3325static void
3326bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3327{
3328 __bna_tx_stop(tx);
3329}
3330
3331static void
3332bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3333{
3334 struct bna_txq *txq;
3335 struct list_head *qe;
3336
3337 switch (event) {
3338 case TX_E_STOP:
3339 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3340 break;
3341
3342 case TX_E_FAIL:
3343 call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
3344 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3345 break;
3346
3347 case TX_E_TXQ_STOPPED:
3348 list_for_each(qe, &tx->txq_q) {
3349 txq = (struct bna_txq *)qe;
3350 bna_ib_stop(txq->ib);
3351 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3352 }
3353 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3354 bfa_fsm_set_state(tx, bna_tx_sm_started);
3355 break;
3356
3357 case TX_E_PRIO_CHANGE:
3358 /* No-op */
3359 break;
3360
3361 default:
3362 bfa_sm_fault(tx->bna, event);
3363 }
3364}
3365
3366static void
3367bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
3368{
3369 __bna_txf_stat_clr(tx);
3370}
3371
3372static void
3373bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
3374{
3375 switch (event) {
3376 case TX_E_FAIL:
3377 case TX_E_STAT_CLEARED:
3378 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3379 break;
3380
3381 default:
3382 bfa_sm_fault(tx->bna, event);
3383 }
3384}
3385
3386static void
3387__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
3388{
3389 struct bna_rxtx_q_mem *q_mem;
3390 struct bna_txq_mem txq_cfg;
3391 struct bna_txq_mem *txq_mem;
3392 struct bna_dma_addr cur_q_addr;
3393 u32 pg_num;
3394 void __iomem *base_addr;
3395 unsigned long off;
3396
3397 /* Fill out structure, to be subsequently written to hardware */
3398 txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
3399 txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
3400 cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
3401 txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
3402 txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
3403
3404 txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
3405
3406 txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
3407 (txq->qpt.page_size >> 2);
3408 txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
3409 ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
3410
3411 txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
3412 txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
3413 (txq->priority & 0x3));
3414 txq_cfg.wvc_n_cquota_n_rquota =
3415 ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
3416 (BFI_TX_MAX_WRR_QUOTA & 0xfff));
3417
3418 /* Setup the page and write to H/W */
3419
3420 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
3421 HQM_RXTX_Q_RAM_BASE_OFFSET);
3422 writel(pg_num, tx->bna->regs.page_addr);
3423
3424 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3425 HQM_RXTX_Q_RAM_BASE_OFFSET);
3426 q_mem = (struct bna_rxtx_q_mem *)0;
3427 txq_mem = &q_mem[txq->txq_id].txq;
3428
	/*
	 * The following four writes are a workaround: the H/W needs to
	 * read these DMA addresses as little endian
	 */
3433
3434 off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
3435 writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
3436
3437 off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
3438 writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
3439
3440 off = (unsigned long)&txq_mem->cur_q_entry_lo;
3441 writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
3442
3443 off = (unsigned long)&txq_mem->cur_q_entry_hi;
3444 writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
3445
3446 off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
3447 writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
3448
3449 off = (unsigned long)&txq_mem->entry_n_pg_size;
3450 writel(txq_cfg.entry_n_pg_size, base_addr + off);
3451
3452 off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
3453 writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
3454
3455 off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
3456 writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
3457
3458 off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
3459 writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
3460
3461 off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
3462 writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
3463
3464 txq->tcb->producer_index = 0;
3465 txq->tcb->consumer_index = 0;
3466 *(txq->tcb->hw_consumer_index) = 0;
}
3469
3470static void
3471__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
3472{
3473 struct bfi_ll_q_stop_req ll_req;
	u32 bit_mask[2] = {0, 0};

	if (txq->txq_id < 32)
3476 bit_mask[0] = (u32)1 << txq->txq_id;
3477 else
3478 bit_mask[1] = (u32)1 << (txq->txq_id - 32);
3479
3480 memset(&ll_req, 0, sizeof(ll_req));
3481 ll_req.mh.msg_class = BFI_MC_LL;
3482 ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
3483 ll_req.mh.mtag.h2i.lpu_id = 0;
3484 ll_req.q_id_mask[0] = htonl(bit_mask[0]);
3485 ll_req.q_id_mask[1] = htonl(bit_mask[1]);
3486
3487 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3488 bna_tx_cb_txq_stopped, tx);
3489
3490 bna_mbox_send(tx->bna, &tx->mbox_qe);
3491}
3492
3493static void
3494__bna_txf_start(struct bna_tx *tx)
3495{
3496 struct bna_tx_fndb_ram *tx_fndb;
3497 struct bna_txf *txf = &tx->txf;
3498 void __iomem *base_addr;
3499 unsigned long off;
3500
3501 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3502 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
3503 tx->bna->regs.page_addr);
3504
3505 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3506 TX_FNDB_RAM_BASE_OFFSET);
3507
3508 tx_fndb = (struct bna_tx_fndb_ram *)0;
3509 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3510
3511 writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
3512 base_addr + off);
3513
3514 if (tx->txf.txf_id < 32)
3515 tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
	else
		tx->bna->tx_mod.txf_bmap[1] |=
			((u32)1 << (tx->txf.txf_id - 32));
3519}
3520
3521static void
3522__bna_txf_stop(struct bna_tx *tx)
3523{
3524 struct bna_tx_fndb_ram *tx_fndb;
3525 u32 page_num;
3526 u32 ctl_flags;
3527 struct bna_txf *txf = &tx->txf;
3528 void __iomem *base_addr;
3529 unsigned long off;
3530
3531 /* retrieve the running txf_flags & turn off enable bit */
3532 page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3533 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
3534 writel(page_num, tx->bna->regs.page_addr);
3535
3536 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3537 TX_FNDB_RAM_BASE_OFFSET);
3538 tx_fndb = (struct bna_tx_fndb_ram *)0;
3539 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3540
3541 ctl_flags = readl(base_addr + off);
3542 ctl_flags &= ~BFI_TXF_CF_ENABLE;
3543
3544 writel(ctl_flags, base_addr + off);
3545
	if (tx->txf.txf_id < 32)
		tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
	else
		tx->bna->tx_mod.txf_bmap[1] &=
			~((u32)1 << (tx->txf.txf_id - 32));
3551}
3552
3553static void
3554__bna_txf_stat_clr(struct bna_tx *tx)
3555{
3556 struct bfi_ll_stats_req ll_req;
	u32 txf_bmap[2] = {0, 0};

	if (tx->txf.txf_id < 32)
		txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
	else
		txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
3563 ll_req.stats_mask = 0;
3564 ll_req.rxf_id_mask[0] = 0;
3565 ll_req.rxf_id_mask[1] = 0;
3566 ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
3567 ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
3568
3569 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3570 bna_tx_cb_stats_cleared, tx);
3571 bna_mbox_send(tx->bna, &tx->mbox_qe);
3572}
3573
3574static void
3575__bna_tx_start(struct bna_tx *tx)
3576{
3577 struct bna_txq *txq;
3578 struct list_head *qe;
3579
3580 list_for_each(qe, &tx->txq_q) {
3581 txq = (struct bna_txq *)qe;
3582 bna_ib_start(txq->ib);
3583 __bna_txq_start(tx, txq);
3584 }
3585
3586 __bna_txf_start(tx);
3587
3588 list_for_each(qe, &tx->txq_q) {
3589 txq = (struct bna_txq *)qe;
3590 txq->tcb->priority = txq->priority;
3591 (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
3592 }
3593}
3594
3595static void
3596__bna_tx_stop(struct bna_tx *tx)
3597{
3598 struct bna_txq *txq;
3599 struct list_head *qe;
3600
3601 list_for_each(qe, &tx->txq_q) {
3602 txq = (struct bna_txq *)qe;
3603 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3604 }
3605
3606 __bna_txf_stop(tx);
3607
3608 list_for_each(qe, &tx->txq_q) {
3609 txq = (struct bna_txq *)qe;
3610 bfa_wc_up(&tx->txq_stop_wc);
3611 }
3612
3613 list_for_each(qe, &tx->txq_q) {
3614 txq = (struct bna_txq *)qe;
3615 __bna_txq_stop(tx, txq);
3616 }
3617}
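/*
 * Stop ordering above: first stall the bnad-side queues, then disable
 * the TxF (no new work reaches the TXQs), then arm the waiting counter
 * once per TXQ before any stop request goes out -- the same two-pass
 * pattern as bna_rx_mod_stop() -- and finally issue the per-TXQ stop
 * mailbox commands, each of which calls bfa_wc_down() on completion.
 */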
3618
3619static void
3620bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3621 struct bna_mem_descr *qpt_mem,
3622 struct bna_mem_descr *swqpt_mem,
3623 struct bna_mem_descr *page_mem)
3624{
3625 int i;
3626
3627 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3628 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3629 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3630 txq->qpt.page_count = page_count;
3631 txq->qpt.page_size = page_size;
3632
3633 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3634
3635 for (i = 0; i < page_count; i++) {
3636 txq->tcb->sw_qpt[i] = page_mem[i].kva;
3637
3638 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3639 page_mem[i].dma.lsb;
3640 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3641 page_mem[i].dma.msb;
3642
3643 }
3644}
3645
3646static void
3647bna_tx_free(struct bna_tx *tx)
3648{
3649 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3650 struct bna_txq *txq;
3651 struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
3652 struct list_head *qe;
3653
3654 while (!list_empty(&tx->txq_q)) {
3655 bfa_q_deq(&tx->txq_q, &txq);
3656 bfa_q_qe_init(&txq->qe);
3657 if (txq->ib) {
3658 if (txq->ib_seg_offset != -1)
3659 bna_ib_release_idx(txq->ib,
3660 txq->ib_seg_offset);
3661 bna_ib_put(ib_mod, txq->ib);
3662 txq->ib = NULL;
3663 }
3664 txq->tcb = NULL;
3665 txq->tx = NULL;
3666 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3667 }
3668
3669 list_for_each(qe, &tx_mod->tx_active_q) {
3670 if (qe == &tx->qe) {
3671 list_del(&tx->qe);
3672 bfa_q_qe_init(&tx->qe);
3673 break;
3674 }
3675 }
3676
3677 tx->bna = NULL;
3678 tx->priv = NULL;
3679 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3680}
3681
3682static void
3683bna_tx_cb_txq_stopped(void *arg, int status)
3684{
3685 struct bna_tx *tx = (struct bna_tx *)arg;
3686
3687 bfa_q_qe_init(&tx->mbox_qe.qe);
3688 bfa_wc_down(&tx->txq_stop_wc);
3689}
3690
3691static void
3692bna_tx_cb_txq_stopped_all(void *arg)
3693{
3694 struct bna_tx *tx = (struct bna_tx *)arg;
3695
3696 bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
3697}
3698
3699static void
3700bna_tx_cb_stats_cleared(void *arg, int status)
3701{
3702 struct bna_tx *tx = (struct bna_tx *)arg;
3703
3704 bfa_q_qe_init(&tx->mbox_qe.qe);
3705
3706 bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
3707}
3708
3709static void
3710bna_tx_start(struct bna_tx *tx)
3711{
3712 tx->flags |= BNA_TX_F_PORT_STARTED;
3713 if (tx->flags & BNA_TX_F_ENABLED)
3714 bfa_fsm_send_event(tx, TX_E_START);
3715}
3716
3717static void
3718bna_tx_stop(struct bna_tx *tx)
3719{
3720 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3721 tx->stop_cbarg = &tx->bna->tx_mod;
3722
3723 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3724 bfa_fsm_send_event(tx, TX_E_STOP);
3725}
3726
3727static void
3728bna_tx_fail(struct bna_tx *tx)
3729{
3730 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3731 bfa_fsm_send_event(tx, TX_E_FAIL);
3732}
3733
3734void
3735bna_tx_prio_changed(struct bna_tx *tx, int prio)
3736{
3737 struct bna_txq *txq;
3738 struct list_head *qe;
3739
3740 list_for_each(qe, &tx->txq_q) {
3741 txq = (struct bna_txq *)qe;
3742 txq->priority = prio;
3743 }
3744
3745 bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
3746}
3747
3748static void
3749bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
3750{
3751 if (cee_link)
3752 tx->flags |= BNA_TX_F_PRIO_LOCK;
3753 else
3754 tx->flags &= ~BNA_TX_F_PRIO_LOCK;
3755}
3756
3757static void
3758bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
3759 enum bna_cb_status status)
3760{
3761 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3762
3763 bfa_wc_down(&tx_mod->tx_stop_wc);
3764}
3765
3766static void
3767bna_tx_mod_cb_tx_stopped_all(void *arg)
3768{
3769 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3770
3771 if (tx_mod->stop_cbfn)
3772 tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
3773 tx_mod->stop_cbfn = NULL;
3774}
3775
3776void
3777bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3778{
3779 u32 q_size;
3780 u32 page_count;
3781 struct bna_mem_info *mem_info;
3782
3783 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3784 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3785 mem_info->mem_type = BNA_MEM_T_KVA;
3786 mem_info->len = sizeof(struct bna_tcb);
3787 mem_info->num = num_txq;
3788
3789 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3790 q_size = ALIGN(q_size, PAGE_SIZE);
3791 page_count = q_size >> PAGE_SHIFT;
3792
3793 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3794 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3795 mem_info->mem_type = BNA_MEM_T_DMA;
3796 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3797 mem_info->num = num_txq;
3798
3799 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3800 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3801 mem_info->mem_type = BNA_MEM_T_KVA;
3802 mem_info->len = page_count * sizeof(void *);
3803 mem_info->num = num_txq;
3804
3805 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3806 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3807 mem_info->mem_type = BNA_MEM_T_DMA;
3808 mem_info->len = PAGE_SIZE;
3809 mem_info->num = num_txq * page_count;
3810
3811 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3812 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3813 BNA_INTR_T_MSIX;
3814 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3815}
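/*
 * Worked example (illustrative; BFI_TXQ_WI_SIZE = 64 and PAGE_SIZE = 4096
 * are assumptions): txq_depth = 512 gives q_size = 512 * 64 = 32768 bytes,
 * already page aligned, so page_count = 32768 >> 12 = 8 pages per TXQ;
 * with num_txq = 4 that is 8 * 4 = 32 DMA pages for BNA_TX_RES_MEM_T_PAGE.
 */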
3816
3817struct bna_tx *
3818bna_tx_create(struct bna *bna, struct bnad *bnad,
3819 struct bna_tx_config *tx_cfg,
3820 struct bna_tx_event_cbfn *tx_cbfn,
3821 struct bna_res_info *res_info, void *priv)
3822{
3823 struct bna_intr_info *intr_info;
3824 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3825 struct bna_tx *tx;
3826 struct bna_txq *txq;
3827 struct list_head *qe;
3828 struct bna_ib_mod *ib_mod = &bna->ib_mod;
3829 struct bna_doorbell_qset *qset;
3830 struct bna_ib_config ib_config;
3831 int page_count;
3832 int page_size;
3833 int page_idx;
3834 int i;
3835 unsigned long off;
3836
3837 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3838 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
3839 tx_cfg->num_txq;
3840 page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
3841
3842 /**
3843 * Get resources
3844 */
3845
3846 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3847 return NULL;
3848
3849 /* Tx */
3850
3851 if (list_empty(&tx_mod->tx_free_q))
3852 return NULL;
3853 bfa_q_deq(&tx_mod->tx_free_q, &tx);
3854 bfa_q_qe_init(&tx->qe);
3855
3856 /* TxQs */
3857
3858 INIT_LIST_HEAD(&tx->txq_q);
3859 for (i = 0; i < tx_cfg->num_txq; i++) {
3860 if (list_empty(&tx_mod->txq_free_q))
3861 goto err_return;
3862
3863 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3864 bfa_q_qe_init(&txq->qe);
3865 list_add_tail(&txq->qe, &tx->txq_q);
3866 txq->ib = NULL;
3867 txq->ib_seg_offset = -1;
3868 txq->tx = tx;
3869 }
3870
3871 /* IBs */
3872 i = 0;
3873 list_for_each(qe, &tx->txq_q) {
3874 txq = (struct bna_txq *)qe;
3875
3876 if (intr_info->num == 1)
3877 txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3878 intr_info->idl[0].vector);
3879 else
3880 txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3881 intr_info->idl[i].vector);
3882
3883 if (txq->ib == NULL)
3884 goto err_return;
3885
3886 txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
3887 if (txq->ib_seg_offset == -1)
3888 goto err_return;
3889
3890 i++;
3891 }
3892
3893 /*
3894 * Initialize
3895 */
3896
3897 /* Tx */
3898
3899 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3900 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3901 /* Following callbacks are mandatory */
3902 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3903 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3904 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3905
3906 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3907 tx->bna = bna;
3908 tx->priv = priv;
3909 tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
3910 tx->txq_stop_wc.wc_cbarg = tx;
3911 tx->txq_stop_wc.wc_count = 0;
3912
3913 tx->type = tx_cfg->tx_type;
3914
3915 tx->flags = 0;
3916 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
3917 switch (tx->type) {
3918 case BNA_TX_T_REGULAR:
3919 if (!(tx->bna->tx_mod.flags &
3920 BNA_TX_MOD_F_PORT_LOOPBACK))
3921 tx->flags |= BNA_TX_F_PORT_STARTED;
3922 break;
3923 case BNA_TX_T_LOOPBACK:
3924 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
3925 tx->flags |= BNA_TX_F_PORT_STARTED;
3926 break;
3927 }
3928 }
3929 if (tx->bna->tx_mod.cee_link)
3930 tx->flags |= BNA_TX_F_PRIO_LOCK;
3931
3932 /* TxQ */
3933
3934 i = 0;
3935 page_idx = 0;
3936 list_for_each(qe, &tx->txq_q) {
3937 txq = (struct bna_txq *)qe;
3938 txq->priority = tx_mod->priority;
3939 txq->tcb = (struct bna_tcb *)
3940 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3941 txq->tx_packets = 0;
3942 txq->tx_bytes = 0;
3943
3944 /* IB */
3945
3946 ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3947 ib_config.interpkt_timeo = 0; /* Not used */
3948 ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
3949 ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
3950 BFI_IB_CF_INT_ENABLE |
3951 BFI_IB_CF_COALESCING_MODE);
3952 bna_ib_config(txq->ib, &ib_config);
3953
3954 /* TCB */
3955
3956 txq->tcb->producer_index = 0;
3957 txq->tcb->consumer_index = 0;
3958 txq->tcb->hw_consumer_index = (volatile u32 *)
3959 ((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
3960 (txq->ib_seg_offset * BFI_IBIDX_SIZE));
3961 *(txq->tcb->hw_consumer_index) = 0;
3962 txq->tcb->q_depth = tx_cfg->txq_depth;
3963 txq->tcb->unmap_q = (void *)
3964 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
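		/*
		 * offsetof()-style idiom: taking a member address off a
		 * NULL struct bna_doorbell_qset base yields the byte
		 * offset of this TxQ's doorbell within the qset array,
		 * which is then added to the mapped doorbell base below.
		 */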
3965 qset = (struct bna_doorbell_qset *)0;
3966 off = (unsigned long)&qset[txq->txq_id].txq[0];
3967 txq->tcb->q_dbell = off +
3968 BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
3969 txq->tcb->i_dbell = &txq->ib->door_bell;
3970 txq->tcb->intr_type = intr_info->intr_type;
3971 txq->tcb->intr_vector = (intr_info->num == 1) ?
3972 intr_info->idl[0].vector :
3973 intr_info->idl[i].vector;
3974 txq->tcb->txq = txq;
3975 txq->tcb->bnad = bnad;
3976 txq->tcb->id = i;
3977
3978 /* QPT, SWQPT, Pages */
3979 bna_txq_qpt_setup(txq, page_count, page_size,
3980 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3981 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3982 &res_info[BNA_TX_RES_MEM_T_PAGE].
3983 res_u.mem_info.mdl[page_idx]);
3984 txq->tcb->page_idx = page_idx;
3985 txq->tcb->page_count = page_count;
3986 page_idx += page_count;
3987
3988 /* Callback to bnad for setting up TCB */
3989 if (tx->tcb_setup_cbfn)
3990 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3991
3992 i++;
3993 }
3994
3995 /* TxF */
3996
3997 tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
3998 tx->txf.vlan = 0;
3999
4000 /* Mbox element */
4001 bfa_q_qe_init(&tx->mbox_qe.qe);
4002
4003 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
4004
4005 return tx;
4006
4007err_return:
4008 bna_tx_free(tx);
4009 return NULL;
4010}
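/*
 * A sketch of the expected call sequence from the driver side (the real
 * sequencing in bnad.c also handles locking and the resource allocation
 * that fills in res_info; stop_done_cbfn is a placeholder for the
 * caller's completion callback):
 *
 *	tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, priv);
 *	if (tx)
 *		bna_tx_enable(tx);
 *	...
 *	bna_tx_disable(tx, BNA_HARD_CLEANUP, stop_done_cbfn);
 *	bna_tx_destroy(tx);
 */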
4011
4012void
4013bna_tx_destroy(struct bna_tx *tx)
4014{
4015 /* Callback to bnad for destroying TCB */
4016 if (tx->tcb_destroy_cbfn) {
4017 struct bna_txq *txq;
4018 struct list_head *qe;
4019
4020 list_for_each(qe, &tx->txq_q) {
4021 txq = (struct bna_txq *)qe;
4022 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
4023 }
4024 }
4025
4026 bna_tx_free(tx);
4027}
4028
4029void
4030bna_tx_enable(struct bna_tx *tx)
4031{
4032 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
4033 return;
4034
4035 tx->flags |= BNA_TX_F_ENABLED;
4036
4037 if (tx->flags & BNA_TX_F_PORT_STARTED)
4038 bfa_fsm_send_event(tx, TX_E_START);
4039}
4040
4041void
4042bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
4043 void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
4044{
4045 if (type == BNA_SOFT_CLEANUP) {
4046 (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
4047 return;
4048 }
4049
4050 tx->stop_cbfn = cbfn;
4051 tx->stop_cbarg = tx->bna->bnad;
4052
4053 tx->flags &= ~BNA_TX_F_ENABLED;
4054
4055 bfa_fsm_send_event(tx, TX_E_STOP);
4056}
4057
4058int
4059bna_tx_state_get(struct bna_tx *tx)
4060{
4061 return bfa_sm_to_state(tx_sm_table, tx->fsm);
4062}
4063
4064void
4065bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
4066 struct bna_res_info *res_info)
4067{
4068 int i;
4069
4070 tx_mod->bna = bna;
4071 tx_mod->flags = 0;
4072
4073 tx_mod->tx = (struct bna_tx *)
4074 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
4075 tx_mod->txq = (struct bna_txq *)
4076 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
4077
4078 INIT_LIST_HEAD(&tx_mod->tx_free_q);
4079 INIT_LIST_HEAD(&tx_mod->tx_active_q);
4080
4081 INIT_LIST_HEAD(&tx_mod->txq_free_q);
4082
4083 for (i = 0; i < BFI_MAX_TXQ; i++) {
4084 tx_mod->tx[i].txf.txf_id = i;
4085 bfa_q_qe_init(&tx_mod->tx[i].qe);
4086 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
4087
4088 tx_mod->txq[i].txq_id = i;
4089 bfa_q_qe_init(&tx_mod->txq[i].qe);
4090 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
4091 }
4092
4093 tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
4094 tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
4095 tx_mod->tx_stop_wc.wc_count = 0;
4096}
4097
4098void
4099bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
4100{
4101 struct list_head *qe;
4102 int i;
4103
4104 i = 0;
4105 list_for_each(qe, &tx_mod->tx_free_q)
4106 i++;
4107
4108 i = 0;
4109 list_for_each(qe, &tx_mod->txq_free_q)
4110 i++;
4111
4112 tx_mod->bna = NULL;
4113}
4114
4115void
4116bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
4117{
4118 struct bna_tx *tx;
4119 struct list_head *qe;
4120
4121 tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
4122 if (type == BNA_TX_T_LOOPBACK)
4123 tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
4124
4125 list_for_each(qe, &tx_mod->tx_active_q) {
4126 tx = (struct bna_tx *)qe;
4127 if (tx->type == type)
4128 bna_tx_start(tx);
4129 }
4130}
4131
4132void
4133bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
4134{
4135 struct bna_tx *tx;
4136 struct list_head *qe;
4137
4138 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
4139 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
4140
4141 tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
4142
4143 /**
4144 * Before calling bna_tx_stop(), increment tx_stop_wc as many times
4145 * as we are going to call bna_tx_stop
4146 */
4147 list_for_each(qe, &tx_mod->tx_active_q) {
4148 tx = (struct bna_tx *)qe;
4149 if (tx->type == type)
4150 bfa_wc_up(&tx_mod->tx_stop_wc);
4151 }
4152
4153 if (tx_mod->tx_stop_wc.wc_count == 0) {
4154 tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
4155 tx_mod->stop_cbfn = NULL;
4156 return;
4157 }
4158
4159 list_for_each(qe, &tx_mod->tx_active_q) {
4160 tx = (struct bna_tx *)qe;
4161 if (tx->type == type)
4162 bna_tx_stop(tx);
4163 }
4164}
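/*
 * The stop sequence above relies on the bfa_wc wait-counter: bfa_wc_up()
 * is called once per expected bna_tx_stop() completion before any stop
 * is issued, so the wc_resume callback (bna_tx_mod_cb_tx_stopped_all)
 * cannot fire until every completion has counted the counter back down.
 * A minimal sketch of the pattern, assuming the helpers declared in
 * bfa_wc.h; issue_stop() and all_stopped_cbfn() are placeholders:
 *
 *	bfa_wc_init(&wc, all_stopped_cbfn, cbarg);	(wc_count = 0)
 *	list_for_each(qe, &active_q)
 *		bfa_wc_up(&wc);				(wc_count++)
 *	list_for_each(qe, &active_q)
 *		issue_stop(qe);		(completion calls bfa_wc_down())
 *
 * all_stopped_cbfn(cbarg) then runs once wc_count returns to zero.
 */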
4165
4166void
4167bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
4168{
4169 struct bna_tx *tx;
4170 struct list_head *qe;
4171
4172 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
4173 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
4174
4175 list_for_each(qe, &tx_mod->tx_active_q) {
4176 tx = (struct bna_tx *)qe;
4177 bna_tx_fail(tx);
4178 }
4179}
4180
4181void
4182bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
4183{
4184 struct bna_tx *tx;
4185 struct list_head *qe;
4186
4187 if (prio != tx_mod->priority) {
4188 tx_mod->priority = prio;
4189
4190 list_for_each(qe, &tx_mod->tx_active_q) {
4191 tx = (struct bna_tx *)qe;
4192 bna_tx_prio_changed(tx, prio);
4193 }
4194 }
4195}
4196
4197void
4198bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
4199{
4200 struct bna_tx *tx;
4201 struct list_head *qe;
4202
4203 tx_mod->cee_link = cee_link;
4204
4205 list_for_each(qe, &tx_mod->tx_active_q) {
4206 tx = (struct bna_tx *)qe;
4207 bna_tx_cee_link_status(tx, cee_link);
4208 }
4209}
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
new file mode 100644
index 000000000000..6877310f6ef4
--- /dev/null
+++ b/drivers/net/bna/bna_types.h
@@ -0,0 +1,1128 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BNA_TYPES_H__
19#define __BNA_TYPES_H__
20
21#include "cna.h"
22#include "bna_hw.h"
23#include "bfa_cee.h"
24
25/**
26 *
27 * Forward declarations
28 *
29 */
30
31struct bna_txq;
32struct bna_tx;
33struct bna_rxq;
34struct bna_cq;
35struct bna_rx;
36struct bna_rxf;
37struct bna_port;
38struct bna;
39struct bnad;
40
41/**
42 *
43 * Enums, primitive data types
44 *
45 */
46
47enum bna_status {
48 BNA_STATUS_T_DISABLED = 0,
49 BNA_STATUS_T_ENABLED = 1
50};
51
52enum bna_cleanup_type {
53 BNA_HARD_CLEANUP = 0,
54 BNA_SOFT_CLEANUP = 1
55};
56
57enum bna_cb_status {
58 BNA_CB_SUCCESS = 0,
59 BNA_CB_FAIL = 1,
60 BNA_CB_INTERRUPT = 2,
61 BNA_CB_BUSY = 3,
62 BNA_CB_INVALID_MAC = 4,
63 BNA_CB_MCAST_LIST_FULL = 5,
64 BNA_CB_UCAST_CAM_FULL = 6,
65 BNA_CB_WAITING = 7,
66 BNA_CB_NOT_EXEC = 8
67};
68
69enum bna_res_type {
70 BNA_RES_T_MEM = 1,
71 BNA_RES_T_INTR = 2
72};
73
74enum bna_mem_type {
75 BNA_MEM_T_KVA = 1,
76 BNA_MEM_T_DMA = 2
77};
78
79enum bna_intr_type {
80 BNA_INTR_T_INTX = 1,
81 BNA_INTR_T_MSIX = 2
82};
83
84enum bna_res_req_type {
85 BNA_RES_MEM_T_COM = 0,
86 BNA_RES_MEM_T_ATTR = 1,
87 BNA_RES_MEM_T_FWTRC = 2,
88 BNA_RES_MEM_T_STATS = 3,
89 BNA_RES_MEM_T_SWSTATS = 4,
90 BNA_RES_MEM_T_IBIDX = 5,
91 BNA_RES_MEM_T_IB_ARRAY = 6,
92 BNA_RES_MEM_T_INTR_ARRAY = 7,
93 BNA_RES_MEM_T_IDXSEG_ARRAY = 8,
94 BNA_RES_MEM_T_TX_ARRAY = 9,
95 BNA_RES_MEM_T_TXQ_ARRAY = 10,
96 BNA_RES_MEM_T_RX_ARRAY = 11,
97 BNA_RES_MEM_T_RXP_ARRAY = 12,
98 BNA_RES_MEM_T_RXQ_ARRAY = 13,
99 BNA_RES_MEM_T_UCMAC_ARRAY = 14,
100 BNA_RES_MEM_T_MCMAC_ARRAY = 15,
101 BNA_RES_MEM_T_RIT_ENTRY = 16,
102 BNA_RES_MEM_T_RIT_SEGMENT = 17,
103 BNA_RES_INTR_T_MBOX = 18,
104 BNA_RES_T_MAX
105};
106
107enum bna_tx_res_req_type {
108 BNA_TX_RES_MEM_T_TCB = 0,
109 BNA_TX_RES_MEM_T_UNMAPQ = 1,
110 BNA_TX_RES_MEM_T_QPT = 2,
111 BNA_TX_RES_MEM_T_SWQPT = 3,
112 BNA_TX_RES_MEM_T_PAGE = 4,
113 BNA_TX_RES_INTR_T_TXCMPL = 5,
114 BNA_TX_RES_T_MAX,
115};
116
117enum bna_rx_mem_type {
118 BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
 119	BNA_RX_RES_MEM_T_RCB = 1,	/* RxQ context */
120 BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
121 BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
122 BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
123 BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
124 BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
125 BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
126 BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
127 BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
 128	BNA_RX_RES_MEM_T_DPAGE = 10,	/* RX data page */
 129	BNA_RX_RES_MEM_T_HPAGE = 11,	/* RX header page */
130 BNA_RX_RES_T_INTR = 12, /* Rx interrupts */
131 BNA_RX_RES_T_MAX = 13
132};
133
134enum bna_mbox_state {
135 BNA_MBOX_FREE = 0,
136 BNA_MBOX_POSTED = 1
137};
138
139enum bna_tx_type {
140 BNA_TX_T_REGULAR = 0,
141 BNA_TX_T_LOOPBACK = 1,
142};
143
144enum bna_tx_flags {
145 BNA_TX_F_PORT_STARTED = 1,
146 BNA_TX_F_ENABLED = 2,
147 BNA_TX_F_PRIO_LOCK = 4,
148};
149
150enum bna_tx_mod_flags {
151 BNA_TX_MOD_F_PORT_STARTED = 1,
152 BNA_TX_MOD_F_PORT_LOOPBACK = 2,
153};
154
155enum bna_rx_type {
156 BNA_RX_T_REGULAR = 0,
157 BNA_RX_T_LOOPBACK = 1,
158};
159
160enum bna_rxp_type {
161 BNA_RXP_SINGLE = 1,
162 BNA_RXP_SLR = 2,
163 BNA_RXP_HDS = 3
164};
165
166enum bna_rxmode {
167 BNA_RXMODE_PROMISC = 1,
168 BNA_RXMODE_DEFAULT = 2,
169 BNA_RXMODE_ALLMULTI = 4
170};
171
172enum bna_rx_event {
173 RX_E_START = 1,
174 RX_E_STOP = 2,
175 RX_E_FAIL = 3,
176 RX_E_RXF_STARTED = 4,
177 RX_E_RXF_STOPPED = 5,
178 RX_E_RXQ_STOPPED = 6,
179};
180
181enum bna_rx_state {
182 BNA_RX_STOPPED = 1,
183 BNA_RX_RXF_START_WAIT = 2,
184 BNA_RX_STARTED = 3,
185 BNA_RX_RXF_STOP_WAIT = 4,
186 BNA_RX_RXQ_STOP_WAIT = 5,
187};
188
189enum bna_rx_flags {
190 BNA_RX_F_ENABLE = 0x01, /* bnad enabled rxf */
191 BNA_RX_F_PORT_ENABLED = 0x02, /* Port object is enabled */
192 BNA_RX_F_PORT_FAILED = 0x04, /* Port in failed state */
193};
194
195enum bna_rx_mod_flags {
196 BNA_RX_MOD_F_PORT_STARTED = 1,
197 BNA_RX_MOD_F_PORT_LOOPBACK = 2,
198};
199
200enum bna_rxf_oper_state {
201 BNA_RXF_OPER_STATE_RUNNING = 0x01, /* rxf operational */
202 BNA_RXF_OPER_STATE_PAUSED = 0x02, /* rxf in PAUSED state */
203};
204
205enum bna_rxf_flags {
206 BNA_RXF_FL_STOP_PENDING = 0x01,
207 BNA_RXF_FL_FAILED = 0x02,
208 BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
209 BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
210 BNA_RXF_FL_RXF_ENABLED = 0x10,
211 BNA_RXF_FL_VLAN_CONFIG_PENDING = 0x20,
212};
213
214enum bna_rxf_event {
215 RXF_E_START = 1,
216 RXF_E_STOP = 2,
217 RXF_E_FAIL = 3,
218 RXF_E_CAM_FLTR_MOD = 4,
219 RXF_E_STARTED = 5,
220 RXF_E_STOPPED = 6,
221 RXF_E_CAM_FLTR_RESP = 7,
222 RXF_E_PAUSE = 8,
223 RXF_E_RESUME = 9,
224 RXF_E_STAT_CLEARED = 10,
225};
226
227enum bna_rxf_state {
228 BNA_RXF_STOPPED = 1,
229 BNA_RXF_START_WAIT = 2,
230 BNA_RXF_CAM_FLTR_MOD_WAIT = 3,
231 BNA_RXF_STARTED = 4,
232 BNA_RXF_CAM_FLTR_CLR_WAIT = 5,
233 BNA_RXF_STOP_WAIT = 6,
234 BNA_RXF_PAUSE_WAIT = 7,
235 BNA_RXF_RESUME_WAIT = 8,
236 BNA_RXF_STAT_CLR_WAIT = 9,
237};
238
239enum bna_port_type {
240 BNA_PORT_T_REGULAR = 0,
241 BNA_PORT_T_LOOPBACK_INTERNAL = 1,
242 BNA_PORT_T_LOOPBACK_EXTERNAL = 2,
243};
244
245enum bna_link_status {
246 BNA_LINK_DOWN = 0,
247 BNA_LINK_UP = 1,
248 BNA_CEE_UP = 2
249};
250
251enum bna_llport_flags {
252 BNA_LLPORT_F_ENABLED = 1,
253 BNA_LLPORT_F_RX_ENABLED = 2
254};
255
256enum bna_port_flags {
257 BNA_PORT_F_DEVICE_READY = 1,
258 BNA_PORT_F_ENABLED = 2,
259 BNA_PORT_F_PAUSE_CHANGED = 4,
260 BNA_PORT_F_MTU_CHANGED = 8
261};
262
263enum bna_pkt_rates {
264 BNA_PKT_RATE_10K = 10000,
265 BNA_PKT_RATE_20K = 20000,
266 BNA_PKT_RATE_30K = 30000,
267 BNA_PKT_RATE_40K = 40000,
268 BNA_PKT_RATE_50K = 50000,
269 BNA_PKT_RATE_60K = 60000,
270 BNA_PKT_RATE_70K = 70000,
271 BNA_PKT_RATE_80K = 80000,
272};
273
274enum bna_dim_load_types {
275 BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
276 BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
277 BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
278 BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
279 BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
280 BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
281 BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
282 BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
283 BNA_LOAD_T_MAX = 8
284};
285
286enum bna_dim_bias_types {
287 BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
288 BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
289 BNA_BIAS_T_MAX = 2
290};
291
292struct bna_mac {
293 /* This should be the first one */
294 struct list_head qe;
295 u8 addr[ETH_ALEN];
296};
297
298struct bna_mem_descr {
299 u32 len;
300 void *kva;
301 struct bna_dma_addr dma;
302};
303
304struct bna_mem_info {
305 enum bna_mem_type mem_type;
306 u32 len;
307 u32 num;
308 u32 align_sz; /* 0/1 = no alignment */
309 struct bna_mem_descr *mdl;
310 void *cookie; /* For bnad to unmap dma later */
311};
312
313struct bna_intr_descr {
314 int vector;
315};
316
317struct bna_intr_info {
318 enum bna_intr_type intr_type;
319 int num;
320 struct bna_intr_descr *idl;
321};
322
323union bna_res_u {
324 struct bna_mem_info mem_info;
325 struct bna_intr_info intr_info;
326};
327
328struct bna_res_info {
329 enum bna_res_type res_type;
330 union bna_res_u res_u;
331};
332
333/* HW QPT */
334struct bna_qpt {
335 struct bna_dma_addr hw_qpt_ptr;
336 void *kv_qpt_ptr;
337 u32 page_count;
338 u32 page_size;
339};
340
341/**
342 *
343 * Device
344 *
345 */
346
347struct bna_device {
348 bfa_fsm_t fsm;
349 struct bfa_ioc ioc;
350
351 enum bna_intr_type intr_type;
352 int vector;
353
354 void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
355 struct bnad *ready_cbarg;
356
357 void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
358 struct bnad *stop_cbarg;
359
360 struct bna *bna;
361};
362
363/**
364 *
365 * Mail box
366 *
367 */
368
369struct bna_mbox_qe {
370 /* This should be the first one */
371 struct list_head qe;
372
373 struct bfa_mbox_cmd cmd;
374 u32 cmd_len;
375 /* Callback for port, tx, rx, rxf */
376 void (*cbfn)(void *arg, int status);
377 void *cbarg;
378};
379
380struct bna_mbox_mod {
381 enum bna_mbox_state state;
382 struct list_head posted_q;
383 u32 msg_pending;
384 u32 msg_ctr;
385 struct bna *bna;
386};
387
388/**
389 *
390 * Port
391 *
392 */
393
394/* Pause configuration */
395struct bna_pause_config {
396 enum bna_status tx_pause;
397 enum bna_status rx_pause;
398};
399
400struct bna_llport {
401 bfa_fsm_t fsm;
402 enum bna_llport_flags flags;
403
404 enum bna_port_type type;
405
406 enum bna_link_status link_status;
407
408 int admin_up_count;
409
410 void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
411
412 struct bna_mbox_qe mbox_qe;
413
414 struct bna *bna;
415};
416
417struct bna_port {
418 bfa_fsm_t fsm;
419 enum bna_port_flags flags;
420
421 enum bna_port_type type;
422
423 struct bna_llport llport;
424
425 struct bna_pause_config pause_config;
426 u8 priority;
427 int mtu;
428
429 /* Callback for bna_port_disable(), port_stop() */
430 void (*stop_cbfn)(void *, enum bna_cb_status);
431 void *stop_cbarg;
432
433 /* Callback for bna_port_pause_config() */
434 void (*pause_cbfn)(struct bnad *, enum bna_cb_status);
435
436 /* Callback for bna_port_mtu_set() */
437 void (*mtu_cbfn)(struct bnad *, enum bna_cb_status);
438
439 void (*link_cbfn)(struct bnad *, enum bna_link_status);
440
441 struct bfa_wc chld_stop_wc;
442
443 struct bna_mbox_qe mbox_qe;
444
445 struct bna *bna;
446};
447
448/**
449 *
450 * Interrupt Block
451 *
452 */
453
454/* IB index segment structure */
455struct bna_ibidx_seg {
456 /* This should be the first one */
457 struct list_head qe;
458
459 u8 ib_seg_size;
460 u8 ib_idx_tbl_offset;
461};
462
463/* Interrupt structure */
464struct bna_intr {
465 /* This should be the first one */
466 struct list_head qe;
467 int ref_count;
468
469 enum bna_intr_type intr_type;
470 int vector;
471
472 struct bna_ib *ib;
473};
474
475/* Doorbell structure */
476struct bna_ib_dbell {
 477	void __iomem *doorbell_addr;
478 u32 doorbell_ack;
479};
480
481/* Interrupt timer configuration */
482struct bna_ib_config {
483 u8 coalescing_timeo; /* Unit is 5usec. */
484
485 int interpkt_count;
486 int interpkt_timeo;
487
488 enum ib_flags ctrl_flags;
489};
490
491/* IB structure */
492struct bna_ib {
493 /* This should be the first one */
494 struct list_head qe;
495
496 int ib_id;
497
498 int ref_count;
499 int start_count;
500
501 struct bna_dma_addr ib_seg_host_addr;
502 void *ib_seg_host_addr_kva;
503 u32 idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
504
505 struct bna_ibidx_seg *idx_seg;
506
507 struct bna_ib_dbell door_bell;
508
509 struct bna_intr *intr;
510
511 struct bna_ib_config ib_config;
512
513 struct bna *bna;
514};
515
516/* IB module - keeps track of IBs and interrupts */
517struct bna_ib_mod {
518 struct bna_ib *ib; /* BFI_MAX_IB entries */
519 struct bna_intr *intr; /* BFI_MAX_IB entries */
520 struct bna_ibidx_seg *idx_seg; /* BNA_IBIDX_TOTAL_SEGS */
521
522 struct list_head ib_free_q;
523
524 struct list_head ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
525
526 struct list_head intr_free_q;
527 struct list_head intr_active_q;
528
529 struct bna *bna;
530};
531
532/**
533 *
534 * Tx object
535 *
536 */
537
538/* Tx datapath control structure */
539#define BNA_Q_NAME_SIZE 16
540struct bna_tcb {
541 /* Fast path */
542 void **sw_qpt;
543 void *unmap_q;
544 u32 producer_index;
545 u32 consumer_index;
546 volatile u32 *hw_consumer_index;
547 u32 q_depth;
 548	void __iomem *q_dbell;
549 struct bna_ib_dbell *i_dbell;
550 int page_idx;
551 int page_count;
552 /* Control path */
553 struct bna_txq *txq;
554 struct bnad *bnad;
555 enum bna_intr_type intr_type;
556 int intr_vector;
557 u8 priority; /* Current priority */
558 unsigned long flags; /* Used by bnad as required */
559 int id;
560 char name[BNA_Q_NAME_SIZE];
561};
562
563/* TxQ QPT and configuration */
564struct bna_txq {
565 /* This should be the first one */
566 struct list_head qe;
567
568 int txq_id;
569
570 u8 priority;
571
572 struct bna_qpt qpt;
573 struct bna_tcb *tcb;
574 struct bna_ib *ib;
575 int ib_seg_offset;
576
577 struct bna_tx *tx;
578
579 u64 tx_packets;
580 u64 tx_bytes;
581};
582
583/* TxF structure (hardware Tx Function) */
584struct bna_txf {
585 int txf_id;
586 enum txf_flags ctrl_flags;
587 u16 vlan;
588};
589
590/* Tx object */
591struct bna_tx {
592 /* This should be the first one */
593 struct list_head qe;
594
595 bfa_fsm_t fsm;
596 enum bna_tx_flags flags;
597
598 enum bna_tx_type type;
599
600 struct list_head txq_q;
601 struct bna_txf txf;
602
603 /* Tx event handlers */
604 void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
605 void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
606 void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
607 void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
608 void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
609
610 /* callback for bna_tx_disable(), bna_tx_stop() */
611 void (*stop_cbfn)(void *arg, struct bna_tx *tx,
612 enum bna_cb_status status);
613 void *stop_cbarg;
614
615 /* callback for bna_tx_prio_set() */
616 void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
617 enum bna_cb_status status);
618
619 struct bfa_wc txq_stop_wc;
620
621 struct bna_mbox_qe mbox_qe;
622
623 struct bna *bna;
624 void *priv; /* bnad's cookie */
625};
626
627struct bna_tx_config {
628 int num_txq;
629 int txq_depth;
630 enum bna_tx_type tx_type;
631};
632
633struct bna_tx_event_cbfn {
634 /* Optional */
635 void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
636 void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
637 /* Mandatory */
638 void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
639 void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
640 void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
641};
642
643/* Tx module - keeps track of free, active tx objects */
644struct bna_tx_mod {
645 struct bna_tx *tx; /* BFI_MAX_TXQ entries */
646 struct bna_txq *txq; /* BFI_MAX_TXQ entries */
647
648 struct list_head tx_free_q;
649 struct list_head tx_active_q;
650
651 struct list_head txq_free_q;
652
653 /* callback for bna_tx_mod_stop() */
654 void (*stop_cbfn)(struct bna_port *port,
655 enum bna_cb_status status);
656
657 struct bfa_wc tx_stop_wc;
658
659 enum bna_tx_mod_flags flags;
660
661 int priority;
662 int cee_link;
663
664 u32 txf_bmap[2];
665
666 struct bna *bna;
667};
668
669/**
670 *
671 * Receive Indirection Table
672 *
673 */
674
675/* One row of RIT table */
676struct bna_rit_entry {
677 u8 large_rxq_id; /* used for either large or data buffers */
678 u8 small_rxq_id; /* used for either small or header buffers */
679};
680
681/* RIT segment */
682struct bna_rit_segment {
683 struct list_head qe;
684
685 u32 rit_offset;
686 u32 rit_size;
687 /**
688 * max_rit_size: Varies per RIT segment depending on how RIT is
689 * partitioned
690 */
691 u32 max_rit_size;
692
693 struct bna_rit_entry *rit;
694};
695
696struct bna_rit_mod {
697 struct bna_rit_entry *rit;
698 struct bna_rit_segment *rit_segment;
699
700 struct list_head rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
701};
702
703/**
704 *
705 * Rx object
706 *
707 */
708
709/* Rx datapath control structure */
710struct bna_rcb {
711 /* Fast path */
712 void **sw_qpt;
713 void *unmap_q;
714 u32 producer_index;
715 u32 consumer_index;
716 u32 q_depth;
 717	void __iomem *q_dbell;
718 int page_idx;
719 int page_count;
720 /* Control path */
721 struct bna_rxq *rxq;
722 struct bna_cq *cq;
723 struct bnad *bnad;
724 unsigned long flags;
725 int id;
726};
727
728/* RxQ structure - QPT, configuration */
729struct bna_rxq {
730 struct list_head qe;
731 int rxq_id;
732
733 int buffer_size;
734 int q_depth;
735
736 struct bna_qpt qpt;
737 struct bna_rcb *rcb;
738
739 struct bna_rxp *rxp;
740 struct bna_rx *rx;
741
742 u64 rx_packets;
743 u64 rx_bytes;
744 u64 rx_packets_with_error;
745 u64 rxbuf_alloc_failed;
746};
747
748/* RxQ pair */
749union bna_rxq_u {
750 struct {
751 struct bna_rxq *hdr;
752 struct bna_rxq *data;
753 } hds;
754 struct {
755 struct bna_rxq *small;
756 struct bna_rxq *large;
757 } slr;
758 struct {
759 struct bna_rxq *only;
760 struct bna_rxq *reserved;
761 } single;
762};
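/*
 * Which member of the union is valid follows enum bna_rxp_type: a
 * BNA_RXP_HDS path uses .hds.hdr/.hds.data, BNA_RXP_SLR uses
 * .slr.small/.slr.large, and BNA_RXP_SINGLE uses .single.only.
 */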
763
764/* Packet rate for Dynamic Interrupt Moderation */
765struct bna_pkt_rate {
766 u32 small_pkt_cnt;
767 u32 large_pkt_cnt;
768};
769
770/* Completion control structure */
771struct bna_ccb {
772 /* Fast path */
773 void **sw_qpt;
774 u32 producer_index;
775 volatile u32 *hw_producer_index;
776 u32 q_depth;
777 struct bna_ib_dbell *i_dbell;
778 struct bna_rcb *rcb[2];
779 void *ctrl; /* For bnad */
780 struct bna_pkt_rate pkt_rate;
781 int page_idx;
782 int page_count;
783
784 /* Control path */
785 struct bna_cq *cq;
786 struct bnad *bnad;
787 enum bna_intr_type intr_type;
788 int intr_vector;
789 u8 rx_coalescing_timeo; /* For NAPI */
790 int id;
791 char name[BNA_Q_NAME_SIZE];
792};
793
794/* CQ QPT, configuration */
795struct bna_cq {
796 int cq_id;
797
798 struct bna_qpt qpt;
799 struct bna_ccb *ccb;
800
801 struct bna_ib *ib;
802 u8 ib_seg_offset;
803
804 struct bna_rx *rx;
805};
806
807struct bna_rss_config {
808 enum rss_hash_type hash_type;
809 u8 hash_mask;
810 u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
811};
812
813struct bna_hds_config {
814 enum hds_header_type hdr_type;
815 int header_size;
816};
817
818/* This structure is used during RX creation */
819struct bna_rx_config {
820 enum bna_rx_type rx_type;
821 int num_paths;
822 enum bna_rxp_type rxp_type;
823 int paused;
824 int q_depth;
825 /*
826 * Small/Large (or Header/Data) buffer size to be configured
827 * for SLR and HDS queue type. Large buffer size comes from
828 * port->mtu.
829 */
830 int small_buff_size;
831
832 enum bna_status rss_status;
833 struct bna_rss_config rss_config;
834
835 enum bna_status hds_status;
836 struct bna_hds_config hds_config;
837
838 enum bna_status vlan_strip_status;
839};
840
841/* Rx Path structure - one per MSIX vector/CPU */
842struct bna_rxp {
843 /* This should be the first one */
844 struct list_head qe;
845
846 enum bna_rxp_type type;
847 union bna_rxq_u rxq;
848 struct bna_cq cq;
849
850 struct bna_rx *rx;
851
852 /* MSI-x vector number for configuring RSS */
853 int vector;
854
855 struct bna_mbox_qe mbox_qe;
856};
857
858/* HDS configuration structure */
859struct bna_rxf_hds {
860 enum hds_header_type hdr_type;
861 int header_size;
862};
863
864/* RSS configuration structure */
865struct bna_rxf_rss {
866 enum rss_hash_type hash_type;
867 u8 hash_mask;
868 u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
869};
870
871/* RxF structure (hardware Rx Function) */
872struct bna_rxf {
873 bfa_fsm_t fsm;
874 int rxf_id;
875 enum rxf_flags ctrl_flags;
876 u16 default_vlan_tag;
877 enum bna_rxf_oper_state rxf_oper_state;
878 enum bna_status hds_status;
879 struct bna_rxf_hds hds_cfg;
880 enum bna_status rss_status;
881 struct bna_rxf_rss rss_cfg;
882 struct bna_rit_segment *rit_segment;
883 struct bna_rx *rx;
884 u32 forced_offset;
885 struct bna_mbox_qe mbox_qe;
886 int mcast_rxq_id;
887
888 /* callback for bna_rxf_start() */
889 void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
890 struct bna_rx *start_cbarg;
891
892 /* callback for bna_rxf_stop() */
893 void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
894 struct bna_rx *stop_cbarg;
895
896 /* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
897 void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
898 enum bna_cb_status status);
899 struct bnad *oper_state_cbarg;
900
901 /**
902 * callback for:
903 * bna_rxf_ucast_set()
904 * bna_rxf_{ucast/mcast}_add(),
905 * bna_rxf_{ucast/mcast}_del(),
906 * bna_rxf_mode_set()
907 */
908 void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
909 enum bna_cb_status status);
910 struct bnad *cam_fltr_cbarg;
911
912 enum bna_rxf_flags rxf_flags;
913
914 /* List of unicast addresses yet to be applied to h/w */
915 struct list_head ucast_pending_add_q;
916 struct list_head ucast_pending_del_q;
917 int ucast_pending_set;
918 /* ucast addresses applied to the h/w */
919 struct list_head ucast_active_q;
920 struct bna_mac *ucast_active_mac;
921
922 /* List of multicast addresses yet to be applied to h/w */
923 struct list_head mcast_pending_add_q;
924 struct list_head mcast_pending_del_q;
925 /* multicast addresses applied to the h/w */
926 struct list_head mcast_active_q;
927
928 /* Rx modes yet to be applied to h/w */
929 enum bna_rxmode rxmode_pending;
930 enum bna_rxmode rxmode_pending_bitmask;
931 /* Rx modes applied to h/w */
932 enum bna_rxmode rxmode_active;
933
934 enum bna_status vlan_filter_status;
935 u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
936};
937
938/* Rx object */
939struct bna_rx {
940 /* This should be the first one */
941 struct list_head qe;
942
943 bfa_fsm_t fsm;
944
945 enum bna_rx_type type;
946
947 /* list-head for RX path objects */
948 struct list_head rxp_q;
949
950 struct bna_rxf rxf;
951
952 enum bna_rx_flags rx_flags;
953
954 struct bna_mbox_qe mbox_qe;
955
956 struct bfa_wc rxq_stop_wc;
957
958 /* Rx event handlers */
959 void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
960 void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
961 void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
962 void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
963 void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
964 void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
965
966 /* callback for bna_rx_disable(), bna_rx_stop() */
967 void (*stop_cbfn)(void *arg, struct bna_rx *rx,
968 enum bna_cb_status status);
969 void *stop_cbarg;
970
971 struct bna *bna;
972 void *priv; /* bnad's cookie */
973};
974
975struct bna_rx_event_cbfn {
976 /* Optional */
977 void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
978 void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
979 void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
980 void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
981 /* Mandatory */
982 void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
983 void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
984};
985
986/* Rx module - keeps track of free, active rx objects */
987struct bna_rx_mod {
988 struct bna *bna; /* back pointer to parent */
989 struct bna_rx *rx; /* BFI_MAX_RXQ entries */
990 struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
991 struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
992
993 struct list_head rx_free_q;
994 struct list_head rx_active_q;
995 int rx_free_count;
996
997 struct list_head rxp_free_q;
998 int rxp_free_count;
999
1000 struct list_head rxq_free_q;
1001 int rxq_free_count;
1002
1003 enum bna_rx_mod_flags flags;
1004
1005 /* callback for bna_rx_mod_stop() */
1006 void (*stop_cbfn)(struct bna_port *port,
1007 enum bna_cb_status status);
1008
1009 struct bfa_wc rx_stop_wc;
1010 u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
1011 u32 rxf_bmap[2];
1012};
1013
1014/**
1015 *
1016 * CAM
1017 *
1018 */
1019
1020struct bna_ucam_mod {
1021 struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
1022 struct list_head free_q;
1023
1024 struct bna *bna;
1025};
1026
1027struct bna_mcam_mod {
1028 struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
1029 struct list_head free_q;
1030
1031 struct bna *bna;
1032};
1033
1034/**
1035 *
1036 * Statistics
1037 *
1038 */
1039
1040struct bna_tx_stats {
1041 int tx_state;
1042 int tx_flags;
1043 int num_txqs;
1044 u32 txq_bmap[2];
1045 int txf_id;
1046};
1047
1048struct bna_rx_stats {
1049 int rx_state;
1050 int rx_flags;
1051 int num_rxps;
1052 int num_rxqs;
1053 u32 rxq_bmap[2];
1054 u32 cq_bmap[2];
1055 int rxf_id;
1056 int rxf_state;
1057 int rxf_oper_state;
1058 int num_active_ucast;
1059 int num_active_mcast;
1060 int rxmode_active;
1061 int vlan_filter_status;
1062 u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
1063 int rss_status;
1064 int hds_status;
1065};
1066
1067struct bna_sw_stats {
1068 int device_state;
1069 int port_state;
1070 int port_flags;
1071 int llport_state;
1072 int priority;
1073 int num_active_tx;
1074 int num_active_rx;
1075 struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
1076 struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
1077};
1078
1079struct bna_stats {
1080 u32 txf_bmap[2];
1081 u32 rxf_bmap[2];
1082 struct bfi_ll_stats *hw_stats;
1083 struct bna_sw_stats *sw_stats;
1084};
1085
1086/**
1087 *
1088 * BNA
1089 *
1090 */
1091
1092struct bna {
1093 struct bfa_pcidev pcidev;
1094
1095 int port_num;
1096
1097 struct bna_chip_regs regs;
1098
1099 struct bna_dma_addr hw_stats_dma;
1100 struct bna_stats stats;
1101
1102 struct bna_device device;
1103 struct bfa_cee cee;
1104
1105 struct bna_mbox_mod mbox_mod;
1106
1107 struct bna_port port;
1108
1109 struct bna_tx_mod tx_mod;
1110
1111 struct bna_rx_mod rx_mod;
1112
1113 struct bna_ib_mod ib_mod;
1114
1115 struct bna_ucam_mod ucam_mod;
1116 struct bna_mcam_mod mcam_mod;
1117
1118 struct bna_rit_mod rit_mod;
1119
1120 int rxf_default_id;
1121 int rxf_promisc_id;
1122
1123 struct bna_mbox_qe mbox_qe;
1124
1125 struct bnad *bnad;
1126};
1127
1128#endif /* __BNA_TYPES_H__ */
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
new file mode 100644
index 000000000000..8158fb93cb4c
--- /dev/null
+++ b/drivers/net/bna/bnad.c
@@ -0,0 +1,3266 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/netdevice.h>
19#include <linux/skbuff.h>
20#include <linux/etherdevice.h>
21#include <linux/in.h>
22#include <linux/ethtool.h>
23#include <linux/if_vlan.h>
24#include <linux/if_ether.h>
25#include <linux/ip.h>
26
27#include "bnad.h"
28#include "bna.h"
29#include "cna.h"
30
31DEFINE_MUTEX(bnad_fwimg_mutex);
32
33/*
34 * Module params
35 */
36static uint bnad_msix_disable;
37module_param(bnad_msix_disable, uint, 0444);
38MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
39
40static uint bnad_ioc_auto_recover = 1;
41module_param(bnad_ioc_auto_recover, uint, 0444);
42MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
43
44/*
45 * Global variables
46 */
47u32 bnad_rxqs_per_cq = 2;
48
49const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
50
51/*
52 * Local MACROS
53 */
54#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
55
56#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
57
58#define BNAD_GET_MBOX_IRQ(_bnad) \
59 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
60 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
61 ((_bnad)->pcidev->irq))
62
63#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
64do { \
65 (_res_info)->res_type = BNA_RES_T_MEM; \
66 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
67 (_res_info)->res_u.mem_info.num = (_num); \
68 (_res_info)->res_u.mem_info.len = \
69 sizeof(struct bnad_unmap_q) + \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
71} while (0)
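/*
 * A sketch of how a caller might fill the Tx unmap-queue request with
 * this macro: one KVA chunk per TxQ, each sized for a queue of depth
 * BNAD_TX_UNMAPQ_DEPTH.  The '(_depth) - 1' above accounts for the one
 * bnad_skb_unmap element already embedded in struct bnad_unmap_q.
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *		bnad->num_txq_per_tx, BNAD_TX_UNMAPQ_DEPTH);
 */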
72
73/*
74 * Reinitialize completions in CQ, once Rx is taken down
75 */
76static void
77bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
78{
79 struct bna_cq_entry *cmpl, *next_cmpl;
80 unsigned int wi_range, wis = 0, ccb_prod = 0;
81 int i;
82
83 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
84 wi_range);
85
86 for (i = 0; i < ccb->q_depth; i++) {
87 wis++;
88 if (likely(--wi_range))
89 next_cmpl = cmpl + 1;
90 else {
91 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
92 wis = 0;
93 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
94 next_cmpl, wi_range);
95 }
96 cmpl->valid = 0;
97 cmpl = next_cmpl;
98 }
99}
100
101/*
102 * Frees all pending Tx Bufs
103 * At this point no activity is expected on the Q,
104 * so DMA unmap & freeing is fine.
105 */
106static void
107bnad_free_all_txbufs(struct bnad *bnad,
108 struct bna_tcb *tcb)
109{
110 u16 unmap_cons;
111 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
112 struct bnad_skb_unmap *unmap_array;
113 struct sk_buff *skb = NULL;
114 int i;
115
116 unmap_array = unmap_q->unmap_array;
117
118 unmap_cons = 0;
119 while (unmap_cons < unmap_q->q_depth) {
120 skb = unmap_array[unmap_cons].skb;
121 if (!skb) {
122 unmap_cons++;
123 continue;
124 }
125 unmap_array[unmap_cons].skb = NULL;
126
127 pci_unmap_single(bnad->pcidev,
128 pci_unmap_addr(&unmap_array[unmap_cons],
129 dma_addr), skb_headlen(skb),
130 PCI_DMA_TODEVICE);
131
132 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
133 unmap_cons++;
134 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
135 pci_unmap_page(bnad->pcidev,
136 pci_unmap_addr(&unmap_array[unmap_cons],
137 dma_addr),
138 skb_shinfo(skb)->frags[i].size,
139 PCI_DMA_TODEVICE);
140 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
141 0);
142 unmap_cons++;
143 }
144 dev_kfree_skb_any(skb);
145 }
146}
147
148/* Data Path Handlers */
149
150/*
151 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
152 * Can be called in a) Interrupt context
153 * b) Sending context
154 * c) Tasklet context
155 */
156static u32
157bnad_free_txbufs(struct bnad *bnad,
158 struct bna_tcb *tcb)
159{
160 u32 sent_packets = 0, sent_bytes = 0;
161 u16 wis, unmap_cons, updated_hw_cons;
162 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
163 struct bnad_skb_unmap *unmap_array;
164 struct sk_buff *skb;
165 int i;
166
 167	/*
 168	 * Just return if TX is stopped. This check is useful
 169	 * when bnad_free_txbufs() runs from a tasklet that was
 170	 * scheduled before bnad_cb_tx_cleanup() cleared the
 171	 * BNAD_RF_TX_STARTED bit, but actually executes only
 172	 * after that cleanup has run.
 173	 */
174 if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
175 return 0;
176
177 updated_hw_cons = *(tcb->hw_consumer_index);
178
179 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
180 updated_hw_cons, tcb->q_depth);
181
182 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
183
184 unmap_array = unmap_q->unmap_array;
185 unmap_cons = unmap_q->consumer_index;
186
187 prefetch(&unmap_array[unmap_cons + 1]);
188 while (wis) {
189 skb = unmap_array[unmap_cons].skb;
190
191 unmap_array[unmap_cons].skb = NULL;
192
193 sent_packets++;
194 sent_bytes += skb->len;
195 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
196
197 pci_unmap_single(bnad->pcidev,
198 pci_unmap_addr(&unmap_array[unmap_cons],
199 dma_addr), skb_headlen(skb),
200 PCI_DMA_TODEVICE);
201 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
202 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
203
204 prefetch(&unmap_array[unmap_cons + 1]);
205 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
206 prefetch(&unmap_array[unmap_cons + 1]);
207
208 pci_unmap_page(bnad->pcidev,
209 pci_unmap_addr(&unmap_array[unmap_cons],
210 dma_addr),
211 skb_shinfo(skb)->frags[i].size,
212 PCI_DMA_TODEVICE);
213 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
214 0);
215 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
216 }
217 dev_kfree_skb_any(skb);
218 }
219
220 /* Update consumer pointers. */
221 tcb->consumer_index = updated_hw_cons;
222 unmap_q->consumer_index = unmap_cons;
223
224 tcb->txq->tx_packets += sent_packets;
225 tcb->txq->tx_bytes += sent_bytes;
226
227 return sent_packets;
228}
229
 230/* Tx free tasklet function */
 231/* Frees completed buffers for all TCBs across all Tx objects */
 232/*
 233 * Scheduled from the sending context, so that the
 234 * heavyweight Tx lock is not held for too long in
 235 * that context.
 236 */
237static void
238bnad_tx_free_tasklet(unsigned long bnad_ptr)
239{
240 struct bnad *bnad = (struct bnad *)bnad_ptr;
241 struct bna_tcb *tcb;
242 u32 acked;
243 int i, j;
244
245 for (i = 0; i < bnad->num_tx; i++) {
246 for (j = 0; j < bnad->num_txq_per_tx; j++) {
247 tcb = bnad->tx_info[i].tcb[j];
248 if (!tcb)
249 continue;
250 if (((u16) (*tcb->hw_consumer_index) !=
251 tcb->consumer_index) &&
252 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
253 &tcb->flags))) {
254 acked = bnad_free_txbufs(bnad, tcb);
255 bna_ib_ack(tcb->i_dbell, acked);
256 smp_mb__before_clear_bit();
257 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
258 }
259 }
260 }
261}
262
263static u32
264bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
265{
266 struct net_device *netdev = bnad->netdev;
267 u32 sent;
268
269 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
270 return 0;
271
272 sent = bnad_free_txbufs(bnad, tcb);
273 if (sent) {
274 if (netif_queue_stopped(netdev) &&
275 netif_carrier_ok(netdev) &&
276 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
277 BNAD_NETIF_WAKE_THRESHOLD) {
278 netif_wake_queue(netdev);
279 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
280 }
281 bna_ib_ack(tcb->i_dbell, sent);
282 } else
283 bna_ib_ack(tcb->i_dbell, 0);
284
285 smp_mb__before_clear_bit();
286 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
287
288 return sent;
289}
290
291/* MSIX Tx Completion Handler */
292static irqreturn_t
293bnad_msix_tx(int irq, void *data)
294{
295 struct bna_tcb *tcb = (struct bna_tcb *)data;
296 struct bnad *bnad = tcb->bnad;
297
298 bnad_tx(bnad, tcb);
299
300 return IRQ_HANDLED;
301}
302
303static void
304bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
305{
306 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
307
308 rcb->producer_index = 0;
309 rcb->consumer_index = 0;
310
311 unmap_q->producer_index = 0;
312 unmap_q->consumer_index = 0;
313}
314
315static void
316bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
317{
318 struct bnad_unmap_q *unmap_q;
319 struct sk_buff *skb;
320
321 unmap_q = rcb->unmap_q;
322 while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
323 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
324 BUG_ON(!(skb));
325 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
326 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
327 unmap_array[unmap_q->consumer_index],
328 dma_addr), rcb->rxq->buffer_size +
329 NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
330 dev_kfree_skb(skb);
331 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
332 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
333 }
334
335 bnad_reset_rcb(bnad, rcb);
336}
337
338static void
339bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
340{
341 u16 to_alloc, alloced, unmap_prod, wi_range;
342 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
343 struct bnad_skb_unmap *unmap_array;
344 struct bna_rxq_entry *rxent;
345 struct sk_buff *skb;
346 dma_addr_t dma_addr;
347
348 alloced = 0;
349 to_alloc =
350 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
351
352 unmap_array = unmap_q->unmap_array;
353 unmap_prod = unmap_q->producer_index;
354
355 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
356
357 while (to_alloc--) {
358 if (!wi_range) {
359 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
360 wi_range);
361 }
362 skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
363 GFP_ATOMIC);
364 if (unlikely(!skb)) {
365 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
366 goto finishing;
367 }
368 skb->dev = bnad->netdev;
369 skb_reserve(skb, NET_IP_ALIGN);
370 unmap_array[unmap_prod].skb = skb;
371 dma_addr = pci_map_single(bnad->pcidev, skb->data,
372 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
373 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
374 dma_addr);
375 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
376 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
377
378 rxent++;
379 wi_range--;
380 alloced++;
381 }
382
383finishing:
384 if (likely(alloced)) {
385 unmap_q->producer_index = unmap_prod;
386 rcb->producer_index = unmap_prod;
387 smp_mb();
388 bna_rxq_prod_indx_doorbell(rcb);
389 }
390}
391
 392/*
 393 * Locking is required in the enable path because
 394 * it is called from NAPI poll context, where the
 395 * bna_lock is not already held, unlike in the
 396 * IRQ context.
 397 */
398static void
399bnad_enable_txrx_irqs(struct bnad *bnad)
400{
401 struct bna_tcb *tcb;
402 struct bna_ccb *ccb;
403 int i, j;
404 unsigned long flags;
405
406 spin_lock_irqsave(&bnad->bna_lock, flags);
407 for (i = 0; i < bnad->num_tx; i++) {
408 for (j = 0; j < bnad->num_txq_per_tx; j++) {
409 tcb = bnad->tx_info[i].tcb[j];
410 bna_ib_coalescing_timer_set(tcb->i_dbell,
411 tcb->txq->ib->ib_config.coalescing_timeo);
412 bna_ib_ack(tcb->i_dbell, 0);
413 }
414 }
415
416 for (i = 0; i < bnad->num_rx; i++) {
417 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
418 ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
419 bnad_enable_rx_irq_unsafe(ccb);
420 }
421 }
422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
423}
424
425static inline void
426bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
427{
428 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
429
430 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
431 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
432 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
433 bnad_alloc_n_post_rxbufs(bnad, rcb);
434 smp_mb__before_clear_bit();
435 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
436 }
437}
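/*
 * BNAD_RXQ_REFILL acts as a non-blocking mutex: this helper and
 * bnad_cb_rx_post() both gate the refill with test_and_set_bit(),
 * so whichever context wins posts the buffers and the other simply
 * skips its refill attempt.
 */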
438
439static u32
440bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
441{
442 struct bna_cq_entry *cmpl, *next_cmpl;
443 struct bna_rcb *rcb = NULL;
444 unsigned int wi_range, packets = 0, wis = 0;
445 struct bnad_unmap_q *unmap_q;
446 struct sk_buff *skb;
447 u32 flags;
448 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
449 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
450
451 prefetch(bnad->netdev);
452 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
453 wi_range);
454 BUG_ON(!(wi_range <= ccb->q_depth));
455 while (cmpl->valid && packets < budget) {
456 packets++;
457 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
458
459 if (qid0 == cmpl->rxq_id)
460 rcb = ccb->rcb[0];
461 else
462 rcb = ccb->rcb[1];
463
464 unmap_q = rcb->unmap_q;
465
466 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
467 BUG_ON(!(skb));
468 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
469 pci_unmap_single(bnad->pcidev,
470 pci_unmap_addr(&unmap_q->
471 unmap_array[unmap_q->
472 consumer_index],
473 dma_addr),
474 rcb->rxq->buffer_size,
475 PCI_DMA_FROMDEVICE);
476 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
477
 478		/* Could this be done more efficiently? */
479 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
480
481 wis++;
482 if (likely(--wi_range))
483 next_cmpl = cmpl + 1;
484 else {
485 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
486 wis = 0;
487 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
488 next_cmpl, wi_range);
489 BUG_ON(!(wi_range <= ccb->q_depth));
490 }
491 prefetch(next_cmpl);
492
493 flags = ntohl(cmpl->flags);
494 if (unlikely
495 (flags &
496 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
497 BNA_CQ_EF_TOO_LONG))) {
498 dev_kfree_skb_any(skb);
499 rcb->rxq->rx_packets_with_error++;
500 goto next;
501 }
502
503 skb_put(skb, ntohs(cmpl->length));
504 if (likely
505 (bnad->rx_csum &&
506 (((flags & BNA_CQ_EF_IPV4) &&
507 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
508 (flags & BNA_CQ_EF_IPV6)) &&
509 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
510 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
511 skb->ip_summed = CHECKSUM_UNNECESSARY;
512 else
513 skb_checksum_none_assert(skb);
514
515 rcb->rxq->rx_packets++;
516 rcb->rxq->rx_bytes += skb->len;
517 skb->protocol = eth_type_trans(skb, bnad->netdev);
518
519 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
520 struct bnad_rx_ctrl *rx_ctrl =
521 (struct bnad_rx_ctrl *)ccb->ctrl;
522 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
523 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
524 ntohs(cmpl->vlan_tag), skb);
525 else
526 vlan_hwaccel_receive_skb(skb,
527 bnad->vlan_grp,
528 ntohs(cmpl->vlan_tag));
529
530 } else { /* Not VLAN tagged/stripped */
531 struct bnad_rx_ctrl *rx_ctrl =
532 (struct bnad_rx_ctrl *)ccb->ctrl;
533 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
534 napi_gro_receive(&rx_ctrl->napi, skb);
535 else
536 netif_receive_skb(skb);
537 }
538
539next:
540 cmpl->valid = 0;
541 cmpl = next_cmpl;
542 }
543
544 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
545
546 if (likely(ccb)) {
547 bna_ib_ack(ccb->i_dbell, packets);
548 bnad_refill_rxq(bnad, ccb->rcb[0]);
549 if (ccb->rcb[1])
550 bnad_refill_rxq(bnad, ccb->rcb[1]);
551 } else
552 bna_ib_ack(ccb->i_dbell, 0);
553
554 return packets;
555}
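/*
 * Note the NAPI budget contract: bnad_poll_cq() consumes at most
 * 'budget' completions and returns the number processed, so the
 * polling caller can tell a drained CQ (return value < budget) from
 * one that still has work pending.
 */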
556
557static void
558bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
559{
560 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
561 bna_ib_ack(ccb->i_dbell, 0);
562}
563
564static void
565bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
566{
567 spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
568 bnad_enable_rx_irq_unsafe(ccb);
569 spin_unlock_irq(&bnad->bna_lock);
570}
571
572static void
573bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
574{
575 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
576 if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
577 bnad_disable_rx_irq(bnad, ccb);
578 __napi_schedule((&rx_ctrl->napi));
579 }
580 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
581}
582
583/* MSIX Rx Path Handler */
584static irqreturn_t
585bnad_msix_rx(int irq, void *data)
586{
587 struct bna_ccb *ccb = (struct bna_ccb *)data;
588 struct bnad *bnad = ccb->bnad;
589
590 bnad_netif_rx_schedule_poll(bnad, ccb);
591
592 return IRQ_HANDLED;
593}
594
595/* Interrupt handlers */
596
597/* Mbox Interrupt Handlers */
598static irqreturn_t
599bnad_msix_mbox_handler(int irq, void *data)
600{
601 u32 intr_status;
602 unsigned long flags;
603 struct net_device *netdev = data;
604 struct bnad *bnad;
605
606 bnad = netdev_priv(netdev);
607
608 /* BNA_ISR_GET(bnad); Inc Ref count */
609 spin_lock_irqsave(&bnad->bna_lock, flags);
610
611 bna_intr_status_get(&bnad->bna, intr_status);
612
613 if (BNA_IS_MBOX_ERR_INTR(intr_status))
614 bna_mbox_handler(&bnad->bna, intr_status);
615
616 spin_unlock_irqrestore(&bnad->bna_lock, flags);
617
618 /* BNAD_ISR_PUT(bnad); Dec Ref count */
619 return IRQ_HANDLED;
620}
621
622static irqreturn_t
623bnad_isr(int irq, void *data)
624{
625 int i, j;
626 u32 intr_status;
627 unsigned long flags;
628 struct net_device *netdev = data;
629 struct bnad *bnad = netdev_priv(netdev);
630 struct bnad_rx_info *rx_info;
631 struct bnad_rx_ctrl *rx_ctrl;
632
633 spin_lock_irqsave(&bnad->bna_lock, flags);
634
635 bna_intr_status_get(&bnad->bna, intr_status);
636 if (!intr_status) {
637 spin_unlock_irqrestore(&bnad->bna_lock, flags);
638 return IRQ_NONE;
639 }
640
641 if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
642 bna_mbox_handler(&bnad->bna, intr_status);
643 if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
644 spin_unlock_irqrestore(&bnad->bna_lock, flags);
645 goto done;
646 }
647 }
648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
649
650 /* Process data interrupts */
651 for (i = 0; i < bnad->num_rx; i++) {
652 rx_info = &bnad->rx_info[i];
653 if (!rx_info->rx)
654 continue;
655 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
656 rx_ctrl = &rx_info->rx_ctrl[j];
657 if (rx_ctrl->ccb)
658 bnad_netif_rx_schedule_poll(bnad,
659 rx_ctrl->ccb);
660 }
661 }
662done:
663 return IRQ_HANDLED;
664}
665
666/*
667 * Called in interrupt / callback context
668 * with bna_lock held, so cfg_flags access is OK
669 */
670static void
671bnad_enable_mbox_irq(struct bnad *bnad)
672{
673 int irq = BNAD_GET_MBOX_IRQ(bnad);
674
675 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
676 return;
677
678 if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
679 enable_irq(irq);
680 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
681}
682
 683/*
 684 * Called with bnad->bna_lock held because of the
 685 * bnad->cfg_flags access.
 686 */
687void
688bnad_disable_mbox_irq(struct bnad *bnad)
689{
690 int irq = BNAD_GET_MBOX_IRQ(bnad);
691
692 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
693 return;
694
695 if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
696 disable_irq_nosync(irq);
697 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
698}
699
700/* Control Path Handlers */
701
702/* Callbacks */
703void
704bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
705{
706 bnad_enable_mbox_irq(bnad);
707}
708
709void
710bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
711{
712 bnad_disable_mbox_irq(bnad);
713}
714
715void
716bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
717{
718 complete(&bnad->bnad_completions.ioc_comp);
719 bnad->bnad_completions.ioc_comp_status = status;
720}
721
722void
723bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
724{
725 complete(&bnad->bnad_completions.ioc_comp);
726 bnad->bnad_completions.ioc_comp_status = status;
727}
728
729static void
730bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
731{
732 struct bnad *bnad = (struct bnad *)arg;
733
734 complete(&bnad->bnad_completions.port_comp);
735
736 netif_carrier_off(bnad->netdev);
737}
738
739void
740bnad_cb_port_link_status(struct bnad *bnad,
741 enum bna_link_status link_status)
742{
 743	bool link_up = false;
744
745 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
746
747 if (link_status == BNA_CEE_UP) {
748 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
749 BNAD_UPDATE_CTR(bnad, cee_up);
750 } else
751 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
752
753 if (link_up) {
754 if (!netif_carrier_ok(bnad->netdev)) {
755 pr_warn("bna: %s link up\n",
756 bnad->netdev->name);
757 netif_carrier_on(bnad->netdev);
758 BNAD_UPDATE_CTR(bnad, link_toggle);
759 if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
760 /* Force an immediate Transmit Schedule */
761 pr_info("bna: %s TX_STARTED\n",
762 bnad->netdev->name);
763 netif_wake_queue(bnad->netdev);
764 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
765 } else {
766 netif_stop_queue(bnad->netdev);
767 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
768 }
769 }
770 } else {
771 if (netif_carrier_ok(bnad->netdev)) {
772 pr_warn("bna: %s link down\n",
773 bnad->netdev->name);
774 netif_carrier_off(bnad->netdev);
775 BNAD_UPDATE_CTR(bnad, link_toggle);
776 }
777 }
778}
779
780static void
781bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
782 enum bna_cb_status status)
783{
784 struct bnad *bnad = (struct bnad *)arg;
785
786 complete(&bnad->bnad_completions.tx_comp);
787}
788
789static void
790bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
791{
792 struct bnad_tx_info *tx_info =
793 (struct bnad_tx_info *)tcb->txq->tx->priv;
794 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
795
796 tx_info->tcb[tcb->id] = tcb;
797 unmap_q->producer_index = 0;
798 unmap_q->consumer_index = 0;
799 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
800}
801
802static void
803bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
804{
805 struct bnad_tx_info *tx_info =
806 (struct bnad_tx_info *)tcb->txq->tx->priv;
807
808 tx_info->tcb[tcb->id] = NULL;
809}
810
811static void
812bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
813{
814 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
815
816 unmap_q->producer_index = 0;
817 unmap_q->consumer_index = 0;
818 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
819}
820
821static void
822bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
823{
824 struct bnad_rx_info *rx_info =
825 (struct bnad_rx_info *)ccb->cq->rx->priv;
826
827 rx_info->rx_ctrl[ccb->id].ccb = ccb;
828 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
829}
830
831static void
832bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
833{
834 struct bnad_rx_info *rx_info =
835 (struct bnad_rx_info *)ccb->cq->rx->priv;
836
837 rx_info->rx_ctrl[ccb->id].ccb = NULL;
838}
839
840static void
841bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
842{
843 struct bnad_tx_info *tx_info =
844 (struct bnad_tx_info *)tcb->txq->tx->priv;
845
846 if (tx_info != &bnad->tx_info[0])
847 return;
848
849 clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
850 netif_stop_queue(bnad->netdev);
851 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
852}
853
854static void
855bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
856{
857 if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
858 return;
859
860 if (netif_carrier_ok(bnad->netdev)) {
861 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
862 netif_wake_queue(bnad->netdev);
863 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
864 }
865}
866
867static void
868bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
869{
870 struct bnad_unmap_q *unmap_q;
871
872 if (!tcb || !tcb->unmap_q)
873 return;
874 unmap_q = tcb->unmap_q;
875 if (!unmap_q->unmap_array)
876 return;
877
878 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
879 return;
880
881 bnad_free_all_txbufs(bnad, tcb);
882
883 unmap_q->producer_index = 0;
884 unmap_q->consumer_index = 0;
885
886 smp_mb__before_clear_bit();
887 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
888}
889
890static void
891bnad_cb_rx_cleanup(struct bnad *bnad,
892 struct bna_ccb *ccb)
893{
894 bnad_cq_cmpl_init(bnad, ccb);
895
896 bnad_free_rxbufs(bnad, ccb->rcb[0]);
897 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
898
899 if (ccb->rcb[1]) {
900 bnad_free_rxbufs(bnad, ccb->rcb[1]);
901 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
902 }
903}
904
905static void
906bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
907{
908 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
909
910 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
911
912 /* Now allocate & post buffers for this RCB */
913 /* !!Allocation in callback context */
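 /*
  * Refill only when at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT entries
  * are free: the right shift below is a cheap
  * "free count >= threshold" test.
  */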
914 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
915 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
916 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
917 bnad_alloc_n_post_rxbufs(bnad, rcb);
918 smp_mb__before_clear_bit();
919 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
920 }
921}
922
923static void
924bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
925 enum bna_cb_status status)
926{
927 struct bnad *bnad = (struct bnad *)arg;
928
929 complete(&bnad->bnad_completions.rx_comp);
930}
931
932static void
933bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
934 enum bna_cb_status status)
935{
936 bnad->bnad_completions.mcast_comp_status = status;
937 complete(&bnad->bnad_completions.mcast_comp);
938}
939
940void
941bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
942 struct bna_stats *stats)
943{
944 if (status == BNA_CB_SUCCESS)
945 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
946
947 if (!netif_running(bnad->netdev) ||
948 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
949 return;
950
951 mod_timer(&bnad->stats_timer,
952 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
953}
954
955void
956bnad_cb_stats_clr(struct bnad *bnad)
957{
958}
959
960/* Resource allocation, free functions */
961
962static void
963bnad_mem_free(struct bnad *bnad,
964 struct bna_mem_info *mem_info)
965{
966 int i;
967 dma_addr_t dma_pa;
968
969 if (mem_info->mdl == NULL)
970 return;
971
972 for (i = 0; i < mem_info->num; i++) {
973 if (mem_info->mdl[i].kva != NULL) {
974 if (mem_info->mem_type == BNA_MEM_T_DMA) {
975 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
976 dma_pa);
977 pci_free_consistent(bnad->pcidev,
978 mem_info->mdl[i].len,
979 mem_info->mdl[i].kva, dma_pa);
980 } else
981 kfree(mem_info->mdl[i].kva);
982 }
983 }
984 kfree(mem_info->mdl);
985 mem_info->mdl = NULL;
986}
987
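/*
 * Allocates mem_info->num buffers of mem_info->len bytes each:
 * coherent DMA memory via pci_alloc_consistent() for BNA_MEM_T_DMA,
 * plain kzalloc()ed kernel memory for everything else.
 */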
988static int
989bnad_mem_alloc(struct bnad *bnad,
990 struct bna_mem_info *mem_info)
991{
992 int i;
993 dma_addr_t dma_pa;
994
995 if ((mem_info->num == 0) || (mem_info->len == 0)) {
996 mem_info->mdl = NULL;
997 return 0;
998 }
999
1000 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1001 GFP_KERNEL);
1002 if (mem_info->mdl == NULL)
1003 return -ENOMEM;
1004
1005 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1006 for (i = 0; i < mem_info->num; i++) {
1007 mem_info->mdl[i].len = mem_info->len;
1008 mem_info->mdl[i].kva =
1009 pci_alloc_consistent(bnad->pcidev,
1010 mem_info->len, &dma_pa);
1011
1012 if (mem_info->mdl[i].kva == NULL)
1013 goto err_return;
1014
1015 BNA_SET_DMA_ADDR(dma_pa,
1016 &(mem_info->mdl[i].dma));
1017 }
1018 } else {
1019 for (i = 0; i < mem_info->num; i++) {
1020 mem_info->mdl[i].len = mem_info->len;
1021 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1022 GFP_KERNEL);
1023 if (mem_info->mdl[i].kva == NULL)
1024 goto err_return;
1025 }
1026 }
1027
1028 return 0;
1029
1030err_return:
1031 bnad_mem_free(bnad, mem_info);
1032 return -ENOMEM;
1033}
1034
1035/* Free IRQ for Mailbox */
1036static void
1037bnad_mbox_irq_free(struct bnad *bnad,
1038 struct bna_intr_info *intr_info)
1039{
1040 int irq;
1041 unsigned long flags;
1042
1043 if (intr_info->idl == NULL)
1044 return;
1045
1046 spin_lock_irqsave(&bnad->bna_lock, flags);
1047
1048 bnad_disable_mbox_irq(bnad);
1049
1050 irq = BNAD_GET_MBOX_IRQ(bnad);
1051 free_irq(irq, bnad->netdev);
1052
1053 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1054
1055 kfree(intr_info->idl);
1056}
1057
1058/*
1059 * Allocates the IRQ for the mailbox, but keeps it disabled.
1060 * It will be enabled once we get the mbox-enable callback
1061 * from bna.
1062 */
1063static int
1064bnad_mbox_irq_alloc(struct bnad *bnad,
1065 struct bna_intr_info *intr_info)
1066{
1067 int err;
1068 unsigned long flags, irq_flags;
1069 u32 irq;
1070 irq_handler_t irq_handler;
1071
1072 /* Mbox should use only 1 vector */
1073
1074 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1075 if (!intr_info->idl)
1076 return -ENOMEM;
1077
1078 spin_lock_irqsave(&bnad->bna_lock, flags);
1079 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1080 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1081 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1082 irq_flags = 0;
1083 intr_info->intr_type = BNA_INTR_T_MSIX;
1084 intr_info->idl[0].vector = bnad->msix_num - 1;
1085 } else {
1086 irq_handler = (irq_handler_t)bnad_isr;
1087 irq = bnad->pcidev->irq;
1088 irq_flags = IRQF_SHARED;
1089 intr_info->intr_type = BNA_INTR_T_INTX;
1090 /* intr_info->idl[0].vector is already 0 from the kzalloc() above */
1091 }
1092 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1093
1094 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1095
1096 err = request_irq(irq, irq_handler, irq_flags,
1097 bnad->mbox_irq_name, bnad->netdev);
1098 if (err) {
1099 kfree(intr_info->idl);
1100 intr_info->idl = NULL;
1101 return err;
1102 }
1103
1104 spin_lock_irqsave(&bnad->bna_lock, flags);
1105 bnad_disable_mbox_irq(bnad);
1106 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1107 return 0;
1108}
1109
1110static void
1111bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1112{
1113 kfree(intr_info->idl);
1114 intr_info->idl = NULL;
1115}
1116
1117/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1118static int
1119bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1120 uint txrx_id, struct bna_intr_info *intr_info)
1121{
1122 int i, vector_start = 0;
1123 u32 cfg_flags;
1124 unsigned long flags;
1125
1126 spin_lock_irqsave(&bnad->bna_lock, flags);
1127 cfg_flags = bnad->cfg_flags;
1128 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1129
1130 if (cfg_flags & BNAD_CF_MSIX) {
1131 intr_info->intr_type = BNA_INTR_T_MSIX;
1132 intr_info->idl = kcalloc(intr_info->num,
1133 sizeof(struct bna_intr_descr),
1134 GFP_KERNEL);
1135 if (!intr_info->idl)
1136 return -ENOMEM;
1137
1138 switch (src) {
1139 case BNAD_INTR_TX:
1140 vector_start = txrx_id;
1141 break;
1142
1143 case BNAD_INTR_RX:
1144 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1145 txrx_id;
1146 break;
1147
1148 default:
1149 BUG();
1150 }
1151
1152 for (i = 0; i < intr_info->num; i++)
1153 intr_info->idl[i].vector = vector_start + i;
1154 } else {
1155 intr_info->intr_type = BNA_INTR_T_INTX;
1156 intr_info->num = 1;
1157 intr_info->idl = kcalloc(intr_info->num,
1158 sizeof(struct bna_intr_descr),
1159 GFP_KERNEL);
1160 if (!intr_info->idl)
1161 return -ENOMEM;
1162
1163 switch (src) {
1164 case BNAD_INTR_TX:
1165 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1166 break;
1167
1168 case BNAD_INTR_RX:
1169 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1170 break;
1171 }
1172 }
1173 return 0;
1174}
1175
1176/**
1177 * NOTE: Should be called for MSIX only
1178 * Unregisters Tx MSIX vector(s) from the kernel
1179 */
1180static void
1181bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1182 int num_txqs)
1183{
1184 int i;
1185 int vector_num;
1186
1187 for (i = 0; i < num_txqs; i++) {
1188 if (tx_info->tcb[i] == NULL)
1189 continue;
1190
1191 vector_num = tx_info->tcb[i]->intr_vector;
1192 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1193 }
1194}
1195
1196/**
1197 * NOTE: Should be called for MSIX only
1198 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1199 */
1200static int
1201bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1202 uint tx_id, int num_txqs)
1203{
1204 int i;
1205 int err;
1206 int vector_num;
1207
1208 for (i = 0; i < num_txqs; i++) {
1209 vector_num = tx_info->tcb[i]->intr_vector;
1210 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1211 tx_id + tx_info->tcb[i]->id);
1212 err = request_irq(bnad->msix_table[vector_num].vector,
1213 (irq_handler_t)bnad_msix_tx, 0,
1214 tx_info->tcb[i]->name,
1215 tx_info->tcb[i]);
1216 if (err)
1217 goto err_return;
1218 }
1219
1220 return 0;
1221
1222err_return:
1223 if (i > 0)
1224 bnad_tx_msix_unregister(bnad, tx_info, i); /* undo the i IRQs already registered */
1225 return err;
1226}
1227
1228/**
1229 * NOTE: Should be called for MSIX only
1230 * Unregisters Rx MSIX vector(s) from the kernel
1231 */
1232static void
1233bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1234 int num_rxps)
1235{
1236 int i;
1237 int vector_num;
1238
1239 for (i = 0; i < num_rxps; i++) {
1240 if (rx_info->rx_ctrl[i].ccb == NULL)
1241 continue;
1242
1243 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1244 free_irq(bnad->msix_table[vector_num].vector,
1245 rx_info->rx_ctrl[i].ccb);
1246 }
1247}
1248
1249/**
1250 * NOTE: Should be called for MSIX only
1251 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1252 */
1253static int
1254bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1255 uint rx_id, int num_rxps)
1256{
1257 int i;
1258 int err;
1259 int vector_num;
1260
1261 for (i = 0; i < num_rxps; i++) {
1262 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1263 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1264 bnad->netdev->name,
1265 rx_id + rx_info->rx_ctrl[i].ccb->id);
1266 err = request_irq(bnad->msix_table[vector_num].vector,
1267 (irq_handler_t)bnad_msix_rx, 0,
1268 rx_info->rx_ctrl[i].ccb->name,
1269 rx_info->rx_ctrl[i].ccb);
1270 if (err)
1271 goto err_return;
1272 }
1273
1274 return 0;
1275
1276err_return:
1277 if (i > 0)
1278 bnad_rx_msix_unregister(bnad, rx_info, i); /* undo the i IRQs already registered */
1279 return err;
1280}
1281
1282/* Free Tx object Resources */
1283static void
1284bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1285{
1286 int i;
1287
1288 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1289 if (res_info[i].res_type == BNA_RES_T_MEM)
1290 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1291 else if (res_info[i].res_type == BNA_RES_T_INTR)
1292 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1293 }
1294}
1295
1296/* Allocates memory and interrupt resources for Tx object */
1297static int
1298bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1299 uint tx_id)
1300{
1301 int i, err = 0;
1302
1303 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1304 if (res_info[i].res_type == BNA_RES_T_MEM)
1305 err = bnad_mem_alloc(bnad,
1306 &res_info[i].res_u.mem_info);
1307 else if (res_info[i].res_type == BNA_RES_T_INTR)
1308 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1309 &res_info[i].res_u.intr_info);
1310 if (err)
1311 goto err_return;
1312 }
1313 return 0;
1314
1315err_return:
1316 bnad_tx_res_free(bnad, res_info);
1317 return err;
1318}
1319
1320/* Free Rx object Resources */
1321static void
1322bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1323{
1324 int i;
1325
1326 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1327 if (res_info[i].res_type == BNA_RES_T_MEM)
1328 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1329 else if (res_info[i].res_type == BNA_RES_T_INTR)
1330 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1331 }
1332}
1333
1334/* Allocates memory and interrupt resources for Rx object */
1335static int
1336bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1337 uint rx_id)
1338{
1339 int i, err = 0;
1340
1341 /* All memory needs to be allocated before setup_ccbs */
1342 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1343 if (res_info[i].res_type == BNA_RES_T_MEM)
1344 err = bnad_mem_alloc(bnad,
1345 &res_info[i].res_u.mem_info);
1346 else if (res_info[i].res_type == BNA_RES_T_INTR)
1347 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1348 &res_info[i].res_u.intr_info);
1349 if (err)
1350 goto err_return;
1351 }
1352 return 0;
1353
1354err_return:
1355 bnad_rx_res_free(bnad, res_info);
1356 return err;
1357}
1358
1359/* Timer callbacks */
1360/* a) IOC timer */
1361static void
1362bnad_ioc_timeout(unsigned long data)
1363{
1364 struct bnad *bnad = (struct bnad *)data;
1365 unsigned long flags;
1366
1367 spin_lock_irqsave(&bnad->bna_lock, flags);
1368 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1369 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1370}
1371
1372static void
1373bnad_ioc_hb_check(unsigned long data)
1374{
1375 struct bnad *bnad = (struct bnad *)data;
1376 unsigned long flags;
1377
1378 spin_lock_irqsave(&bnad->bna_lock, flags);
1379 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1380 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1381}
1382
1383static void
1384bnad_ioc_sem_timeout(unsigned long data)
1385{
1386 struct bnad *bnad = (struct bnad *)data;
1387 unsigned long flags;
1388
1389 spin_lock_irqsave(&bnad->bna_lock, flags);
1390 bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
1391 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1392}
1393
1394/*
1395 * All timer routines use bnad->bna_lock to protect against
1396 * the following race, which may occur in the absence of locking:
1397 *    Time    CPU m            CPU n
1398 *    0       1 = test_bit
1399 *    1                        clear_bit
1400 *    2                        del_timer_sync
1401 *    3       mod_timer
1402 */
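/*
 * A minimal sketch of the guarded-stop pattern the timer code below
 * follows (RUNNING_BIT stands in for the relevant run_flags bit): the
 * bit is flipped under bna_lock, while the potentially sleeping
 * del_timer_sync() runs outside of it:
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	to_del = test_and_clear_bit(RUNNING_BIT, &bnad->run_flags);
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	if (to_del)
 *		del_timer_sync(&timer);
 */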
1403
1404/* b) Dynamic Interrupt Moderation Timer */
1405static void
1406bnad_dim_timeout(unsigned long data)
1407{
1408 struct bnad *bnad = (struct bnad *)data;
1409 struct bnad_rx_info *rx_info;
1410 struct bnad_rx_ctrl *rx_ctrl;
1411 int i, j;
1412 unsigned long flags;
1413
1414 if (!netif_carrier_ok(bnad->netdev))
1415 return;
1416
1417 spin_lock_irqsave(&bnad->bna_lock, flags);
1418 for (i = 0; i < bnad->num_rx; i++) {
1419 rx_info = &bnad->rx_info[i];
1420 if (!rx_info->rx)
1421 continue;
1422 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1423 rx_ctrl = &rx_info->rx_ctrl[j];
1424 if (!rx_ctrl->ccb)
1425 continue;
1426 bna_rx_dim_update(rx_ctrl->ccb);
1427 }
1428 }
1429
1430 /* Checking BNAD_RF_DIM_TIMER_RUNNING here does not eliminate the race */
1431 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1432 mod_timer(&bnad->dim_timer,
1433 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1434 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1435}
1436
1437/* c) Statistics Timer */
1438static void
1439bnad_stats_timeout(unsigned long data)
1440{
1441 struct bnad *bnad = (struct bnad *)data;
1442 unsigned long flags;
1443
1444 if (!netif_running(bnad->netdev) ||
1445 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1446 return;
1447
1448 spin_lock_irqsave(&bnad->bna_lock, flags);
1449 bna_stats_get(&bnad->bna);
1450 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1451}
1452
1453/*
1454 * Set up timer for DIM
1455 * Called with bnad->bna_lock held
1456 */
1457void
1458bnad_dim_timer_start(struct bnad *bnad)
1459{
1460 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1461 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1462 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1463 (unsigned long)bnad);
1464 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1465 mod_timer(&bnad->dim_timer,
1466 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1467 }
1468}
1469
1470/*
1471 * Set up timer for statistics
1472 * Called with mutex_lock(&bnad->conf_mutex) held
1473 */
1474static void
1475bnad_stats_timer_start(struct bnad *bnad)
1476{
1477 unsigned long flags;
1478
1479 spin_lock_irqsave(&bnad->bna_lock, flags);
1480 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1481 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1482 (unsigned long)bnad);
1483 mod_timer(&bnad->stats_timer,
1484 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1485 }
1486 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1487
1488}
1489
1490/*
1491 * Stops the stats timer
1492 * Called with mutex_lock(&bnad->conf_mutex) held
1493 */
1494static void
1495bnad_stats_timer_stop(struct bnad *bnad)
1496{
1497 int to_del = 0;
1498 unsigned long flags;
1499
1500 spin_lock_irqsave(&bnad->bna_lock, flags);
1501 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1502 to_del = 1;
1503 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1504 if (to_del)
1505 del_timer_sync(&bnad->stats_timer);
1506}
1507
1508/* Utilities */
1509
1510static void
1511bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1512{
1513 int i = 1; /* Index 0 has broadcast address */
1514 struct netdev_hw_addr *mc_addr;
1515
1516 netdev_for_each_mc_addr(mc_addr, netdev) {
1517 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1518 ETH_ALEN);
1519 i++;
1520 }
1521}
1522
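/*
 * NAPI poll handlers: per the NAPI contract, returning 'budget' keeps
 * the instance scheduled; returning less requires a napi_complete()
 * and re-enabling the device interrupt, which is what the poll_exit
 * paths below do.
 */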
1523static int
1524bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1525{
1526 struct bnad_rx_ctrl *rx_ctrl =
1527 container_of(napi, struct bnad_rx_ctrl, napi);
1528 struct bna_ccb *ccb;
1529 struct bnad *bnad;
1530 int rcvd = 0;
1531
1532 ccb = rx_ctrl->ccb;
1533
1534 bnad = ccb->bnad;
1535
1536 if (!netif_carrier_ok(bnad->netdev))
1537 goto poll_exit;
1538
1539 rcvd = bnad_poll_cq(bnad, ccb, budget);
1540 if (rcvd == budget)
1541 return rcvd;
1542
1543poll_exit:
1544 napi_complete(napi);
1545
1546 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1547
1548 bnad_enable_rx_irq(bnad, ccb);
1549 return rcvd;
1550}
1551
1552static int
1553bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
1554{
1555 struct bnad_rx_ctrl *rx_ctrl =
1556 container_of(napi, struct bnad_rx_ctrl, napi);
1557 struct bna_ccb *ccb;
1558 struct bnad *bnad;
1559 int rcvd = 0;
1560 int i, j;
1561
1562 ccb = rx_ctrl->ccb;
1563
1564 bnad = ccb->bnad;
1565
1566 if (!netif_carrier_ok(bnad->netdev))
1567 goto poll_exit;
1568
1569 /* Handle Tx Completions, if any */
1570 for (i = 0; i < bnad->num_tx; i++) {
1571 for (j = 0; j < bnad->num_txq_per_tx; j++)
1572 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
1573 }
1574
1575 /* Handle Rx Completions */
1576 rcvd = bnad_poll_cq(bnad, ccb, budget);
1577 if (rcvd == budget)
1578 return rcvd;
1579poll_exit:
1580 napi_complete(napi);
1581
1582 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1583
1584 bnad_enable_txrx_irqs(bnad);
1585 return rcvd;
1586}
1587
1588static void
1589bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1590{
1591 int (*napi_poll) (struct napi_struct *, int);
1592 struct bnad_rx_ctrl *rx_ctrl;
1593 int i;
1594 unsigned long flags;
1595
1596 spin_lock_irqsave(&bnad->bna_lock, flags);
1597 if (bnad->cfg_flags & BNAD_CF_MSIX)
1598 napi_poll = bnad_napi_poll_rx;
1599 else
1600 napi_poll = bnad_napi_poll_txrx;
1601 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1602
1603 /* Initialize & enable NAPI */
1604 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1605 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1606 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1607 napi_poll, 64);
1608 napi_enable(&rx_ctrl->napi);
1609 }
1610}
1611
1612static void
1613bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1614{
1615 int i;
1616
1617 /* First disable and then clean up */
1618 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1619 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1620 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1621 }
1622}
1623
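/*
 * Teardown handshake used by the cleanup routines below: arm a
 * completion, issue the disable request to bna under bna_lock with a
 * callback that fires complete(), then block in wait_for_completion()
 * until the request is acknowledged.
 */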
1624/* Should be called with conf_lock held */
1625void
1626bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1627{
1628 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1629 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1630 unsigned long flags;
1631
1632 if (!tx_info->tx)
1633 return;
1634
1635 init_completion(&bnad->bnad_completions.tx_comp);
1636 spin_lock_irqsave(&bnad->bna_lock, flags);
1637 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1638 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1639 wait_for_completion(&bnad->bnad_completions.tx_comp);
1640
1641 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1642 bnad_tx_msix_unregister(bnad, tx_info,
1643 bnad->num_txq_per_tx);
1644
1645 spin_lock_irqsave(&bnad->bna_lock, flags);
1646 bna_tx_destroy(tx_info->tx);
1647 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1648
1649 tx_info->tx = NULL;
1650
1651 if (tx_id == 0)
1652 tasklet_kill(&bnad->tx_free_tasklet);
1653
1654 bnad_tx_res_free(bnad, res_info);
1655}
1656
1657/* Should be called with conf_lock held */
1658int
1659bnad_setup_tx(struct bnad *bnad, uint tx_id)
1660{
1661 int err;
1662 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1663 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1664 struct bna_intr_info *intr_info =
1665 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1666 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1667 struct bna_tx_event_cbfn tx_cbfn;
1668 struct bna_tx *tx;
1669 unsigned long flags;
1670
1671 /* Initialize the Tx object configuration */
1672 tx_config->num_txq = bnad->num_txq_per_tx;
1673 tx_config->txq_depth = bnad->txq_depth;
1674 tx_config->tx_type = BNA_TX_T_REGULAR;
1675
1676 /* Initialize the tx event handlers */
1677 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1678 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1679 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1680 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1681 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1682
1683 /* Get BNA's resource requirement for one tx object */
1684 spin_lock_irqsave(&bnad->bna_lock, flags);
1685 bna_tx_res_req(bnad->num_txq_per_tx,
1686 bnad->txq_depth, res_info);
1687 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1688
1689 /* Fill Unmap Q memory requirements */
1690 BNAD_FILL_UNMAPQ_MEM_REQ(
1691 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1692 bnad->num_txq_per_tx,
1693 BNAD_TX_UNMAPQ_DEPTH);
1694
1695 /* Allocate resources */
1696 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1697 if (err)
1698 return err;
1699
1700 /* Ask BNA to create one Tx object, supplying required resources */
1701 spin_lock_irqsave(&bnad->bna_lock, flags);
1702 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1703 tx_info);
1704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1705 if (!tx)
1706 goto err_return;
1707 tx_info->tx = tx;
1708
1709 /* Register ISR for the Tx object */
1710 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1711 err = bnad_tx_msix_register(bnad, tx_info,
1712 tx_id, bnad->num_txq_per_tx);
1713 if (err)
1714 goto err_return;
1715 }
1716
1717 spin_lock_irqsave(&bnad->bna_lock, flags);
1718 bna_tx_enable(tx);
1719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1720
1721 return 0;
1722
1723err_return:
1724 bnad_tx_res_free(bnad, res_info);
1725 return err;
1726}
1727
1728/* Setup the rx config for bna_rx_create */
1729/* bnad decides the configuration */
1730static void
1731bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1732{
1733 rx_config->rx_type = BNA_RX_T_REGULAR;
1734 rx_config->num_paths = bnad->num_rxp_per_rx;
1735
1736 if (bnad->num_rxp_per_rx > 1) {
1737 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1738 rx_config->rss_config.hash_type =
1739 (BFI_RSS_T_V4_TCP |
1740 BFI_RSS_T_V6_TCP |
1741 BFI_RSS_T_V4_IP |
1742 BFI_RSS_T_V6_IP);
1743 rx_config->rss_config.hash_mask =
1744 bnad->num_rxp_per_rx - 1;
1745 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1746 sizeof(rx_config->rss_config.toeplitz_hash_key));
1747 } else {
1748 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1749 memset(&rx_config->rss_config, 0,
1750 sizeof(rx_config->rss_config));
1751 }
1752 rx_config->rxp_type = BNA_RXP_SLR;
1753 rx_config->q_depth = bnad->rxq_depth;
1754
1755 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1756
1757 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1758}
1759
1760/* Called with mutex_lock(&bnad->conf_mutex) held */
1761void
1762bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1763{
1764 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1765 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1766 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1767 unsigned long flags;
1768 int dim_timer_del = 0;
1769
1770 if (!rx_info->rx)
1771 return;
1772
1773 if (rx_id == 0) {
1774 spin_lock_irqsave(&bnad->bna_lock, flags);
1775 dim_timer_del = bnad_dim_timer_running(bnad);
1776 if (dim_timer_del)
1777 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1778 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1779 if (dim_timer_del)
1780 del_timer_sync(&bnad->dim_timer);
1781 }
1782
1783 bnad_napi_disable(bnad, rx_id);
1784
1785 init_completion(&bnad->bnad_completions.rx_comp);
1786 spin_lock_irqsave(&bnad->bna_lock, flags);
1787 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1788 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1789 wait_for_completion(&bnad->bnad_completions.rx_comp);
1790
1791 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1792 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1793
1794 spin_lock_irqsave(&bnad->bna_lock, flags);
1795 bna_rx_destroy(rx_info->rx);
1796 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1797
1798 rx_info->rx = NULL;
1799
1800 bnad_rx_res_free(bnad, res_info);
1801}
1802
1803/* Called with mutex_lock(&bnad->conf_mutex) held */
1804int
1805bnad_setup_rx(struct bnad *bnad, uint rx_id)
1806{
1807 int err;
1808 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1809 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1810 struct bna_intr_info *intr_info =
1811 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1812 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1813 struct bna_rx_event_cbfn rx_cbfn;
1814 struct bna_rx *rx;
1815 unsigned long flags;
1816
1817 /* Initialize the Rx object configuration */
1818 bnad_init_rx_config(bnad, rx_config);
1819
1820 /* Initialize the Rx event handlers */
1821 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1822 rx_cbfn.rcb_destroy_cbfn = NULL;
1823 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1824 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1825 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1826 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1827
1828 /* Get BNA's resource requirement for one Rx object */
1829 spin_lock_irqsave(&bnad->bna_lock, flags);
1830 bna_rx_res_req(rx_config, res_info);
1831 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1832
1833 /* Fill Unmap Q memory requirements */
1834 BNAD_FILL_UNMAPQ_MEM_REQ(
1835 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1836 rx_config->num_paths +
1837 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1838 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1839
1840 /* Allocate resource */
1841 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1842 if (err)
1843 return err;
1844
1845 /* Ask BNA to create one Rx object, supplying required resources */
1846 spin_lock_irqsave(&bnad->bna_lock, flags);
1847 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1848 rx_info);
1849 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1850 if (!rx)
1851 goto err_return;
1852 rx_info->rx = rx;
1853
1854 /* Register ISR for the Rx object */
1855 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1856 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1857 rx_config->num_paths);
1858 if (err)
1859 goto err_return;
1860 }
1861
1862 /* Enable NAPI */
1863 bnad_napi_enable(bnad, rx_id);
1864
1865 spin_lock_irqsave(&bnad->bna_lock, flags);
1866 if (rx_id == 0) {
1867 /* Set up Dynamic Interrupt Moderation Vector */
1868 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1869 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1870
1871 /* Enable VLAN filtering only on the default Rx */
1872 bna_rx_vlanfilter_enable(rx);
1873
1874 /* Start the DIM timer */
1875 bnad_dim_timer_start(bnad);
1876 }
1877
1878 bna_rx_enable(rx);
1879 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1880
1881 return 0;
1882
1883err_return:
1884 bnad_cleanup_rx(bnad, rx_id);
1885 return err;
1886}
1887
1888/* Called with conf_lock & bnad->bna_lock held */
1889void
1890bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1891{
1892 struct bnad_tx_info *tx_info;
1893
1894 tx_info = &bnad->tx_info[0];
1895 if (!tx_info->tx)
1896 return;
1897
1898 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1899}
1900
1901/* Called with conf_lock & bnad->bna_lock held */
1902void
1903bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1904{
1905 struct bnad_rx_info *rx_info;
1906 int i;
1907
1908 for (i = 0; i < bnad->num_rx; i++) {
1909 rx_info = &bnad->rx_info[i];
1910 if (!rx_info->rx)
1911 continue;
1912 bna_rx_coalescing_timeo_set(rx_info->rx,
1913 bnad->rx_coalescing_timeo);
1914 }
1915}
1916
1917/*
1918 * Called with bnad->bna_lock held
1919 */
1920static int
1921bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1922{
1923 int ret;
1924
1925 if (!is_valid_ether_addr(mac_addr))
1926 return -EADDRNOTAVAIL;
1927
1928 /* If datapath is down, pretend everything went through */
1929 if (!bnad->rx_info[0].rx)
1930 return 0;
1931
1932 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1933 if (ret != BNA_CB_SUCCESS)
1934 return -EADDRNOTAVAIL;
1935
1936 return 0;
1937}
1938
1939/* Should be called with conf_lock held */
1940static int
1941bnad_enable_default_bcast(struct bnad *bnad)
1942{
1943 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1944 int ret;
1945 unsigned long flags;
1946
1947 init_completion(&bnad->bnad_completions.mcast_comp);
1948
1949 spin_lock_irqsave(&bnad->bna_lock, flags);
1950 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1951 bnad_cb_rx_mcast_add);
1952 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1953
1954 if (ret == BNA_CB_SUCCESS)
1955 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1956 else
1957 return -ENODEV;
1958
1959 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1960 return -ENODEV;
1961
1962 return 0;
1963}
1964
1965/* Statistics utilities */
1966void
1967bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1968{
1969 int i, j;
1970
1971 for (i = 0; i < bnad->num_rx; i++) {
1972 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1973 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1974 stats->rx_packets += bnad->rx_info[i].
1975 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
1976 stats->rx_bytes += bnad->rx_info[i].
1977 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
1978 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1979 bnad->rx_info[i].rx_ctrl[j].ccb->
1980 rcb[1]->rxq) {
1981 stats->rx_packets +=
1982 bnad->rx_info[i].rx_ctrl[j].
1983 ccb->rcb[1]->rxq->rx_packets;
1984 stats->rx_bytes +=
1985 bnad->rx_info[i].rx_ctrl[j].
1986 ccb->rcb[1]->rxq->rx_bytes;
1987 }
1988 }
1989 }
1990 }
1991 for (i = 0; i < bnad->num_tx; i++) {
1992 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1993 if (bnad->tx_info[i].tcb[j]) {
1994 stats->tx_packets +=
1995 bnad->tx_info[i].tcb[j]->txq->tx_packets;
1996 stats->tx_bytes +=
1997 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
1998 }
1999 }
2000 }
2001}
2002
2003/*
2004 * Must be called with the bna_lock held.
2005 */
2006void
2007bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2008{
2009 struct bfi_ll_stats_mac *mac_stats;
2010 u64 bmap;
2011 int i;
2012
2013 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2014 stats->rx_errors =
2015 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2016 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2017 mac_stats->rx_undersize;
2018 stats->tx_errors = mac_stats->tx_fcs_error +
2019 mac_stats->tx_undersize;
2020 stats->rx_dropped = mac_stats->rx_drop;
2021 stats->tx_dropped = mac_stats->tx_drop;
2022 stats->multicast = mac_stats->rx_multicast;
2023 stats->collisions = mac_stats->tx_total_collision;
2024
2025 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2026
2027 /* receive ring buffer overflow ?? */
2028
2029 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2030 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2031 /* receiver FIFO overrun */
2032 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2033 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2034 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2035 if (bmap & 1) {
2036 stats->rx_fifo_errors +=
2037 bnad->stats.bna_stats->
2038 hw_stats->rxf_stats[i].frame_drops;
2039 break;
2040 }
2041 bmap >>= 1;
2042 }
2043}
2044
2045static void
2046bnad_mbox_irq_sync(struct bnad *bnad)
2047{
2048 u32 irq;
2049 unsigned long flags;
2050
2051 spin_lock_irqsave(&bnad->bna_lock, flags);
2052 if (bnad->cfg_flags & BNAD_CF_MSIX)
2053 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2054 else
2055 irq = bnad->pcidev->irq;
2056 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2057
2058 synchronize_irq(irq);
2059}
2060
2061/* Utility used by bnad_start_xmit, for doing TSO */
2062static int
2063bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2064{
2065 int err;
2066
2067 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 have been defined since 2.6.18. */
2068 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2069 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2070 if (skb_header_cloned(skb)) {
2071 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2072 if (err) {
2073 BNAD_UPDATE_CTR(bnad, tso_err);
2074 return err;
2075 }
2076 }
2077
2078 /*
2079 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2080 * excluding the length field.
2081 */
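 /*
  * For IPv4, for example, the seed is
  * ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0); the
  * per-segment payload length is folded in later by the offload
  * engine when each segment's checksum is finalized.
  */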
2082 if (skb->protocol == htons(ETH_P_IP)) {
2083 struct iphdr *iph = ip_hdr(skb);
2084
2085 /* Do we really need these? */
2086 iph->tot_len = 0;
2087 iph->check = 0;
2088
2089 tcp_hdr(skb)->check =
2090 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2091 IPPROTO_TCP, 0);
2092 BNAD_UPDATE_CTR(bnad, tso4);
2093 } else {
2094 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2095
2096 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2097 ipv6h->payload_len = 0;
2098 tcp_hdr(skb)->check =
2099 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2100 IPPROTO_TCP, 0);
2101 BNAD_UPDATE_CTR(bnad, tso6);
2102 }
2103
2104 return 0;
2105}
2106
2107/*
2108 * Initialize Q numbers depending on Rx Paths
2109 * Called with bnad->bna_lock held, because of cfg_flags
2110 * access.
2111 */
2112static void
2113bnad_q_num_init(struct bnad *bnad)
2114{
2115 int rxps;
2116
2117 rxps = min((uint)num_online_cpus(),
2118 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2119
2120 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2121 rxps = 1; /* INTx */
2122
2123 bnad->num_rx = 1;
2124 bnad->num_tx = 1;
2125 bnad->num_rxp_per_rx = rxps;
2126 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2127}
2128
2129/*
2130 * Adjusts the Q numbers, given a number of MSI-X vectors.
2131 * Gives preference to RSS over Tx priority queues; in that
2132 * case only one Tx queue is used.
2133 * Called with bnad->bna_lock held because of cfg_flags access.
2134 */
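/*
 * A worked example, assuming BNAD_MAILBOX_MSIX_VECTORS is 1: with 8
 * vectors granted and a single Tx object with one TxQ, the Rx side
 * gets 8 - 1 - 1 = 6 vectors, so num_rxp_per_rx becomes 6.
 */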
2135static void
2136bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2137{
2138 bnad->num_txq_per_tx = 1;
2139 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2140 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2141 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2142 bnad->num_rxp_per_rx = msix_vectors -
2143 (bnad->num_tx * bnad->num_txq_per_tx) -
2144 BNAD_MAILBOX_MSIX_VECTORS;
2145 } else
2146 bnad->num_rxp_per_rx = 1;
2147}
2148
2149static void
2150bnad_set_netdev_perm_addr(struct bnad *bnad)
2151{
2152 struct net_device *netdev = bnad->netdev;
2153
2154 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
2155 if (is_zero_ether_addr(netdev->dev_addr))
2156 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
2157}
2158
2159/* Enable / disable device */
2160static void
2161bnad_device_disable(struct bnad *bnad)
2162{
2163 unsigned long flags;
2164
2165 init_completion(&bnad->bnad_completions.ioc_comp);
2166
2167 spin_lock_irqsave(&bnad->bna_lock, flags);
2168 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2169 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2170
2171 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2172
2173}
2174
2175static int
2176bnad_device_enable(struct bnad *bnad)
2177{
2178 int err = 0;
2179 unsigned long flags;
2180
2181 init_completion(&bnad->bnad_completions.ioc_comp);
2182
2183 spin_lock_irqsave(&bnad->bna_lock, flags);
2184 bna_device_enable(&bnad->bna.device);
2185 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2186
2187 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2188
2189 if (bnad->bnad_completions.ioc_comp_status)
2190 err = bnad->bnad_completions.ioc_comp_status;
2191
2192 return err;
2193}
2194
2195/* Free BNA resources */
2196static void
2197bnad_res_free(struct bnad *bnad)
2198{
2199 int i;
2200 struct bna_res_info *res_info = &bnad->res_info[0];
2201
2202 for (i = 0; i < BNA_RES_T_MAX; i++) {
2203 if (res_info[i].res_type == BNA_RES_T_MEM)
2204 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2205 else
2206 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2207 }
2208}
2209
2210/* Allocates memory and interrupt resources for BNA */
2211static int
2212bnad_res_alloc(struct bnad *bnad)
2213{
2214 int i, err;
2215 struct bna_res_info *res_info = &bnad->res_info[0];
2216
2217 for (i = 0; i < BNA_RES_T_MAX; i++) {
2218 if (res_info[i].res_type == BNA_RES_T_MEM)
2219 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2220 else
2221 err = bnad_mbox_irq_alloc(bnad,
2222 &res_info[i].res_u.intr_info);
2223 if (err)
2224 goto err_return;
2225 }
2226 return 0;
2227
2228err_return:
2229 bnad_res_free(bnad);
2230 return err;
2231}
2232
2233/* Interrupt enable / disable */
2234static void
2235bnad_enable_msix(struct bnad *bnad)
2236{
2237 int i, ret;
2238 u32 tot_msix_num;
2239 unsigned long flags;
2240
2241 spin_lock_irqsave(&bnad->bna_lock, flags);
2242 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2243 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2244 return;
2245 }
2246 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2247
2248 if (bnad->msix_table)
2249 return;
2250
2251 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2252
2253 bnad->msix_table =
2254 kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2255
2256 if (!bnad->msix_table)
2257 goto intx_mode;
2258
2259 for (i = 0; i < tot_msix_num; i++)
2260 bnad->msix_table[i].entry = i;
2261
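 /*
  * pci_enable_msix() returns 0 on success, a negative errno on
  * failure, and a positive count of the vectors actually available
  * when the request was too large; the positive case drives the
  * retry with adjusted queue counts below.
  */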
2262 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
2263 if (ret > 0) {
2264 /* Not enough MSI-X vectors. */
2265
2266 spin_lock_irqsave(&bnad->bna_lock, flags);
2267 /* ret = #of vectors that we got */
2268 bnad_q_num_adjust(bnad, ret);
2269 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2270
2271 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2272 + (bnad->num_rx
2273 * bnad->num_rxp_per_rx) +
2274 BNAD_MAILBOX_MSIX_VECTORS;
2275 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2276
2277 /* Try once more with adjusted numbers */
2278 /* If this fails, fall back to INTx */
2279 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2280 tot_msix_num);
2281 if (ret)
2282 goto intx_mode;
2283
2284 } else if (ret < 0)
2285 goto intx_mode;
2286 return;
2287
2288intx_mode:
2289
2290 kfree(bnad->msix_table);
2291 bnad->msix_table = NULL;
2292 bnad->msix_num = 0;
2293 bnad->msix_diag_num = 0;
2294 spin_lock_irqsave(&bnad->bna_lock, flags);
2295 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2296 bnad_q_num_init(bnad);
2297 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2298}
2299
2300static void
2301bnad_disable_msix(struct bnad *bnad)
2302{
2303 u32 cfg_flags;
2304 unsigned long flags;
2305
2306 spin_lock_irqsave(&bnad->bna_lock, flags);
2307 cfg_flags = bnad->cfg_flags;
2308 if (bnad->cfg_flags & BNAD_CF_MSIX)
2309 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2310 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2311
2312 if (cfg_flags & BNAD_CF_MSIX) {
2313 pci_disable_msix(bnad->pcidev);
2314 kfree(bnad->msix_table);
2315 bnad->msix_table = NULL;
2316 }
2317}
2318
2319/* Netdev entry points */
2320static int
2321bnad_open(struct net_device *netdev)
2322{
2323 int err;
2324 struct bnad *bnad = netdev_priv(netdev);
2325 struct bna_pause_config pause_config;
2326 int mtu;
2327 unsigned long flags;
2328
2329 mutex_lock(&bnad->conf_mutex);
2330
2331 /* Tx */
2332 err = bnad_setup_tx(bnad, 0);
2333 if (err)
2334 goto err_return;
2335
2336 /* Rx */
2337 err = bnad_setup_rx(bnad, 0);
2338 if (err)
2339 goto cleanup_tx;
2340
2341 /* Port */
2342 pause_config.tx_pause = 0;
2343 pause_config.rx_pause = 0;
2344
2345 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2346
2347 spin_lock_irqsave(&bnad->bna_lock, flags);
2348 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2349 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2350 bna_port_enable(&bnad->bna.port);
2351 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2352
2353 /* Enable broadcast */
2354 bnad_enable_default_bcast(bnad);
2355
2356 /* Set the UCAST address */
2357 spin_lock_irqsave(&bnad->bna_lock, flags);
2358 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2359 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2360
2361 /* Start the stats timer */
2362 bnad_stats_timer_start(bnad);
2363
2364 mutex_unlock(&bnad->conf_mutex);
2365
2366 return 0;
2367
2368cleanup_tx:
2369 bnad_cleanup_tx(bnad, 0);
2370
2371err_return:
2372 mutex_unlock(&bnad->conf_mutex);
2373 return err;
2374}
2375
2376static int
2377bnad_stop(struct net_device *netdev)
2378{
2379 struct bnad *bnad = netdev_priv(netdev);
2380 unsigned long flags;
2381
2382 mutex_lock(&bnad->conf_mutex);
2383
2384 /* Stop the stats timer */
2385 bnad_stats_timer_stop(bnad);
2386
2387 init_completion(&bnad->bnad_completions.port_comp);
2388
2389 spin_lock_irqsave(&bnad->bna_lock, flags);
2390 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2391 bnad_cb_port_disabled);
2392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2393
2394 wait_for_completion(&bnad->bnad_completions.port_comp);
2395
2396 bnad_cleanup_tx(bnad, 0);
2397 bnad_cleanup_rx(bnad, 0);
2398
2399 /* Synchronize mailbox IRQ */
2400 bnad_mbox_irq_sync(bnad);
2401
2402 mutex_unlock(&bnad->conf_mutex);
2403
2404 return 0;
2405}
2406
2407/* TX */
2408/*
2409 * bnad_start_xmit : Netdev entry point for Transmit
2410 * Called under lock held by net_device
2411 */
2412static netdev_tx_t
2413bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2414{
2415 struct bnad *bnad = netdev_priv(netdev);
2416
2417 u16 txq_prod, vlan_tag = 0;
2418 u32 unmap_prod, wis, wis_used, wi_range;
2419 u32 vectors, vect_id, i, acked;
2420 u32 tx_id;
2421 int err;
2422
2423 struct bnad_tx_info *tx_info;
2424 struct bna_tcb *tcb;
2425 struct bnad_unmap_q *unmap_q;
2426 dma_addr_t dma_addr;
2427 struct bna_txq_entry *txqent;
2428 bna_txq_wi_ctrl_flag_t flags;
2429
2430 if (unlikely
2431 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2432 dev_kfree_skb(skb);
2433 return NETDEV_TX_OK;
2434 }
2435
2436 /*
2437 * Drop the packet if this Tx was scheduled in the window between
2438 * BNAD_RF_TX_STARTED being cleared and the netif_stop_queue() call.
2439 */
2440 if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
2441 dev_kfree_skb(skb);
2442 return NETDEV_TX_OK;
2443 }
2444
2445 tx_id = 0;
2446
2447 tx_info = &bnad->tx_info[tx_id];
2448 tcb = tx_info->tcb[tx_id];
2449 unmap_q = tcb->unmap_q;
2450
2451 vectors = 1 + skb_shinfo(skb)->nr_frags;
2452 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2453 dev_kfree_skb(skb);
2454 return NETDEV_TX_OK;
2455 }
2456 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
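 /*
  * For example, a header plus 5 page fragments gives 6 vectors;
  * at 4 vectors per WI that needs 2 WIs, the second being a
  * BNA_TXQ_WI_EXTENSION entry built in the fragment loop below.
  */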
2457 acked = 0;
2458 if (unlikely
2459 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2460 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2461 if ((u16) (*tcb->hw_consumer_index) !=
2462 tcb->consumer_index &&
2463 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2464 acked = bnad_free_txbufs(bnad, tcb);
2465 bna_ib_ack(tcb->i_dbell, acked);
2466 smp_mb__before_clear_bit();
2467 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2468 } else {
2469 netif_stop_queue(netdev);
2470 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2471 }
2472
2473 smp_mb();
2474 /*
2475 * Check again to deal with race condition between
2476 * netif_stop_queue here, and netif_wake_queue in
2477 * interrupt handler which is not inside netif tx lock.
2478 */
2479 if (likely
2480 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2481 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2482 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2483 return NETDEV_TX_BUSY;
2484 } else {
2485 netif_wake_queue(netdev);
2486 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2487 }
2488 }
2489
2490 unmap_prod = unmap_q->producer_index;
2491 wis_used = 1;
2492 vect_id = 0;
2493 flags = 0;
2494
2495 txq_prod = tcb->producer_index;
2496 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2497 BUG_ON(!(wi_range <= tcb->q_depth));
2498 txqent->hdr.wi.reserved = 0;
2499 txqent->hdr.wi.num_vectors = vectors;
2500 txqent->hdr.wi.opcode =
2501 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2502 BNA_TXQ_WI_SEND));
2503
2504 if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
2505 vlan_tag = (u16) vlan_tx_tag_get(skb);
2506 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2507 }
2508 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2509 vlan_tag =
2510 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2511 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2512 }
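 /*
  * The CEE case above builds an 802.1Q TCI by hand: the 3-bit
  * priority goes into bits 15:13 and the low 13 bits (CFI + VID)
  * of the original tag are preserved.
  */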
2513
2514 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2515
2516 if (skb_is_gso(skb)) {
2517 err = bnad_tso_prepare(bnad, skb);
2518 if (err) {
2519 dev_kfree_skb(skb);
2520 return NETDEV_TX_OK;
2521 }
2522 txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
2523 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2524 txqent->hdr.wi.l4_hdr_size_n_offset =
2525 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2526 (tcp_hdrlen(skb) >> 2,
2527 skb_transport_offset(skb)));
2528 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2529 u8 proto = 0;
2530
2531 txqent->hdr.wi.lso_mss = 0;
2532
2533 if (skb->protocol == htons(ETH_P_IP))
2534 proto = ip_hdr(skb)->protocol;
2535 else if (skb->protocol == htons(ETH_P_IPV6)) {
2536 /* nexthdr may not be TCP immediately. */
2537 proto = ipv6_hdr(skb)->nexthdr;
2538 }
2539 if (proto == IPPROTO_TCP) {
2540 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2541 txqent->hdr.wi.l4_hdr_size_n_offset =
2542 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2543 (0, skb_transport_offset(skb)));
2544
2545 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2546
2547 BUG_ON(!(skb_headlen(skb) >=
2548 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2549
2550 } else if (proto == IPPROTO_UDP) {
2551 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2552 txqent->hdr.wi.l4_hdr_size_n_offset =
2553 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2554 (0, skb_transport_offset(skb)));
2555
2556 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2557
2558 BUG_ON(!(skb_headlen(skb) >=
2559 skb_transport_offset(skb) +
2560 sizeof(struct udphdr)));
2561 } else {
2562 err = skb_checksum_help(skb);
2563 BNAD_UPDATE_CTR(bnad, csum_help);
2564 if (err) {
2565 dev_kfree_skb(skb);
2566 BNAD_UPDATE_CTR(bnad, csum_help_err);
2567 return NETDEV_TX_OK;
2568 }
2569 }
2570 } else {
2571 txqent->hdr.wi.lso_mss = 0;
2572 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2573 }
2574
2575 txqent->hdr.wi.flags = htons(flags);
2576
2577 txqent->hdr.wi.frame_length = htonl(skb->len);
2578
2579 unmap_q->unmap_array[unmap_prod].skb = skb;
2580 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2581 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2582 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
2583 PCI_DMA_TODEVICE);
2584 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2585 dma_addr);
2586
2587 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2588 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2589
2590 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2591 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2592 u32 size = frag->size;
2593
2594 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2595 vect_id = 0;
2596 if (--wi_range)
2597 txqent++;
2598 else {
2599 BNA_QE_INDX_ADD(txq_prod, wis_used,
2600 tcb->q_depth);
2601 wis_used = 0;
2602 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2603 txqent, wi_range);
2604 BUG_ON(!(wi_range <= tcb->q_depth));
2605 }
2606 wis_used++;
2607 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2608 }
2609
2610 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2611 txqent->vector[vect_id].length = htons(size);
2612 dma_addr =
2613 pci_map_page(bnad->pcidev, frag->page,
2614 frag->page_offset, size,
2615 PCI_DMA_TODEVICE);
2616 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2617 dma_addr);
2618 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2619 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2620 }
2621
2622 unmap_q->producer_index = unmap_prod;
2623 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2624 tcb->producer_index = txq_prod;
2625
2626 smp_mb();
2627 bna_txq_prod_indx_doorbell(tcb);
2628
2629 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2630 tasklet_schedule(&bnad->tx_free_tasklet);
2631
2632 return NETDEV_TX_OK;
2633}
2634
2635/*
2636 * Used spin_lock to synchronize reading of stats structures, which
2637 * is written by BNA under the same lock.
2638 */
2639static struct rtnl_link_stats64 *
2640bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2641{
2642 struct bnad *bnad = netdev_priv(netdev);
2643 unsigned long flags;
2644
2645 spin_lock_irqsave(&bnad->bna_lock, flags);
2646
2647 bnad_netdev_qstats_fill(bnad, stats);
2648 bnad_netdev_hwstats_fill(bnad, stats);
2649
2650 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2651
2652 return stats;
2653}
2654
2655static void
2656bnad_set_rx_mode(struct net_device *netdev)
2657{
2658 struct bnad *bnad = netdev_priv(netdev);
2659 u32 new_mask, valid_mask;
2660 unsigned long flags;
2661
2662 spin_lock_irqsave(&bnad->bna_lock, flags);
2663
2664 new_mask = valid_mask = 0;
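 /*
  * valid_mask marks which rx-mode bits bna_rx_mode_set() should
  * act on; new_mask carries the desired values of those bits
  * (inferred from the promiscuous/allmulti handling below).
  */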
2665
2666 if (netdev->flags & IFF_PROMISC) {
2667 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2668 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2669 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2670 bnad->cfg_flags |= BNAD_CF_PROMISC;
2671 }
2672 } else {
2673 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2674 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2675 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2676 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2677 }
2678 }
2679
2680 if (netdev->flags & IFF_ALLMULTI) {
2681 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2682 new_mask |= BNA_RXMODE_ALLMULTI;
2683 valid_mask |= BNA_RXMODE_ALLMULTI;
2684 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2685 }
2686 } else {
2687 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2688 new_mask &= ~BNA_RXMODE_ALLMULTI;
2689 valid_mask |= BNA_RXMODE_ALLMULTI;
2690 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2691 }
2692 }
2693
2694 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2695
2696 if (!netdev_mc_empty(netdev)) {
2697 u8 *mcaddr_list;
2698 int mc_count = netdev_mc_count(netdev);
2699
2700 /* Index 0 holds the broadcast address */
2701 mcaddr_list =
2702 kzalloc((mc_count + 1) * ETH_ALEN,
2703 GFP_ATOMIC);
2704 if (!mcaddr_list)
2705 goto unlock;
2706
2707 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2708
2709 /* Copy the rest of the MC addresses */
2710 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2711
2712 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2713 mcaddr_list, NULL);
2714
2715 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2716 kfree(mcaddr_list);
2717 }
2718unlock:
2719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2720}
2721
2722/*
2723 * bna_lock is used to sync writes to netdev->addr
2724 * conf_lock cannot be used since this call may be made
2725 * in a non-blocking context.
2726 */
2727static int
2728bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2729{
2730 int err;
2731 struct bnad *bnad = netdev_priv(netdev);
2732 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2733 unsigned long flags;
2734
2735 spin_lock_irqsave(&bnad->bna_lock, flags);
2736
2737 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2738
2739 if (!err)
2740 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2741
2742 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2743
2744 return err;
2745}
2746
2747static int
2748bnad_change_mtu(struct net_device *netdev, int new_mtu)
2749{
2750 int mtu, err = 0;
2751 unsigned long flags;
2752
2753 struct bnad *bnad = netdev_priv(netdev);
2754
2755 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2756 return -EINVAL;
2757
2758 mutex_lock(&bnad->conf_mutex);
2759
2760 netdev->mtu = new_mtu;
2761
2762 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
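 /* e.g. new_mtu = 1500: 14 (ETH_HLEN) + 1500 + 4 (ETH_FCS_LEN) = 1518 bytes on the wire */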
2763
2764 spin_lock_irqsave(&bnad->bna_lock, flags);
2765 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2766 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2767
2768 mutex_unlock(&bnad->conf_mutex);
2769 return err;
2770}
2771
2772static void
2773bnad_vlan_rx_register(struct net_device *netdev,
2774 struct vlan_group *vlan_grp)
2775{
2776 struct bnad *bnad = netdev_priv(netdev);
2777
2778 mutex_lock(&bnad->conf_mutex);
2779 bnad->vlan_grp = vlan_grp;
2780 mutex_unlock(&bnad->conf_mutex);
2781}
2782
2783static void
2784bnad_vlan_rx_add_vid(struct net_device *netdev,
2785 unsigned short vid)
2786{
2787 struct bnad *bnad = netdev_priv(netdev);
2788 unsigned long flags;
2789
2790 if (!bnad->rx_info[0].rx)
2791 return;
2792
2793 mutex_lock(&bnad->conf_mutex);
2794
2795 spin_lock_irqsave(&bnad->bna_lock, flags);
2796 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2798
2799 mutex_unlock(&bnad->conf_mutex);
2800}
2801
2802static void
2803bnad_vlan_rx_kill_vid(struct net_device *netdev,
2804 unsigned short vid)
2805{
2806 struct bnad *bnad = netdev_priv(netdev);
2807 unsigned long flags;
2808
2809 if (!bnad->rx_info[0].rx)
2810 return;
2811
2812 mutex_lock(&bnad->conf_mutex);
2813
2814 spin_lock_irqsave(&bnad->bna_lock, flags);
2815 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2816 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2817
2818 mutex_unlock(&bnad->conf_mutex);
2819}
2820
2821#ifdef CONFIG_NET_POLL_CONTROLLER
2822static void
2823bnad_netpoll(struct net_device *netdev)
2824{
2825 struct bnad *bnad = netdev_priv(netdev);
2826 struct bnad_rx_info *rx_info;
2827 struct bnad_rx_ctrl *rx_ctrl;
2828 u32 curr_mask;
2829 int i, j;
2830
2831 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2832 bna_intx_disable(&bnad->bna, curr_mask);
2833 bnad_isr(bnad->pcidev->irq, netdev);
2834 bna_intx_enable(&bnad->bna, curr_mask);
2835 } else {
2836 for (i = 0; i < bnad->num_rx; i++) {
2837 rx_info = &bnad->rx_info[i];
2838 if (!rx_info->rx)
2839 continue;
2840 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2841 rx_ctrl = &rx_info->rx_ctrl[j];
2842 if (rx_ctrl->ccb) {
2843 bnad_disable_rx_irq(bnad,
2844 rx_ctrl->ccb);
2845 bnad_netif_rx_schedule_poll(bnad,
2846 rx_ctrl->ccb);
2847 }
2848 }
2849 }
2850 }
2851}
2852#endif
2853
2854static const struct net_device_ops bnad_netdev_ops = {
2855 .ndo_open = bnad_open,
2856 .ndo_stop = bnad_stop,
2857 .ndo_start_xmit = bnad_start_xmit,
2858 .ndo_get_stats64 = bnad_get_stats64,
2859 .ndo_set_rx_mode = bnad_set_rx_mode,
2860 .ndo_set_multicast_list = bnad_set_rx_mode,
2861 .ndo_validate_addr = eth_validate_addr,
2862 .ndo_set_mac_address = bnad_set_mac_address,
2863 .ndo_change_mtu = bnad_change_mtu,
2864 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2865 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2866 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2867#ifdef CONFIG_NET_POLL_CONTROLLER
2868 .ndo_poll_controller = bnad_netpoll
2869#endif
2870};
2871
2872static void
2873bnad_netdev_init(struct bnad *bnad, bool using_dac)
2874{
2875 struct net_device *netdev = bnad->netdev;
2876
2877 netdev->features |= NETIF_F_IPV6_CSUM;
2878 netdev->features |= NETIF_F_TSO;
2879 netdev->features |= NETIF_F_TSO6;
2880
2881 netdev->features |= NETIF_F_GRO;
2882 pr_warn("bna: GRO enabled, using kernel stack GRO\n");
2883
2884 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2885
2886 if (using_dac)
2887 netdev->features |= NETIF_F_HIGHDMA;
2888
2889 netdev->features |=
2890 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2891 NETIF_F_HW_VLAN_FILTER;
2892
2893 netdev->vlan_features = netdev->features;
2894 netdev->mem_start = bnad->mmio_start;
2895 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2896
2897 netdev->netdev_ops = &bnad_netdev_ops;
2898 bnad_set_ethtool_ops(netdev);
2899}
2900
2901/*
2902 * 1. Initialize the bnad structure
2903 * 2. Setup netdev pointer in pci_dev
2904 * 3. Initialize Tx free tasklet
2905 * 4. Initialize the number of TxQs, CQs and MSI-X vectors
2906 */
2907static int
2908bnad_init(struct bnad *bnad,
2909 struct pci_dev *pdev, struct net_device *netdev)
2910{
2911 unsigned long flags;
2912
2913 SET_NETDEV_DEV(netdev, &pdev->dev);
2914 pci_set_drvdata(pdev, netdev);
2915
2916 bnad->netdev = netdev;
2917 bnad->pcidev = pdev;
2918 bnad->mmio_start = pci_resource_start(pdev, 0);
2919 bnad->mmio_len = pci_resource_len(pdev, 0);
2920 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2921 if (!bnad->bar0) {
2922 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2923 pci_set_drvdata(pdev, NULL);
2924 return -ENOMEM;
2925 }
2926 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2927 (unsigned long long) bnad->mmio_len);
2928
2929 spin_lock_irqsave(&bnad->bna_lock, flags);
2930 if (!bnad_msix_disable)
2931 bnad->cfg_flags = BNAD_CF_MSIX;
2932
2933 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2934
2935 bnad_q_num_init(bnad);
2936 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2937
2938 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2939 (bnad->num_rx * bnad->num_rxp_per_rx) +
2940 BNAD_MAILBOX_MSIX_VECTORS;
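	/*
	 * Illustrative count (queue numbers assumed): 1 Tx object with
	 * 8 TxQs and 1 Rx object with 4 RxPs gives
	 * msix_num = 8 + 4 + 1 = 13, i.e. one vector per TxQ, one per
	 * RxP, plus the mailbox vector.
	 */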
2941 bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */
2942
2943 bnad->txq_depth = BNAD_TXQ_DEPTH;
2944 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2945 bnad->rx_csum = true;
2946
2947 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2948 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2949
2950 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2951 (unsigned long)bnad);
2952
2953 return 0;
2954}
2955
2956/*
2957 * Must be called after bnad_pci_uninit()
2958 * so that iounmap() and pci_set_drvdata(NULL)
2959 * happen only after PCI uninitialization.
2960 */
2961static void
2962bnad_uninit(struct bnad *bnad)
2963{
2964 if (bnad->bar0)
2965 iounmap(bnad->bar0);
2966 pci_set_drvdata(bnad->pcidev, NULL);
2967}
2968
2969/*
2970 * Initialize locks:
2971 *	a) Per-device mutex used for serializing configuration
2972 *	   changes from the OS interface
2973 *	b) Spinlock used to protect the bna state machine
2974 */
2975static void
2976bnad_lock_init(struct bnad *bnad)
2977{
2978 spin_lock_init(&bnad->bna_lock);
2979 mutex_init(&bnad->conf_mutex);
2980}
2981
2982static void
2983bnad_lock_uninit(struct bnad *bnad)
2984{
2985 mutex_destroy(&bnad->conf_mutex);
2986}
2987
2988/* PCI Initialization */
2989static int
2990bnad_pci_init(struct bnad *bnad,
2991 struct pci_dev *pdev, bool *using_dac)
2992{
2993 int err;
2994
2995 err = pci_enable_device(pdev);
2996 if (err)
2997 return err;
2998 err = pci_request_regions(pdev, BNAD_NAME);
2999 if (err)
3000 goto disable_device;
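	/*
	 * Prefer a 64-bit DMA mask and fall back to 32-bit if the
	 * platform cannot satisfy it; *using_dac records the outcome so
	 * bnad_netdev_init() can advertise NETIF_F_HIGHDMA accordingly.
	 */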
3001 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3002 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3003 *using_dac = 1;
3004 } else {
3005 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3006 if (err) {
3007 err = pci_set_consistent_dma_mask(pdev,
3008 DMA_BIT_MASK(32));
3009 if (err)
3010 goto release_regions;
3011 }
3012 *using_dac = 0;
3013 }
3014 pci_set_master(pdev);
3015 return 0;
3016
3017release_regions:
3018 pci_release_regions(pdev);
3019disable_device:
3020 pci_disable_device(pdev);
3021
3022 return err;
3023}
3024
3025static void
3026bnad_pci_uninit(struct pci_dev *pdev)
3027{
3028 pci_release_regions(pdev);
3029 pci_disable_device(pdev);
3030}
3031
3032static int __devinit
3033bnad_pci_probe(struct pci_dev *pdev,
3034 const struct pci_device_id *pcidev_id)
3035{
3036 bool using_dac;
3037 int err;
3038 struct bnad *bnad;
3039 struct bna *bna;
3040 struct net_device *netdev;
3041 struct bfa_pcidev pcidev_info;
3042 unsigned long flags;
3043
3044 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3045 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3046
3047 mutex_lock(&bnad_fwimg_mutex);
3048 if (!cna_get_firmware_buf(pdev)) {
3049 mutex_unlock(&bnad_fwimg_mutex);
3050 pr_warn("Failed to load Firmware Image!\n");
3051 return -ENODEV;
3052 }
3053 mutex_unlock(&bnad_fwimg_mutex);
3054
3055 /*
3056	 * Allocate sizeof(struct net_device) + sizeof(struct bnad);
3057	 * bnad is netdev's private area, retrieved via netdev_priv()
3058 */
3059 netdev = alloc_etherdev(sizeof(struct bnad));
3060 if (!netdev) {
3061 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3062 err = -ENOMEM;
3063 return err;
3064 }
3065 bnad = netdev_priv(netdev);
3066
3067 /*
3068 * PCI initialization
3069 * Output : using_dac = 1 for 64 bit DMA
3070 * = 0 for 32 bit DMA
3071 */
3072 err = bnad_pci_init(bnad, pdev, &using_dac);
3073 if (err)
3074 goto free_netdev;
3075
3076 bnad_lock_init(bnad);
3077 /*
3078 * Initialize bnad structure
3079 * Setup relation between pci_dev & netdev
3080 * Init Tx free tasklet
3081 */
3082 err = bnad_init(bnad, pdev, netdev);
3083 if (err)
3084 goto pci_uninit;
3085 /* Initialize netdev structure, set up ethtool ops */
3086 bnad_netdev_init(bnad, using_dac);
3087
3088 bnad_enable_msix(bnad);
3089
3090	/* Get resource requirement from bna */
3091 bna_res_req(&bnad->res_info[0]);
3092
3093 /* Allocate resources from bna */
3094 err = bnad_res_alloc(bnad);
3095 if (err)
3096		goto disable_msix;
3097
3098 bna = &bnad->bna;
3099
3100 /* Setup pcidev_info for bna_init() */
3101 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3102 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3103 pcidev_info.device_id = bnad->pcidev->device;
3104 pcidev_info.pci_bar_kva = bnad->bar0;
3105
3106 mutex_lock(&bnad->conf_mutex);
3107
3108 spin_lock_irqsave(&bnad->bna_lock, flags);
3109 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3110
3111 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3112
3113 bnad->stats.bna_stats = &bna->stats;
3114
3115 /* Set up timers */
3116 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3117 ((unsigned long)bnad));
3118 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3119 ((unsigned long)bnad));
3120 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
3121 ((unsigned long)bnad));
3122
3123 /* Now start the timer before calling IOC */
3124 mod_timer(&bnad->bna.device.ioc.ioc_timer,
3125 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3126
3127 /*
3128 * Start the chip
3129 * Don't care even if err != 0, bna state machine will
3130 * deal with it
3131 */
3132 err = bnad_device_enable(bnad);
3133
3134 /* Get the burnt-in mac */
3135 spin_lock_irqsave(&bnad->bna_lock, flags);
3136 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3137 bnad_set_netdev_perm_addr(bnad);
3138 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3139
3140 mutex_unlock(&bnad->conf_mutex);
3141
3142 /*
3143 * Make sure the link appears down to the stack
3144 */
3145 netif_carrier_off(netdev);
3146
3147	/* Finally, register with the net_device layer */
3148 err = register_netdev(netdev);
3149 if (err) {
3150 pr_err("BNA : Registering with netdev failed\n");
3151 goto disable_device;
3152 }
3153
3154 return 0;
3155
3156disable_device:
3157 mutex_lock(&bnad->conf_mutex);
3158 bnad_device_disable(bnad);
3159 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3160 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3161 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3162 spin_lock_irqsave(&bnad->bna_lock, flags);
3163 bna_uninit(bna);
3164 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3165 mutex_unlock(&bnad->conf_mutex);
3166
3167 bnad_res_free(bnad);
disable_msix:
3168	bnad_disable_msix(bnad);
3169pci_uninit:
3170 bnad_pci_uninit(pdev);
3171 bnad_lock_uninit(bnad);
3172 bnad_uninit(bnad);
3173free_netdev:
3174 free_netdev(netdev);
3175 return err;
3176}
3177
3178static void __devexit
3179bnad_pci_remove(struct pci_dev *pdev)
3180{
3181 struct net_device *netdev = pci_get_drvdata(pdev);
3182 struct bnad *bnad;
3183 struct bna *bna;
3184 unsigned long flags;
3185
3186 if (!netdev)
3187 return;
3188
3189 pr_info("%s bnad_pci_remove\n", netdev->name);
3190 bnad = netdev_priv(netdev);
3191 bna = &bnad->bna;
3192
3193 unregister_netdev(netdev);
3194
3195 mutex_lock(&bnad->conf_mutex);
3196 bnad_device_disable(bnad);
3197 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3198 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3199 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3200 spin_lock_irqsave(&bnad->bna_lock, flags);
3201 bna_uninit(bna);
3202 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3203 mutex_unlock(&bnad->conf_mutex);
3204
3205 bnad_res_free(bnad);
3206 bnad_disable_msix(bnad);
3207 bnad_pci_uninit(pdev);
3208 bnad_lock_uninit(bnad);
3209 bnad_uninit(bnad);
3210 free_netdev(netdev);
3211}
3212
3213const struct pci_device_id bnad_pci_id_table[] = {
3214 {
3215 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3216 PCI_DEVICE_ID_BROCADE_CT),
3217 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3218 .class_mask = 0xffff00
3219 }, {0, }
3220};
3221
3222MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3223
3224static struct pci_driver bnad_pci_driver = {
3225 .name = BNAD_NAME,
3226 .id_table = bnad_pci_id_table,
3227 .probe = bnad_pci_probe,
3228 .remove = __devexit_p(bnad_pci_remove),
3229};
3230
3231static int __init
3232bnad_module_init(void)
3233{
3234 int err;
3235
3236 pr_info("Brocade 10G Ethernet driver\n");
3237
3238 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3239
3240 err = pci_register_driver(&bnad_pci_driver);
3241 if (err < 0) {
3242 pr_err("bna : PCI registration failed in module init "
3243 "(%d)\n", err);
3244 return err;
3245 }
3246
3247 return 0;
3248}
3249
3250static void __exit
3251bnad_module_exit(void)
3252{
3253 pci_unregister_driver(&bnad_pci_driver);
3254
3255 if (bfi_fw)
3256 release_firmware(bfi_fw);
3257}
3258
3259module_init(bnad_module_init);
3260module_exit(bnad_module_exit);
3261
3262MODULE_AUTHOR("Brocade");
3263MODULE_LICENSE("GPL");
3264MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3265MODULE_VERSION(BNAD_VERSION);
3266MODULE_FIRMWARE(CNA_FW_FILE_CT);
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
new file mode 100644
index 000000000000..ee377888b905
--- /dev/null
+++ b/drivers/net/bna/bnad.h
@@ -0,0 +1,333 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BNAD_H__
19#define __BNAD_H__
20
21#include <linux/rtnetlink.h>
22#include <linux/workqueue.h>
23#include <linux/ipv6.h>
24#include <linux/etherdevice.h>
25#include <linux/mutex.h>
26#include <linux/firmware.h>
27
28/* Fix for IA64 */
29#include <asm/checksum.h>
30#include <net/ip6_checksum.h>
31
32#include <net/ip.h>
33#include <net/tcp.h>
34
35#include "bna.h"
36
37#define BNAD_TXQ_DEPTH 2048
38#define BNAD_RXQ_DEPTH 2048
39
40#define BNAD_MAX_TXS 1
41#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
42#define BNAD_TXQ_NUM 1
43
44#define BNAD_MAX_RXS 1
45#define BNAD_MAX_RXPS_PER_RX 16
46
47/*
48 * Control structure pointed to by ccb->ctrl, which
49 * determines the NAPI / LRO behavior of the CCB.
50 * There is a 1:1 correspondence between ccb & ctrl.
51 */
52struct bnad_rx_ctrl {
53 struct bna_ccb *ccb;
54 struct napi_struct napi;
55};
56
57#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
58
59#define BNAD_GET_TX_ID(_skb) (0)
60
61/*
62 * GLOBAL #defines (CONSTANTS)
63 */
64#define BNAD_NAME "bna"
65#define BNAD_NAME_LEN 64
66
67#define BNAD_VERSION "2.3.2.0"
68
69#define BNAD_MAILBOX_MSIX_VECTORS 1
70
71#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
72#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
73
74#define BNAD_MAX_Q_DEPTH 0x10000
75#define BNAD_MIN_Q_DEPTH 0x200
76
77#define BNAD_JUMBO_MTU 9000
78
79#define BNAD_NETIF_WAKE_THRESHOLD 8
80
81#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
82
83/* Bit positions for tcb->flags */
84#define BNAD_TXQ_FREE_SENT 0
85
86/* Bit positions for rcb->flags */
87#define BNAD_RXQ_REFILL 0
88#define BNAD_RXQ_STARTED 1
89
90/*
91 * DATA STRUCTURES
92 */
93
94/* enums */
95enum bnad_intr_source {
96 BNAD_INTR_TX = 1,
97 BNAD_INTR_RX = 2
98};
99
100enum bnad_link_state {
101 BNAD_LS_DOWN = 0,
102 BNAD_LS_UP = 1
103};
104
105struct bnad_completion {
106 struct completion ioc_comp;
107 struct completion ucast_comp;
108 struct completion mcast_comp;
109 struct completion tx_comp;
110 struct completion rx_comp;
111 struct completion stats_comp;
112 struct completion port_comp;
113
114 u8 ioc_comp_status;
115 u8 ucast_comp_status;
116 u8 mcast_comp_status;
117 u8 tx_comp_status;
118 u8 rx_comp_status;
119 u8 stats_comp_status;
120 u8 port_comp_status;
121};
122
123/* Tx Rx Control Stats */
124struct bnad_drv_stats {
125 u64 netif_queue_stop;
126 u64 netif_queue_wakeup;
127 u64 tso4;
128 u64 tso6;
129 u64 tso_err;
130 u64 tcpcsum_offload;
131 u64 udpcsum_offload;
132 u64 csum_help;
133 u64 csum_help_err;
134
135 u64 hw_stats_updates;
136 u64 netif_rx_schedule;
137 u64 netif_rx_complete;
138 u64 netif_rx_dropped;
139
140 u64 link_toggle;
141 u64 cee_up;
142
143 u64 rxp_info_alloc_failed;
144 u64 mbox_intr_disabled;
145 u64 mbox_intr_enabled;
146 u64 tx_unmap_q_alloc_failed;
147 u64 rx_unmap_q_alloc_failed;
148
149 u64 rxbuf_alloc_failed;
150};
151
152/* Complete driver stats */
153struct bnad_stats {
154 struct bnad_drv_stats drv_stats;
155 struct bna_stats *bna_stats;
156};
157
158/* Tx / Rx Resources */
159struct bnad_tx_res_info {
160 struct bna_res_info res_info[BNA_TX_RES_T_MAX];
161};
162
163struct bnad_rx_res_info {
164 struct bna_res_info res_info[BNA_RX_RES_T_MAX];
165};
166
167struct bnad_tx_info {
168 struct bna_tx *tx; /* 1:1 between tx_info & tx */
169 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
170} ____cacheline_aligned;
171
172struct bnad_rx_info {
173 struct bna_rx *rx; /* 1:1 between rx_info & rx */
174
175 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
176} ____cacheline_aligned;
177
178/* Unmap queues for Tx / Rx cleanup */
179struct bnad_skb_unmap {
180 struct sk_buff *skb;
181 DECLARE_PCI_UNMAP_ADDR(dma_addr)
182};
183
184struct bnad_unmap_q {
185 u32 producer_index;
186 u32 consumer_index;
187 u32 q_depth;
188 /* This should be the last one */
189 struct bnad_skb_unmap unmap_array[1];
190};
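
/*
 * Allocation sketch (illustrative only; the actual allocation is done
 * elsewhere in bnad.c and may differ): the 1-element unmap_array[] is
 * over-allocated to the desired depth:
 *
 *	unmap_q = kzalloc(sizeof(struct bnad_unmap_q) +
 *			  (q_depth - 1) * sizeof(struct bnad_skb_unmap),
 *			  GFP_KERNEL);
 *	unmap_q->q_depth = q_depth;
 */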
191
192/* Bit mask values for bnad->cfg_flags */
193#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
194#define BNAD_CF_PROMISC 0x02
195#define BNAD_CF_ALLMULTI 0x04
196#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
197
198/* Defines for run_flags bit-mask */
199/* Set, tested & cleared using xxx_bit() functions */
200/* Values indicate bit positions */
201#define BNAD_RF_CEE_RUNNING 1
202#define BNAD_RF_HW_ERROR 2
203#define BNAD_RF_MBOX_IRQ_DISABLED 3
204#define BNAD_RF_TX_STARTED 4
205#define BNAD_RF_RX_STARTED 5
206#define BNAD_RF_DIM_TIMER_RUNNING 6
207#define BNAD_RF_STATS_TIMER_RUNNING 7
208
209struct bnad {
210 struct net_device *netdev;
211
212 /* Data path */
213 struct bnad_tx_info tx_info[BNAD_MAX_TXS];
214 struct bnad_rx_info rx_info[BNAD_MAX_RXS];
215
216 struct vlan_group *vlan_grp;
217 /*
218 * These q numbers are global only because
219 * they are used to calculate MSIx vectors.
220	 * Actually the exact number of queues is per Tx/Rx
221 * object.
222 */
223 u32 num_tx;
224 u32 num_rx;
225 u32 num_txq_per_tx;
226 u32 num_rxp_per_rx;
227
228 u32 txq_depth;
229 u32 rxq_depth;
230
231 u8 tx_coalescing_timeo;
232 u8 rx_coalescing_timeo;
233
234 struct bna_rx_config rx_config[BNAD_MAX_RXS];
235 struct bna_tx_config tx_config[BNAD_MAX_TXS];
236
237 u32 rx_csum;
238
239 void __iomem *bar0; /* BAR0 address */
240
241 struct bna bna;
242
243 u32 cfg_flags;
244 unsigned long run_flags;
245
246 struct pci_dev *pcidev;
247 u64 mmio_start;
248 u64 mmio_len;
249
250 u32 msix_num;
251 u32 msix_diag_num;
252 struct msix_entry *msix_table;
253
254 struct mutex conf_mutex;
255 spinlock_t bna_lock ____cacheline_aligned;
256
257 /* Timers */
258 struct timer_list ioc_timer;
259 struct timer_list dim_timer;
260 struct timer_list stats_timer;
261
262 /* Control path resources, memory & irq */
263 struct bna_res_info res_info[BNA_RES_T_MAX];
264 struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
265 struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
266
267 struct bnad_completion bnad_completions;
268
269 /* Burnt in MAC address */
270 mac_t perm_addr;
271
272 struct tasklet_struct tx_free_tasklet;
273
274 /* Statistics */
275 struct bnad_stats stats;
276
277 struct bnad_diag *diag;
278
279 char adapter_name[BNAD_NAME_LEN];
280 char port_name[BNAD_NAME_LEN];
281 char mbox_irq_name[BNAD_NAME_LEN];
282};
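
/*
 * Locking sketch (the pattern used throughout bnad.c): conf_mutex
 * serializes slow-path configuration from the OS interface, while
 * bna_lock protects the bna state machine and is taken IRQ-safe
 * inside it:
 *
 *	mutex_lock(&bnad->conf_mutex);
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	... operate on bna state ...
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	mutex_unlock(&bnad->conf_mutex);
 */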
283
284/*
285 * EXTERN VARIABLES
286 */
287extern struct firmware *bfi_fw;
288extern u32 bnad_rxqs_per_cq;
289
290/*
291 * EXTERN PROTOTYPES
292 */
293extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
294/* Netdev entry point prototypes */
295extern void bnad_set_ethtool_ops(struct net_device *netdev);
296
297/* Configuration & setup */
298extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
299extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
300
301extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
302extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
303extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
304extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
305
306/* Timer start/stop protos */
307extern void bnad_dim_timer_start(struct bnad *bnad);
308
309/* Statistics */
310extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
311extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
312
313/*
314 * MACROS
315 */
316/* To set & get the stats counters */
317#define BNAD_UPDATE_CTR(_bnad, _ctr) \
318 (((_bnad)->stats.drv_stats._ctr)++)
319
320#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
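
/*
 * Usage sketch: driver counters are bumped by field name, e.g.
 *
 *	BNAD_UPDATE_CTR(bnad, tso4);
 *	if (BNAD_GET_CTR(bnad, tso_err))
 *		...;
 *
 * where tso4 and tso_err are fields of struct bnad_drv_stats above.
 */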
321
322#define bnad_enable_rx_irq_unsafe(_ccb) \
323{ \
324 bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
325 (_ccb)->rx_coalescing_timeo); \
326 bna_ib_ack((_ccb)->i_dbell, 0); \
327}
328
329#define bnad_dim_timer_running(_bnad) \
330 (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
331 (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
332
333#endif /* __BNAD_H__ */
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
new file mode 100644
index 000000000000..11fa2ea842c1
--- /dev/null
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -0,0 +1,1277 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "cna.h"
20
21#include <linux/netdevice.h>
22#include <linux/skbuff.h>
23#include <linux/ethtool.h>
24#include <linux/rtnetlink.h>
25
26#include "bna.h"
27
28#include "bnad.h"
29
30#define BNAD_NUM_TXF_COUNTERS 12
31#define BNAD_NUM_RXF_COUNTERS 10
32#define BNAD_NUM_CQ_COUNTERS 3
33#define BNAD_NUM_RXQ_COUNTERS 6
34#define BNAD_NUM_TXQ_COUNTERS 5
35
36#define BNAD_ETHTOOL_STATS_NUM \
37 (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
38 sizeof(struct bnad_drv_stats) / sizeof(u64) + \
39 offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
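
/*
 * The table below must line up with this count: first the
 * rtnl_link_stats64 names, then the bnad_drv_stats names, then the
 * hardware stats up to (but excluding) the per-function rxf/txf
 * blocks, whose names are generated at runtime by bnad_get_strings().
 */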
40
41static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
42 "rx_packets",
43 "tx_packets",
44 "rx_bytes",
45 "tx_bytes",
46 "rx_errors",
47 "tx_errors",
48 "rx_dropped",
49 "tx_dropped",
50 "multicast",
51 "collisions",
52
53 "rx_length_errors",
54 "rx_over_errors",
55 "rx_crc_errors",
56 "rx_frame_errors",
57 "rx_fifo_errors",
58 "rx_missed_errors",
59
60 "tx_aborted_errors",
61 "tx_carrier_errors",
62 "tx_fifo_errors",
63 "tx_heartbeat_errors",
64 "tx_window_errors",
65
66 "rx_compressed",
67 "tx_compressed",
68
69 "netif_queue_stop",
70 "netif_queue_wakeup",
71 "tso4",
72 "tso6",
73 "tso_err",
74 "tcpcsum_offload",
75 "udpcsum_offload",
76 "csum_help",
77 "csum_help_err",
78 "hw_stats_updates",
79 "netif_rx_schedule",
80 "netif_rx_complete",
81 "netif_rx_dropped",
82
83 "link_toggle",
84 "cee_up",
85
86 "rxp_info_alloc_failed",
87 "mbox_intr_disabled",
88 "mbox_intr_enabled",
89 "tx_unmap_q_alloc_failed",
90 "rx_unmap_q_alloc_failed",
91 "rxbuf_alloc_failed",
92
93 "mac_frame_64",
94 "mac_frame_65_127",
95 "mac_frame_128_255",
96 "mac_frame_256_511",
97 "mac_frame_512_1023",
98 "mac_frame_1024_1518",
99 "mac_frame_1518_1522",
100 "mac_rx_bytes",
101 "mac_rx_packets",
102 "mac_rx_fcs_error",
103 "mac_rx_multicast",
104 "mac_rx_broadcast",
105 "mac_rx_control_frames",
106 "mac_rx_pause",
107 "mac_rx_unknown_opcode",
108 "mac_rx_alignment_error",
109 "mac_rx_frame_length_error",
110 "mac_rx_code_error",
111 "mac_rx_carrier_sense_error",
112 "mac_rx_undersize",
113 "mac_rx_oversize",
114 "mac_rx_fragments",
115 "mac_rx_jabber",
116 "mac_rx_drop",
117
118 "mac_tx_bytes",
119 "mac_tx_packets",
120 "mac_tx_multicast",
121 "mac_tx_broadcast",
122 "mac_tx_pause",
123 "mac_tx_deferral",
124 "mac_tx_excessive_deferral",
125 "mac_tx_single_collision",
126	"mac_tx_multiple_collision",
127 "mac_tx_late_collision",
128 "mac_tx_excessive_collision",
129 "mac_tx_total_collision",
130 "mac_tx_pause_honored",
131 "mac_tx_drop",
132 "mac_tx_jabber",
133 "mac_tx_fcs_error",
134 "mac_tx_control_frame",
135 "mac_tx_oversize",
136 "mac_tx_undersize",
137 "mac_tx_fragments",
138
139 "bpc_tx_pause_0",
140 "bpc_tx_pause_1",
141 "bpc_tx_pause_2",
142 "bpc_tx_pause_3",
143 "bpc_tx_pause_4",
144 "bpc_tx_pause_5",
145 "bpc_tx_pause_6",
146 "bpc_tx_pause_7",
147 "bpc_tx_zero_pause_0",
148 "bpc_tx_zero_pause_1",
149 "bpc_tx_zero_pause_2",
150 "bpc_tx_zero_pause_3",
151 "bpc_tx_zero_pause_4",
152 "bpc_tx_zero_pause_5",
153 "bpc_tx_zero_pause_6",
154 "bpc_tx_zero_pause_7",
155 "bpc_tx_first_pause_0",
156 "bpc_tx_first_pause_1",
157 "bpc_tx_first_pause_2",
158 "bpc_tx_first_pause_3",
159 "bpc_tx_first_pause_4",
160 "bpc_tx_first_pause_5",
161 "bpc_tx_first_pause_6",
162 "bpc_tx_first_pause_7",
163
164 "bpc_rx_pause_0",
165 "bpc_rx_pause_1",
166 "bpc_rx_pause_2",
167 "bpc_rx_pause_3",
168 "bpc_rx_pause_4",
169 "bpc_rx_pause_5",
170 "bpc_rx_pause_6",
171 "bpc_rx_pause_7",
172 "bpc_rx_zero_pause_0",
173 "bpc_rx_zero_pause_1",
174 "bpc_rx_zero_pause_2",
175 "bpc_rx_zero_pause_3",
176 "bpc_rx_zero_pause_4",
177 "bpc_rx_zero_pause_5",
178 "bpc_rx_zero_pause_6",
179 "bpc_rx_zero_pause_7",
180 "bpc_rx_first_pause_0",
181 "bpc_rx_first_pause_1",
182 "bpc_rx_first_pause_2",
183 "bpc_rx_first_pause_3",
184 "bpc_rx_first_pause_4",
185 "bpc_rx_first_pause_5",
186 "bpc_rx_first_pause_6",
187 "bpc_rx_first_pause_7",
188
189 "rad_rx_frames",
190 "rad_rx_octets",
191 "rad_rx_vlan_frames",
192 "rad_rx_ucast",
193 "rad_rx_ucast_octets",
194 "rad_rx_ucast_vlan",
195 "rad_rx_mcast",
196 "rad_rx_mcast_octets",
197 "rad_rx_mcast_vlan",
198 "rad_rx_bcast",
199 "rad_rx_bcast_octets",
200 "rad_rx_bcast_vlan",
201 "rad_rx_drops",
202
203 "fc_rx_ucast_octets",
204 "fc_rx_ucast",
205 "fc_rx_ucast_vlan",
206 "fc_rx_mcast_octets",
207 "fc_rx_mcast",
208 "fc_rx_mcast_vlan",
209 "fc_rx_bcast_octets",
210 "fc_rx_bcast",
211 "fc_rx_bcast_vlan",
212
213 "fc_tx_ucast_octets",
214 "fc_tx_ucast",
215 "fc_tx_ucast_vlan",
216 "fc_tx_mcast_octets",
217 "fc_tx_mcast",
218 "fc_tx_mcast_vlan",
219 "fc_tx_bcast_octets",
220 "fc_tx_bcast",
221 "fc_tx_bcast_vlan",
222 "fc_tx_parity_errors",
223 "fc_tx_timeout",
224 "fc_tx_fid_parity_errors",
225};
226
227static int
228bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
229{
230 cmd->supported = SUPPORTED_10000baseT_Full;
231 cmd->advertising = ADVERTISED_10000baseT_Full;
232 cmd->autoneg = AUTONEG_DISABLE;
233 cmd->supported |= SUPPORTED_FIBRE;
234 cmd->advertising |= ADVERTISED_FIBRE;
235 cmd->port = PORT_FIBRE;
236 cmd->phy_address = 0;
237
238 if (netif_carrier_ok(netdev)) {
239 cmd->speed = SPEED_10000;
240 cmd->duplex = DUPLEX_FULL;
241 } else {
242 cmd->speed = -1;
243 cmd->duplex = -1;
244 }
245 cmd->transceiver = XCVR_EXTERNAL;
246 cmd->maxtxpkt = 0;
247 cmd->maxrxpkt = 0;
248
249 return 0;
250}
251
252static int
253bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
254{
255	/* Only the 10G full-duplex setting is supported */
256	if (cmd->autoneg == AUTONEG_ENABLE)
257		return -EOPNOTSUPP;
258
259	if ((cmd->speed == SPEED_10000) && (cmd->duplex == DUPLEX_FULL))
260		return 0;
261
262	return -EOPNOTSUPP;
263}
264
265static void
266bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
267{
268 struct bnad *bnad = netdev_priv(netdev);
269 struct bfa_ioc_attr *ioc_attr;
270 unsigned long flags;
271
272 strcpy(drvinfo->driver, BNAD_NAME);
273 strcpy(drvinfo->version, BNAD_VERSION);
274
275	/* kzalloc() returns zeroed memory, so no memset is needed */
276	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
277	if (ioc_attr) {
278 spin_lock_irqsave(&bnad->bna_lock, flags);
279 bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
280 spin_unlock_irqrestore(&bnad->bna_lock, flags);
281
282 strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
283 sizeof(drvinfo->fw_version) - 1);
284 kfree(ioc_attr);
285 }
286
287 strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
288}
289
290static int
291get_regs(struct bnad *bnad, u32 * regs)
292{
293 int num = 0, i;
294 u32 reg_addr;
295 unsigned long flags;
296
297#define BNAD_GET_REG(addr) \
298do { \
299 if (regs) \
300 regs[num++] = readl(bnad->bar0 + (addr)); \
301 else \
302 num++; \
303} while (0)
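
/*
 * Two-pass idiom: get_regs(bnad, NULL) only counts the registers
 * (used by bnad_get_regs_len() below); a non-NULL buffer makes it
 * actually read them:
 *
 *	len = get_regs(bnad, NULL) * sizeof(u32);
 *	num = get_regs(bnad, buf);
 */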
304
305 spin_lock_irqsave(&bnad->bna_lock, flags);
306
307 /* DMA Block Internal Registers */
308 BNAD_GET_REG(DMA_CTRL_REG0);
309 BNAD_GET_REG(DMA_CTRL_REG1);
310 BNAD_GET_REG(DMA_ERR_INT_STATUS);
311 BNAD_GET_REG(DMA_ERR_INT_ENABLE);
312 BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);
313
314 /* APP Block Register Address Offset from BAR0 */
315 BNAD_GET_REG(HOSTFN0_INT_STATUS);
316 BNAD_GET_REG(HOSTFN0_INT_MASK);
317 BNAD_GET_REG(HOST_PAGE_NUM_FN0);
318 BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
319 BNAD_GET_REG(FN0_PCIE_ERR_REG);
320 BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
321 BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);
322
323 BNAD_GET_REG(HOSTFN1_INT_STATUS);
324 BNAD_GET_REG(HOSTFN1_INT_MASK);
325 BNAD_GET_REG(HOST_PAGE_NUM_FN1);
326 BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
327 BNAD_GET_REG(FN1_PCIE_ERR_REG);
328 BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
329 BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);
330
331 BNAD_GET_REG(PCIE_MISC_REG);
332
333 BNAD_GET_REG(HOST_SEM0_REG);
334 BNAD_GET_REG(HOST_SEM1_REG);
335 BNAD_GET_REG(HOST_SEM2_REG);
336 BNAD_GET_REG(HOST_SEM3_REG);
337 BNAD_GET_REG(HOST_SEM0_INFO_REG);
338 BNAD_GET_REG(HOST_SEM1_INFO_REG);
339 BNAD_GET_REG(HOST_SEM2_INFO_REG);
340 BNAD_GET_REG(HOST_SEM3_INFO_REG);
341
342 BNAD_GET_REG(TEMPSENSE_CNTL_REG);
343 BNAD_GET_REG(TEMPSENSE_STAT_REG);
344
345 BNAD_GET_REG(APP_LOCAL_ERR_STAT);
346 BNAD_GET_REG(APP_LOCAL_ERR_MSK);
347
348 BNAD_GET_REG(PCIE_LNK_ERR_STAT);
349 BNAD_GET_REG(PCIE_LNK_ERR_MSK);
350
351 BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
352 BNAD_GET_REG(RESV_ETH_TYPE);
353
354 BNAD_GET_REG(HOSTFN2_INT_STATUS);
355 BNAD_GET_REG(HOSTFN2_INT_MASK);
356 BNAD_GET_REG(HOST_PAGE_NUM_FN2);
357 BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
358 BNAD_GET_REG(FN2_PCIE_ERR_REG);
359 BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
360 BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);
361
362 BNAD_GET_REG(HOSTFN3_INT_STATUS);
363 BNAD_GET_REG(HOSTFN3_INT_MASK);
364 BNAD_GET_REG(HOST_PAGE_NUM_FN3);
365 BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
366 BNAD_GET_REG(FN3_PCIE_ERR_REG);
367 BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
368 BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);
369
370 /* Host Command Status Registers */
371 reg_addr = HOST_CMDSTS0_CLR_REG;
372 for (i = 0; i < 16; i++) {
373 BNAD_GET_REG(reg_addr);
374 BNAD_GET_REG(reg_addr + 4);
375 BNAD_GET_REG(reg_addr + 8);
376 reg_addr += 0x10;
377 }
378
379 /* Function ID register */
380 BNAD_GET_REG(FNC_ID_REG);
381
382 /* Function personality register */
383 BNAD_GET_REG(FNC_PERS_REG);
384
385 /* Operation mode register */
386 BNAD_GET_REG(OP_MODE);
387
388 /* LPU0 Registers */
389 BNAD_GET_REG(LPU0_MBOX_CTL_REG);
390 BNAD_GET_REG(LPU0_MBOX_CMD_REG);
391 BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
392 BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
393 BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
394 BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
395 BNAD_GET_REG(LPU0_ERR_STATUS_REG);
396 BNAD_GET_REG(LPU0_ERR_SET_REG);
397
398 /* LPU1 Registers */
399 BNAD_GET_REG(LPU1_MBOX_CTL_REG);
400 BNAD_GET_REG(LPU1_MBOX_CMD_REG);
401 BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
402 BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
403 BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
404 BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
405 BNAD_GET_REG(LPU1_ERR_STATUS_REG);
406 BNAD_GET_REG(LPU1_ERR_SET_REG);
407
408 /* PSS Registers */
409 BNAD_GET_REG(PSS_CTL_REG);
410 BNAD_GET_REG(PSS_ERR_STATUS_REG);
411 BNAD_GET_REG(ERR_STATUS_SET);
412 BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);
413
414 /* Catapult CPQ Registers */
415 BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
416 BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
417 BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
418 BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);
419
420 BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
421 BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
422 BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
423 BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);
424
425 BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
426 BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
427 BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
428 BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);
429
430 BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
431 BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
432 BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
433 BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);
434
435 BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
436 BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
437 BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
438 BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);
439
440 BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
441 BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
442 BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
443 BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);
444
445 BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
446 BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
447 BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
448 BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);
449
450 BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
451 BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
452 BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
453 BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);
454
455 /* Host Function Force Parity Error Registers */
456 BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
457 BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
458 BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
459 BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);
460
461 /* LL Port[0|1] Halt Mask Registers */
462 BNAD_GET_REG(LL_HALT_MSK_P0);
463 BNAD_GET_REG(LL_HALT_MSK_P1);
464
465 /* LL Port[0|1] Error Mask Registers */
466 BNAD_GET_REG(LL_ERR_MSK_P0);
467 BNAD_GET_REG(LL_ERR_MSK_P1);
468
469 /* EMC FLI Registers */
470 BNAD_GET_REG(FLI_CMD_REG);
471 BNAD_GET_REG(FLI_ADDR_REG);
472 BNAD_GET_REG(FLI_CTL_REG);
473 BNAD_GET_REG(FLI_WRDATA_REG);
474 BNAD_GET_REG(FLI_RDDATA_REG);
475 BNAD_GET_REG(FLI_DEV_STATUS_REG);
476 BNAD_GET_REG(FLI_SIG_WD_REG);
477
478 BNAD_GET_REG(FLI_DEV_VENDOR_REG);
479 BNAD_GET_REG(FLI_ERR_STATUS_REG);
480
481 /* RxAdm 0 Registers */
482 BNAD_GET_REG(RAD0_CTL_REG);
483 BNAD_GET_REG(RAD0_PE_PARM_REG);
484 BNAD_GET_REG(RAD0_BCN_REG);
485 BNAD_GET_REG(RAD0_DEFAULT_REG);
486 BNAD_GET_REG(RAD0_PROMISC_REG);
487 BNAD_GET_REG(RAD0_BCNQ_REG);
488 BNAD_GET_REG(RAD0_DEFAULTQ_REG);
489
490 BNAD_GET_REG(RAD0_ERR_STS);
491 BNAD_GET_REG(RAD0_SET_ERR_STS);
492 BNAD_GET_REG(RAD0_ERR_INT_EN);
493 BNAD_GET_REG(RAD0_FIRST_ERR);
494 BNAD_GET_REG(RAD0_FORCE_ERR);
495
496 BNAD_GET_REG(RAD0_MAC_MAN_1H);
497 BNAD_GET_REG(RAD0_MAC_MAN_1L);
498 BNAD_GET_REG(RAD0_MAC_MAN_2H);
499 BNAD_GET_REG(RAD0_MAC_MAN_2L);
500 BNAD_GET_REG(RAD0_MAC_MAN_3H);
501 BNAD_GET_REG(RAD0_MAC_MAN_3L);
502 BNAD_GET_REG(RAD0_MAC_MAN_4H);
503 BNAD_GET_REG(RAD0_MAC_MAN_4L);
504
505 BNAD_GET_REG(RAD0_LAST4_IP);
506
507 /* RxAdm 1 Registers */
508 BNAD_GET_REG(RAD1_CTL_REG);
509 BNAD_GET_REG(RAD1_PE_PARM_REG);
510 BNAD_GET_REG(RAD1_BCN_REG);
511 BNAD_GET_REG(RAD1_DEFAULT_REG);
512 BNAD_GET_REG(RAD1_PROMISC_REG);
513 BNAD_GET_REG(RAD1_BCNQ_REG);
514 BNAD_GET_REG(RAD1_DEFAULTQ_REG);
515
516 BNAD_GET_REG(RAD1_ERR_STS);
517 BNAD_GET_REG(RAD1_SET_ERR_STS);
518 BNAD_GET_REG(RAD1_ERR_INT_EN);
519
520 /* TxA0 Registers */
521 BNAD_GET_REG(TXA0_CTRL_REG);
522 /* TxA0 TSO Sequence # Registers (RO) */
523 for (i = 0; i < 8; i++) {
524 BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
525 BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
526 }
527
528 /* TxA1 Registers */
529 BNAD_GET_REG(TXA1_CTRL_REG);
530 /* TxA1 TSO Sequence # Registers (RO) */
531 for (i = 0; i < 8; i++) {
532 BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
533 BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
534 }
535
536 /* RxA Registers */
537 BNAD_GET_REG(RXA0_CTL_REG);
538 BNAD_GET_REG(RXA1_CTL_REG);
539
540 /* PLB0 Registers */
541 BNAD_GET_REG(PLB0_ECM_TIMER_REG);
542 BNAD_GET_REG(PLB0_RL_CTL);
543 for (i = 0; i < 8; i++)
544 BNAD_GET_REG(PLB0_RL_MAX_BC(i));
545 BNAD_GET_REG(PLB0_RL_TU_PRIO);
546 for (i = 0; i < 8; i++)
547 BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
548 BNAD_GET_REG(PLB0_RL_MIN_REG);
549 BNAD_GET_REG(PLB0_RL_MAX_REG);
550 BNAD_GET_REG(PLB0_EMS_ADD_REG);
551
552 /* PLB1 Registers */
553 BNAD_GET_REG(PLB1_ECM_TIMER_REG);
554 BNAD_GET_REG(PLB1_RL_CTL);
555 for (i = 0; i < 8; i++)
556 BNAD_GET_REG(PLB1_RL_MAX_BC(i));
557 BNAD_GET_REG(PLB1_RL_TU_PRIO);
558 for (i = 0; i < 8; i++)
559 BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
560 BNAD_GET_REG(PLB1_RL_MIN_REG);
561 BNAD_GET_REG(PLB1_RL_MAX_REG);
562 BNAD_GET_REG(PLB1_EMS_ADD_REG);
563
564 /* HQM Control Register */
565 BNAD_GET_REG(HQM0_CTL_REG);
566 BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
567 BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
568 BNAD_GET_REG(HQM1_CTL_REG);
569 BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
570 BNAD_GET_REG(HQM1_TXQ_STOP_SEM);
571
572 /* LUT Registers */
573 BNAD_GET_REG(LUT0_ERR_STS);
574 BNAD_GET_REG(LUT0_SET_ERR_STS);
575 BNAD_GET_REG(LUT1_ERR_STS);
576 BNAD_GET_REG(LUT1_SET_ERR_STS);
577
578 /* TRC Registers */
579 BNAD_GET_REG(TRC_CTL_REG);
580 BNAD_GET_REG(TRC_MODS_REG);
581 BNAD_GET_REG(TRC_TRGC_REG);
582 BNAD_GET_REG(TRC_CNT1_REG);
583 BNAD_GET_REG(TRC_CNT2_REG);
584 BNAD_GET_REG(TRC_NXTS_REG);
585 BNAD_GET_REG(TRC_DIRR_REG);
586 for (i = 0; i < 10; i++)
587 BNAD_GET_REG(TRC_TRGM_REG(i));
588 for (i = 0; i < 10; i++)
589 BNAD_GET_REG(TRC_NXTM_REG(i));
590 for (i = 0; i < 10; i++)
591 BNAD_GET_REG(TRC_STRM_REG(i));
592
593 spin_unlock_irqrestore(&bnad->bna_lock, flags);
594#undef BNAD_GET_REG
595 return num;
596}

597static int
598bnad_get_regs_len(struct net_device *netdev)
599{
600 int ret = get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
601 return ret;
602}
603
604static void
605bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
606{
607 memset(buf, 0, bnad_get_regs_len(netdev));
608 get_regs(netdev_priv(netdev), buf);
609}
610
611static void
612bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
613{
614 wolinfo->supported = 0;
615 wolinfo->wolopts = 0;
616}
617
618static int
619bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
620{
621 struct bnad *bnad = netdev_priv(netdev);
622 unsigned long flags;
623
624	/* bna_lock required to access bnad->cfg_flags */
625 spin_lock_irqsave(&bnad->bna_lock, flags);
626 coalesce->use_adaptive_rx_coalesce =
627 (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
628 spin_unlock_irqrestore(&bnad->bna_lock, flags);
629
630 coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
631 BFI_COALESCING_TIMER_UNIT;
632 coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
633 BFI_COALESCING_TIMER_UNIT;
634 coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
635
636 return 0;
637}
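
/*
 * Unit illustration (tick size assumed): timeouts are stored in
 * BFI_COALESCING_TIMER_UNIT ticks. If the unit were 5 usecs and
 * rx_coalescing_timeo were 12, ethtool would see 12 * 5 = 60 usecs.
 */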
638
639static int
640bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
641{
642 struct bnad *bnad = netdev_priv(netdev);
643 unsigned long flags;
644 int dim_timer_del = 0;
645
646 if (coalesce->rx_coalesce_usecs == 0 ||
647 coalesce->rx_coalesce_usecs >
648 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
649 return -EINVAL;
650
651 if (coalesce->tx_coalesce_usecs == 0 ||
652 coalesce->tx_coalesce_usecs >
653 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
654 return -EINVAL;
655
656 mutex_lock(&bnad->conf_mutex);
657 /*
658	 * No need to store rx_coalesce_usecs here: every time DIM
659	 * is disabled, the value can be read back from the
660	 * stack.
661 */
662 spin_lock_irqsave(&bnad->bna_lock, flags);
663 if (coalesce->use_adaptive_rx_coalesce) {
664 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
665 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
666 bnad_dim_timer_start(bnad);
667 }
668 } else {
669 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
670 bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
671 dim_timer_del = bnad_dim_timer_running(bnad);
672 if (dim_timer_del) {
673 clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
674 &bnad->run_flags);
675 spin_unlock_irqrestore(&bnad->bna_lock, flags);
676 del_timer_sync(&bnad->dim_timer);
677 spin_lock_irqsave(&bnad->bna_lock, flags);
678 }
679 bnad_rx_coalescing_timeo_set(bnad);
680 }
681 }
682 if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
683 BFI_COALESCING_TIMER_UNIT) {
684 bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
685 BFI_COALESCING_TIMER_UNIT;
686 bnad_tx_coalescing_timeo_set(bnad);
687 }
688
689 if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
690 BFI_COALESCING_TIMER_UNIT) {
691 bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
692 BFI_COALESCING_TIMER_UNIT;
693
694 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
695 bnad_rx_coalescing_timeo_set(bnad);
696
697 }
698
699 /* Add Tx Inter-pkt DMA count? */
700
701 spin_unlock_irqrestore(&bnad->bna_lock, flags);
702
703 mutex_unlock(&bnad->conf_mutex);
704 return 0;
705}
706
707static void
708bnad_get_ringparam(struct net_device *netdev,
709 struct ethtool_ringparam *ringparam)
710{
711 struct bnad *bnad = netdev_priv(netdev);
712
713 ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
714 ringparam->rx_mini_max_pending = 0;
715 ringparam->rx_jumbo_max_pending = 0;
716 ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
717
718 ringparam->rx_pending = bnad->rxq_depth;
719	ringparam->rx_mini_pending = 0;
720	ringparam->rx_jumbo_pending = 0;
721 ringparam->tx_pending = bnad->txq_depth;
722}
723
724static int
725bnad_set_ringparam(struct net_device *netdev,
726 struct ethtool_ringparam *ringparam)
727{
728 int i, current_err, err = 0;
729 struct bnad *bnad = netdev_priv(netdev);
730
731 mutex_lock(&bnad->conf_mutex);
732 if (ringparam->rx_pending == bnad->rxq_depth &&
733 ringparam->tx_pending == bnad->txq_depth) {
734 mutex_unlock(&bnad->conf_mutex);
735 return 0;
736 }
737
738 if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
739 ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
740 !BNA_POWER_OF_2(ringparam->rx_pending)) {
741 mutex_unlock(&bnad->conf_mutex);
742 return -EINVAL;
743 }
744 if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
745 ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
746 !BNA_POWER_OF_2(ringparam->tx_pending)) {
747 mutex_unlock(&bnad->conf_mutex);
748 return -EINVAL;
749 }
750
751 if (ringparam->rx_pending != bnad->rxq_depth) {
752 bnad->rxq_depth = ringparam->rx_pending;
753 for (i = 0; i < bnad->num_rx; i++) {
754 if (!bnad->rx_info[i].rx)
755 continue;
756 bnad_cleanup_rx(bnad, i);
757 current_err = bnad_setup_rx(bnad, i);
758 if (current_err && !err)
759 err = current_err;
760 }
761 }
762 if (ringparam->tx_pending != bnad->txq_depth) {
763 bnad->txq_depth = ringparam->tx_pending;
764 for (i = 0; i < bnad->num_tx; i++) {
765 if (!bnad->tx_info[i].tx)
766 continue;
767 bnad_cleanup_tx(bnad, i);
768 current_err = bnad_setup_tx(bnad, i);
769 if (current_err && !err)
770 err = current_err;
771 }
772 }
773
774 mutex_unlock(&bnad->conf_mutex);
775 return err;
776}
777
778static void
779bnad_get_pauseparam(struct net_device *netdev,
780 struct ethtool_pauseparam *pauseparam)
781{
782 struct bnad *bnad = netdev_priv(netdev);
783
784 pauseparam->autoneg = 0;
785 pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
786 pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
787}
788
789static int
790bnad_set_pauseparam(struct net_device *netdev,
791 struct ethtool_pauseparam *pauseparam)
792{
793 struct bnad *bnad = netdev_priv(netdev);
794 struct bna_pause_config pause_config;
795 unsigned long flags;
796
797 if (pauseparam->autoneg == AUTONEG_ENABLE)
798 return -EINVAL;
799
800 mutex_lock(&bnad->conf_mutex);
801 if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
802 pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
803 pause_config.rx_pause = pauseparam->rx_pause;
804 pause_config.tx_pause = pauseparam->tx_pause;
805 spin_lock_irqsave(&bnad->bna_lock, flags);
806 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
807 spin_unlock_irqrestore(&bnad->bna_lock, flags);
808 }
809 mutex_unlock(&bnad->conf_mutex);
810 return 0;
811}
812
813static u32
814bnad_get_rx_csum(struct net_device *netdev)
815{
816 u32 rx_csum;
817 struct bnad *bnad = netdev_priv(netdev);
818
819 rx_csum = bnad->rx_csum;
820 return rx_csum;
821}
822
823static int
824bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
825{
826 struct bnad *bnad = netdev_priv(netdev);
827
828 mutex_lock(&bnad->conf_mutex);
829 bnad->rx_csum = rx_csum;
830 mutex_unlock(&bnad->conf_mutex);
831 return 0;
832}
833
834static int
835bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
836{
837 struct bnad *bnad = netdev_priv(netdev);
838
839 mutex_lock(&bnad->conf_mutex);
840 if (tx_csum) {
841 netdev->features |= NETIF_F_IP_CSUM;
842 netdev->features |= NETIF_F_IPV6_CSUM;
843 } else {
844 netdev->features &= ~NETIF_F_IP_CSUM;
845 netdev->features &= ~NETIF_F_IPV6_CSUM;
846 }
847 mutex_unlock(&bnad->conf_mutex);
848 return 0;
849}
850
851static int
852bnad_set_tso(struct net_device *netdev, u32 tso)
853{
854 struct bnad *bnad = netdev_priv(netdev);
855
856 mutex_lock(&bnad->conf_mutex);
857 if (tso) {
858 netdev->features |= NETIF_F_TSO;
859 netdev->features |= NETIF_F_TSO6;
860 } else {
861 netdev->features &= ~NETIF_F_TSO;
862 netdev->features &= ~NETIF_F_TSO6;
863 }
864 mutex_unlock(&bnad->conf_mutex);
865 return 0;
866}
867
868static void
869bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
870{
871 struct bnad *bnad = netdev_priv(netdev);
872 int i, j, q_num;
873 u64 bmap;
874
875 mutex_lock(&bnad->conf_mutex);
876
877 switch (stringset) {
878 case ETH_SS_STATS:
879 for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
880			BUG_ON(strlen(bnad_net_stats_strings[i]) >=
881			       ETH_GSTRING_LEN);
882 memcpy(string, bnad_net_stats_strings[i],
883 ETH_GSTRING_LEN);
884 string += ETH_GSTRING_LEN;
885 }
886 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
887 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
888 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
889 if (bmap & 1) {
890 sprintf(string, "txf%d_ucast_octets", i);
891 string += ETH_GSTRING_LEN;
892 sprintf(string, "txf%d_ucast", i);
893 string += ETH_GSTRING_LEN;
894 sprintf(string, "txf%d_ucast_vlan", i);
895 string += ETH_GSTRING_LEN;
896 sprintf(string, "txf%d_mcast_octets", i);
897 string += ETH_GSTRING_LEN;
898 sprintf(string, "txf%d_mcast", i);
899 string += ETH_GSTRING_LEN;
900 sprintf(string, "txf%d_mcast_vlan", i);
901 string += ETH_GSTRING_LEN;
902 sprintf(string, "txf%d_bcast_octets", i);
903 string += ETH_GSTRING_LEN;
904 sprintf(string, "txf%d_bcast", i);
905 string += ETH_GSTRING_LEN;
906 sprintf(string, "txf%d_bcast_vlan", i);
907 string += ETH_GSTRING_LEN;
908 sprintf(string, "txf%d_errors", i);
909 string += ETH_GSTRING_LEN;
910 sprintf(string, "txf%d_filter_vlan", i);
911 string += ETH_GSTRING_LEN;
912 sprintf(string, "txf%d_filter_mac_sa", i);
913 string += ETH_GSTRING_LEN;
914 }
915 bmap >>= 1;
916 }
917
918 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
919 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
920 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
921 if (bmap & 1) {
922 sprintf(string, "rxf%d_ucast_octets", i);
923 string += ETH_GSTRING_LEN;
924 sprintf(string, "rxf%d_ucast", i);
925 string += ETH_GSTRING_LEN;
926 sprintf(string, "rxf%d_ucast_vlan", i);
927 string += ETH_GSTRING_LEN;
928 sprintf(string, "rxf%d_mcast_octets", i);
929 string += ETH_GSTRING_LEN;
930 sprintf(string, "rxf%d_mcast", i);
931 string += ETH_GSTRING_LEN;
932 sprintf(string, "rxf%d_mcast_vlan", i);
933 string += ETH_GSTRING_LEN;
934 sprintf(string, "rxf%d_bcast_octets", i);
935 string += ETH_GSTRING_LEN;
936 sprintf(string, "rxf%d_bcast", i);
937 string += ETH_GSTRING_LEN;
938 sprintf(string, "rxf%d_bcast_vlan", i);
939 string += ETH_GSTRING_LEN;
940 sprintf(string, "rxf%d_frame_drops", i);
941 string += ETH_GSTRING_LEN;
942 }
943 bmap >>= 1;
944 }
945
946 q_num = 0;
947 for (i = 0; i < bnad->num_rx; i++) {
948 if (!bnad->rx_info[i].rx)
949 continue;
950 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
951 sprintf(string, "cq%d_producer_index", q_num);
952 string += ETH_GSTRING_LEN;
953 sprintf(string, "cq%d_consumer_index", q_num);
954 string += ETH_GSTRING_LEN;
955 sprintf(string, "cq%d_hw_producer_index",
956 q_num);
957 string += ETH_GSTRING_LEN;
958 q_num++;
959 }
960 }
961
962 q_num = 0;
963 for (i = 0; i < bnad->num_rx; i++) {
964 if (!bnad->rx_info[i].rx)
965 continue;
966 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
967 sprintf(string, "rxq%d_packets", q_num);
968 string += ETH_GSTRING_LEN;
969 sprintf(string, "rxq%d_bytes", q_num);
970 string += ETH_GSTRING_LEN;
971 sprintf(string, "rxq%d_packets_with_error",
972 q_num);
973 string += ETH_GSTRING_LEN;
974 sprintf(string, "rxq%d_allocbuf_failed", q_num);
975 string += ETH_GSTRING_LEN;
976 sprintf(string, "rxq%d_producer_index", q_num);
977 string += ETH_GSTRING_LEN;
978 sprintf(string, "rxq%d_consumer_index", q_num);
979 string += ETH_GSTRING_LEN;
980 q_num++;
981 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
982 bnad->rx_info[i].rx_ctrl[j].ccb->
983 rcb[1] &&
984 bnad->rx_info[i].rx_ctrl[j].ccb->
985 rcb[1]->rxq) {
986 sprintf(string, "rxq%d_packets", q_num);
987 string += ETH_GSTRING_LEN;
988 sprintf(string, "rxq%d_bytes", q_num);
989 string += ETH_GSTRING_LEN;
990 sprintf(string,
991 "rxq%d_packets_with_error", q_num);
992 string += ETH_GSTRING_LEN;
993 sprintf(string, "rxq%d_allocbuf_failed",
994 q_num);
995 string += ETH_GSTRING_LEN;
996 sprintf(string, "rxq%d_producer_index",
997 q_num);
998 string += ETH_GSTRING_LEN;
999 sprintf(string, "rxq%d_consumer_index",
1000 q_num);
1001 string += ETH_GSTRING_LEN;
1002 q_num++;
1003 }
1004 }
1005 }
1006
1007 q_num = 0;
1008 for (i = 0; i < bnad->num_tx; i++) {
1009 if (!bnad->tx_info[i].tx)
1010 continue;
1011 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1012 sprintf(string, "txq%d_packets", q_num);
1013 string += ETH_GSTRING_LEN;
1014 sprintf(string, "txq%d_bytes", q_num);
1015 string += ETH_GSTRING_LEN;
1016 sprintf(string, "txq%d_producer_index", q_num);
1017 string += ETH_GSTRING_LEN;
1018 sprintf(string, "txq%d_consumer_index", q_num);
1019 string += ETH_GSTRING_LEN;
1020 sprintf(string, "txq%d_hw_consumer_index",
1021 q_num);
1022 string += ETH_GSTRING_LEN;
1023 q_num++;
1024 }
1025 }
1026
1027 break;
1028
1029 default:
1030 break;
1031 }
1032
1033 mutex_unlock(&bnad->conf_mutex);
1034}
1035
1036static int
1037bnad_get_stats_count_locked(struct net_device *netdev)
1038{
1039 struct bnad *bnad = netdev_priv(netdev);
1040 int i, j, count, rxf_active_num = 0, txf_active_num = 0;
1041 u64 bmap;
1042
1043 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
1044 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
1045 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
1046 if (bmap & 1)
1047 txf_active_num++;
1048 bmap >>= 1;
1049 }
1050 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
1051 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
1052 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
1053 if (bmap & 1)
1054 rxf_active_num++;
1055 bmap >>= 1;
1056 }
1057 count = BNAD_ETHTOOL_STATS_NUM +
1058 txf_active_num * BNAD_NUM_TXF_COUNTERS +
1059 rxf_active_num * BNAD_NUM_RXF_COUNTERS;
1060
1061 for (i = 0; i < bnad->num_rx; i++) {
1062 if (!bnad->rx_info[i].rx)
1063 continue;
1064 count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
1065 count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
1066 for (j = 0; j < bnad->num_rxp_per_rx; j++)
1067 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
1068 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1069 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
1070 count += BNAD_NUM_RXQ_COUNTERS;
1071 }
1072
1073 for (i = 0; i < bnad->num_tx; i++) {
1074 if (!bnad->tx_info[i].tx)
1075 continue;
1076 count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
1077 }
1078 return count;
1079}
1080
1081static int
1082bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
1083{
1084 int i, j;
1085 struct bna_rcb *rcb = NULL;
1086 struct bna_tcb *tcb = NULL;
1087
1088 for (i = 0; i < bnad->num_rx; i++) {
1089 if (!bnad->rx_info[i].rx)
1090 continue;
1091 for (j = 0; j < bnad->num_rxp_per_rx; j++)
1092 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
1093 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
1094 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
1095 buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
1096 ccb->producer_index;
1097 buf[bi++] = 0; /* ccb->consumer_index */
1098 buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
1099 ccb->hw_producer_index);
1100 }
1101 }
1102 for (i = 0; i < bnad->num_rx; i++) {
1103 if (!bnad->rx_info[i].rx)
1104 continue;
1105 for (j = 0; j < bnad->num_rxp_per_rx; j++)
1106 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1107 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
1108 bnad->rx_info[i].rx_ctrl[j].ccb->
1109 rcb[0]->rxq) {
1110 rcb = bnad->rx_info[i].rx_ctrl[j].
1111 ccb->rcb[0];
1112 buf[bi++] = rcb->rxq->rx_packets;
1113 buf[bi++] = rcb->rxq->rx_bytes;
1114 buf[bi++] = rcb->rxq->
1115 rx_packets_with_error;
1116 buf[bi++] = rcb->rxq->
1117 rxbuf_alloc_failed;
1118 buf[bi++] = rcb->producer_index;
1119 buf[bi++] = rcb->consumer_index;
1120 }
1121 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1122 bnad->rx_info[i].rx_ctrl[j].ccb->
1123 rcb[1]->rxq) {
1124 rcb = bnad->rx_info[i].rx_ctrl[j].
1125 ccb->rcb[1];
1126 buf[bi++] = rcb->rxq->rx_packets;
1127 buf[bi++] = rcb->rxq->rx_bytes;
1128 buf[bi++] = rcb->rxq->
1129 rx_packets_with_error;
1130 buf[bi++] = rcb->rxq->
1131 rxbuf_alloc_failed;
1132 buf[bi++] = rcb->producer_index;
1133 buf[bi++] = rcb->consumer_index;
1134 }
1135 }
1136 }
1137
1138 for (i = 0; i < bnad->num_tx; i++) {
1139 if (!bnad->tx_info[i].tx)
1140 continue;
1141 for (j = 0; j < bnad->num_txq_per_tx; j++)
1142 if (bnad->tx_info[i].tcb[j] &&
1143 bnad->tx_info[i].tcb[j]->txq) {
1144 tcb = bnad->tx_info[i].tcb[j];
1145 buf[bi++] = tcb->txq->tx_packets;
1146 buf[bi++] = tcb->txq->tx_bytes;
1147 buf[bi++] = tcb->producer_index;
1148 buf[bi++] = tcb->consumer_index;
1149 buf[bi++] = *(tcb->hw_consumer_index);
1150 }
1151 }
1152
1153 return bi;
1154}
1155
1156static void
1157bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
1158 u64 *buf)
1159{
1160 struct bnad *bnad = netdev_priv(netdev);
1161 int i, j, bi;
1162 unsigned long flags;
1163 struct rtnl_link_stats64 *net_stats64;
1164 u64 *stats64;
1165 u64 bmap;
1166
1167 mutex_lock(&bnad->conf_mutex);
1168 if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
1169 mutex_unlock(&bnad->conf_mutex);
1170 return;
1171 }
1172
1173 /*
1174	 * Use bna_lock to sync reads from bna_stats, which is written
1175 * under the same lock
1176 */
1177 spin_lock_irqsave(&bnad->bna_lock, flags);
1178 bi = 0;
1179 memset(buf, 0, stats->n_stats * sizeof(u64));
1180
1181 net_stats64 = (struct rtnl_link_stats64 *)buf;
1182 bnad_netdev_qstats_fill(bnad, net_stats64);
1183 bnad_netdev_hwstats_fill(bnad, net_stats64);
1184
1185 bi = sizeof(*net_stats64) / sizeof(u64);
1186
1187 /* Fill driver stats into ethtool buffers */
1188 stats64 = (u64 *)&bnad->stats.drv_stats;
1189 for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
1190 buf[bi++] = stats64[i];
1191
1192 /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
1193 stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
1194 for (i = 0;
1195 i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
1196 i++)
1197 buf[bi++] = stats64[i];
1198
1199 /* Fill txf stats into ethtool buffers */
1200 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
1201 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
1202 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
1203 if (bmap & 1) {
1204 stats64 = (u64 *)&bnad->stats.bna_stats->
1205 hw_stats->txf_stats[i];
1206 for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
1207 sizeof(u64); j++)
1208 buf[bi++] = stats64[j];
1209 }
1210 bmap >>= 1;
1211 }
1212
1213 /* Fill rxf stats into ethtool buffers */
1214 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
1215 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
1216 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
1217 if (bmap & 1) {
1218 stats64 = (u64 *)&bnad->stats.bna_stats->
1219 hw_stats->rxf_stats[i];
1220 for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
1221 sizeof(u64); j++)
1222 buf[bi++] = stats64[j];
1223 }
1224 bmap >>= 1;
1225 }
1226
1227 /* Fill per Q stats into ethtool buffers */
1228 bi = bnad_per_q_stats_fill(bnad, buf, bi);
1229
1230 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1231
1232 mutex_unlock(&bnad->conf_mutex);
1233}
1234
1235static int
1236bnad_get_sset_count(struct net_device *netdev, int sset)
1237{
1238 switch (sset) {
1239 case ETH_SS_STATS:
1240 return bnad_get_stats_count_locked(netdev);
1241 default:
1242 return -EOPNOTSUPP;
1243 }
1244}
1245
1246static struct ethtool_ops bnad_ethtool_ops = {
1247 .get_settings = bnad_get_settings,
1248 .set_settings = bnad_set_settings,
1249 .get_drvinfo = bnad_get_drvinfo,
1250 .get_regs_len = bnad_get_regs_len,
1251 .get_regs = bnad_get_regs,
1252 .get_wol = bnad_get_wol,
1253 .get_link = ethtool_op_get_link,
1254 .get_coalesce = bnad_get_coalesce,
1255 .set_coalesce = bnad_set_coalesce,
1256 .get_ringparam = bnad_get_ringparam,
1257 .set_ringparam = bnad_set_ringparam,
1258 .get_pauseparam = bnad_get_pauseparam,
1259 .set_pauseparam = bnad_set_pauseparam,
1260 .get_rx_csum = bnad_get_rx_csum,
1261 .set_rx_csum = bnad_set_rx_csum,
1262 .get_tx_csum = ethtool_op_get_tx_csum,
1263 .set_tx_csum = bnad_set_tx_csum,
1264 .get_sg = ethtool_op_get_sg,
1265 .set_sg = ethtool_op_set_sg,
1266 .get_tso = ethtool_op_get_tso,
1267 .set_tso = bnad_set_tso,
1268 .get_strings = bnad_get_strings,
1269 .get_ethtool_stats = bnad_get_ethtool_stats,
1270 .get_sset_count = bnad_get_sset_count
1271};
1272
1273void
1274bnad_set_ethtool_ops(struct net_device *netdev)
1275{
1276 SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
1277}
diff --git a/drivers/net/bna/cna.h b/drivers/net/bna/cna.h
new file mode 100644
index 000000000000..bbd39dc65972
--- /dev/null
+++ b/drivers/net/bna/cna.h
@@ -0,0 +1,81 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __CNA_H__
20#define __CNA_H__
21
22#include <linux/version.h>
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/bitops.h>
28#include <linux/timer.h>
29#include <linux/interrupt.h>
30#include <linux/if_ether.h>
31#include <asm/page.h>
32#include <asm/io.h>
33#include <asm/string.h>
34
35#include <linux/list.h>
36
37#define bfa_sm_fault(__mod, __event) do { \
38	pr_err("SM Assertion failure: %s: %d: event = %d\n", __FILE__, __LINE__, \
39 __event); \
40} while (0)
41
42extern char bfa_version[];
43
44#define CNA_FW_FILE_CT "ctfw_cna.bin"
45#define FC_SYMNAME_MAX	256	/* max name server symbolic name size */
46
47#pragma pack(1)
48
49#define MAC_ADDRLEN (6)
50typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
51
52#pragma pack()
53
54#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
55#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
56#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
57
58/*
59 * bfa_q_qe_init - to initialize a queue element
60 */
61#define bfa_q_qe_init(_qe) { \
62 bfa_q_next(_qe) = (struct list_head *) NULL; \
63 bfa_q_prev(_qe) = (struct list_head *) NULL; \
64}
65
66/*
67 * bfa_q_deq - dequeue an element from head of the queue
68 */
69#define bfa_q_deq(_q, _qe) { \
70 if (!list_empty(_q)) { \
71 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
72 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
73 (struct list_head *) (_q); \
74 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
75 bfa_q_qe_init(*((struct list_head **) _qe)); \
76 } else { \
77 *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
78 } \
79}
80
81#endif /* __CNA_H__ */
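
The queue macros above cast the caller's element pointer to struct list_head **, so they only work when the embedded list_head is the first member of the element; bfa_q_deq additionally NULLs the dequeued element's links via bfa_q_qe_init so a stale element cannot be unlinked twice. A usage sketch under that layout assumption, in a file that includes cna.h (demo_* names are illustrative):

	struct demo_wqe {
		struct list_head qe;	/* must be first for the macro casts to work */
		int payload;
	};

	static void demo_drain(struct list_head *q)
	{
		struct demo_wqe *wqe;

		bfa_q_deq(q, &wqe);		/* sets wqe to NULL once q is empty */
		while (wqe) {
			pr_info("payload %d\n", wqe->payload);
			/* caller would normally free or recycle wqe here */
			bfa_q_deq(q, &wqe);
		}
	}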
diff --git a/drivers/net/bna/cna_fwimg.c b/drivers/net/bna/cna_fwimg.c
new file mode 100644
index 000000000000..0bd1d3790a27
--- /dev/null
+++ b/drivers/net/bna/cna_fwimg.c
@@ -0,0 +1,64 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/firmware.h>
19#include "cna.h"
20
21const struct firmware *bfi_fw;
22static u32 *bfi_image_ct_cna;
23static u32 bfi_image_ct_cna_size;
24
25u32 *
26cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
27 u32 *bfi_image_size, char *fw_name)
28{
29 const struct firmware *fw;
30
31 if (request_firmware(&fw, fw_name, &pdev->dev)) {
32 pr_alert("Can't locate firmware %s\n", fw_name);
33 goto error;
34 }
35
36 *bfi_image = (u32 *)fw->data;
37 *bfi_image_size = fw->size/sizeof(u32);
38 bfi_fw = fw;
39
40 return *bfi_image;
41error:
42 return NULL;
43}
44
45u32 *
46cna_get_firmware_buf(struct pci_dev *pdev)
47{
48 if (bfi_image_ct_cna_size == 0)
49 cna_read_firmware(pdev, &bfi_image_ct_cna,
50 &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
51 return bfi_image_ct_cna;
52}
53
54u32 *
55bfa_cb_image_get_chunk(int type, u32 off)
56{
57 return (u32 *)(bfi_image_ct_cna + off);
58}
59
60u32
61bfa_cb_image_get_size(int type)
62{
63 return bfi_image_ct_cna_size;
64}
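
cna_get_firmware_buf() caches the image on first use, and the bfa_cb_image_get_* callbacks then let the IOC code walk it by 32-bit-word offset — note that bfa_cb_image_get_chunk() adds off to a u32 pointer, so both the offset and the reported size are in words, not bytes. A consumer sketch under those units, assuming cna.h is included (demo_* names and the stride are illustrative):

	static void demo_dump_fw(struct pci_dev *pdev)
	{
		u32 size, off;

		if (!cna_get_firmware_buf(pdev))
			return;			/* request_firmware() failed */

		size = bfa_cb_image_get_size(0);	/* in 32-bit words */
		for (off = 0; off < size; off += 64) {
			u32 *chunk = bfa_cb_image_get_chunk(0, off);

			pr_debug("fw[%u] = 0x%08x\n", off, chunk[0]);
		}
	}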
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index e6a803f1c507..4ff76e38e788 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -49,6 +49,7 @@
49#include <linux/cache.h> 49#include <linux/cache.h>
50#include <linux/firmware.h> 50#include <linux/firmware.h>
51#include <linux/log2.h> 51#include <linux/log2.h>
52#include <linux/aer.h>
52 53
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 54#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1 55#define BCM_CNIC 1
@@ -3217,7 +3218,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3217 3218
3218 } 3219 }
3219 3220
3220 skb->ip_summed = CHECKSUM_NONE; 3221 skb_checksum_none_assert(skb);
3221 if (bp->rx_csum && 3222 if (bp->rx_csum &&
3222 (status & (L2_FHDR_STATUS_TCP_SEGMENT | 3223 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3223 L2_FHDR_STATUS_UDP_DATAGRAM))) { 3224 L2_FHDR_STATUS_UDP_DATAGRAM))) {
@@ -7890,6 +7891,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7890 int rc, i, j; 7891 int rc, i, j;
7891 u32 reg; 7892 u32 reg;
7892 u64 dma_mask, persist_dma_mask; 7893 u64 dma_mask, persist_dma_mask;
7894 int err;
7893 7895
7894 SET_NETDEV_DEV(dev, &pdev->dev); 7896 SET_NETDEV_DEV(dev, &pdev->dev);
7895 bp = netdev_priv(dev); 7897 bp = netdev_priv(dev);
@@ -7925,6 +7927,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7925 goto err_out_disable; 7927 goto err_out_disable;
7926 } 7928 }
7927 7929
7930 /* AER (Advanced Error Reporting) hooks */
7931 err = pci_enable_pcie_error_reporting(pdev);
7932 if (err) {
7933 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
7934 "0x%x\n", err);
7935 /* non-fatal, continue */
7936 }
7937
7928 pci_set_master(pdev); 7938 pci_set_master(pdev);
7929 pci_save_state(pdev); 7939 pci_save_state(pdev);
7930 7940
@@ -8246,6 +8256,7 @@ err_out_unmap:
8246 } 8256 }
8247 8257
8248err_out_release: 8258err_out_release:
8259 pci_disable_pcie_error_reporting(pdev);
8249 pci_release_regions(pdev); 8260 pci_release_regions(pdev);
8250 8261
8251err_out_disable: 8262err_out_disable:
@@ -8436,6 +8447,9 @@ bnx2_remove_one(struct pci_dev *pdev)
8436 kfree(bp->temp_stats_blk); 8447 kfree(bp->temp_stats_blk);
8437 8448
8438 free_netdev(dev); 8449 free_netdev(dev);
8450
8451 pci_disable_pcie_error_reporting(pdev);
8452
8439 pci_release_regions(pdev); 8453 pci_release_regions(pdev);
8440 pci_disable_device(pdev); 8454 pci_disable_device(pdev);
8441 pci_set_drvdata(pdev, NULL); 8455 pci_set_drvdata(pdev, NULL);
@@ -8527,25 +8541,35 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8527{ 8541{
8528 struct net_device *dev = pci_get_drvdata(pdev); 8542 struct net_device *dev = pci_get_drvdata(pdev);
8529 struct bnx2 *bp = netdev_priv(dev); 8543 struct bnx2 *bp = netdev_priv(dev);
8544 pci_ers_result_t result;
8545 int err;
8530 8546
8531 rtnl_lock(); 8547 rtnl_lock();
8532 if (pci_enable_device(pdev)) { 8548 if (pci_enable_device(pdev)) {
8533 dev_err(&pdev->dev, 8549 dev_err(&pdev->dev,
8534 "Cannot re-enable PCI device after reset\n"); 8550 "Cannot re-enable PCI device after reset\n");
8535 rtnl_unlock(); 8551 result = PCI_ERS_RESULT_DISCONNECT;
8536 return PCI_ERS_RESULT_DISCONNECT; 8552 } else {
8553 pci_set_master(pdev);
8554 pci_restore_state(pdev);
8555 pci_save_state(pdev);
8556
8557 if (netif_running(dev)) {
8558 bnx2_set_power_state(bp, PCI_D0);
8559 bnx2_init_nic(bp, 1);
8560 }
8561 result = PCI_ERS_RESULT_RECOVERED;
8537 } 8562 }
8538 pci_set_master(pdev); 8563 rtnl_unlock();
8539 pci_restore_state(pdev);
8540 pci_save_state(pdev);
8541 8564
8542 if (netif_running(dev)) { 8565 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8543 bnx2_set_power_state(bp, PCI_D0); 8566 if (err) {
8544 bnx2_init_nic(bp, 1); 8567 dev_err(&pdev->dev,
8568 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8569 err); /* non-fatal, continue */
8545 } 8570 }
8546 8571
8547 rtnl_unlock(); 8572 return result;
8548 return PCI_ERS_RESULT_RECOVERED;
8549} 8573}
8550 8574
8551/** 8575/**
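
The AER calls added above only pay off because bnx2 already registers PCI error handlers (bnx2_io_error_detected and friends); pci_cleanup_aer_uncorrect_error_status() in slot_reset clears the logged uncorrectable status so the same error is not immediately re-reported after recovery. For reference, a minimal sketch of how such handlers hang off a pci_driver in this kernel era (demo_* names and the stub bodies are illustrative):

	#include <linux/pci.h>

	static pci_ers_result_t demo_io_error_detected(struct pci_dev *pdev,
						       pci_channel_state_t state)
	{
		/* quiesce the device, then ask the core for a slot reset */
		return PCI_ERS_RESULT_NEED_RESET;
	}

	static pci_ers_result_t demo_io_slot_reset(struct pci_dev *pdev)
	{
		if (pci_enable_device(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	static void demo_io_resume(struct pci_dev *pdev)
	{
		/* bring the interface back up */
	}

	static struct pci_error_handlers demo_err_handler = {
		.error_detected	= demo_io_error_detected,
		.slot_reset	= demo_io_slot_reset,
		.resume		= demo_io_resume,
	};
	/* hooked up via the .err_handler member of struct pci_driver */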
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 0c2d96ed561c..b6aaf22a1b84 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.52.53-4" 23#define DRV_MODULE_VERSION "1.52.53-6"
24#define DRV_MODULE_RELDATE "2010/16/08" 24#define DRV_MODULE_RELDATE "2010/09/07"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -566,13 +566,13 @@ struct bnx2x_common {
566struct bnx2x_port { 566struct bnx2x_port {
567 u32 pmf; 567 u32 pmf;
568 568
569 u32 link_config; 569 u32 link_config[LINK_CONFIG_SIZE];
570 570
571 u32 supported; 571 u32 supported[LINK_CONFIG_SIZE];
572/* link settings - missing defines */ 572/* link settings - missing defines */
573#define SUPPORTED_2500baseX_Full (1 << 15) 573#define SUPPORTED_2500baseX_Full (1 << 15)
574 574
575 u32 advertising; 575 u32 advertising[LINK_CONFIG_SIZE];
576/* link settings - missing defines */ 576/* link settings - missing defines */
577#define ADVERTISED_2500baseX_Full (1 << 15) 577#define ADVERTISED_2500baseX_Full (1 << 15)
578 578
@@ -931,7 +931,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
931int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); 931int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
932int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 932int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
933int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 933int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
934u32 bnx2x_fw_command(struct bnx2x *bp, u32 command); 934u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
935void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); 935void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
936void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 936void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
937 u32 addr, u32 len); 937 u32 addr, u32 len);
@@ -939,7 +939,7 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp);
939int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 939int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
940 u32 data_hi, u32 data_lo, int common); 940 u32 data_hi, u32 data_lo, int common);
941void bnx2x_update_coalesce(struct bnx2x *bp); 941void bnx2x_update_coalesce(struct bnx2x *bp);
942 942int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
943static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 943static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
944 int wait) 944 int wait)
945{ 945{
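
The scalar link fields turning into LINK_CONFIG_SIZE arrays is the core of the dual-media rework: each external PHY keeps its own supported/advertising/requested-speed entry, and the new bnx2x_get_link_cfg_idx() picks which entry is currently live. A hedged sketch of the resulting access pattern (the index logic here is deliberately simplified and illustrative, not the driver's actual helper):

	#include <linux/types.h>

	#define DEMO_LINK_CONFIG_SIZE	2	/* one slot per possible external PHY */

	struct demo_port {
		u32 supported[DEMO_LINK_CONFIG_SIZE];
		u32 advertising[DEMO_LINK_CONFIG_SIZE];
	};

	/* simplified: index 1 iff the second PHY is the selected one */
	static int demo_link_cfg_idx(u32 multi_phy_config)
	{
		return (multi_phy_config & 0x7) == 0x2 ? 1 : 0;
	}

	static u32 demo_active_advertising(const struct demo_port *port,
					   u32 multi_phy_config)
	{
		return port->advertising[demo_link_cfg_idx(multi_phy_config)];
	}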
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 02bf710629a3..7f1d291eaaa5 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -20,6 +20,7 @@
20#include <linux/ip.h> 20#include <linux/ip.h>
21#include <linux/ipv6.h> 21#include <linux/ipv6.h>
22#include <net/ip6_checksum.h> 22#include <net/ip6_checksum.h>
23#include <linux/firmware.h>
23#include "bnx2x_cmn.h" 24#include "bnx2x_cmn.h"
24 25
25#ifdef BCM_VLAN 26#ifdef BCM_VLAN
@@ -622,7 +623,7 @@ reuse_rx:
622 /* Set Toeplitz hash for a none-LRO skb */ 623 /* Set Toeplitz hash for a none-LRO skb */
623 bnx2x_set_skb_rxhash(bp, cqe, skb); 624 bnx2x_set_skb_rxhash(bp, cqe, skb);
624 625
625 skb->ip_summed = CHECKSUM_NONE; 626 skb_checksum_none_assert(skb);
626 if (bp->rx_csum) { 627 if (bp->rx_csum) {
627 if (likely(BNX2X_RX_CSUM_OK(cqe))) 628 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY; 629 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1206,12 +1207,27 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
1206 return rc; 1207 return rc;
1207} 1208}
1208 1209
1210static void bnx2x_release_firmware(struct bnx2x *bp)
1211{
1212 kfree(bp->init_ops_offsets);
1213 kfree(bp->init_ops);
1214 kfree(bp->init_data);
1215 release_firmware(bp->firmware);
1216}
1217
1209/* must be called with rtnl_lock */ 1218/* must be called with rtnl_lock */
1210int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1219int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1211{ 1220{
1212 u32 load_code; 1221 u32 load_code;
1213 int i, rc; 1222 int i, rc;
1214 1223
1224 /* Set init arrays */
1225 rc = bnx2x_init_firmware(bp);
1226 if (rc) {
1227 BNX2X_ERR("Error loading firmware\n");
1228 return rc;
1229 }
1230
1215#ifdef BNX2X_STOP_ON_ERROR 1231#ifdef BNX2X_STOP_ON_ERROR
1216 if (unlikely(bp->panic)) 1232 if (unlikely(bp->panic))
1217 return -EPERM; 1233 return -EPERM;
@@ -1267,7 +1283,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1267 common blocks should be initialized, otherwise - not 1283 common blocks should be initialized, otherwise - not
1268 */ 1284 */
1269 if (!BP_NOMCP(bp)) { 1285 if (!BP_NOMCP(bp)) {
1270 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 1286 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1271 if (!load_code) { 1287 if (!load_code) {
1272 BNX2X_ERR("MCP response failure, aborting\n"); 1288 BNX2X_ERR("MCP response failure, aborting\n");
1273 rc = -EBUSY; 1289 rc = -EBUSY;
@@ -1306,9 +1322,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1306 rc = bnx2x_init_hw(bp, load_code); 1322 rc = bnx2x_init_hw(bp, load_code);
1307 if (rc) { 1323 if (rc) {
1308 BNX2X_ERR("HW init failed, aborting\n"); 1324 BNX2X_ERR("HW init failed, aborting\n");
1309 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 1325 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1310 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); 1326 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1311 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 1327 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1312 goto load_error2; 1328 goto load_error2;
1313 } 1329 }
1314 1330
@@ -1323,7 +1339,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1323 1339
1324 /* Send LOAD_DONE command to MCP */ 1340 /* Send LOAD_DONE command to MCP */
1325 if (!BP_NOMCP(bp)) { 1341 if (!BP_NOMCP(bp)) {
1326 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 1342 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1327 if (!load_code) { 1343 if (!load_code) {
1328 BNX2X_ERR("MCP response failure, aborting\n"); 1344 BNX2X_ERR("MCP response failure, aborting\n");
1329 rc = -EBUSY; 1345 rc = -EBUSY;
@@ -1427,6 +1443,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1427#endif 1443#endif
1428 bnx2x_inc_load_cnt(bp); 1444 bnx2x_inc_load_cnt(bp);
1429 1445
1446 bnx2x_release_firmware(bp);
1447
1430 return 0; 1448 return 0;
1431 1449
1432#ifdef BCM_CNIC 1450#ifdef BCM_CNIC
@@ -1437,8 +1455,8 @@ load_error4:
1437load_error3: 1455load_error3:
1438 bnx2x_int_disable_sync(bp, 1); 1456 bnx2x_int_disable_sync(bp, 1);
1439 if (!BP_NOMCP(bp)) { 1457 if (!BP_NOMCP(bp)) {
1440 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); 1458 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1441 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 1459 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1442 } 1460 }
1443 bp->port.pmf = 0; 1461 bp->port.pmf = 0;
1444 /* Free SKBs, SGEs, TPA pool and driver internals */ 1462 /* Free SKBs, SGEs, TPA pool and driver internals */
@@ -1454,6 +1472,8 @@ load_error1:
1454 netif_napi_del(&bnx2x_fp(bp, i, napi)); 1472 netif_napi_del(&bnx2x_fp(bp, i, napi));
1455 bnx2x_free_mem(bp); 1473 bnx2x_free_mem(bp);
1456 1474
1475 bnx2x_release_firmware(bp);
1476
1457 return rc; 1477 return rc;
1458} 1478}
1459 1479
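
bnx2x_release_firmware() frees exactly what bnx2x_init_firmware() allocates (the parsed kmalloc buffers plus the struct firmware), and nic_load now brackets both the success path and the common error exit with the pair, so the raw image never outlives a load attempt. The general acquire/parse/release pattern, as a sketch with a single parsed buffer (demo_* names are illustrative):

	#include <linux/firmware.h>
	#include <linux/slab.h>

	struct demo_fw {
		const struct firmware *fw;
		u32 *parsed;		/* driver-owned copy used during init */
	};

	static int demo_init_firmware(struct demo_fw *d, struct device *dev)
	{
		int rc = request_firmware(&d->fw, "demo.bin", dev);

		if (rc)
			return rc;

		d->parsed = kmemdup(d->fw->data, d->fw->size, GFP_KERNEL);
		if (!d->parsed) {
			release_firmware(d->fw);
			return -ENOMEM;
		}
		return 0;
	}

	static void demo_release_firmware(struct demo_fw *d)
	{
		kfree(d->parsed);	/* kfree(NULL) is a no-op */
		release_firmware(d->fw);
	}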
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index d1979b1a7ed2..d1e6a8c977d1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -49,10 +49,11 @@ void bnx2x_link_set(struct bnx2x *bp);
49 * Query link status 49 * Query link status
50 * 50 *
51 * @param bp 51 * @param bp
52 * @param is_serdes
52 * 53 *
53 * @return 0 - link is UP 54 * @return 0 - link is UP
54 */ 55 */
55u8 bnx2x_link_test(struct bnx2x *bp); 56u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
56 57
57/** 58/**
58 * Handles link status change 59 * Handles link status change
@@ -115,6 +116,15 @@ void bnx2x_int_enable(struct bnx2x *bp);
115void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 116void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
116 117
117/** 118/**
119 * Loads device firmware
120 *
121 * @param bp
122 *
123 * @return int
124 */
125int bnx2x_init_firmware(struct bnx2x *bp);
126
127/**
118 * Init HW blocks according to current initialization stage: 128 * Init HW blocks according to current initialization stage:
119 * COMMON, PORT or FUNCTION. 129 * COMMON, PORT or FUNCTION.
120 * 130 *
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 8b75b05e34c5..6f939c5a0089 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -29,9 +29,12 @@
29static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 29static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
30{ 30{
31 struct bnx2x *bp = netdev_priv(dev); 31 struct bnx2x *bp = netdev_priv(dev);
32 32 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
33 cmd->supported = bp->port.supported; 33 /* Dual Media boards present all available port types */
34 cmd->advertising = bp->port.advertising; 34 cmd->supported = bp->port.supported[cfg_idx] |
35 (bp->port.supported[cfg_idx ^ 1] &
36 (SUPPORTED_TP | SUPPORTED_FIBRE));
37 cmd->advertising = bp->port.advertising[cfg_idx];
35 38
36 if ((bp->state == BNX2X_STATE_OPEN) && 39 if ((bp->state == BNX2X_STATE_OPEN) &&
37 !(bp->flags & MF_FUNC_DIS) && 40 !(bp->flags & MF_FUNC_DIS) &&
@@ -48,47 +51,21 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
48 cmd->speed = vn_max_rate; 51 cmd->speed = vn_max_rate;
49 } 52 }
50 } else { 53 } else {
51 cmd->speed = -1; 54 cmd->speed = bp->link_params.req_line_speed[cfg_idx];
52 cmd->duplex = -1; 55 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
53 } 56 }
54 57
55 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { 58 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
56 u32 ext_phy_type =
57 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
58
59 switch (ext_phy_type) {
60 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
61 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
62 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
63 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
64 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
65 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
66 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
67 cmd->port = PORT_FIBRE;
68 break;
69
70 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
71 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
72 cmd->port = PORT_TP;
73 break;
74
75 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
76 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
77 bp->link_params.ext_phy_config);
78 break;
79
80 default:
81 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
82 bp->link_params.ext_phy_config);
83 break;
84 }
85 } else
86 cmd->port = PORT_TP; 59 cmd->port = PORT_TP;
60 else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
61 cmd->port = PORT_FIBRE;
62 else
63 BNX2X_ERR("XGXS PHY Failure detected\n");
87 64
88 cmd->phy_address = bp->mdio.prtad; 65 cmd->phy_address = bp->mdio.prtad;
89 cmd->transceiver = XCVR_INTERNAL; 66 cmd->transceiver = XCVR_INTERNAL;
90 67
91 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) 68 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
92 cmd->autoneg = AUTONEG_ENABLE; 69 cmd->autoneg = AUTONEG_ENABLE;
93 else 70 else
94 cmd->autoneg = AUTONEG_DISABLE; 71 cmd->autoneg = AUTONEG_DISABLE;
@@ -110,7 +87,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
110static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 87static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
111{ 88{
112 struct bnx2x *bp = netdev_priv(dev); 89 struct bnx2x *bp = netdev_priv(dev);
113 u32 advertising; 90 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
114 91
115 if (IS_E1HMF(bp)) 92 if (IS_E1HMF(bp))
116 return 0; 93 return 0;
@@ -123,26 +100,81 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
123 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 100 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
124 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 101 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
125 102
103 cfg_idx = bnx2x_get_link_cfg_idx(bp);
104 old_multi_phy_config = bp->link_params.multi_phy_config;
105 switch (cmd->port) {
106 case PORT_TP:
107 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
108 break; /* no port change */
109
110 if (!(bp->port.supported[0] & SUPPORTED_TP ||
111 bp->port.supported[1] & SUPPORTED_TP)) {
112 DP(NETIF_MSG_LINK, "Unsupported port type\n");
113 return -EINVAL;
114 }
115 bp->link_params.multi_phy_config &=
116 ~PORT_HW_CFG_PHY_SELECTION_MASK;
117 if (bp->link_params.multi_phy_config &
118 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
119 bp->link_params.multi_phy_config |=
120 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
121 else
122 bp->link_params.multi_phy_config |=
123 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
124 break;
125 case PORT_FIBRE:
126 if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
127 break; /* no port change */
128
129 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
130 bp->port.supported[1] & SUPPORTED_FIBRE)) {
131 DP(NETIF_MSG_LINK, "Unsupported port type\n");
132 return -EINVAL;
133 }
134 bp->link_params.multi_phy_config &=
135 ~PORT_HW_CFG_PHY_SELECTION_MASK;
136 if (bp->link_params.multi_phy_config &
137 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
138 bp->link_params.multi_phy_config |=
139 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
140 else
141 bp->link_params.multi_phy_config |=
142 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
143 break;
144 default:
145 DP(NETIF_MSG_LINK, "Unsupported port type\n");
146 return -EINVAL;
147 }
 148 /* Save new config in case command completes successfully */
149 new_multi_phy_config = bp->link_params.multi_phy_config;
150 /* Get the new cfg_idx */
151 cfg_idx = bnx2x_get_link_cfg_idx(bp);
152 /* Restore old config in case command failed */
153 bp->link_params.multi_phy_config = old_multi_phy_config;
154 DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
155
126 if (cmd->autoneg == AUTONEG_ENABLE) { 156 if (cmd->autoneg == AUTONEG_ENABLE) {
127 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 157 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
128 DP(NETIF_MSG_LINK, "Autoneg not supported\n"); 158 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
129 return -EINVAL; 159 return -EINVAL;
130 } 160 }
131 161
132 /* advertise the requested speed and duplex if supported */ 162 /* advertise the requested speed and duplex if supported */
133 cmd->advertising &= bp->port.supported; 163 cmd->advertising &= bp->port.supported[cfg_idx];
134 164
135 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 165 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
136 bp->link_params.req_duplex = DUPLEX_FULL; 166 bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
137 bp->port.advertising |= (ADVERTISED_Autoneg | 167 bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
138 cmd->advertising); 168 cmd->advertising);
139 169
140 } else { /* forced speed */ 170 } else { /* forced speed */
141 /* advertise the requested speed and duplex if supported */ 171 /* advertise the requested speed and duplex if supported */
142 switch (cmd->speed) { 172 u32 speed = cmd->speed;
173 speed |= (cmd->speed_hi << 16);
174 switch (speed) {
143 case SPEED_10: 175 case SPEED_10:
144 if (cmd->duplex == DUPLEX_FULL) { 176 if (cmd->duplex == DUPLEX_FULL) {
145 if (!(bp->port.supported & 177 if (!(bp->port.supported[cfg_idx] &
146 SUPPORTED_10baseT_Full)) { 178 SUPPORTED_10baseT_Full)) {
147 DP(NETIF_MSG_LINK, 179 DP(NETIF_MSG_LINK,
148 "10M full not supported\n"); 180 "10M full not supported\n");
@@ -152,7 +184,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
152 advertising = (ADVERTISED_10baseT_Full | 184 advertising = (ADVERTISED_10baseT_Full |
153 ADVERTISED_TP); 185 ADVERTISED_TP);
154 } else { 186 } else {
155 if (!(bp->port.supported & 187 if (!(bp->port.supported[cfg_idx] &
156 SUPPORTED_10baseT_Half)) { 188 SUPPORTED_10baseT_Half)) {
157 DP(NETIF_MSG_LINK, 189 DP(NETIF_MSG_LINK,
158 "10M half not supported\n"); 190 "10M half not supported\n");
@@ -166,7 +198,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
166 198
167 case SPEED_100: 199 case SPEED_100:
168 if (cmd->duplex == DUPLEX_FULL) { 200 if (cmd->duplex == DUPLEX_FULL) {
169 if (!(bp->port.supported & 201 if (!(bp->port.supported[cfg_idx] &
170 SUPPORTED_100baseT_Full)) { 202 SUPPORTED_100baseT_Full)) {
171 DP(NETIF_MSG_LINK, 203 DP(NETIF_MSG_LINK,
172 "100M full not supported\n"); 204 "100M full not supported\n");
@@ -176,7 +208,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
176 advertising = (ADVERTISED_100baseT_Full | 208 advertising = (ADVERTISED_100baseT_Full |
177 ADVERTISED_TP); 209 ADVERTISED_TP);
178 } else { 210 } else {
179 if (!(bp->port.supported & 211 if (!(bp->port.supported[cfg_idx] &
180 SUPPORTED_100baseT_Half)) { 212 SUPPORTED_100baseT_Half)) {
181 DP(NETIF_MSG_LINK, 213 DP(NETIF_MSG_LINK,
182 "100M half not supported\n"); 214 "100M half not supported\n");
@@ -194,7 +226,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
194 return -EINVAL; 226 return -EINVAL;
195 } 227 }
196 228
197 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { 229 if (!(bp->port.supported[cfg_idx] &
230 SUPPORTED_1000baseT_Full)) {
198 DP(NETIF_MSG_LINK, "1G full not supported\n"); 231 DP(NETIF_MSG_LINK, "1G full not supported\n");
199 return -EINVAL; 232 return -EINVAL;
200 } 233 }
@@ -210,7 +243,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
210 return -EINVAL; 243 return -EINVAL;
211 } 244 }
212 245
213 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { 246 if (!(bp->port.supported[cfg_idx]
247 & SUPPORTED_2500baseX_Full)) {
214 DP(NETIF_MSG_LINK, 248 DP(NETIF_MSG_LINK,
215 "2.5G full not supported\n"); 249 "2.5G full not supported\n");
216 return -EINVAL; 250 return -EINVAL;
@@ -226,7 +260,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
226 return -EINVAL; 260 return -EINVAL;
227 } 261 }
228 262
229 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { 263 if (!(bp->port.supported[cfg_idx]
264 & SUPPORTED_10000baseT_Full)) {
230 DP(NETIF_MSG_LINK, "10G full not supported\n"); 265 DP(NETIF_MSG_LINK, "10G full not supported\n");
231 return -EINVAL; 266 return -EINVAL;
232 } 267 }
@@ -236,20 +271,23 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
236 break; 271 break;
237 272
238 default: 273 default:
239 DP(NETIF_MSG_LINK, "Unsupported speed\n"); 274 DP(NETIF_MSG_LINK, "Unsupported speed %d\n", speed);
240 return -EINVAL; 275 return -EINVAL;
241 } 276 }
242 277
243 bp->link_params.req_line_speed = cmd->speed; 278 bp->link_params.req_line_speed[cfg_idx] = speed;
244 bp->link_params.req_duplex = cmd->duplex; 279 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
245 bp->port.advertising = advertising; 280 bp->port.advertising[cfg_idx] = advertising;
246 } 281 }
247 282
248 DP(NETIF_MSG_LINK, "req_line_speed %d\n" 283 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
249 DP_LEVEL " req_duplex %d advertising 0x%x\n", 284 DP_LEVEL " req_duplex %d advertising 0x%x\n",
250 bp->link_params.req_line_speed, bp->link_params.req_duplex, 285 bp->link_params.req_line_speed[cfg_idx],
251 bp->port.advertising); 286 bp->link_params.req_duplex[cfg_idx],
287 bp->port.advertising[cfg_idx]);
252 288
289 /* Set new config */
290 bp->link_params.multi_phy_config = new_multi_phy_config;
253 if (netif_running(dev)) { 291 if (netif_running(dev)) {
254 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 292 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
255 bnx2x_link_set(bp); 293 bnx2x_link_set(bp);
@@ -811,7 +849,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
811 struct bnx2x *bp = netdev_priv(dev); 849 struct bnx2x *bp = netdev_priv(dev);
812 int port = BP_PORT(bp); 850 int port = BP_PORT(bp);
813 int rc = 0; 851 int rc = 0;
814 852 u32 ext_phy_config;
815 if (!netif_running(dev)) 853 if (!netif_running(dev))
816 return -EAGAIN; 854 return -EAGAIN;
817 855
@@ -827,6 +865,10 @@ static int bnx2x_set_eeprom(struct net_device *dev,
827 !bp->port.pmf) 865 !bp->port.pmf)
828 return -EINVAL; 866 return -EINVAL;
829 867
868 ext_phy_config =
869 SHMEM_RD(bp,
870 dev_info.port_hw_config[port].external_phy_config);
871
830 if (eeprom->magic == 0x50485950) { 872 if (eeprom->magic == 0x50485950) {
831 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ 873 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
832 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 874 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -834,7 +876,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
834 bnx2x_acquire_phy_lock(bp); 876 bnx2x_acquire_phy_lock(bp);
835 rc |= bnx2x_link_reset(&bp->link_params, 877 rc |= bnx2x_link_reset(&bp->link_params,
836 &bp->link_vars, 0); 878 &bp->link_vars, 0);
837 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == 879 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
838 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) 880 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
839 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 881 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
840 MISC_REGISTERS_GPIO_HIGH, port); 882 MISC_REGISTERS_GPIO_HIGH, port);
@@ -855,10 +897,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
855 } 897 }
856 } else if (eeprom->magic == 0x53985943) { 898 } else if (eeprom->magic == 0x53985943) {
857 /* 'PHYC' (0x53985943): PHY FW upgrade completed */ 899 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
858 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == 900 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
859 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { 901 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
860 u8 ext_phy_addr =
861 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
862 902
863 /* DSP Remove Download Mode */ 903 /* DSP Remove Download Mode */
864 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 904 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
@@ -866,7 +906,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
866 906
867 bnx2x_acquire_phy_lock(bp); 907 bnx2x_acquire_phy_lock(bp);
868 908
869 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); 909 bnx2x_sfx7101_sp_sw_reset(bp,
910 &bp->link_params.phy[EXT_PHY1]);
870 911
871 /* wait 0.5 sec to allow it to run */ 912 /* wait 0.5 sec to allow it to run */
872 msleep(500); 913 msleep(500);
@@ -959,10 +1000,9 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
959 struct ethtool_pauseparam *epause) 1000 struct ethtool_pauseparam *epause)
960{ 1001{
961 struct bnx2x *bp = netdev_priv(dev); 1002 struct bnx2x *bp = netdev_priv(dev);
962 1003 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
963 epause->autoneg = (bp->link_params.req_flow_ctrl == 1004 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
964 BNX2X_FLOW_CTRL_AUTO) && 1005 BNX2X_FLOW_CTRL_AUTO);
965 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
966 1006
967 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == 1007 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
968 BNX2X_FLOW_CTRL_RX); 1008 BNX2X_FLOW_CTRL_RX);
@@ -978,7 +1018,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
978 struct ethtool_pauseparam *epause) 1018 struct ethtool_pauseparam *epause)
979{ 1019{
980 struct bnx2x *bp = netdev_priv(dev); 1020 struct bnx2x *bp = netdev_priv(dev);
981 1021 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
982 if (IS_E1HMF(bp)) 1022 if (IS_E1HMF(bp))
983 return 0; 1023 return 0;
984 1024
@@ -986,29 +1026,31 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
986 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", 1026 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
987 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); 1027 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
988 1028
989 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; 1029 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
990 1030
991 if (epause->rx_pause) 1031 if (epause->rx_pause)
992 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX; 1032 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
993 1033
994 if (epause->tx_pause) 1034 if (epause->tx_pause)
995 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX; 1035 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
996 1036
997 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) 1037 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
998 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; 1038 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
999 1039
1000 if (epause->autoneg) { 1040 if (epause->autoneg) {
1001 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 1041 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
1002 DP(NETIF_MSG_LINK, "autoneg not supported\n"); 1042 DP(NETIF_MSG_LINK, "autoneg not supported\n");
1003 return -EINVAL; 1043 return -EINVAL;
1004 } 1044 }
1005 1045
1006 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) 1046 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
1007 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; 1047 bp->link_params.req_flow_ctrl[cfg_idx] =
1048 BNX2X_FLOW_CTRL_AUTO;
1049 }
1008 } 1050 }
1009 1051
1010 DP(NETIF_MSG_LINK, 1052 DP(NETIF_MSG_LINK,
1011 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); 1053 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
1012 1054
1013 if (netif_running(dev)) { 1055 if (netif_running(dev)) {
1014 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1056 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -1272,12 +1314,12 @@ test_mem_exit:
1272 return rc; 1314 return rc;
1273} 1315}
1274 1316
1275static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) 1317static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
1276{ 1318{
1277 int cnt = 1000; 1319 int cnt = 1000;
1278 1320
1279 if (link_up) 1321 if (link_up)
1280 while (bnx2x_link_test(bp) && cnt--) 1322 while (bnx2x_link_test(bp, is_serdes) && cnt--)
1281 msleep(10); 1323 msleep(10);
1282} 1324}
1283 1325
@@ -1304,7 +1346,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1304 /* check the loopback mode */ 1346 /* check the loopback mode */
1305 switch (loopback_mode) { 1347 switch (loopback_mode) {
1306 case BNX2X_PHY_LOOPBACK: 1348 case BNX2X_PHY_LOOPBACK:
1307 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10) 1349 if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
1308 return -EINVAL; 1350 return -EINVAL;
1309 break; 1351 break;
1310 case BNX2X_MAC_LOOPBACK: 1352 case BNX2X_MAC_LOOPBACK:
@@ -1549,7 +1591,7 @@ static void bnx2x_self_test(struct net_device *dev,
1549 struct ethtool_test *etest, u64 *buf) 1591 struct ethtool_test *etest, u64 *buf)
1550{ 1592{
1551 struct bnx2x *bp = netdev_priv(dev); 1593 struct bnx2x *bp = netdev_priv(dev);
1552 1594 u8 is_serdes;
1553 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 1595 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1554 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 1596 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1555 etest->flags |= ETH_TEST_FL_FAILED; 1597 etest->flags |= ETH_TEST_FL_FAILED;
@@ -1564,6 +1606,7 @@ static void bnx2x_self_test(struct net_device *dev,
1564 /* offline tests are not supported in MF mode */ 1606 /* offline tests are not supported in MF mode */
1565 if (IS_E1HMF(bp)) 1607 if (IS_E1HMF(bp))
1566 etest->flags &= ~ETH_TEST_FL_OFFLINE; 1608 etest->flags &= ~ETH_TEST_FL_OFFLINE;
1609 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
1567 1610
1568 if (etest->flags & ETH_TEST_FL_OFFLINE) { 1611 if (etest->flags & ETH_TEST_FL_OFFLINE) {
1569 int port = BP_PORT(bp); 1612 int port = BP_PORT(bp);
@@ -1575,11 +1618,12 @@ static void bnx2x_self_test(struct net_device *dev,
1575 /* disable input for TX port IF */ 1618 /* disable input for TX port IF */
1576 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); 1619 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
1577 1620
1578 link_up = (bnx2x_link_test(bp) == 0); 1621 link_up = bp->link_vars.link_up;
1622
1579 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 1623 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1580 bnx2x_nic_load(bp, LOAD_DIAG); 1624 bnx2x_nic_load(bp, LOAD_DIAG);
1581 /* wait until link state is restored */ 1625 /* wait until link state is restored */
1582 bnx2x_wait_for_link(bp, link_up); 1626 bnx2x_wait_for_link(bp, link_up, is_serdes);
1583 1627
1584 if (bnx2x_test_registers(bp) != 0) { 1628 if (bnx2x_test_registers(bp) != 0) {
1585 buf[0] = 1; 1629 buf[0] = 1;
@@ -1600,7 +1644,7 @@ static void bnx2x_self_test(struct net_device *dev,
1600 1644
1601 bnx2x_nic_load(bp, LOAD_NORMAL); 1645 bnx2x_nic_load(bp, LOAD_NORMAL);
1602 /* wait until link state is restored */ 1646 /* wait until link state is restored */
1603 bnx2x_wait_for_link(bp, link_up); 1647 bnx2x_wait_for_link(bp, link_up, is_serdes);
1604 } 1648 }
1605 if (bnx2x_test_nvram(bp) != 0) { 1649 if (bnx2x_test_nvram(bp) != 0) {
1606 buf[3] = 1; 1650 buf[3] = 1;
@@ -1611,7 +1655,7 @@ static void bnx2x_self_test(struct net_device *dev,
1611 etest->flags |= ETH_TEST_FL_FAILED; 1655 etest->flags |= ETH_TEST_FL_FAILED;
1612 } 1656 }
1613 if (bp->port.pmf) 1657 if (bp->port.pmf)
1614 if (bnx2x_link_test(bp) != 0) { 1658 if (bnx2x_link_test(bp, is_serdes) != 0) {
1615 buf[5] = 1; 1659 buf[5] = 1;
1616 etest->flags |= ETH_TEST_FL_FAILED; 1660 etest->flags |= ETH_TEST_FL_FAILED;
1617 } 1661 }
@@ -1910,10 +1954,11 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
1910 1954
1911 for (i = 0; i < (data * 2); i++) { 1955 for (i = 0; i < (data * 2); i++) {
1912 if ((i % 2) == 0) 1956 if ((i % 2) == 0)
1913 bnx2x_set_led(&bp->link_params, LED_MODE_OPER, 1957 bnx2x_set_led(&bp->link_params, &bp->link_vars,
1914 SPEED_1000); 1958 LED_MODE_OPER, SPEED_1000);
1915 else 1959 else
1916 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0); 1960 bnx2x_set_led(&bp->link_params, &bp->link_vars,
1961 LED_MODE_OFF, 0);
1917 1962
1918 msleep_interruptible(500); 1963 msleep_interruptible(500);
1919 if (signal_pending(current)) 1964 if (signal_pending(current))
@@ -1921,7 +1966,7 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
1921 } 1966 }
1922 1967
1923 if (bp->link_vars.link_up) 1968 if (bp->link_vars.link_up)
1924 bnx2x_set_led(&bp->link_params, LED_MODE_OPER, 1969 bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
1925 bp->link_vars.line_speed); 1970 bp->link_vars.line_speed);
1926 1971
1927 return 0; 1972 return 0;
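
One subtle change in the forced-speed path above: the requested speed is now rebuilt as cmd->speed | (cmd->speed_hi << 16), because struct ethtool_cmd carries the value split across the legacy 16-bit speed field and a speed_hi half for rates above 65535 Mb/s (later kernels wrap this in the ethtool_cmd_speed() helpers). A standalone worked example of the packing:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t req = 100000;			/* 100G: does not fit in 16 bits */
		uint16_t speed = req & 0xffff;		/* ethtool_cmd.speed */
		uint16_t speed_hi = req >> 16;		/* ethtool_cmd.speed_hi */

		/* driver-side reconstruction, as in bnx2x_set_settings() */
		uint32_t speed32 = speed | ((uint32_t)speed_hi << 16);

		printf("reconstructed %u Mb/s\n", speed32);	/* prints 100000 */
		return 0;
	}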
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index fd1f29e0317d..60d141cd9950 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -78,6 +78,8 @@ struct shared_hw_cfg { /* NVRAM Offset */
78#define SHARED_HW_CFG_LED_PHY11 0x000b0000 78#define SHARED_HW_CFG_LED_PHY11 0x000b0000
79#define SHARED_HW_CFG_LED_MAC4 0x000c0000 79#define SHARED_HW_CFG_LED_MAC4 0x000c0000
80#define SHARED_HW_CFG_LED_PHY8 0x000d0000 80#define SHARED_HW_CFG_LED_PHY8 0x000d0000
81#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
82
81 83
82#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 84#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
83#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 85#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
@@ -120,6 +122,23 @@ struct shared_hw_cfg { /* NVRAM Offset */
120#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 122#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
121#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 123#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
122 124
125 /* Set the MDC/MDIO access for the first external phy */
126#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
127#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
128#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
129#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
130#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
131#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
132#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
133
134 /* Set the MDC/MDIO access for the second external phy */
135#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
136#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
137#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
138#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
139#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
140#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
141#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
123 u32 power_dissipated; /* 0x11c */ 142 u32 power_dissipated; /* 0x11c */
124#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 143#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
125#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 144#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
@@ -221,7 +240,88 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
221 240
222 u16 xgxs_config_tx[4]; /* 0x1A0 */ 241 u16 xgxs_config_tx[4]; /* 0x1A0 */
223 242
224 u32 Reserved1[64]; /* 0x1A8 */ 243 u32 Reserved1[57]; /* 0x1A8 */
244 u32 speed_capability_mask2; /* 0x28C */
245#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
246#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
247#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
248#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
249#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
250#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
251#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
252#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
253#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
254#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080
255#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100
256#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200
257#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400
258#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800
259
260#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
261#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
262#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
263#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
264#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
265#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
266#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
267#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
268#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
269#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000
270#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000
271#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000
272#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000
273#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000
274
275 /* In the case where two media types (e.g. copper and fiber) are
276 present and electrically active at the same time, PHY Selection
277 will determine which of the two PHYs will be designated as the
278 Active PHY and used for a connection to the network. */
279 u32 multi_phy_config; /* 0x290 */
280#define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
281#define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
282#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
283#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
284#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
285#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
286#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
287
288 /* When enabled, all second phy nvram parameters will be swapped
289 with the first phy parameters */
290#define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
291#define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
292#define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
293#define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
294
295
296 /* Address of the second external phy */
297 u32 external_phy_config2; /* 0x294 */
298#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
299#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
300
301 /* The second XGXS external PHY type */
302#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
303#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
304#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
305#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
306#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
307#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
308#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
309#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
310#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
311#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
312#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
313#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
314#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
315#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
316#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
317#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
318#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
319#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
320
321 /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
322 8706, 8726 and 8727) not all 4 values are needed. */
323 u16 xgxs_config2_rx[4]; /* 0x296 */
324 u16 xgxs_config2_tx[4]; /* 0x2A0 */
225 325
226 u32 lane_config; 326 u32 lane_config;
227#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff 327#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
@@ -515,10 +615,17 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
515#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 615#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
516 616
517 /* The default for MCP link configuration, 617 /* The default for MCP link configuration,
518 uses the same defines as link_config */ 618 uses the same defines as link_config */
519 u32 mfw_wol_link_cfg; 619 u32 mfw_wol_link_cfg;
620 /* The default for the driver of the second external phy,
621 uses the same defines as link_config */
622 u32 link_config2; /* 0x47C */
623
624 /* The default for MCP of the second external phy,
625 uses the same defines as link_config */
626 u32 mfw_wol_link_cfg2; /* 0x480 */
520 627
521 u32 reserved[19]; 628 u32 Reserved2[17]; /* 0x484 */
522 629
523}; 630};
524 631
@@ -686,8 +793,14 @@ struct drv_func_mb {
686 * The optic module verification commands require bootcode 793 * The optic module verification commands require bootcode
687 * v5.0.6 or later 794 * v5.0.6 or later
688 */ 795 */
689#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000 796#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
690#define REQ_BC_VER_4_VRFY_OPT_MDL 0x00050006 797#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
798 /*
799 * The specific optic module verification command requires bootcode
800 * v5.2.12 or later
801 */
802#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
803#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
691 804
692#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 805#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
693#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 806#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
@@ -922,7 +1035,12 @@ struct shmem2_region {
922#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 1035#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
923#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 1036#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
924#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE 1037#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE
925 1038 u32 ext_phy_fw_version2[PORT_MAX];
1039 /*
1040 * For backwards compatibility, if the mf_cfg_addr does not exist
 1041 * (the size field is smaller than 0xc) the mf_cfg resides at the
1042 * end of struct shmem_region
1043 */
926}; 1044};
927 1045
928 1046
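
All of the multi-field NVRAM words added here follow the file's MASK/SHIFT convention, and two access styles coexist. When comparing against the named codes (as bnx2x_get_emac_base() does below with the SHARED_HW_CFG_MDC_MDIO_ACCESS1_* values), only the mask is applied, because the codes are defined in place; shifting is needed only when the numeric field value itself is wanted. A sketch of both, assuming the defines above are in scope (the DEMO_GET_FIELD helper is illustrative):

	/* compare against in-place codes: mask only */
	static u32 demo_mdc_mdio_access1(u32 cfg_word)
	{
		return cfg_word & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
	}

	/* extract the numeric field value: mask, then shift down */
	#define DEMO_GET_FIELD(value, name) \
		(((value) & name##_MASK) >> name##_SHIFT)

	/* e.g. DEMO_GET_FIELD(cfg, SHARED_HW_CFG_MDC_MDIO_ACCESS1) yields 0..7 */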
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 0383e3066313..a07a3a6abd40 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -168,50 +168,19 @@
168/**********************************************************/ 168/**********************************************************/
169/* INTERFACE */ 169/* INTERFACE */
170/**********************************************************/ 170/**********************************************************/
171#define CL45_WR_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \ 171
172 bnx2x_cl45_write(_bp, _port, 0, _phy_addr, \ 172#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
173 DEFAULT_PHY_DEV_ADDR, \ 173 bnx2x_cl45_write(_bp, _phy, \
174 (_phy)->def_md_devad, \
174 (_bank + (_addr & 0xf)), \ 175 (_bank + (_addr & 0xf)), \
175 _val) 176 _val)
176 177
177#define CL45_RD_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \ 178#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
178 bnx2x_cl45_read(_bp, _port, 0, _phy_addr, \ 179 bnx2x_cl45_read(_bp, _phy, \
179 DEFAULT_PHY_DEV_ADDR, \ 180 (_phy)->def_md_devad, \
180 (_bank + (_addr & 0xf)), \ 181 (_bank + (_addr & 0xf)), \
181 _val) 182 _val)
182 183
183static void bnx2x_set_serdes_access(struct link_params *params)
184{
185 struct bnx2x *bp = params->bp;
186 u32 emac_base = (params->port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
187
188 /* Set Clause 22 */
189 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 1);
190 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
191 udelay(500);
192 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
193 udelay(500);
194 /* Set Clause 45 */
195 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 0);
196}
197static void bnx2x_set_phy_mdio(struct link_params *params, u8 phy_flags)
198{
199 struct bnx2x *bp = params->bp;
200
201 if (phy_flags & PHY_XGXS_FLAG) {
202 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
203 params->port*0x18, 0);
204 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
205 DEFAULT_PHY_DEV_ADDR);
206 } else {
207 bnx2x_set_serdes_access(params);
208
209 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
210 params->port*0x10,
211 DEFAULT_PHY_DEV_ADDR);
212 }
213}
214
215static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) 184static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
216{ 185{
217 u32 val = REG_RD(bp, reg); 186 u32 val = REG_RD(bp, reg);
@@ -527,162 +496,6 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
527 return 0; 496 return 0;
528} 497}
529 498
530static void bnx2x_phy_deassert(struct link_params *params, u8 phy_flags)
531{
532 struct bnx2x *bp = params->bp;
533 u32 val;
534
535 if (phy_flags & PHY_XGXS_FLAG) {
536 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:XGXS\n");
537 val = XGXS_RESET_BITS;
538
539 } else { /* SerDes */
540 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:SerDes\n");
541 val = SERDES_RESET_BITS;
542 }
543
544 val = val << (params->port*16);
545
546 /* reset and unreset the SerDes/XGXS */
547 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
548 val);
549 udelay(500);
550 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET,
551 val);
552 bnx2x_set_phy_mdio(params, phy_flags);
553}
554
555void bnx2x_link_status_update(struct link_params *params,
556 struct link_vars *vars)
557{
558 struct bnx2x *bp = params->bp;
559 u8 link_10g;
560 u8 port = params->port;
561
562 if (params->switch_cfg == SWITCH_CFG_1G)
563 vars->phy_flags = PHY_SERDES_FLAG;
564 else
565 vars->phy_flags = PHY_XGXS_FLAG;
566 vars->link_status = REG_RD(bp, params->shmem_base +
567 offsetof(struct shmem_region,
568 port_mb[port].link_status));
569
570 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
571
572 if (vars->link_up) {
573 DP(NETIF_MSG_LINK, "phy link up\n");
574
575 vars->phy_link_up = 1;
576 vars->duplex = DUPLEX_FULL;
577 switch (vars->link_status &
578 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
579 case LINK_10THD:
580 vars->duplex = DUPLEX_HALF;
581 /* fall thru */
582 case LINK_10TFD:
583 vars->line_speed = SPEED_10;
584 break;
585
586 case LINK_100TXHD:
587 vars->duplex = DUPLEX_HALF;
588 /* fall thru */
589 case LINK_100T4:
590 case LINK_100TXFD:
591 vars->line_speed = SPEED_100;
592 break;
593
594 case LINK_1000THD:
595 vars->duplex = DUPLEX_HALF;
596 /* fall thru */
597 case LINK_1000TFD:
598 vars->line_speed = SPEED_1000;
599 break;
600
601 case LINK_2500THD:
602 vars->duplex = DUPLEX_HALF;
603 /* fall thru */
604 case LINK_2500TFD:
605 vars->line_speed = SPEED_2500;
606 break;
607
608 case LINK_10GTFD:
609 vars->line_speed = SPEED_10000;
610 break;
611
612 case LINK_12GTFD:
613 vars->line_speed = SPEED_12000;
614 break;
615
616 case LINK_12_5GTFD:
617 vars->line_speed = SPEED_12500;
618 break;
619
620 case LINK_13GTFD:
621 vars->line_speed = SPEED_13000;
622 break;
623
624 case LINK_15GTFD:
625 vars->line_speed = SPEED_15000;
626 break;
627
628 case LINK_16GTFD:
629 vars->line_speed = SPEED_16000;
630 break;
631
632 default:
633 break;
634 }
635
636 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
637 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
638 else
639 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX;
640
641 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
642 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
643 else
644 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX;
645
646 if (vars->phy_flags & PHY_XGXS_FLAG) {
647 if (vars->line_speed &&
648 ((vars->line_speed == SPEED_10) ||
649 (vars->line_speed == SPEED_100))) {
650 vars->phy_flags |= PHY_SGMII_FLAG;
651 } else {
652 vars->phy_flags &= ~PHY_SGMII_FLAG;
653 }
654 }
655
656 /* anything 10 and over uses the bmac */
657 link_10g = ((vars->line_speed == SPEED_10000) ||
658 (vars->line_speed == SPEED_12000) ||
659 (vars->line_speed == SPEED_12500) ||
660 (vars->line_speed == SPEED_13000) ||
661 (vars->line_speed == SPEED_15000) ||
662 (vars->line_speed == SPEED_16000));
663 if (link_10g)
664 vars->mac_type = MAC_TYPE_BMAC;
665 else
666 vars->mac_type = MAC_TYPE_EMAC;
667
668 } else { /* link down */
669 DP(NETIF_MSG_LINK, "phy link down\n");
670
671 vars->phy_link_up = 0;
672
673 vars->line_speed = 0;
674 vars->duplex = DUPLEX_FULL;
675 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
676
677 /* indicate no mac active */
678 vars->mac_type = MAC_TYPE_NONE;
679 }
680
681 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
682 vars->link_status, vars->phy_link_up);
683 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
684 vars->line_speed, vars->duplex, vars->flow_ctrl);
685}
686 499
687static void bnx2x_update_mng(struct link_params *params, u32 link_status) 500static void bnx2x_update_mng(struct link_params *params, u32 link_status)
688{ 501{
@@ -800,62 +613,69 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
800 return 0; 613 return 0;
801} 614}
802 615
803static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 ext_phy_type, u8 port) 616static u32 bnx2x_get_emac_base(struct bnx2x *bp,
617 u32 mdc_mdio_access, u8 port)
804{ 618{
805 u32 emac_base; 619 u32 emac_base = 0;
806 620 switch (mdc_mdio_access) {
807 switch (ext_phy_type) { 621 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
808 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 622 break;
809 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 623 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
810 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 624 if (REG_RD(bp, NIG_REG_PORT_SWAP))
811 /* All MDC/MDIO is directed through single EMAC */ 625 emac_base = GRCBASE_EMAC1;
626 else
627 emac_base = GRCBASE_EMAC0;
628 break;
629 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
812 if (REG_RD(bp, NIG_REG_PORT_SWAP)) 630 if (REG_RD(bp, NIG_REG_PORT_SWAP))
813 emac_base = GRCBASE_EMAC0; 631 emac_base = GRCBASE_EMAC0;
814 else 632 else
815 emac_base = GRCBASE_EMAC1; 633 emac_base = GRCBASE_EMAC1;
816 break; 634 break;
817 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 635 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
636 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
637 break;
638 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
818 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; 639 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
819 break; 640 break;
820 default: 641 default:
821 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
822 break; 642 break;
823 } 643 }
824 return emac_base; 644 return emac_base;
825 645
826} 646}
827 647
828u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type, 648u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
829 u8 phy_addr, u8 devad, u16 reg, u16 val) 649 u8 devad, u16 reg, u16 val)
830{ 650{
831 u32 tmp, saved_mode; 651 u32 tmp, saved_mode;
832 u8 i, rc = 0; 652 u8 i, rc = 0;
833 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
834 653
835 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 654 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
836 * (a value of 49==0x31) and make sure that the AUTO poll is off 655 * (a value of 49==0x31) and make sure that the AUTO poll is off
837 */ 656 */
838 657
839 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 658 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
840 tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL | 659 tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
841 EMAC_MDIO_MODE_CLOCK_CNT); 660 EMAC_MDIO_MODE_CLOCK_CNT);
842 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 | 661 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
843 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 662 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
844 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp); 663 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
845 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 664 REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
846 udelay(40); 665 udelay(40);
847 666
848 /* address */ 667 /* address */
849 668
850 tmp = ((phy_addr << 21) | (devad << 16) | reg | 669 tmp = ((phy->addr << 21) | (devad << 16) | reg |
851 EMAC_MDIO_COMM_COMMAND_ADDRESS | 670 EMAC_MDIO_COMM_COMMAND_ADDRESS |
852 EMAC_MDIO_COMM_START_BUSY); 671 EMAC_MDIO_COMM_START_BUSY);
853 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 672 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
854 673
855 for (i = 0; i < 50; i++) { 674 for (i = 0; i < 50; i++) {
856 udelay(10); 675 udelay(10);
857 676
858 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 677 tmp = REG_RD(bp, phy->mdio_ctrl +
678 EMAC_REG_EMAC_MDIO_COMM);
859 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 679 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
860 udelay(5); 680 udelay(5);
861 break; 681 break;
@@ -866,15 +686,15 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
866 rc = -EFAULT; 686 rc = -EFAULT;
867 } else { 687 } else {
868 /* data */ 688 /* data */
869 tmp = ((phy_addr << 21) | (devad << 16) | val | 689 tmp = ((phy->addr << 21) | (devad << 16) | val |
870 EMAC_MDIO_COMM_COMMAND_WRITE_45 | 690 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
871 EMAC_MDIO_COMM_START_BUSY); 691 EMAC_MDIO_COMM_START_BUSY);
872 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 692 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
873 693
874 for (i = 0; i < 50; i++) { 694 for (i = 0; i < 50; i++) {
875 udelay(10); 695 udelay(10);
876 696
877 tmp = REG_RD(bp, mdio_ctrl + 697 tmp = REG_RD(bp, phy->mdio_ctrl +
878 EMAC_REG_EMAC_MDIO_COMM); 698 EMAC_REG_EMAC_MDIO_COMM);
879 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 699 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
880 udelay(5); 700 udelay(5);
@@ -888,42 +708,41 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
888 } 708 }
889 709
890 /* Restore the saved mode */ 710 /* Restore the saved mode */
891 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); 711 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
892 712
893 return rc; 713 return rc;
894} 714}
895 715
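For reference, every clause-45 transaction above follows the same shape: switch the MDIO block to clause-45 mode with a 2.5MHz clock (divider 49) and auto-poll off, issue an ADDRESS cycle, busy-poll, issue the WRITE cycle, busy-poll again, then restore the saved mode. The COMM word packs its fields exactly as the shifts show; a standalone sketch, with the command/busy encodings as assumptions rather than the real EMAC_MDIO_COMM_* values:

#include <stdint.h>

#define CMD_ADDRESS	(0x0u << 26)	/* assumed encodings; the real  */
#define CMD_WRITE_45	(0x1u << 26)	/* EMAC_MDIO_COMM_* masks are   */
#define START_BUSY	(1u << 29)	/* defined in bnx2x_reg.h       */

/* PHY address in bits 25:21, device address in 20:16, register or
 * data in 15:0 -- matching the shifts used in bnx2x_cl45_write(). */
static uint32_t mdio_comm(uint8_t phy_addr, uint8_t devad,
			  uint16_t reg_or_data, uint32_t cmd)
{
	return ((uint32_t)phy_addr << 21) | ((uint32_t)devad << 16) |
	       reg_or_data | cmd | START_BUSY;
}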
896u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type, 716u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
897 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val) 717 u8 devad, u16 reg, u16 *ret_val)
898{ 718{
899 u32 val, saved_mode; 719 u32 val, saved_mode;
900 u16 i; 720 u16 i;
901 u8 rc = 0; 721 u8 rc = 0;
902 722
903 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
904 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 723 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
905 * (a value of 49==0x31) and make sure that the AUTO poll is off 724 * (a value of 49==0x31) and make sure that the AUTO poll is off
906 */ 725 */
907 726
908 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 727 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
909 val = saved_mode & ((EMAC_MDIO_MODE_AUTO_POLL | 728 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
910 EMAC_MDIO_MODE_CLOCK_CNT)); 729 EMAC_MDIO_MODE_CLOCK_CNT));
911 val |= (EMAC_MDIO_MODE_CLAUSE_45 | 730 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
912 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 731 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
913 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); 732 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
914 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 733 REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
915 udelay(40); 734 udelay(40);
916 735
917 /* address */ 736 /* address */
918 val = ((phy_addr << 21) | (devad << 16) | reg | 737 val = ((phy->addr << 21) | (devad << 16) | reg |
919 EMAC_MDIO_COMM_COMMAND_ADDRESS | 738 EMAC_MDIO_COMM_COMMAND_ADDRESS |
920 EMAC_MDIO_COMM_START_BUSY); 739 EMAC_MDIO_COMM_START_BUSY);
921 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 740 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
922 741
923 for (i = 0; i < 50; i++) { 742 for (i = 0; i < 50; i++) {
924 udelay(10); 743 udelay(10);
925 744
926 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 745 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
927 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 746 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
928 udelay(5); 747 udelay(5);
929 break; 748 break;
@@ -937,15 +756,15 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
937 756
938 } else { 757 } else {
939 /* data */ 758 /* data */
940 val = ((phy_addr << 21) | (devad << 16) | 759 val = ((phy->addr << 21) | (devad << 16) |
941 EMAC_MDIO_COMM_COMMAND_READ_45 | 760 EMAC_MDIO_COMM_COMMAND_READ_45 |
942 EMAC_MDIO_COMM_START_BUSY); 761 EMAC_MDIO_COMM_START_BUSY);
943 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 762 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
944 763
945 for (i = 0; i < 50; i++) { 764 for (i = 0; i < 50; i++) {
946 udelay(10); 765 udelay(10);
947 766
948 val = REG_RD(bp, mdio_ctrl + 767 val = REG_RD(bp, phy->mdio_ctrl +
949 EMAC_REG_EMAC_MDIO_COMM); 768 EMAC_REG_EMAC_MDIO_COMM);
950 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 769 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
951 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 770 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
@@ -961,13 +780,49 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
961 } 780 }
962 781
963 /* Restore the saved mode */ 782 /* Restore the saved mode */
964 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); 783 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
965 784
966 return rc; 785 return rc;
967} 786}
968 787
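The read path mirrors the write path, and the rework also fixes a subtle mask bug: the old code computed saved_mode & (AUTO_POLL | CLOCK_CNT), which keeps only the two bits it meant to clear, while the new code uses & ~(...) to clear them and preserve the rest of the mode register. A two-line demonstration of the difference:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t mode = 0xf0f0u, mask = 0x00ffu;

	assert((mode & mask) == 0x00f0u);	/* old: keeps only the masked bits */
	assert((mode & ~mask) == 0xf000u);	/* new: clears them, keeps the rest */
	return 0;
}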
788u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
789 u8 devad, u16 reg, u16 *ret_val)
790{
791 u8 phy_index;
792 /*
793 * Probe for the phy according to the given phy_addr, and execute
794 * the read request on it
795 */
796 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
797 if (params->phy[phy_index].addr == phy_addr) {
798 return bnx2x_cl45_read(params->bp,
799 &params->phy[phy_index], devad,
800 reg, ret_val);
801 }
802 }
803 return -EINVAL;
804}
805
806u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
807 u8 devad, u16 reg, u16 val)
808{
809 u8 phy_index;
810 /*
811 * Probe for the phy according to the given phy_addr, and execute
812 * the write request on it
813 */
814 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
815 if (params->phy[phy_index].addr == phy_addr) {
816 return bnx2x_cl45_write(params->bp,
817 &params->phy[phy_index], devad,
818 reg, val);
819 }
820 }
821 return -EINVAL;
822}
823
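bnx2x_phy_read()/bnx2x_phy_write() give callers an address-based entry point into the new per-PHY model: they scan params->phy[] for the matching MDIO address and forward to the clause-45 helpers, returning -EINVAL when no configured PHY owns that address. The lookup in isolation, with simplified types:

#include <stddef.h>
#include <stdint.h>

struct phy { uint8_t addr; /* mdio_ctrl etc. elided */ };

static struct phy *find_phy(struct phy *phys, unsigned int num, uint8_t addr)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (phys[i].addr == addr)
			return &phys[i];
	return NULL;		/* the caller maps this to -EINVAL */
}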
969static void bnx2x_set_aer_mmd(struct link_params *params, 824static void bnx2x_set_aer_mmd(struct link_params *params,
970 struct link_vars *vars) 825 struct bnx2x_phy *phy)
971{ 826{
972 struct bnx2x *bp = params->bp; 827 struct bnx2x *bp = params->bp;
973 u32 ser_lane; 828 u32 ser_lane;
@@ -977,16 +832,202 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
977 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 832 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
978 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 833 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
979 834
980 offset = (vars->phy_flags & PHY_XGXS_FLAG) ? 835 offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
981 (params->phy_addr + ser_lane) : 0; 836 (phy->addr + ser_lane) : 0;
982 837
983 CL45_WR_OVER_CL22(bp, params->port, 838 CL45_WR_OVER_CL22(bp, phy,
984 params->phy_addr,
985 MDIO_REG_BANK_AER_BLOCK, 839 MDIO_REG_BANK_AER_BLOCK,
986 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset); 840 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
987} 841}
988 842
989static void bnx2x_set_master_ln(struct link_params *params) 843/******************************************************************/
844/* Internal phy section */
845/******************************************************************/
846
847static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
848{
849 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
850
851 /* Set Clause 22 */
852 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
853 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
854 udelay(500);
855 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
856 udelay(500);
857 /* Set Clause 45 */
858 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
859}
860
861static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
862{
863 u32 val;
864
865 DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
866
867 val = SERDES_RESET_BITS << (port*16);
868
869 /* reset and unreset the SerDes/XGXS */
870 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
871 udelay(500);
872 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
873
874 bnx2x_set_serdes_access(bp, port);
875
876 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
877 port*0x10,
878 DEFAULT_PHY_DEV_ADDR);
879}
880
881static void bnx2x_xgxs_deassert(struct link_params *params)
882{
883 struct bnx2x *bp = params->bp;
884 u8 port;
885 u32 val;
886 DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
887 port = params->port;
888
889 val = XGXS_RESET_BITS << (port*16);
890
891 /* reset and unreset the SerDes/XGXS */
892 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
893 udelay(500);
894 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
895
896 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
897 port*0x18, 0);
898 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
899 params->phy[INT_PHY].def_md_devad);
900}
901
902
903void bnx2x_link_status_update(struct link_params *params,
904 struct link_vars *vars)
905{
906 struct bnx2x *bp = params->bp;
907 u8 link_10g;
908 u8 port = params->port;
909
910 vars->link_status = REG_RD(bp, params->shmem_base +
911 offsetof(struct shmem_region,
912 port_mb[port].link_status));
913
914 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
915
916 if (vars->link_up) {
917 DP(NETIF_MSG_LINK, "phy link up\n");
918
919 vars->phy_link_up = 1;
920 vars->duplex = DUPLEX_FULL;
921 switch (vars->link_status &
922 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
923 case LINK_10THD:
924 vars->duplex = DUPLEX_HALF;
925 /* fall thru */
926 case LINK_10TFD:
927 vars->line_speed = SPEED_10;
928 break;
929
930 case LINK_100TXHD:
931 vars->duplex = DUPLEX_HALF;
932 /* fall thru */
933 case LINK_100T4:
934 case LINK_100TXFD:
935 vars->line_speed = SPEED_100;
936 break;
937
938 case LINK_1000THD:
939 vars->duplex = DUPLEX_HALF;
940 /* fall thru */
941 case LINK_1000TFD:
942 vars->line_speed = SPEED_1000;
943 break;
944
945 case LINK_2500THD:
946 vars->duplex = DUPLEX_HALF;
947 /* fall thru */
948 case LINK_2500TFD:
949 vars->line_speed = SPEED_2500;
950 break;
951
952 case LINK_10GTFD:
953 vars->line_speed = SPEED_10000;
954 break;
955
956 case LINK_12GTFD:
957 vars->line_speed = SPEED_12000;
958 break;
959
960 case LINK_12_5GTFD:
961 vars->line_speed = SPEED_12500;
962 break;
963
964 case LINK_13GTFD:
965 vars->line_speed = SPEED_13000;
966 break;
967
968 case LINK_15GTFD:
969 vars->line_speed = SPEED_15000;
970 break;
971
972 case LINK_16GTFD:
973 vars->line_speed = SPEED_16000;
974 break;
975
976 default:
977 break;
978 }
979 vars->flow_ctrl = 0;
980 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
981 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
982
983 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
984 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
985
986 if (!vars->flow_ctrl)
987 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
988
989 if (vars->line_speed &&
990 ((vars->line_speed == SPEED_10) ||
991 (vars->line_speed == SPEED_100))) {
992 vars->phy_flags |= PHY_SGMII_FLAG;
993 } else {
994 vars->phy_flags &= ~PHY_SGMII_FLAG;
995 }
996
997 /* anything 10G and over uses the bmac */
998 link_10g = ((vars->line_speed == SPEED_10000) ||
999 (vars->line_speed == SPEED_12000) ||
1000 (vars->line_speed == SPEED_12500) ||
1001 (vars->line_speed == SPEED_13000) ||
1002 (vars->line_speed == SPEED_15000) ||
1003 (vars->line_speed == SPEED_16000));
1004 if (link_10g)
1005 vars->mac_type = MAC_TYPE_BMAC;
1006 else
1007 vars->mac_type = MAC_TYPE_EMAC;
1008
1009 } else { /* link down */
1010 DP(NETIF_MSG_LINK, "phy link down\n");
1011
1012 vars->phy_link_up = 0;
1013
1014 vars->line_speed = 0;
1015 vars->duplex = DUPLEX_FULL;
1016 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1017
1018 /* indicate no mac active */
1019 vars->mac_type = MAC_TYPE_NONE;
1020 }
1021
1022 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
1023 vars->link_status, vars->phy_link_up);
1024 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
1025 vars->line_speed, vars->duplex, vars->flow_ctrl);
1026}
1027
1028
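bnx2x_link_status_update() now decodes everything from the shmem link_status word: the speed/duplex field picks line_speed (half-duplex cases falling through to their full-duplex twins), separate bits carry TX/RX flow control, 10/100 selects SGMII mode, and anything 10G and over selects the BMAC. A compact standalone sketch of the decode pattern, with illustrative codes in place of the real LINK_* values from bnx2x_hsi.h:

#include <stdint.h>

enum { LS_10THD = 1, LS_10TFD, LS_100TXHD, LS_100TXFD, LS_1000TFD };
#define LS_SPEED_MASK	0x1fu	/* stand-in for LINK_STATUS_SPEED_AND_DUPLEX_MASK */

struct decoded { unsigned int speed_mbps; int full_duplex; };

static struct decoded decode_link_status(uint32_t link_status)
{
	struct decoded d = { 0, 1 };

	switch (link_status & LS_SPEED_MASK) {
	case LS_10THD:
		d.full_duplex = 0;
		/* fall thru */
	case LS_10TFD:
		d.speed_mbps = 10;
		break;
	case LS_100TXHD:
		d.full_duplex = 0;
		/* fall thru */
	case LS_100TXFD:
		d.speed_mbps = 100;
		break;
	case LS_1000TFD:
		d.speed_mbps = 1000;
		break;
	}
	return d;
}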
1029static void bnx2x_set_master_ln(struct link_params *params,
1030 struct bnx2x_phy *phy)
990{ 1031{
991 struct bnx2x *bp = params->bp; 1032 struct bnx2x *bp = params->bp;
992 u16 new_master_ln, ser_lane; 1033 u16 new_master_ln, ser_lane;
@@ -995,47 +1036,44 @@ static void bnx2x_set_master_ln(struct link_params *params)
995 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1036 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
996 1037
997 /* set the master_ln for AN */ 1038 /* set the master_ln for AN */
998 CL45_RD_OVER_CL22(bp, params->port, 1039 CL45_RD_OVER_CL22(bp, phy,
999 params->phy_addr,
1000 MDIO_REG_BANK_XGXS_BLOCK2, 1040 MDIO_REG_BANK_XGXS_BLOCK2,
1001 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1041 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1002 &new_master_ln); 1042 &new_master_ln);
1003 1043
1004 CL45_WR_OVER_CL22(bp, params->port, 1044 CL45_WR_OVER_CL22(bp, phy,
1005 params->phy_addr,
1006 MDIO_REG_BANK_XGXS_BLOCK2 , 1045 MDIO_REG_BANK_XGXS_BLOCK2 ,
1007 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1046 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1008 (new_master_ln | ser_lane)); 1047 (new_master_ln | ser_lane));
1009} 1048}
1010 1049
1011static u8 bnx2x_reset_unicore(struct link_params *params) 1050static u8 bnx2x_reset_unicore(struct link_params *params,
1051 struct bnx2x_phy *phy,
1052 u8 set_serdes)
1012{ 1053{
1013 struct bnx2x *bp = params->bp; 1054 struct bnx2x *bp = params->bp;
1014 u16 mii_control; 1055 u16 mii_control;
1015 u16 i; 1056 u16 i;
1016 1057
1017 CL45_RD_OVER_CL22(bp, params->port, 1058 CL45_RD_OVER_CL22(bp, phy,
1018 params->phy_addr,
1019 MDIO_REG_BANK_COMBO_IEEE0, 1059 MDIO_REG_BANK_COMBO_IEEE0,
1020 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); 1060 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1021 1061
1022 /* reset the unicore */ 1062 /* reset the unicore */
1023 CL45_WR_OVER_CL22(bp, params->port, 1063 CL45_WR_OVER_CL22(bp, phy,
1024 params->phy_addr,
1025 MDIO_REG_BANK_COMBO_IEEE0, 1064 MDIO_REG_BANK_COMBO_IEEE0,
1026 MDIO_COMBO_IEEE0_MII_CONTROL, 1065 MDIO_COMBO_IEEE0_MII_CONTROL,
1027 (mii_control | 1066 (mii_control |
1028 MDIO_COMBO_IEEO_MII_CONTROL_RESET)); 1067 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1029 if (params->switch_cfg == SWITCH_CFG_1G) 1068 if (set_serdes)
1030 bnx2x_set_serdes_access(params); 1069 bnx2x_set_serdes_access(bp, params->port);
1031 1070
1032 /* wait for the reset to self clear */ 1071 /* wait for the reset to self clear */
1033 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { 1072 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
1034 udelay(5); 1073 udelay(5);
1035 1074
1036 /* the reset erased the previous bank value */ 1075 /* the reset erased the previous bank value */
1037 CL45_RD_OVER_CL22(bp, params->port, 1076 CL45_RD_OVER_CL22(bp, phy,
1038 params->phy_addr,
1039 MDIO_REG_BANK_COMBO_IEEE0, 1077 MDIO_REG_BANK_COMBO_IEEE0,
1040 MDIO_COMBO_IEEE0_MII_CONTROL, 1078 MDIO_COMBO_IEEE0_MII_CONTROL,
1041 &mii_control); 1079 &mii_control);
@@ -1051,7 +1089,8 @@ static u8 bnx2x_reset_unicore(struct link_params *params)
1051 1089
1052} 1090}
1053 1091
1054static void bnx2x_set_swap_lanes(struct link_params *params) 1092static void bnx2x_set_swap_lanes(struct link_params *params,
1093 struct bnx2x_phy *phy)
1055{ 1094{
1056 struct bnx2x *bp = params->bp; 1095 struct bnx2x *bp = params->bp;
1057 /* Each two bits represents a lane number: 1096 /* Each two bits represents a lane number:
@@ -1069,71 +1108,62 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
1069 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); 1108 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1070 1109
1071 if (rx_lane_swap != 0x1b) { 1110 if (rx_lane_swap != 0x1b) {
1072 CL45_WR_OVER_CL22(bp, params->port, 1111 CL45_WR_OVER_CL22(bp, phy,
1073 params->phy_addr,
1074 MDIO_REG_BANK_XGXS_BLOCK2, 1112 MDIO_REG_BANK_XGXS_BLOCK2,
1075 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 1113 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1076 (rx_lane_swap | 1114 (rx_lane_swap |
1077 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | 1115 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1078 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); 1116 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1079 } else { 1117 } else {
1080 CL45_WR_OVER_CL22(bp, params->port, 1118 CL45_WR_OVER_CL22(bp, phy,
1081 params->phy_addr,
1082 MDIO_REG_BANK_XGXS_BLOCK2, 1119 MDIO_REG_BANK_XGXS_BLOCK2,
1083 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); 1120 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1084 } 1121 }
1085 1122
1086 if (tx_lane_swap != 0x1b) { 1123 if (tx_lane_swap != 0x1b) {
1087 CL45_WR_OVER_CL22(bp, params->port, 1124 CL45_WR_OVER_CL22(bp, phy,
1088 params->phy_addr,
1089 MDIO_REG_BANK_XGXS_BLOCK2, 1125 MDIO_REG_BANK_XGXS_BLOCK2,
1090 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 1126 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1091 (tx_lane_swap | 1127 (tx_lane_swap |
1092 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); 1128 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1093 } else { 1129 } else {
1094 CL45_WR_OVER_CL22(bp, params->port, 1130 CL45_WR_OVER_CL22(bp, phy,
1095 params->phy_addr,
1096 MDIO_REG_BANK_XGXS_BLOCK2, 1131 MDIO_REG_BANK_XGXS_BLOCK2,
1097 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); 1132 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1098 } 1133 }
1099} 1134}
1100 1135
1101static void bnx2x_set_parallel_detection(struct link_params *params, 1136static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
1102 u8 phy_flags) 1137 struct link_params *params)
1103{ 1138{
1104 struct bnx2x *bp = params->bp; 1139 struct bnx2x *bp = params->bp;
1105 u16 control2; 1140 u16 control2;
1106 1141 CL45_RD_OVER_CL22(bp, phy,
1107 CL45_RD_OVER_CL22(bp, params->port,
1108 params->phy_addr,
1109 MDIO_REG_BANK_SERDES_DIGITAL, 1142 MDIO_REG_BANK_SERDES_DIGITAL,
1110 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1143 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1111 &control2); 1144 &control2);
1112 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1145 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1113 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1146 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1114 else 1147 else
1115 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1148 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1116 DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n", 1149 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1117 params->speed_cap_mask, control2); 1150 phy->speed_cap_mask, control2);
1118 CL45_WR_OVER_CL22(bp, params->port, 1151 CL45_WR_OVER_CL22(bp, phy,
1119 params->phy_addr,
1120 MDIO_REG_BANK_SERDES_DIGITAL, 1152 MDIO_REG_BANK_SERDES_DIGITAL,
1121 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1153 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1122 control2); 1154 control2);
1123 1155
1124 if ((phy_flags & PHY_XGXS_FLAG) && 1156 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
1125 (params->speed_cap_mask & 1157 (phy->speed_cap_mask &
1126 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 1158 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1127 DP(NETIF_MSG_LINK, "XGXS\n"); 1159 DP(NETIF_MSG_LINK, "XGXS\n");
1128 1160
1129 CL45_WR_OVER_CL22(bp, params->port, 1161 CL45_WR_OVER_CL22(bp, phy,
1130 params->phy_addr,
1131 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1162 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1132 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 1163 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1133 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); 1164 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1134 1165
1135 CL45_RD_OVER_CL22(bp, params->port, 1166 CL45_RD_OVER_CL22(bp, phy,
1136 params->phy_addr,
1137 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1167 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1138 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1168 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1139 &control2); 1169 &control2);
@@ -1142,15 +1172,13 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
1142 control2 |= 1172 control2 |=
1143 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 1173 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1144 1174
1145 CL45_WR_OVER_CL22(bp, params->port, 1175 CL45_WR_OVER_CL22(bp, phy,
1146 params->phy_addr,
1147 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1176 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1148 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1177 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1149 control2); 1178 control2);
1150 1179
1151 /* Disable parallel detection of HiG */ 1180 /* Disable parallel detection of HiG */
1152 CL45_WR_OVER_CL22(bp, params->port, 1181 CL45_WR_OVER_CL22(bp, phy,
1153 params->phy_addr,
1154 MDIO_REG_BANK_XGXS_BLOCK2, 1182 MDIO_REG_BANK_XGXS_BLOCK2,
1155 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, 1183 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1156 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | 1184 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
@@ -1158,7 +1186,8 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
1158 } 1186 }
1159} 1187}
1160 1188
1161static void bnx2x_set_autoneg(struct link_params *params, 1189static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1190 struct link_params *params,
1162 struct link_vars *vars, 1191 struct link_vars *vars,
1163 u8 enable_cl73) 1192 u8 enable_cl73)
1164{ 1193{
@@ -1166,9 +1195,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1166 u16 reg_val; 1195 u16 reg_val;
1167 1196
1168 /* CL37 Autoneg */ 1197 /* CL37 Autoneg */
1169 1198 CL45_RD_OVER_CL22(bp, phy,
1170 CL45_RD_OVER_CL22(bp, params->port,
1171 params->phy_addr,
1172 MDIO_REG_BANK_COMBO_IEEE0, 1199 MDIO_REG_BANK_COMBO_IEEE0,
1173 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1200 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1174 1201
@@ -1179,15 +1206,13 @@ static void bnx2x_set_autoneg(struct link_params *params,
1179 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1206 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1180 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); 1207 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1181 1208
1182 CL45_WR_OVER_CL22(bp, params->port, 1209 CL45_WR_OVER_CL22(bp, phy,
1183 params->phy_addr,
1184 MDIO_REG_BANK_COMBO_IEEE0, 1210 MDIO_REG_BANK_COMBO_IEEE0,
1185 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 1211 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1186 1212
1187 /* Enable/Disable Autodetection */ 1213 /* Enable/Disable Autodetection */
1188 1214
1189 CL45_RD_OVER_CL22(bp, params->port, 1215 CL45_RD_OVER_CL22(bp, phy,
1190 params->phy_addr,
1191 MDIO_REG_BANK_SERDES_DIGITAL, 1216 MDIO_REG_BANK_SERDES_DIGITAL,
1192 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1217 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1193 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | 1218 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
@@ -1198,14 +1223,12 @@ static void bnx2x_set_autoneg(struct link_params *params,
1198 else 1223 else
1199 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1224 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1200 1225
1201 CL45_WR_OVER_CL22(bp, params->port, 1226 CL45_WR_OVER_CL22(bp, phy,
1202 params->phy_addr,
1203 MDIO_REG_BANK_SERDES_DIGITAL, 1227 MDIO_REG_BANK_SERDES_DIGITAL,
1204 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); 1228 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1205 1229
1206 /* Enable TetonII and BAM autoneg */ 1230 /* Enable TetonII and BAM autoneg */
1207 CL45_RD_OVER_CL22(bp, params->port, 1231 CL45_RD_OVER_CL22(bp, phy,
1208 params->phy_addr,
1209 MDIO_REG_BANK_BAM_NEXT_PAGE, 1232 MDIO_REG_BANK_BAM_NEXT_PAGE,
1210 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1233 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1211 &reg_val); 1234 &reg_val);
@@ -1218,23 +1241,20 @@ static void bnx2x_set_autoneg(struct link_params *params,
1218 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1241 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1219 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1242 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1220 } 1243 }
1221 CL45_WR_OVER_CL22(bp, params->port, 1244 CL45_WR_OVER_CL22(bp, phy,
1222 params->phy_addr,
1223 MDIO_REG_BANK_BAM_NEXT_PAGE, 1245 MDIO_REG_BANK_BAM_NEXT_PAGE,
1224 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1246 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1225 reg_val); 1247 reg_val);
1226 1248
1227 if (enable_cl73) { 1249 if (enable_cl73) {
1228 /* Enable Cl73 FSM status bits */ 1250 /* Enable Cl73 FSM status bits */
1229 CL45_WR_OVER_CL22(bp, params->port, 1251 CL45_WR_OVER_CL22(bp, phy,
1230 params->phy_addr,
1231 MDIO_REG_BANK_CL73_USERB0, 1252 MDIO_REG_BANK_CL73_USERB0,
1232 MDIO_CL73_USERB0_CL73_UCTRL, 1253 MDIO_CL73_USERB0_CL73_UCTRL,
1233 0xe); 1254 0xe);
1234 1255
1235 /* Enable BAM Station Manager*/ 1256 /* Enable BAM Station Manager*/
1236 CL45_WR_OVER_CL22(bp, params->port, 1257 CL45_WR_OVER_CL22(bp, phy,
1237 params->phy_addr,
1238 MDIO_REG_BANK_CL73_USERB0, 1258 MDIO_REG_BANK_CL73_USERB0,
1239 MDIO_CL73_USERB0_CL73_BAM_CTRL1, 1259 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
1240 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | 1260 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -1242,20 +1262,18 @@ static void bnx2x_set_autoneg(struct link_params *params,
1242 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1262 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
1243 1263
1244 /* Advertise CL73 link speeds */ 1264 /* Advertise CL73 link speeds */
1245 CL45_RD_OVER_CL22(bp, params->port, 1265 CL45_RD_OVER_CL22(bp, phy,
1246 params->phy_addr,
1247 MDIO_REG_BANK_CL73_IEEEB1, 1266 MDIO_REG_BANK_CL73_IEEEB1,
1248 MDIO_CL73_IEEEB1_AN_ADV2, 1267 MDIO_CL73_IEEEB1_AN_ADV2,
1249 &reg_val); 1268 &reg_val);
1250 if (params->speed_cap_mask & 1269 if (phy->speed_cap_mask &
1251 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1270 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1252 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; 1271 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
1253 if (params->speed_cap_mask & 1272 if (phy->speed_cap_mask &
1254 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1273 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1255 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; 1274 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
1256 1275
1257 CL45_WR_OVER_CL22(bp, params->port, 1276 CL45_WR_OVER_CL22(bp, phy,
1258 params->phy_addr,
1259 MDIO_REG_BANK_CL73_IEEEB1, 1277 MDIO_REG_BANK_CL73_IEEEB1,
1260 MDIO_CL73_IEEEB1_AN_ADV2, 1278 MDIO_CL73_IEEEB1_AN_ADV2,
1261 reg_val); 1279 reg_val);
@@ -1266,38 +1284,35 @@ static void bnx2x_set_autoneg(struct link_params *params,
1266 } else /* CL73 Autoneg Disabled */ 1284 } else /* CL73 Autoneg Disabled */
1267 reg_val = 0; 1285 reg_val = 0;
1268 1286
1269 CL45_WR_OVER_CL22(bp, params->port, 1287 CL45_WR_OVER_CL22(bp, phy,
1270 params->phy_addr,
1271 MDIO_REG_BANK_CL73_IEEEB0, 1288 MDIO_REG_BANK_CL73_IEEEB0,
1272 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); 1289 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
1273} 1290}
1274 1291
1275/* program SerDes, forced speed */ 1292/* program SerDes, forced speed */
1276static void bnx2x_program_serdes(struct link_params *params, 1293static void bnx2x_program_serdes(struct bnx2x_phy *phy,
1294 struct link_params *params,
1277 struct link_vars *vars) 1295 struct link_vars *vars)
1278{ 1296{
1279 struct bnx2x *bp = params->bp; 1297 struct bnx2x *bp = params->bp;
1280 u16 reg_val; 1298 u16 reg_val;
1281 1299
1282 /* program duplex, disable autoneg and sgmii*/ 1300 /* program duplex, disable autoneg and sgmii*/
1283 CL45_RD_OVER_CL22(bp, params->port, 1301 CL45_RD_OVER_CL22(bp, phy,
1284 params->phy_addr,
1285 MDIO_REG_BANK_COMBO_IEEE0, 1302 MDIO_REG_BANK_COMBO_IEEE0,
1286 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1303 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1287 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | 1304 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
1288 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1305 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1289 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); 1306 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
1290 if (params->req_duplex == DUPLEX_FULL) 1307 if (phy->req_duplex == DUPLEX_FULL)
1291 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 1308 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1292 CL45_WR_OVER_CL22(bp, params->port, 1309 CL45_WR_OVER_CL22(bp, phy,
1293 params->phy_addr,
1294 MDIO_REG_BANK_COMBO_IEEE0, 1310 MDIO_REG_BANK_COMBO_IEEE0,
1295 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 1311 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1296 1312
1297 /* program speed 1313 /* program speed
1298 - needed only if the speed is greater than 1G (2.5G or 10G) */ 1314 - needed only if the speed is greater than 1G (2.5G or 10G) */
1299 CL45_RD_OVER_CL22(bp, params->port, 1315 CL45_RD_OVER_CL22(bp, phy,
1300 params->phy_addr,
1301 MDIO_REG_BANK_SERDES_DIGITAL, 1316 MDIO_REG_BANK_SERDES_DIGITAL,
1302 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 1317 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
1303 /* clearing the speed value before setting the right speed */ 1318 /* clearing the speed value before setting the right speed */
@@ -1320,14 +1335,14 @@ static void bnx2x_program_serdes(struct link_params *params,
1320 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 1335 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
1321 } 1336 }
1322 1337
1323 CL45_WR_OVER_CL22(bp, params->port, 1338 CL45_WR_OVER_CL22(bp, phy,
1324 params->phy_addr,
1325 MDIO_REG_BANK_SERDES_DIGITAL, 1339 MDIO_REG_BANK_SERDES_DIGITAL,
1326 MDIO_SERDES_DIGITAL_MISC1, reg_val); 1340 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1327 1341
1328} 1342}
1329 1343
1330static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) 1344static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
1345 struct link_params *params)
1331{ 1346{
1332 struct bnx2x *bp = params->bp; 1347 struct bnx2x *bp = params->bp;
1333 u16 val = 0; 1348 u16 val = 0;
@@ -1335,29 +1350,28 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1335 /* configure the 48 bits for BAM AN */ 1350 /* configure the 48 bits for BAM AN */
1336 1351
1337 /* set extended capabilities */ 1352 /* set extended capabilities */
1338 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 1353 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
1339 val |= MDIO_OVER_1G_UP1_2_5G; 1354 val |= MDIO_OVER_1G_UP1_2_5G;
1340 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1355 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1341 val |= MDIO_OVER_1G_UP1_10G; 1356 val |= MDIO_OVER_1G_UP1_10G;
1342 CL45_WR_OVER_CL22(bp, params->port, 1357 CL45_WR_OVER_CL22(bp, phy,
1343 params->phy_addr,
1344 MDIO_REG_BANK_OVER_1G, 1358 MDIO_REG_BANK_OVER_1G,
1345 MDIO_OVER_1G_UP1, val); 1359 MDIO_OVER_1G_UP1, val);
1346 1360
1347 CL45_WR_OVER_CL22(bp, params->port, 1361 CL45_WR_OVER_CL22(bp, phy,
1348 params->phy_addr,
1349 MDIO_REG_BANK_OVER_1G, 1362 MDIO_REG_BANK_OVER_1G,
1350 MDIO_OVER_1G_UP3, 0x400); 1363 MDIO_OVER_1G_UP3, 0x400);
1351} 1364}
1352 1365
1353static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc) 1366static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
1367 struct link_params *params, u16 *ieee_fc)
1354{ 1368{
1355 struct bnx2x *bp = params->bp; 1369 struct bnx2x *bp = params->bp;
1356 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 1370 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1357 /* resolve pause mode and advertisement 1371 /* resolve pause mode and advertisement
1358 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1372 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
1359 1373
1360 switch (params->req_flow_ctrl) { 1374 switch (phy->req_flow_ctrl) {
1361 case BNX2X_FLOW_CTRL_AUTO: 1375 case BNX2X_FLOW_CTRL_AUTO:
1362 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { 1376 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
1363 *ieee_fc |= 1377 *ieee_fc |=
@@ -1385,30 +1399,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1385 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); 1399 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
1386} 1400}
1387 1401
1388static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 1402static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
1403 struct link_params *params,
1389 u16 ieee_fc) 1404 u16 ieee_fc)
1390{ 1405{
1391 struct bnx2x *bp = params->bp; 1406 struct bnx2x *bp = params->bp;
1392 u16 val; 1407 u16 val;
1393 /* for AN, we are always publishing full duplex */ 1408 /* for AN, we are always publishing full duplex */
1394 1409
1395 CL45_WR_OVER_CL22(bp, params->port, 1410 CL45_WR_OVER_CL22(bp, phy,
1396 params->phy_addr,
1397 MDIO_REG_BANK_COMBO_IEEE0, 1411 MDIO_REG_BANK_COMBO_IEEE0,
1398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 1412 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
1399 CL45_RD_OVER_CL22(bp, params->port, 1413 CL45_RD_OVER_CL22(bp, phy,
1400 params->phy_addr,
1401 MDIO_REG_BANK_CL73_IEEEB1, 1414 MDIO_REG_BANK_CL73_IEEEB1,
1402 MDIO_CL73_IEEEB1_AN_ADV1, &val); 1415 MDIO_CL73_IEEEB1_AN_ADV1, &val);
1403 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; 1416 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
1404 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); 1417 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
1405 CL45_WR_OVER_CL22(bp, params->port, 1418 CL45_WR_OVER_CL22(bp, phy,
1406 params->phy_addr,
1407 MDIO_REG_BANK_CL73_IEEEB1, 1419 MDIO_REG_BANK_CL73_IEEEB1,
1408 MDIO_CL73_IEEEB1_AN_ADV1, val); 1420 MDIO_CL73_IEEEB1_AN_ADV1, val);
1409} 1421}
1410 1422
1411static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73) 1423static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
1424 struct link_params *params,
1425 u8 enable_cl73)
1412{ 1426{
1413 struct bnx2x *bp = params->bp; 1427 struct bnx2x *bp = params->bp;
1414 u16 mii_control; 1428 u16 mii_control;
@@ -1417,14 +1431,12 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1417 /* Enable and restart BAM/CL37 aneg */ 1431 /* Enable and restart BAM/CL37 aneg */
1418 1432
1419 if (enable_cl73) { 1433 if (enable_cl73) {
1420 CL45_RD_OVER_CL22(bp, params->port, 1434 CL45_RD_OVER_CL22(bp, phy,
1421 params->phy_addr,
1422 MDIO_REG_BANK_CL73_IEEEB0, 1435 MDIO_REG_BANK_CL73_IEEEB0,
1423 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 1436 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1424 &mii_control); 1437 &mii_control);
1425 1438
1426 CL45_WR_OVER_CL22(bp, params->port, 1439 CL45_WR_OVER_CL22(bp, phy,
1427 params->phy_addr,
1428 MDIO_REG_BANK_CL73_IEEEB0, 1440 MDIO_REG_BANK_CL73_IEEEB0,
1429 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 1441 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1430 (mii_control | 1442 (mii_control |
@@ -1432,16 +1444,14 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1432 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); 1444 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
1433 } else { 1445 } else {
1434 1446
1435 CL45_RD_OVER_CL22(bp, params->port, 1447 CL45_RD_OVER_CL22(bp, phy,
1436 params->phy_addr,
1437 MDIO_REG_BANK_COMBO_IEEE0, 1448 MDIO_REG_BANK_COMBO_IEEE0,
1438 MDIO_COMBO_IEEE0_MII_CONTROL, 1449 MDIO_COMBO_IEEE0_MII_CONTROL,
1439 &mii_control); 1450 &mii_control);
1440 DP(NETIF_MSG_LINK, 1451 DP(NETIF_MSG_LINK,
1441 "bnx2x_restart_autoneg mii_control before = 0x%x\n", 1452 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
1442 mii_control); 1453 mii_control);
1443 CL45_WR_OVER_CL22(bp, params->port, 1454 CL45_WR_OVER_CL22(bp, phy,
1444 params->phy_addr,
1445 MDIO_REG_BANK_COMBO_IEEE0, 1455 MDIO_REG_BANK_COMBO_IEEE0,
1446 MDIO_COMBO_IEEE0_MII_CONTROL, 1456 MDIO_COMBO_IEEE0_MII_CONTROL,
1447 (mii_control | 1457 (mii_control |
@@ -1450,7 +1460,8 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1450 } 1460 }
1451} 1461}
1452 1462
1453static void bnx2x_initialize_sgmii_process(struct link_params *params, 1463static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
1464 struct link_params *params,
1454 struct link_vars *vars) 1465 struct link_vars *vars)
1455{ 1466{
1456 struct bnx2x *bp = params->bp; 1467 struct bnx2x *bp = params->bp;
@@ -1458,8 +1469,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1458 1469
1459 /* in SGMII mode, the unicore is always slave */ 1470 /* in SGMII mode, the unicore is always slave */
1460 1471
1461 CL45_RD_OVER_CL22(bp, params->port, 1472 CL45_RD_OVER_CL22(bp, phy,
1462 params->phy_addr,
1463 MDIO_REG_BANK_SERDES_DIGITAL, 1473 MDIO_REG_BANK_SERDES_DIGITAL,
1464 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 1474 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1465 &control1); 1475 &control1);
@@ -1468,8 +1478,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1468 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 1478 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
1469 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 1479 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
1470 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 1480 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
1471 CL45_WR_OVER_CL22(bp, params->port, 1481 CL45_WR_OVER_CL22(bp, phy,
1472 params->phy_addr,
1473 MDIO_REG_BANK_SERDES_DIGITAL, 1482 MDIO_REG_BANK_SERDES_DIGITAL,
1474 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 1483 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1475 control1); 1484 control1);
@@ -1479,8 +1488,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1479 /* set speed, disable autoneg */ 1488 /* set speed, disable autoneg */
1480 u16 mii_control; 1489 u16 mii_control;
1481 1490
1482 CL45_RD_OVER_CL22(bp, params->port, 1491 CL45_RD_OVER_CL22(bp, phy,
1483 params->phy_addr,
1484 MDIO_REG_BANK_COMBO_IEEE0, 1492 MDIO_REG_BANK_COMBO_IEEE0,
1485 MDIO_COMBO_IEEE0_MII_CONTROL, 1493 MDIO_COMBO_IEEE0_MII_CONTROL,
1486 &mii_control); 1494 &mii_control);
@@ -1508,18 +1516,17 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1508 } 1516 }
1509 1517
1510 /* setting the full duplex */ 1518 /* setting the full duplex */
1511 if (params->req_duplex == DUPLEX_FULL) 1519 if (phy->req_duplex == DUPLEX_FULL)
1512 mii_control |= 1520 mii_control |=
1513 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 1521 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1514 CL45_WR_OVER_CL22(bp, params->port, 1522 CL45_WR_OVER_CL22(bp, phy,
1515 params->phy_addr,
1516 MDIO_REG_BANK_COMBO_IEEE0, 1523 MDIO_REG_BANK_COMBO_IEEE0,
1517 MDIO_COMBO_IEEE0_MII_CONTROL, 1524 MDIO_COMBO_IEEE0_MII_CONTROL,
1518 mii_control); 1525 mii_control);
1519 1526
1520 } else { /* AN mode */ 1527 } else { /* AN mode */
1521 /* enable and restart AN */ 1528 /* enable and restart AN */
1522 bnx2x_restart_autoneg(params, 0); 1529 bnx2x_restart_autoneg(phy, params, 0);
1523 } 1530 }
1524} 1531}
1525 1532
@@ -1549,91 +1556,24 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1549 default: 1556 default:
1550 break; 1557 break;
1551 } 1558 }
1559 if (pause_result & (1<<0))
1560 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
1561 if (pause_result & (1<<1))
1562 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
1552} 1563}
1553 1564
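The two added lines export the link partner's advertised pause bits through link_status. The nibble layout of pause_result is fixed by the shifts at the call sites: bits 3:2 hold the local PAUSE/ASM_DIR advertisement and bits 1:0 the partner's, so bit 0 is the partner's symmetric-pause bit and bit 1 its asymmetric-pause bit. Packing it in isolation; the PAUSE/ASM_DIR positions are inferred from the >>8 and >>10 shifts used below:

#include <stdint.h>

/* PAUSE = bit 10, ASM_DIR = bit 11 in the CL73 advertisement words. */
static unsigned int pack_pause_result(uint16_t ld_adv, uint16_t lp_adv)
{
	const uint16_t mask = 3u << 10;

	return ((unsigned int)(ld_adv & mask) >> 8) |	/* local   -> bits 3:2 */
	       ((unsigned int)(lp_adv & mask) >> 10);	/* partner -> bits 1:0 */
}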
1554static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params, 1565static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
1555 struct link_vars *vars) 1566 struct link_params *params)
1556{
1557 struct bnx2x *bp = params->bp;
1558 u8 ext_phy_addr;
1559 u16 ld_pause; /* local */
1560 u16 lp_pause; /* link partner */
1561 u16 an_complete; /* AN complete */
1562 u16 pause_result;
1563 u8 ret = 0;
1564 u32 ext_phy_type;
1565 u8 port = params->port;
1566 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
1567 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
1568 /* read twice */
1569
1570 bnx2x_cl45_read(bp, port,
1571 ext_phy_type,
1572 ext_phy_addr,
1573 MDIO_AN_DEVAD,
1574 MDIO_AN_REG_STATUS, &an_complete);
1575 bnx2x_cl45_read(bp, port,
1576 ext_phy_type,
1577 ext_phy_addr,
1578 MDIO_AN_DEVAD,
1579 MDIO_AN_REG_STATUS, &an_complete);
1580
1581 if (an_complete & MDIO_AN_REG_STATUS_AN_COMPLETE) {
1582 ret = 1;
1583 bnx2x_cl45_read(bp, port,
1584 ext_phy_type,
1585 ext_phy_addr,
1586 MDIO_AN_DEVAD,
1587 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
1588 bnx2x_cl45_read(bp, port,
1589 ext_phy_type,
1590 ext_phy_addr,
1591 MDIO_AN_DEVAD,
1592 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
1593 pause_result = (ld_pause &
1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
1595 pause_result |= (lp_pause &
1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
1598 pause_result);
1599 bnx2x_pause_resolve(vars, pause_result);
1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
1601 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1602 bnx2x_cl45_read(bp, port,
1603 ext_phy_type,
1604 ext_phy_addr,
1605 MDIO_AN_DEVAD,
1606 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
1607
1608 bnx2x_cl45_read(bp, port,
1609 ext_phy_type,
1610 ext_phy_addr,
1611 MDIO_AN_DEVAD,
1612 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
1613 pause_result = (ld_pause &
1614 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
1615 pause_result |= (lp_pause &
1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1617
1618 bnx2x_pause_resolve(vars, pause_result);
1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
1620 pause_result);
1621 }
1622 }
1623 return ret;
1624}
1625
1626static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1627{ 1567{
1628 struct bnx2x *bp = params->bp; 1568 struct bnx2x *bp = params->bp;
1629 u16 pd_10g, status2_1000x; 1569 u16 pd_10g, status2_1000x;
1630 CL45_RD_OVER_CL22(bp, params->port, 1570 if (phy->req_line_speed != SPEED_AUTO_NEG)
1631 params->phy_addr, 1571 return 0;
1572 CL45_RD_OVER_CL22(bp, phy,
1632 MDIO_REG_BANK_SERDES_DIGITAL, 1573 MDIO_REG_BANK_SERDES_DIGITAL,
1633 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 1574 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1634 &status2_1000x); 1575 &status2_1000x);
1635 CL45_RD_OVER_CL22(bp, params->port, 1576 CL45_RD_OVER_CL22(bp, phy,
1636 params->phy_addr,
1637 MDIO_REG_BANK_SERDES_DIGITAL, 1577 MDIO_REG_BANK_SERDES_DIGITAL,
1638 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 1578 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1639 &status2_1000x); 1579 &status2_1000x);
@@ -1643,8 +1583,7 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1643 return 1; 1583 return 1;
1644 } 1584 }
1645 1585
1646 CL45_RD_OVER_CL22(bp, params->port, 1586 CL45_RD_OVER_CL22(bp, phy,
1647 params->phy_addr,
1648 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1587 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1649 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, 1588 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
1650 &pd_10g); 1589 &pd_10g);
@@ -1657,9 +1596,10 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1657 return 0; 1596 return 0;
1658} 1597}
1659 1598
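bnx2x_direct_parallel_detect_used() now bails out early unless the requested speed is auto-negotiation, and it still reads the 1000X STATUS2 register twice back to back, presumably the usual idiom for latched status bits, where the first read returns stale latched state. The idiom on its own, with a hypothetical rd16() register accessor:

#include <stdint.h>

static uint16_t read_current(uint16_t (*rd16)(uint32_t reg), uint32_t reg)
{
	(void)rd16(reg);	/* discard the latched, possibly stale value */
	return rd16(reg);	/* the second read reflects current state */
}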
1660static void bnx2x_flow_ctrl_resolve(struct link_params *params, 1599static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
1661 struct link_vars *vars, 1600 struct link_params *params,
1662 u32 gp_status) 1601 struct link_vars *vars,
1602 u32 gp_status)
1663{ 1603{
1664 struct bnx2x *bp = params->bp; 1604 struct bnx2x *bp = params->bp;
1665 u16 ld_pause; /* local driver */ 1605 u16 ld_pause; /* local driver */
@@ -1669,12 +1609,13 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1669 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 1609 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1670 1610
1671 /* resolve from gp_status in case of AN complete and not sgmii */ 1611 /* resolve from gp_status in case of AN complete and not sgmii */
1672 if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && 1612 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
1673 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && 1613 vars->flow_ctrl = phy->req_flow_ctrl;
1674 (!(vars->phy_flags & PHY_SGMII_FLAG)) && 1614 else if (phy->req_line_speed != SPEED_AUTO_NEG)
1675 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1615 vars->flow_ctrl = params->req_fc_auto_adv;
1676 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) { 1616 else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1677 if (bnx2x_direct_parallel_detect_used(params)) { 1617 (!(vars->phy_flags & PHY_SGMII_FLAG))) {
1618 if (bnx2x_direct_parallel_detect_used(phy, params)) {
1678 vars->flow_ctrl = params->req_fc_auto_adv; 1619 vars->flow_ctrl = params->req_fc_auto_adv;
1679 return; 1620 return;
1680 } 1621 }
@@ -1684,13 +1625,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1684 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 1625 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1685 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { 1626 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
1686 1627
1687 CL45_RD_OVER_CL22(bp, params->port, 1628 CL45_RD_OVER_CL22(bp, phy,
1688 params->phy_addr,
1689 MDIO_REG_BANK_CL73_IEEEB1, 1629 MDIO_REG_BANK_CL73_IEEEB1,
1690 MDIO_CL73_IEEEB1_AN_ADV1, 1630 MDIO_CL73_IEEEB1_AN_ADV1,
1691 &ld_pause); 1631 &ld_pause);
1692 CL45_RD_OVER_CL22(bp, params->port, 1632 CL45_RD_OVER_CL22(bp, phy,
1693 params->phy_addr,
1694 MDIO_REG_BANK_CL73_IEEEB1, 1633 MDIO_REG_BANK_CL73_IEEEB1,
1695 MDIO_CL73_IEEEB1_AN_LP_ADV1, 1634 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1696 &lp_pause); 1635 &lp_pause);
@@ -1703,14 +1642,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1703 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", 1642 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
1704 pause_result); 1643 pause_result);
1705 } else { 1644 } else {
1706 1645 CL45_RD_OVER_CL22(bp, phy,
1707 CL45_RD_OVER_CL22(bp, params->port,
1708 params->phy_addr,
1709 MDIO_REG_BANK_COMBO_IEEE0, 1646 MDIO_REG_BANK_COMBO_IEEE0,
1710 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 1647 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1711 &ld_pause); 1648 &ld_pause);
1712 CL45_RD_OVER_CL22(bp, params->port, 1649 CL45_RD_OVER_CL22(bp, phy,
1713 params->phy_addr,
1714 MDIO_REG_BANK_COMBO_IEEE0, 1650 MDIO_REG_BANK_COMBO_IEEE0,
1715 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 1651 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1716 &lp_pause); 1652 &lp_pause);
@@ -1722,26 +1658,18 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1722 pause_result); 1658 pause_result);
1723 } 1659 }
1724 bnx2x_pause_resolve(vars, pause_result); 1660 bnx2x_pause_resolve(vars, pause_result);
1725 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1726 (bnx2x_ext_phy_resolve_fc(params, vars))) {
1727 return;
1728 } else {
1729 if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
1730 vars->flow_ctrl = params->req_fc_auto_adv;
1731 else
1732 vars->flow_ctrl = params->req_flow_ctrl;
1733 } 1661 }
1734 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); 1662 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
1735} 1663}
1736 1664
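The resolve logic above now encodes a clear precedence: a forced flow-control request wins outright; a forced line speed with flow control left on auto falls back to req_fc_auto_adv; only full autoneg on a direct, single-media path is resolved from the CL73 (or CL37) advertisement registers, with parallel detection short-circuiting to req_fc_auto_adv as well. The same precedence as a tiny decision function (a sketch; negotiated stands for the bnx2x_pause_resolve() outcome):

enum fc { FC_NONE, FC_RX, FC_TX, FC_BOTH, FC_AUTO };

static enum fc resolve_fc(enum fc req, int speed_is_autoneg, int an_complete,
			  enum fc auto_adv, enum fc negotiated)
{
	if (req != FC_AUTO)
		return req;		/* a forced setting wins */
	if (!speed_is_autoneg)
		return auto_adv;	/* forced speed: use the auto-adv default */
	return an_complete ? negotiated : FC_NONE;
}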
1737static void bnx2x_check_fallback_to_cl37(struct link_params *params) 1665static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
1666 struct link_params *params)
1738{ 1667{
1739 struct bnx2x *bp = params->bp; 1668 struct bnx2x *bp = params->bp;
1740 u16 rx_status, ustat_val, cl37_fsm_recieved; 1669 u16 rx_status, ustat_val, cl37_fsm_recieved;
1741 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); 1670 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
1742 /* Step 1: Make sure signal is detected */ 1671 /* Step 1: Make sure signal is detected */
1743 CL45_RD_OVER_CL22(bp, params->port, 1672 CL45_RD_OVER_CL22(bp, phy,
1744 params->phy_addr,
1745 MDIO_REG_BANK_RX0, 1673 MDIO_REG_BANK_RX0,
1746 MDIO_RX0_RX_STATUS, 1674 MDIO_RX0_RX_STATUS,
1747 &rx_status); 1675 &rx_status);
@@ -1749,16 +1677,14 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1749 (MDIO_RX0_RX_STATUS_SIGDET)) { 1677 (MDIO_RX0_RX_STATUS_SIGDET)) {
1750 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73. " 1678 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73. "
1751 "rx_status(0x80b0) = 0x%x\n", rx_status); 1679 "rx_status(0x80b0) = 0x%x\n", rx_status);
1752 CL45_WR_OVER_CL22(bp, params->port, 1680 CL45_WR_OVER_CL22(bp, phy,
1753 params->phy_addr,
1754 MDIO_REG_BANK_CL73_IEEEB0, 1681 MDIO_REG_BANK_CL73_IEEEB0,
1755 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 1682 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1756 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); 1683 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
1757 return; 1684 return;
1758 } 1685 }
1759 /* Step 2: Check CL73 state machine */ 1686 /* Step 2: Check CL73 state machine */
1760 CL45_RD_OVER_CL22(bp, params->port, 1687 CL45_RD_OVER_CL22(bp, phy,
1761 params->phy_addr,
1762 MDIO_REG_BANK_CL73_USERB0, 1688 MDIO_REG_BANK_CL73_USERB0,
1763 MDIO_CL73_USERB0_CL73_USTAT1, 1689 MDIO_CL73_USERB0_CL73_USTAT1,
1764 &ustat_val); 1690 &ustat_val);
@@ -1773,8 +1699,7 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1773 } 1699 }
1774 /* Step 3: Check CL37 Message Pages received to indicate LP 1700 /* Step 3: Check CL37 Message Pages received to indicate LP
1775 supports only CL37 */ 1701 supports only CL37 */
1776 CL45_RD_OVER_CL22(bp, params->port, 1702 CL45_RD_OVER_CL22(bp, phy,
1777 params->phy_addr,
1778 MDIO_REG_BANK_REMOTE_PHY, 1703 MDIO_REG_BANK_REMOTE_PHY,
1779 MDIO_REMOTE_PHY_MISC_RX_STATUS, 1704 MDIO_REMOTE_PHY_MISC_RX_STATUS,
1780 &cl37_fsm_recieved); 1705 &cl37_fsm_recieved);
@@ -1792,25 +1717,45 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1792 connected to a device which does not support cl73, but does support 1717 connected to a device which does not support cl73, but does support
1793 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */ 1718 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
1794 /* Disable CL73 */ 1719 /* Disable CL73 */
1795 CL45_WR_OVER_CL22(bp, params->port, 1720 CL45_WR_OVER_CL22(bp, phy,
1796 params->phy_addr,
1797 MDIO_REG_BANK_CL73_IEEEB0, 1721 MDIO_REG_BANK_CL73_IEEEB0,
1798 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 1722 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1799 0); 1723 0);
1800 /* Restart CL37 autoneg */ 1724 /* Restart CL37 autoneg */
1801 bnx2x_restart_autoneg(params, 0); 1725 bnx2x_restart_autoneg(phy, params, 0);
1802 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); 1726 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
1803} 1727}
1804static u8 bnx2x_link_settings_status(struct link_params *params, 1728
1805 struct link_vars *vars, 1729static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
1806 u32 gp_status, 1730 struct link_params *params,
1807 u8 ext_phy_link_up) 1731 struct link_vars *vars,
1732 u32 gp_status)
1733{
1734 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1735 vars->link_status |=
1736 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1737
1738 if (bnx2x_direct_parallel_detect_used(phy, params))
1739 vars->link_status |=
1740 LINK_STATUS_PARALLEL_DETECTION_USED;
1741}
1742
1743static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
1744 struct link_params *params,
1745 struct link_vars *vars)
1808{ 1746{
1809 struct bnx2x *bp = params->bp; 1747 struct bnx2x *bp = params->bp;
1810 u16 new_line_speed; 1748 u16 new_line_speed, gp_status;
1811 u8 rc = 0; 1749 u8 rc = 0;
1812 vars->link_status = 0;
1813 1750
1751 /* Read gp_status */
1752 CL45_RD_OVER_CL22(bp, phy,
1753 MDIO_REG_BANK_GP_STATUS,
1754 MDIO_GP_STATUS_TOP_AN_STATUS1,
1755 &gp_status);
1756
1757 if (phy->req_line_speed == SPEED_AUTO_NEG)
1758 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1814 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { 1759 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1815 DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n", 1760 DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
1816 gp_status); 1761 gp_status);
@@ -1823,7 +1768,12 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1823 else 1768 else
1824 vars->duplex = DUPLEX_HALF; 1769 vars->duplex = DUPLEX_HALF;
1825 1770
1826 bnx2x_flow_ctrl_resolve(params, vars, gp_status); 1771 if (SINGLE_MEDIA_DIRECT(params)) {
1772 bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
1773 if (phy->req_line_speed == SPEED_AUTO_NEG)
1774 bnx2x_xgxs_an_resolve(phy, params, vars,
1775 gp_status);
1776 }
1827 1777
1828 switch (gp_status & GP_STATUS_SPEED_MASK) { 1778 switch (gp_status & GP_STATUS_SPEED_MASK) {
1829 case GP_STATUS_10M: 1779 case GP_STATUS_10M:
@@ -1905,56 +1855,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1905 return -EINVAL; 1855 return -EINVAL;
1906 } 1856 }
1907 1857
1908 /* Upon link speed change, set the NIG into drain mode.
1909 This deals with a possible FIFO glitch due to the clock change
1910 when speed is decreased without a link-down indication */
1911 if (new_line_speed != vars->line_speed) {
1912 if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) !=
1913 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT &&
1914 ext_phy_link_up) {
1915 DP(NETIF_MSG_LINK, "Internal link speed %d is"
1916 " different than the external"
1917 " link speed %d\n", new_line_speed,
1918 vars->line_speed);
1919 vars->phy_link_up = 0;
1920 return 0;
1921 }
1922 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
1923 + params->port*4, 0);
1924 msleep(1);
1925 }
1926 vars->line_speed = new_line_speed; 1858 vars->line_speed = new_line_speed;
1927 vars->link_status |= LINK_STATUS_SERDES_LINK;
1928
1929 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1930 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
1932 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1933 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
1934 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
1936 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1937 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
1938 vars->autoneg = AUTO_NEG_ENABLED;
1939
1940 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
1941 vars->autoneg |= AUTO_NEG_COMPLETE;
1942 vars->link_status |=
1943 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1944 }
1945
1946 vars->autoneg |= AUTO_NEG_PARALLEL_DETECTION_USED;
1947 vars->link_status |=
1948 LINK_STATUS_PARALLEL_DETECTION_USED;
1949
1950 }
1951 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1952 vars->link_status |=
1953 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1954
1955 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1956 vars->link_status |=
1957 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1958 1859
1959 } else { /* link_down */ 1860 } else { /* link_down */
1960 DP(NETIF_MSG_LINK, "phy link down\n"); 1861 DP(NETIF_MSG_LINK, "phy link down\n");
@@ -1963,38 +1864,32 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1963 1864
1964 vars->duplex = DUPLEX_FULL; 1865 vars->duplex = DUPLEX_FULL;
1965 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 1866 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1966 vars->autoneg = AUTO_NEG_DISABLED;
1967 vars->mac_type = MAC_TYPE_NONE; 1867 vars->mac_type = MAC_TYPE_NONE;
1968 1868
1969 if ((params->req_line_speed == SPEED_AUTO_NEG) && 1869 if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
1970 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1870 SINGLE_MEDIA_DIRECT(params)) {
1971 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT))) {
1972 /* Check signal is detected */ 1871 /* Check signal is detected */
1973 bnx2x_check_fallback_to_cl37(params); 1872 bnx2x_check_fallback_to_cl37(phy, params);
1974 } 1873 }
1975 } 1874 }
1976 1875
1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n", 1876 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
1978 gp_status, vars->phy_link_up, vars->line_speed); 1877 gp_status, vars->phy_link_up, vars->line_speed);
1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x" 1878 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
1980 " autoneg 0x%x\n", 1879 vars->duplex, vars->flow_ctrl, vars->link_status);
1981 vars->duplex,
1982 vars->flow_ctrl, vars->autoneg);
1983 DP(NETIF_MSG_LINK, "link_status 0x%x\n", vars->link_status);
1984
1985 return rc; 1880 return rc;
1986} 1881}
1987 1882
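Two behavioural changes ride along with the signature change: gp_status is now read inside the function rather than passed in, and the autoneg bookkeeping plus flow-control resolution are gated on SINGLE_MEDIA_DIRECT(), while the NIG drain-on-speed-change handling disappears from this path entirely. The gating predicate is defined outside this hunk; a sketch of its assumed semantics, namely that the board's only PHY is the integrated one, so the XGXS faces the media directly:

struct params_view { unsigned int num_phys; };

/* Assumed meaning of SINGLE_MEDIA_DIRECT(): exactly one configured PHY,
 * i.e. no external PHY between the XGXS and the media. */
static int single_media_direct(const struct params_view *p)
{
	return p->num_phys == 1;
}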
1988static void bnx2x_set_gmii_tx_driver(struct link_params *params) 1883static void bnx2x_set_gmii_tx_driver(struct link_params *params)
1989{ 1884{
1990 struct bnx2x *bp = params->bp; 1885 struct bnx2x *bp = params->bp;
1886 struct bnx2x_phy *phy = &params->phy[INT_PHY];
1991 u16 lp_up2; 1887 u16 lp_up2;
1992 u16 tx_driver; 1888 u16 tx_driver;
1993 u16 bank; 1889 u16 bank;
1994 1890
1995 /* read precomp */ 1891 /* read precomp */
1996 CL45_RD_OVER_CL22(bp, params->port, 1892 CL45_RD_OVER_CL22(bp, phy,
1997 params->phy_addr,
1998 MDIO_REG_BANK_OVER_1G, 1893 MDIO_REG_BANK_OVER_1G,
1999 MDIO_OVER_1G_LP_UP2, &lp_up2); 1894 MDIO_OVER_1G_LP_UP2, &lp_up2);
2000 1895
@@ -2008,8 +1903,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2008 1903
2009 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; 1904 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
2010 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { 1905 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
2011 CL45_RD_OVER_CL22(bp, params->port, 1906 CL45_RD_OVER_CL22(bp, phy,
2012 params->phy_addr,
2013 bank, 1907 bank,
2014 MDIO_TX0_TX_DRIVER, &tx_driver); 1908 MDIO_TX0_TX_DRIVER, &tx_driver);
2015 1909
@@ -2018,8 +1912,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2018 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 1912 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
2019 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 1913 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2020 tx_driver |= lp_up2; 1914 tx_driver |= lp_up2;
2021 CL45_WR_OVER_CL22(bp, params->port, 1915 CL45_WR_OVER_CL22(bp, phy,
2022 params->phy_addr,
2023 bank, 1916 bank,
2024 MDIO_TX0_TX_DRIVER, tx_driver); 1917 MDIO_TX0_TX_DRIVER, tx_driver);
2025 } 1918 }
@@ -2027,7 +1920,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2027} 1920}
2028 1921
2029static u8 bnx2x_emac_program(struct link_params *params, 1922static u8 bnx2x_emac_program(struct link_params *params,
2030 u32 line_speed, u32 duplex) 1923 struct link_vars *vars)
2031{ 1924{
2032 struct bnx2x *bp = params->bp; 1925 struct bnx2x *bp = params->bp;
2033 u8 port = params->port; 1926 u8 port = params->port;
@@ -2039,7 +1932,7 @@ static u8 bnx2x_emac_program(struct link_params *params,
2039 (EMAC_MODE_25G_MODE | 1932 (EMAC_MODE_25G_MODE |
2040 EMAC_MODE_PORT_MII_10M | 1933 EMAC_MODE_PORT_MII_10M |
2041 EMAC_MODE_HALF_DUPLEX)); 1934 EMAC_MODE_HALF_DUPLEX));
2042 switch (line_speed) { 1935 switch (vars->line_speed) {
2043 case SPEED_10: 1936 case SPEED_10:
2044 mode |= EMAC_MODE_PORT_MII_10M; 1937 mode |= EMAC_MODE_PORT_MII_10M;
2045 break; 1938 break;
@@ -2058,371 +1951,1369 @@ static u8 bnx2x_emac_program(struct link_params *params,
2058 1951
2059 default: 1952 default:
2060 /* 10G not valid for EMAC */ 1953 /* 10G not valid for EMAC */
2061 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed); 1954 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
1955 vars->line_speed);
2062 return -EINVAL; 1956 return -EINVAL;
2063 } 1957 }
2064 1958
2065 if (duplex == DUPLEX_HALF) 1959 if (vars->duplex == DUPLEX_HALF)
2066 mode |= EMAC_MODE_HALF_DUPLEX; 1960 mode |= EMAC_MODE_HALF_DUPLEX;
2067 bnx2x_bits_en(bp, 1961 bnx2x_bits_en(bp,
2068 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 1962 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2069 mode); 1963 mode);
2070 1964
2071 bnx2x_set_led(params, LED_MODE_OPER, line_speed); 1965 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
2072 return 0; 1966 return 0;
2073} 1967}
2074 1968
2075/*****************************************************************************/ 1969static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2076/* External Phy section */ 1970 struct link_params *params)
2077/*****************************************************************************/
2078void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
2079{ 1971{
2080 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1972
2081 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 1973 u16 bank, i = 0;
2082 msleep(1); 1974 struct bnx2x *bp = params->bp;
2083 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1975
2084 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 1976 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
1977 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
1978 CL45_WR_OVER_CL22(bp, phy,
1979 bank,
1980 MDIO_RX0_RX_EQ_BOOST,
1981 phy->rx_preemphasis[i]);
1982 }
1983
1984 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
1985 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
1986 CL45_WR_OVER_CL22(bp, phy,
1987 bank,
1988 MDIO_TX0_TX_DRIVER,
1989 phy->tx_preemphasis[i]);
1990 }
2085} 1991}
2086 1992
2087static void bnx2x_ext_phy_reset(struct link_params *params, 1993static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2088 struct link_vars *vars) 1994 struct link_params *params,
1995 struct link_vars *vars)
2089{ 1996{
2090 struct bnx2x *bp = params->bp; 1997 struct bnx2x *bp = params->bp;
2091 u32 ext_phy_type; 1998 u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
2092 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 1999 (params->loopback_mode == LOOPBACK_XGXS));
2000 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
2001 if (SINGLE_MEDIA_DIRECT(params) &&
2002 (params->feature_config_flags &
2003 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
2004 bnx2x_set_preemphasis(phy, params);
2093 2005
2094 DP(NETIF_MSG_LINK, "Port %x: bnx2x_ext_phy_reset\n", params->port); 2006 /* forced speed requested? */
2095 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 2007 if (vars->line_speed != SPEED_AUTO_NEG ||
 2096 	/* The PHY reset is controlled by GPIO 1			 2008 		    (SINGLE_MEDIA_DIRECT(params) &&
2097 * Give it 1ms of reset pulse 2009 params->loopback_mode == LOOPBACK_EXT)) {
2098 */ 2010 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2099 if (vars->phy_flags & PHY_XGXS_FLAG) {
2100 2011
2101 switch (ext_phy_type) { 2012 /* disable autoneg */
2102 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 2013 bnx2x_set_autoneg(phy, params, vars, 0);
2103 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2104 break;
2105 2014
2106 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 2015 /* program speed and duplex */
2107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 2016 bnx2x_program_serdes(phy, params, vars);
2108 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
2109 2017
2110 /* Restore normal power mode*/ 2018 } else { /* AN_mode */
2111 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2019 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2112 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2113 params->port);
2114 2020
2115 /* HW reset */ 2021 /* AN enabled */
2116 bnx2x_ext_phy_hw_reset(bp, params->port); 2022 bnx2x_set_brcm_cl37_advertisment(phy, params);
2117 2023
2118 bnx2x_cl45_write(bp, params->port, 2024 /* program duplex & pause advertisement (for aneg) */
2119 ext_phy_type, 2025 bnx2x_set_ieee_aneg_advertisment(phy, params,
2120 ext_phy_addr, 2026 vars->ieee_fc);
2121 MDIO_PMA_DEVAD,
2122 MDIO_PMA_REG_CTRL, 0xa040);
2123 break;
2124 2027
2125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 2028 /* enable autoneg */
2126 break; 2029 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
2127 2030
2128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 2031 /* enable and restart AN */
2032 bnx2x_restart_autoneg(phy, params, enable_cl73);
2033 }
2129 2034
2130 /* Restore normal power mode*/ 2035 } else { /* SGMII mode */
2131 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2036 DP(NETIF_MSG_LINK, "SGMII\n");
2132 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2133 params->port);
2134 2037
2135 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2038 bnx2x_initialize_sgmii_process(phy, params, vars);
2136 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2039 }
2137 params->port); 2040}
2138 2041
2139 bnx2x_cl45_write(bp, params->port, 2042static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
2140 ext_phy_type, 2043 struct link_params *params,
2141 ext_phy_addr, 2044 struct link_vars *vars)
2142 MDIO_PMA_DEVAD, 2045{
2143 MDIO_PMA_REG_CTRL, 2046 u8 rc;
2144 1<<15); 2047 vars->phy_flags |= PHY_SGMII_FLAG;
2145 break; 2048 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2049 bnx2x_set_aer_mmd(params, phy);
2050 rc = bnx2x_reset_unicore(params, phy, 1);
2051 /* reset the SerDes and wait for reset bit return low */
2052 if (rc != 0)
2053 return rc;
2054 bnx2x_set_aer_mmd(params, phy);
2146 2055
2147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 2056 return rc;
2148 DP(NETIF_MSG_LINK, "XGXS 8072\n"); 2057}
2149 2058
2150 /* Unset Low Power Mode and SW reset */ 2059static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2151 /* Restore normal power mode*/ 2060 struct link_params *params,
2152 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2061 struct link_vars *vars)
2153 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2062{
2154 params->port); 2063 u8 rc;
2064 vars->phy_flags = PHY_XGXS_FLAG;
2065 if ((phy->req_line_speed &&
2066 ((phy->req_line_speed == SPEED_100) ||
2067 (phy->req_line_speed == SPEED_10))) ||
2068 (!phy->req_line_speed &&
2069 (phy->speed_cap_mask >=
2070 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
2071 (phy->speed_cap_mask <
2072 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
2073 ))
2074 vars->phy_flags |= PHY_SGMII_FLAG;
2075 else
2076 vars->phy_flags &= ~PHY_SGMII_FLAG;
2155 2077
2156 bnx2x_cl45_write(bp, params->port, 2078 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2157 ext_phy_type, 2079 bnx2x_set_aer_mmd(params, phy);
2158 ext_phy_addr, 2080 bnx2x_set_master_ln(params, phy);
2159 MDIO_PMA_DEVAD,
2160 MDIO_PMA_REG_CTRL,
2161 1<<15);
2162 break;
2163 2081
2164 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 2082 rc = bnx2x_reset_unicore(params, phy, 0);
2165 DP(NETIF_MSG_LINK, "XGXS 8073\n"); 2083 /* reset the SerDes and wait for reset bit return low */
2084 if (rc != 0)
2085 return rc;
2166 2086
2167 /* Restore normal power mode*/ 2087 bnx2x_set_aer_mmd(params, phy);
2168 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2169 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2170 params->port);
2171 2088
2172 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2089 /* setting the masterLn_def again after the reset */
2173 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2090 bnx2x_set_master_ln(params, phy);
2174 params->port); 2091 bnx2x_set_swap_lanes(params, phy);
2092
2093 return rc;
2094}
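
The SGMII-vs-XGXS decision at the top of bnx2x_init_xgxs() above boils down to: a forced 10/100 Mbps speed, or an autoneg capability mask that tops out below 1G, selects SGMII. A minimal standalone sketch, assuming req_line_speed of 0 denotes autoneg and with the two CAP_* constants standing in for the real PORT_HW_CFG_SPEED_CAPABILITY_D0_* values:

#include <stdbool.h>
#include <stdint.h>

#define CAP_10M_FULL 0x1 /* placeholder encoding */
#define CAP_1G       0x8 /* placeholder encoding */

static bool use_sgmii(uint32_t req_speed, uint32_t speed_cap_mask)
{
	/* forced low speed always goes through SGMII */
	if (req_speed == 10 || req_speed == 100)
		return true;
	/* autoneg whose capabilities end below 1G also does */
	return req_speed == 0 &&
	       speed_cap_mask >= CAP_10M_FULL &&
	       speed_cap_mask < CAP_1G;
}
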
2095
2096static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2097 struct bnx2x_phy *phy)
2098{
2099 u16 cnt, ctrl;
 2100 	/* Wait for soft reset to get cleared up to 1 sec */
2101 for (cnt = 0; cnt < 1000; cnt++) {
2102 bnx2x_cl45_read(bp, phy,
2103 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl);
2104 if (!(ctrl & (1<<15)))
2175 break; 2105 break;
2106 msleep(1);
2107 }
2108 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
2109 return cnt;
2110}
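
bnx2x_wait_reset_complete() above is the classic bounded-poll pattern: read the PMA control register until the soft-reset bit (1<<15) drops, giving up after roughly one second. A user-space sketch with the register read and the sleep injected as callbacks, standing in for bnx2x_cl45_read() and msleep():

#include <stdint.h>

static uint16_t wait_reset_complete(uint16_t (*read_ctrl)(void),
				    void (*sleep_ms)(unsigned int))
{
	uint16_t cnt, ctrl = 0;

	for (cnt = 0; cnt < 1000; cnt++) {
		ctrl = read_ctrl();
		if (!(ctrl & (1u << 15))) /* soft-reset bit cleared */
			break;
		sleep_ms(1);
	}
	return cnt; /* 1000 means the reset never cleared */
}
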
2176 2111
2177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 2112static void bnx2x_link_int_enable(struct link_params *params)
2178 DP(NETIF_MSG_LINK, "XGXS SFX7101\n"); 2113{
2114 u8 port = params->port;
2115 u32 mask;
2116 struct bnx2x *bp = params->bp;
2179 2117
2180 /* Restore normal power mode*/ 2118 /* setting the status to report on link up
2181 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2119 for either XGXS or SerDes */
2182 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2183 params->port);
2184 2120
2185 /* HW reset */ 2121 if (params->switch_cfg == SWITCH_CFG_10G) {
2186 bnx2x_ext_phy_hw_reset(bp, params->port); 2122 mask = (NIG_MASK_XGXS0_LINK10G |
2187 break; 2123 NIG_MASK_XGXS0_LINK_STATUS);
2124 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
2125 if (!(SINGLE_MEDIA_DIRECT(params)) &&
2126 params->phy[INT_PHY].type !=
2127 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
2128 mask |= NIG_MASK_MI_INT;
2129 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2130 }
2188 2131
2189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 2132 } else { /* SerDes */
2190 /* Restore normal power mode*/ 2133 mask = NIG_MASK_SERDES0_LINK_STATUS;
2191 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2134 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
2192 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2135 if (!(SINGLE_MEDIA_DIRECT(params)) &&
2193 params->port); 2136 params->phy[INT_PHY].type !=
2137 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
2138 mask |= NIG_MASK_MI_INT;
2139 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2140 }
2141 }
2142 bnx2x_bits_en(bp,
2143 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2144 mask);
2194 2145
2195 /* HW reset */ 2146 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
2196 bnx2x_ext_phy_hw_reset(bp, params->port); 2147 (params->switch_cfg == SWITCH_CFG_10G),
2148 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
2149 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
2150 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
2151 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2152 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
2153 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
2154 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2155 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
2156}
2197 2157
2198 bnx2x_cl45_write(bp, params->port, 2158static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2199 ext_phy_type, 2159 u8 exp_mi_int)
2200 ext_phy_addr, 2160{
2201 MDIO_PMA_DEVAD, 2161 u32 latch_status = 0;
2202 MDIO_PMA_REG_CTRL,
2203 1<<15);
2204 break;
2205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
2206 break;
2207 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2208 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
2209 break;
2210 2162
2211 default: 2163 /**
2212 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", 2164 * Disable the MI INT ( external phy int ) by writing 1 to the
2213 params->ext_phy_config); 2165 * status register. Link down indication is high-active-signal,
2214 break; 2166 * so in this case we need to write the status to clear the XOR
2167 */
2168 /* Read Latched signals */
2169 latch_status = REG_RD(bp,
2170 NIG_REG_LATCH_STATUS_0 + port*8);
2171 DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
2172 /* Handle only those with latched-signal=up.*/
2173 if (exp_mi_int)
2174 bnx2x_bits_en(bp,
2175 NIG_REG_STATUS_INTERRUPT_PORT0
2176 + port*4,
2177 NIG_STATUS_EMAC0_MI_INT);
2178 else
2179 bnx2x_bits_dis(bp,
2180 NIG_REG_STATUS_INTERRUPT_PORT0
2181 + port*4,
2182 NIG_STATUS_EMAC0_MI_INT);
2183
2184 if (latch_status & 1) {
2185
2186 /* For all latched-signal=up : Re-Arm Latch signals */
2187 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
2188 (latch_status & 0xfffe) | (latch_status & 1));
2189 }
 2190 	/* For all latched-signal=up, write original_signal to status */
2191}
2192
2193static void bnx2x_link_int_ack(struct link_params *params,
2194 struct link_vars *vars, u8 is_10g)
2195{
2196 struct bnx2x *bp = params->bp;
2197 u8 port = params->port;
2198
2199 /* first reset all status
 2200 	 * we assume only one line will change at a time */
2201 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2202 (NIG_STATUS_XGXS0_LINK10G |
2203 NIG_STATUS_XGXS0_LINK_STATUS |
2204 NIG_STATUS_SERDES0_LINK_STATUS));
2205 if (vars->phy_link_up) {
2206 if (is_10g) {
2207 /* Disable the 10G link interrupt
2208 * by writing 1 to the status register
2209 */
2210 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
2211 bnx2x_bits_en(bp,
2212 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2213 NIG_STATUS_XGXS0_LINK10G);
2214
2215 } else if (params->switch_cfg == SWITCH_CFG_10G) {
2216 /* Disable the link interrupt
2217 * by writing 1 to the relevant lane
2218 * in the status register
2219 */
2220 u32 ser_lane = ((params->lane_config &
2221 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
2222 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
2223
2224 DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
2225 vars->line_speed);
2226 bnx2x_bits_en(bp,
2227 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2228 ((1 << ser_lane) <<
2229 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
2230
2231 } else { /* SerDes */
2232 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
2233 /* Disable the link interrupt
2234 * by writing 1 to the status register
2235 */
2236 bnx2x_bits_en(bp,
2237 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2238 NIG_STATUS_SERDES0_LINK_STATUS);
2215 } 2239 }
2216 2240
2217 } else { /* SerDes */ 2241 }
2218 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 2242}
2219 switch (ext_phy_type) { 2243
2220 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 2244static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len)
2221 DP(NETIF_MSG_LINK, "SerDes Direct\n"); 2245{
2222 break; 2246 u8 *str_ptr = str;
2247 u32 mask = 0xf0000000;
2248 u8 shift = 8*4;
2249 u8 digit;
2250 u8 remove_leading_zeros = 1;
2251 if (*len < 10) {
 2252 		/* Need more than 10 chars for this format */
2253 *str_ptr = '\0';
2254 (*len)--;
2255 return -EINVAL;
2256 }
2257 while (shift > 0) {
2258
2259 shift -= 4;
2260 digit = ((num & mask) >> shift);
2261 if (digit == 0 && remove_leading_zeros) {
2262 mask = mask >> 4;
2263 continue;
2264 } else if (digit < 0xa)
2265 *str_ptr = digit + '0';
2266 else
2267 *str_ptr = digit - 0xa + 'a';
2268 remove_leading_zeros = 0;
2269 str_ptr++;
2270 (*len)--;
2271 mask = mask >> 4;
2272 if (shift == 4*4) {
2273 *str_ptr = '.';
2274 str_ptr++;
2275 (*len)--;
2276 remove_leading_zeros = 1;
2277 }
2278 }
2279 return 0;
2280}
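
bnx2x_format_ver() above prints the 32-bit spirom version as two 16-bit hex halves separated by a dot, stripping leading zeros within each half (so 0x01020003 becomes "102.3"). A self-contained sketch of the same nibble walk:

#include <stdio.h>
#include <stdint.h>

static int format_ver(uint32_t num, char *str, uint16_t *len)
{
	char *p = str;
	uint32_t mask = 0xf0000000;
	uint8_t shift = 8 * 4, digit, strip = 1;

	if (*len < 10) { /* needs up to 10 chars incl. '\0' */
		*p = '\0';
		(*len)--;
		return -1;
	}
	while (shift > 0) {
		shift -= 4;
		digit = (num & mask) >> shift;
		mask >>= 4;
		if (digit == 0 && strip)
			continue; /* drop leading zeros */
		*p++ = digit < 0xa ? '0' + digit : 'a' + digit - 0xa;
		strip = 0;
		(*len)--;
		if (shift == 4 * 4) { /* dot between the two halves */
			*p++ = '.';
			(*len)--;
			strip = 1;
		}
	}
	*p = '\0';
	return 0;
}

int main(void)
{
	char buf[16];
	uint16_t len = sizeof(buf);

	format_ver(0x01020003, buf, &len);
	printf("%s\n", buf); /* prints "102.3" */
	return 0;
}
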
2281
2282
2283static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
2284{
2285 str[0] = '\0';
2286 (*len)--;
2287 return 0;
2288}
2289
2290u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
2291 u8 *version, u16 len)
2292{
2293 struct bnx2x *bp;
2294 u32 spirom_ver = 0;
2295 u8 status = 0;
2296 u8 *ver_p = version;
2297 u16 remain_len = len;
2298 if (version == NULL || params == NULL)
2299 return -EINVAL;
2300 bp = params->bp;
2301
2302 /* Extract first external phy*/
2303 version[0] = '\0';
2304 spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
2305
2306 if (params->phy[EXT_PHY1].format_fw_ver) {
2307 status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
2308 ver_p,
2309 &remain_len);
2310 ver_p += (len - remain_len);
2311 }
2312 if ((params->num_phys == MAX_PHYS) &&
2313 (params->phy[EXT_PHY2].ver_addr != 0)) {
2314 spirom_ver = REG_RD(bp,
2315 params->phy[EXT_PHY2].ver_addr);
2316 if (params->phy[EXT_PHY2].format_fw_ver) {
2317 *ver_p = '/';
2318 ver_p++;
2319 remain_len--;
2320 status |= params->phy[EXT_PHY2].format_fw_ver(
2321 spirom_ver,
2322 ver_p,
2323 &remain_len);
2324 ver_p = version + (len - remain_len);
2325 }
2326 }
2327 *ver_p = '\0';
2328 return status;
2329}
2330
2331static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
2332 struct link_params *params)
2333{
2334 u8 port = params->port;
2335 struct bnx2x *bp = params->bp;
2336
2337 if (phy->req_line_speed != SPEED_1000) {
2338 u32 md_devad;
2339
2340 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
2341
2342 /* change the uni_phy_addr in the nig */
2343 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
2344 port*0x18));
2345
2346 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
2347
2348 bnx2x_cl45_write(bp, phy,
2349 5,
2350 (MDIO_REG_BANK_AER_BLOCK +
2351 (MDIO_AER_BLOCK_AER_REG & 0xf)),
2352 0x2800);
2353
2354 bnx2x_cl45_write(bp, phy,
2355 5,
2356 (MDIO_REG_BANK_CL73_IEEEB0 +
2357 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
2358 0x6041);
2359 msleep(200);
2360 /* set aer mmd back */
2361 bnx2x_set_aer_mmd(params, phy);
2362
2363 /* and md_devad */
2364 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
2365 md_devad);
2366
2367 } else {
2368 u16 mii_ctrl;
2369 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
2370 bnx2x_cl45_read(bp, phy, 5,
2371 (MDIO_REG_BANK_COMBO_IEEE0 +
2372 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
2373 &mii_ctrl);
2374 bnx2x_cl45_write(bp, phy, 5,
2375 (MDIO_REG_BANK_COMBO_IEEE0 +
2376 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
2377 mii_ctrl |
2378 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
2379 }
2380}
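
The 1G branch of bnx2x_set_xgxs_loopback() above is a plain read-modify-write of the clause-22 MII control register. Assuming the driver's MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK corresponds to the standard IEEE loopback bit (register 0, bit 14), the update it performs reduces to:

#include <stdint.h>

static uint16_t mii_enable_loopback(uint16_t mii_ctrl)
{
	return mii_ctrl | (1u << 14); /* assumed loopback bit position */
}
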
2381
2382/*
2383 *------------------------------------------------------------------------
2384 * bnx2x_override_led_value -
2385 *
2386 * Override the led value of the requested led
2387 *
2388 *------------------------------------------------------------------------
2389 */
2390u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
2391 u32 led_idx, u32 value)
2392{
2393 u32 reg_val;
2223 2394
2224 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 2395 /* If port 0 then use EMAC0, else use EMAC1*/
2225 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 2396 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2226 bnx2x_ext_phy_hw_reset(bp, params->port); 2397
2398 DP(NETIF_MSG_LINK,
2399 "bnx2x_override_led_value() port %x led_idx %d value %d\n",
2400 port, led_idx, value);
2401
2402 switch (led_idx) {
2403 case 0: /* 10MB led */
2404 /* Read the current value of the LED register in
2405 the EMAC block */
2406 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2407 /* Set the OVERRIDE bit to 1 */
2408 reg_val |= EMAC_LED_OVERRIDE;
2409 /* If value is 1, set the 10M_OVERRIDE bit,
2410 otherwise reset it.*/
2411 reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
2412 (reg_val & ~EMAC_LED_10MB_OVERRIDE);
2413 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2414 break;
2415 case 1: /*100MB led */
2416 /*Read the current value of the LED register in
2417 the EMAC block */
2418 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2419 /* Set the OVERRIDE bit to 1 */
2420 reg_val |= EMAC_LED_OVERRIDE;
2421 /* If value is 1, set the 100M_OVERRIDE bit,
2422 otherwise reset it.*/
2423 reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
2424 (reg_val & ~EMAC_LED_100MB_OVERRIDE);
2425 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2426 break;
2427 case 2: /* 1000MB led */
2428 /* Read the current value of the LED register in the
2429 EMAC block */
2430 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2431 /* Set the OVERRIDE bit to 1 */
2432 reg_val |= EMAC_LED_OVERRIDE;
2433 /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
2434 reset it. */
2435 reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
2436 (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
2437 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2438 break;
2439 case 3: /* 2500MB led */
2440 /* Read the current value of the LED register in the
2441 EMAC block*/
2442 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2443 /* Set the OVERRIDE bit to 1 */
2444 reg_val |= EMAC_LED_OVERRIDE;
2445 /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
2446 reset it.*/
2447 reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
2448 (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
2449 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2450 break;
2451 case 4: /*10G led */
2452 if (port == 0) {
2453 REG_WR(bp, NIG_REG_LED_10G_P0,
2454 value);
2455 } else {
2456 REG_WR(bp, NIG_REG_LED_10G_P1,
2457 value);
2458 }
2459 break;
2460 case 5: /* TRAFFIC led */
2461 /* Find if the traffic control is via BMAC or EMAC */
2462 if (port == 0)
2463 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
2464 else
2465 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
2466
2467 /* Override the traffic led in the EMAC:*/
2468 if (reg_val == 1) {
2469 /* Read the current value of the LED register in
2470 the EMAC block */
2471 reg_val = REG_RD(bp, emac_base +
2472 EMAC_REG_EMAC_LED);
2473 /* Set the TRAFFIC_OVERRIDE bit to 1 */
2474 reg_val |= EMAC_LED_OVERRIDE;
2475 /* If value is 1, set the TRAFFIC bit, otherwise
2476 reset it.*/
2477 reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
2478 (reg_val & ~EMAC_LED_TRAFFIC);
2479 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2480 } else { /* Override the traffic led in the BMAC: */
2481 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
2482 + port*4, 1);
2483 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
2484 value);
2485 }
2486 break;
2487 default:
2488 DP(NETIF_MSG_LINK,
2489 "bnx2x_override_led_value() unknown led index %d "
2490 "(should be 0-5)\n", led_idx);
2491 return -EINVAL;
2492 }
2493
2494 return 0;
2495}
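
Every case of bnx2x_override_led_value() above follows the same read-modify-write shape: set the global OVERRIDE bit, then force the speed-specific bit on or off according to value. A sketch of that shape with placeholder bit positions (the real EMAC_LED_* masks live in the driver's register headers):

#include <stdint.h>

#define LED_OVERRIDE      (1u << 0) /* assumed position */
#define LED_10MB_OVERRIDE (1u << 1) /* assumed position */

static uint32_t led_override(uint32_t reg_val, uint32_t led_bit, int on)
{
	reg_val |= LED_OVERRIDE;          /* take manual control */
	return on ? (reg_val | led_bit)   /* force the LED on */
		  : (reg_val & ~led_bit); /* force it off */
}
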
2496
2497
2498u8 bnx2x_set_led(struct link_params *params,
2499 struct link_vars *vars, u8 mode, u32 speed)
2500{
2501 u8 port = params->port;
2502 u16 hw_led_mode = params->hw_led_mode;
2503 u8 rc = 0, phy_idx;
2504 u32 tmp;
2505 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2506 struct bnx2x *bp = params->bp;
2507 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
2508 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
2509 speed, hw_led_mode);
 2510 	/* In case a PHY provides its own LED handler, call it first */
2511 for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
2512 if (params->phy[phy_idx].set_link_led) {
2513 params->phy[phy_idx].set_link_led(
2514 &params->phy[phy_idx], params, mode);
2515 }
2516 }
2517
2518 switch (mode) {
2519 case LED_MODE_FRONT_PANEL_OFF:
2520 case LED_MODE_OFF:
2521 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
2522 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
2523 SHARED_HW_CFG_LED_MAC1);
2524
2525 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
2526 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
2527 break;
2528
2529 case LED_MODE_OPER:
2530 /**
 2531 		 * For all other phys, OPER mode is the same as ON, so if the
 2532 		 * link is down, do nothing
2533 **/
2534 if (!vars->link_up)
2227 break; 2535 break;
2536 case LED_MODE_ON:
2537 if (SINGLE_MEDIA_DIRECT(params)) {
2538 /**
2539 * This is a work-around for HW issue found when link
2540 * is up in CL73
2541 */
2542 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
2543 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
2544 } else {
2545 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
2546 hw_led_mode);
2547 }
2228 2548
2229 default: 2549 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
2230 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n", 2550 port*4, 0);
2231 params->ext_phy_config); 2551 /* Set blinking rate to ~15.9Hz */
2552 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
2553 LED_BLINK_RATE_VAL);
2554 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
2555 port*4, 1);
2556 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
2557 EMAC_WR(bp, EMAC_REG_EMAC_LED,
2558 (tmp & (~EMAC_LED_OVERRIDE)));
2559
2560 if (CHIP_IS_E1(bp) &&
2561 ((speed == SPEED_2500) ||
2562 (speed == SPEED_1000) ||
2563 (speed == SPEED_100) ||
2564 (speed == SPEED_10))) {
2565 /* On Everest 1 Ax chip versions for speeds less than
2566 10G LED scheme is different */
2567 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
2568 + port*4, 1);
2569 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
2570 port*4, 0);
2571 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
2572 port*4, 1);
2573 }
2574 break;
2575
2576 default:
2577 rc = -EINVAL;
2578 DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
2579 mode);
2580 break;
2581 }
2582 return rc;
2583
2584}
2585
2586/**
 2587  * This function reflects the actual link state, read directly from the
 2588  * HW
2589 */
2590u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
2591 u8 is_serdes)
2592{
2593 struct bnx2x *bp = params->bp;
2594 u16 gp_status = 0, phy_index = 0;
2595 u8 ext_phy_link_up = 0, serdes_phy_type;
2596 struct link_vars temp_vars;
2597
2598 CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
2599 MDIO_REG_BANK_GP_STATUS,
2600 MDIO_GP_STATUS_TOP_AN_STATUS1,
2601 &gp_status);
2602 /* link is up only if both local phy and external phy are up */
2603 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
2604 return -ESRCH;
2605
2606 switch (params->num_phys) {
2607 case 1:
2608 /* No external PHY */
2609 return 0;
2610 case 2:
2611 ext_phy_link_up = params->phy[EXT_PHY1].read_status(
2612 &params->phy[EXT_PHY1],
2613 params, &temp_vars);
2614 break;
2615 case 3: /* Dual Media */
2616 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
2617 phy_index++) {
2618 serdes_phy_type = ((params->phy[phy_index].media_type ==
2619 ETH_PHY_SFP_FIBER) ||
2620 (params->phy[phy_index].media_type ==
2621 ETH_PHY_XFP_FIBER));
2622
2623 if (is_serdes != serdes_phy_type)
2624 continue;
2625 if (params->phy[phy_index].read_status) {
2626 ext_phy_link_up |=
2627 params->phy[phy_index].read_status(
2628 &params->phy[phy_index],
2629 params, &temp_vars);
2630 }
2631 }
2632 break;
2633 }
2634 if (ext_phy_link_up)
2635 return 0;
2636 return -ESRCH;
2637}
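
Condensed, the test above reads: the internal GP status must report link, and if external PHYs exist, at least one of them (filtered by media type on dual-media boards) must report link too. A standalone sketch of that predicate:

#include <stdbool.h>

static bool test_link(bool internal_link, int num_phys,
		      const bool *ext_up, const bool *ext_is_serdes,
		      bool want_serdes)
{
	if (!internal_link)
		return false;
	if (num_phys == 1) /* no external PHY */
		return true;
	if (num_phys == 2) /* single external PHY */
		return ext_up[0];
	/* dual media: only PHYs of the requested media type count */
	for (int i = 0; i < num_phys - 1; i++)
		if (ext_is_serdes[i] == want_serdes && ext_up[i])
			return true;
	return false;
}
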
2638
2639static u8 bnx2x_link_initialize(struct link_params *params,
2640 struct link_vars *vars)
2641{
2642 u8 rc = 0;
2643 u8 phy_index, non_ext_phy;
2644 struct bnx2x *bp = params->bp;
2645 /**
 2646 	 * When an external phy exists, the line speed is the speed
 2647 	 * negotiated by the external phy. When the board is direct
 2648 	 * only, the line_speed during initialization will be
 2649 	 * equal to the req_line_speed
2650 */
2651 vars->line_speed = params->phy[INT_PHY].req_line_speed;
2652
2653 /**
2654 * Initialize the internal phy in case this is a direct board
 2655 	 * (no external phys), or this board has an external phy which
 2656 	 * requires the internal phy to be initialized first
2657 */
2658
2659 if (params->phy[INT_PHY].config_init)
2660 params->phy[INT_PHY].config_init(
2661 &params->phy[INT_PHY],
2662 params, vars);
2663
2664 /* init ext phy and enable link state int */
2665 non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
2666 (params->loopback_mode == LOOPBACK_XGXS));
2667
2668 if (non_ext_phy ||
2669 (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
2670 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
2671 struct bnx2x_phy *phy = &params->phy[INT_PHY];
2672 if (vars->line_speed == SPEED_AUTO_NEG)
2673 bnx2x_set_parallel_detection(phy, params);
2674 bnx2x_init_internal_phy(phy, params, vars);
2675 }
2676
2677 /* Init external phy*/
2678 if (!non_ext_phy)
2679 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
2680 phy_index++) {
2681 /**
 2682 			 * No need to initialize the second phy in case of first-
 2683 			 * phy-only selection. In case of second-phy selection, we
 2684 			 * do need to initialize the first phy, since they are
 2685 			 * connected.
2686 **/
2687 if (phy_index == EXT_PHY2 &&
2688 (bnx2x_phy_selection(params) ==
2689 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
2690 DP(NETIF_MSG_LINK, "Not initializing"
2691 "second phy\n");
2692 continue;
2693 }
2694 params->phy[phy_index].config_init(
2695 &params->phy[phy_index],
2696 params, vars);
2697 }
2698
2699 /* Reset the interrupt indication after phy was initialized */
2700 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
2701 params->port*4,
2702 (NIG_STATUS_XGXS0_LINK10G |
2703 NIG_STATUS_XGXS0_LINK_STATUS |
2704 NIG_STATUS_SERDES0_LINK_STATUS |
2705 NIG_MASK_MI_INT));
2706 return rc;
2707}
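
The ordering rule in bnx2x_link_initialize() above: the internal XGXS is configured first on direct boards, in XGXS loopback, when the first external PHY carries FLAGS_INIT_XGXS_FIRST, or in external-PHY loopback; only then are the external PHYs walked. A boolean sketch of that first decision:

#include <stdbool.h>

static bool init_internal_first(bool single_media_direct, bool loopback_xgxs,
				bool ext_phy1_xgxs_first, bool loopback_ext_phy)
{
	bool non_ext_phy = single_media_direct || loopback_xgxs;

	return non_ext_phy || ext_phy1_xgxs_first || loopback_ext_phy;
}
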
2708
2709static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
2710 struct link_params *params)
2711{
2712 /* reset the SerDes/XGXS */
2713 REG_WR(params->bp, GRCBASE_MISC +
2714 MISC_REGISTERS_RESET_REG_3_CLEAR,
2715 (0x1ff << (params->port*16)));
2716}
2717
2718static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
2719 struct link_params *params)
2720{
2721 struct bnx2x *bp = params->bp;
2722 u8 gpio_port;
2723 /* HW reset */
2724 gpio_port = params->port;
2725 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2726 MISC_REGISTERS_GPIO_OUTPUT_LOW,
2727 gpio_port);
2728 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2729 MISC_REGISTERS_GPIO_OUTPUT_LOW,
2730 gpio_port);
2731 DP(NETIF_MSG_LINK, "reset external PHY\n");
2732}
2733
2734static u8 bnx2x_update_link_down(struct link_params *params,
2735 struct link_vars *vars)
2736{
2737 struct bnx2x *bp = params->bp;
2738 u8 port = params->port;
2739
2740 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
2741 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
2742
2743 /* indicate no mac active */
2744 vars->mac_type = MAC_TYPE_NONE;
2745
2746 /* update shared memory */
2747 vars->link_status = 0;
2748 vars->line_speed = 0;
2749 bnx2x_update_mng(params, vars->link_status);
2750
2751 /* activate nig drain */
2752 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2753
2754 /* disable emac */
2755 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
2756
2757 msleep(10);
2758
2759 /* reset BigMac */
2760 bnx2x_bmac_rx_disable(bp, params->port);
2761 REG_WR(bp, GRCBASE_MISC +
2762 MISC_REGISTERS_RESET_REG_2_CLEAR,
2763 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2764 return 0;
2765}
2766
2767static u8 bnx2x_update_link_up(struct link_params *params,
2768 struct link_vars *vars,
2769 u8 link_10g)
2770{
2771 struct bnx2x *bp = params->bp;
2772 u8 port = params->port;
2773 u8 rc = 0;
2774
2775 vars->link_status |= LINK_STATUS_LINK_UP;
2776
2777 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
2778 vars->link_status |=
2779 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
2780
2781 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
2782 vars->link_status |=
2783 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
2784
2785 if (link_10g) {
2786 bnx2x_bmac_enable(params, vars, 0);
2787 bnx2x_set_led(params, vars,
2788 LED_MODE_OPER, SPEED_10000);
2789 } else {
2790 rc = bnx2x_emac_program(params, vars);
2791
2792 bnx2x_emac_enable(params, vars, 0);
2793
2794 /* AN complete? */
2795 if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
2796 && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
2797 SINGLE_MEDIA_DIRECT(params))
2798 bnx2x_set_gmii_tx_driver(params);
2799 }
2800
2801 /* PBF - link up */
2802 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
2803 vars->line_speed);
2804
2805 /* disable drain */
2806 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2807
2808 /* update shared memory */
2809 bnx2x_update_mng(params, vars->link_status);
2810 msleep(20);
2811 return rc;
2812}
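
bnx2x_update_link_up() receives link_10g from its caller, which applies the "anything 10G and over uses the BMAC" rule by enumerating the supported speeds. The same check as a standalone helper (speeds in Mbps):

static int link_uses_bmac(unsigned int line_speed)
{
	switch (line_speed) {
	case 10000: case 12000: case 12500:
	case 13000: case 15000: case 16000:
		return 1; /* BMAC path */
	default:
		return 0; /* EMAC path */
	}
}
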
2813/**
2814 * The bnx2x_link_update function should be called upon link
2815 * interrupt.
2816 * Link is considered up as follows:
2817 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
2818 * to be up
2819 * - SINGLE_MEDIA - The link between the 577xx and the external
 2820  *	 phy (XGXS) needs to be up, as well as the external link of the
2821 * phy (PHY_EXT1)
2822 * - DUAL_MEDIA - The link between the 577xx and the first
2823 * external phy needs to be up, and at least one of the 2
 2824  *	 external phy links must be up.
2825 */
2826u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
2827{
2828 struct bnx2x *bp = params->bp;
2829 struct link_vars phy_vars[MAX_PHYS];
2830 u8 port = params->port;
2831 u8 link_10g, phy_index;
2832 u8 ext_phy_link_up = 0, cur_link_up, rc = 0;
2833 u8 is_mi_int = 0;
2834 u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
2835 u8 active_external_phy = INT_PHY;
2836 vars->link_status = 0;
2837 for (phy_index = INT_PHY; phy_index < params->num_phys;
2838 phy_index++) {
2839 phy_vars[phy_index].flow_ctrl = 0;
2840 phy_vars[phy_index].link_status = 0;
2841 phy_vars[phy_index].line_speed = 0;
2842 phy_vars[phy_index].duplex = DUPLEX_FULL;
2843 phy_vars[phy_index].phy_link_up = 0;
2844 phy_vars[phy_index].link_up = 0;
2845 }
2846
2847 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
2848 port, (vars->phy_flags & PHY_XGXS_FLAG),
2849 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
2850
2851 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
2852 port*0x18) > 0);
2853 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
2854 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
2855 is_mi_int,
2856 REG_RD(bp,
2857 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
2858
2859 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
2860 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2861 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
2862
2863 /* disable emac */
2864 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
2865
2866 /**
2867 * Step 1:
2868 * Check external link change only for external phys, and apply
2869 * priority selection between them in case the link on both phys
2870 * is up. Note that the instead of the common vars, a temporary
2871 * vars argument is used since each phy may have different link/
2872 * speed/duplex result
2873 */
2874 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
2875 phy_index++) {
2876 struct bnx2x_phy *phy = &params->phy[phy_index];
2877 if (!phy->read_status)
2878 continue;
2879 /* Read link status and params of this ext phy */
2880 cur_link_up = phy->read_status(phy, params,
2881 &phy_vars[phy_index]);
2882 if (cur_link_up) {
2883 DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
2884 phy_index);
2885 } else {
2886 DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
2887 phy_index);
2888 continue;
2889 }
2890
2891 if (!ext_phy_link_up) {
2892 ext_phy_link_up = 1;
2893 active_external_phy = phy_index;
2894 } else {
2895 switch (bnx2x_phy_selection(params)) {
2896 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
2897 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
2898 /**
2899 * In this option, the first PHY makes sure to pass the
2900 * traffic through itself only.
 2901 		* It's not clear how to reset the link on the second phy
2902 **/
2903 active_external_phy = EXT_PHY1;
2904 break;
2905 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
2906 /**
2907 * In this option, the first PHY makes sure to pass the
2908 * traffic through the second PHY.
2909 **/
2910 active_external_phy = EXT_PHY2;
2911 break;
2912 default:
2913 /**
2914 * Link indication on both PHYs with the following cases
2915 * is invalid:
2916 * - FIRST_PHY means that second phy wasn't initialized,
2917 * hence its link is expected to be down
2918 * - SECOND_PHY means that first phy should not be able
2919 * to link up by itself (using configuration)
 2920 		* - DEFAULT should be overridden during initialization
2921 **/
2922 DP(NETIF_MSG_LINK, "Invalid link indication"
2923 "mpc=0x%x. DISABLING LINK !!!\n",
2924 params->multi_phy_config);
2925 ext_phy_link_up = 0;
2926 break;
2927 }
2928 }
2929 }
2930 prev_line_speed = vars->line_speed;
2931 /**
2932 * Step 2:
2933 * Read the status of the internal phy. In case of
2934 * DIRECT_SINGLE_MEDIA board, this link is the external link,
2935 * otherwise this is the link between the 577xx and the first
2936 * external phy
2937 */
2938 if (params->phy[INT_PHY].read_status)
2939 params->phy[INT_PHY].read_status(
2940 &params->phy[INT_PHY],
2941 params, vars);
2942 /**
 2943 	 * The INT_PHY flow control resides in the vars. This includes the
 2944 	 * case where the speed or flow control are not set to AUTO.
 2945 	 * Otherwise, the active external phy flow control result is set
 2946 	 * to the vars. The ext_phy_line_speed is needed to check if the
 2947 	 * speed differs between the internal phy and the external phy.
 2948 	 * Such a case may result from an intermediate link speed change.
2949 */
2950 if (active_external_phy > INT_PHY) {
2951 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
2952 /**
2953 * Link speed is taken from the XGXS. AN and FC result from
2954 * the external phy.
2955 */
2956 vars->link_status |= phy_vars[active_external_phy].link_status;
2957
2958 /**
 2959 		 * if active_external_phy is the first PHY and the link is up -
 2960 		 * disable TX on the second external PHY
2961 */
2962 if (active_external_phy == EXT_PHY1) {
2963 if (params->phy[EXT_PHY2].phy_specific_func) {
2964 DP(NETIF_MSG_LINK, "Disabling TX on"
2965 " EXT_PHY2\n");
2966 params->phy[EXT_PHY2].phy_specific_func(
2967 &params->phy[EXT_PHY2],
2968 params, DISABLE_TX);
2969 }
2970 }
2971
2972 ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
2973 vars->duplex = phy_vars[active_external_phy].duplex;
2974 if (params->phy[active_external_phy].supported &
2975 SUPPORTED_FIBRE)
2976 vars->link_status |= LINK_STATUS_SERDES_LINK;
2977 DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
2978 active_external_phy);
2979 }
2980
2981 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
2982 phy_index++) {
2983 if (params->phy[phy_index].flags &
2984 FLAGS_REARM_LATCH_SIGNAL) {
2985 bnx2x_rearm_latch_signal(bp, port,
2986 phy_index ==
2987 active_external_phy);
2232 break; 2988 break;
2233 } 2989 }
2234 } 2990 }
2991 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
2992 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
2993 vars->link_status, ext_phy_line_speed);
2994 /**
 2995 	 * Upon link speed change, set the NIG into drain mode. This
 2996 	 * deals with a possible FIFO glitch due to the clock change when
 2997 	 * the speed is decreased without a link down indicator
2998 */
2999
3000 if (vars->phy_link_up) {
3001 if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
3002 (ext_phy_line_speed != vars->line_speed)) {
3003 DP(NETIF_MSG_LINK, "Internal link speed %d is"
3004 " different than the external"
3005 " link speed %d\n", vars->line_speed,
3006 ext_phy_line_speed);
3007 vars->phy_link_up = 0;
3008 } else if (prev_line_speed != vars->line_speed) {
3009 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
3010 + params->port*4, 0);
3011 msleep(1);
3012 }
3013 }
3014
3015 /* anything 10 and over uses the bmac */
3016 link_10g = ((vars->line_speed == SPEED_10000) ||
3017 (vars->line_speed == SPEED_12000) ||
3018 (vars->line_speed == SPEED_12500) ||
3019 (vars->line_speed == SPEED_13000) ||
3020 (vars->line_speed == SPEED_15000) ||
3021 (vars->line_speed == SPEED_16000));
3022
3023 bnx2x_link_int_ack(params, vars, link_10g);
3024
3025 /**
 3026 	 * In case the external phy link is up and the internal link is
 3027 	 * down (probably not initialized yet after link initialization),
 3028 	 * it needs to be initialized.
 3029 	 * Note that after a link down-up as a result of a cable plug, the
 3030 	 * xgxs link would probably come up again without the need to
 3031 	 * initialize it
3032 */
3033 if (!(SINGLE_MEDIA_DIRECT(params))) {
3034 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
3035 " init_preceding = %d\n", ext_phy_link_up,
3036 vars->phy_link_up,
3037 params->phy[EXT_PHY1].flags &
3038 FLAGS_INIT_XGXS_FIRST);
3039 if (!(params->phy[EXT_PHY1].flags &
3040 FLAGS_INIT_XGXS_FIRST)
3041 && ext_phy_link_up && !vars->phy_link_up) {
3042 vars->line_speed = ext_phy_line_speed;
3043 if (vars->line_speed < SPEED_1000)
3044 vars->phy_flags |= PHY_SGMII_FLAG;
3045 else
3046 vars->phy_flags &= ~PHY_SGMII_FLAG;
3047 bnx2x_init_internal_phy(&params->phy[INT_PHY],
3048 params,
3049 vars);
3050 }
3051 }
3052 /**
3053 * Link is up only if both local phy and external phy (in case of
3054 * non-direct board) are up
3055 */
3056 vars->link_up = (vars->phy_link_up &&
3057 (ext_phy_link_up ||
3058 SINGLE_MEDIA_DIRECT(params)));
3059
3060 if (vars->link_up)
3061 rc = bnx2x_update_link_up(params, vars, link_10g);
3062 else
3063 rc = bnx2x_update_link_down(params, vars);
3064
3065 return rc;
3066}
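
The priority selection of Step 1 above can be summarized in one function: with a single external link up the choice is forced, and with both up the multi_phy_config selection decides (anything else disables the link). A sketch with illustrative enum values:

#include <stdbool.h>

enum phy_sel { SEL_HW_DEFAULT, SEL_FIRST_PRIO, SEL_SECOND_PRIO };

/* returns the active external PHY index, 0 for none, -1 for invalid */
static int pick_active_ext_phy(bool phy1_up, bool phy2_up, enum phy_sel sel)
{
	if (phy1_up && !phy2_up)
		return 1;
	if (phy2_up && !phy1_up)
		return 2;
	if (!phy1_up && !phy2_up)
		return 0; /* internal link only */
	switch (sel) { /* both up: apply configured priority */
	case SEL_HW_DEFAULT:
	case SEL_FIRST_PRIO:
		return 1;
	case SEL_SECOND_PRIO:
		return 2;
	default:
		return -1; /* invalid config: disable the link */
	}
}
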
3067
3068
3069/*****************************************************************************/
3070/* External Phy section */
3071/*****************************************************************************/
3072void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
3073{
3074 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3075 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3076 msleep(1);
3077 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3078 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
2235} 3079}
2236 3080
2237static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, 3081static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
2238 u32 shmem_base, u32 spirom_ver) 3082 u32 spirom_ver, u32 ver_addr)
2239{ 3083{
2240 DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n", 3084 DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
2241 (u16)(spirom_ver>>16), (u16)spirom_ver, port); 3085 (u16)(spirom_ver>>16), (u16)spirom_ver, port);
2242 REG_WR(bp, shmem_base + 3086
2243 offsetof(struct shmem_region, 3087 if (ver_addr)
2244 port_mb[port].ext_phy_fw_version), 3088 REG_WR(bp, ver_addr, spirom_ver);
2245 spirom_ver);
2246} 3089}
2247 3090
2248static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u8 port, 3091static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
2249 u32 ext_phy_type, u8 ext_phy_addr, 3092 struct bnx2x_phy *phy,
2250 u32 shmem_base) 3093 u8 port)
2251{ 3094{
2252 u16 fw_ver1, fw_ver2; 3095 u16 fw_ver1, fw_ver2;
2253 3096
2254 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, 3097 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
2255 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 3098 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
2256 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, 3099 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
2257 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 3100 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
2258 bnx2x_save_spirom_version(bp, port, shmem_base, 3101 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
2259 (u32)(fw_ver1<<16 | fw_ver2)); 3102 phy->ver_addr);
2260} 3103}
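
bnx2x_save_bcm_spirom_ver() above packs the two 16-bit ROM version registers into a single 32-bit word, high half first; the equivalent standalone expression:

#include <stdint.h>

static uint32_t spirom_ver(uint16_t fw_ver1, uint16_t fw_ver2)
{
	return ((uint32_t)fw_ver1 << 16) | fw_ver2;
}
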
2261 3104
2262 3105static void bnx2x_ext_phy_set_pause(struct link_params *params,
2263static void bnx2x_save_8481_spirom_version(struct bnx2x *bp, u8 port, 3106 struct bnx2x_phy *phy,
2264 u8 ext_phy_addr, u32 shmem_base) 3107 struct link_vars *vars)
2265{ 3108{
2266 u16 val, fw_ver1, fw_ver2, cnt; 3109 u16 val;
2267 /* For the 32 bits registers in 8481, access via MDIO2ARM interface.*/ 3110 struct bnx2x *bp = params->bp;
 2268 	/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */	 3111 	/* read modify write pause advertising */
2269 bnx2x_cl45_write(bp, port, 3112 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
2270 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2271 ext_phy_addr, MDIO_PMA_DEVAD,
2272 0xA819, 0x0014);
2273 bnx2x_cl45_write(bp, port,
2274 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2275 ext_phy_addr,
2276 MDIO_PMA_DEVAD,
2277 0xA81A,
2278 0xc200);
2279 bnx2x_cl45_write(bp, port,
2280 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2281 ext_phy_addr,
2282 MDIO_PMA_DEVAD,
2283 0xA81B,
2284 0x0000);
2285 bnx2x_cl45_write(bp, port,
2286 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2287 ext_phy_addr,
2288 MDIO_PMA_DEVAD,
2289 0xA81C,
2290 0x0300);
2291 bnx2x_cl45_write(bp, port,
2292 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2293 ext_phy_addr,
2294 MDIO_PMA_DEVAD,
2295 0xA817,
2296 0x0009);
2297 3113
2298 for (cnt = 0; cnt < 100; cnt++) { 3114 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
2299 bnx2x_cl45_read(bp, port, 3115
2300 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3116 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2301 ext_phy_addr, 3117 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2302 MDIO_PMA_DEVAD, 3118 if ((vars->ieee_fc &
2303 0xA818, 3119 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
2304 &val); 3120 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
2305 if (val & 1) 3121 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
2306 break;
2307 udelay(5);
2308 } 3122 }
2309 if (cnt == 100) { 3123 if ((vars->ieee_fc &
2310 DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(1)\n"); 3124 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
2311 bnx2x_save_spirom_version(bp, port, 3125 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
2312 shmem_base, 0); 3126 val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
2313 return;
2314 } 3127 }
3128 DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
3129 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
3130}
2315 3131
3132static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3133 struct link_params *params,
3134 struct link_vars *vars)
3135{
3136 struct bnx2x *bp = params->bp;
3137 u16 ld_pause; /* local */
3138 u16 lp_pause; /* link partner */
3139 u16 pause_result;
3140 u8 ret = 0;
3141 /* read twice */
2316 3142
2317 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ 3143 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2318 bnx2x_cl45_write(bp, port, 3144
2319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3145 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
2320 ext_phy_addr, MDIO_PMA_DEVAD, 3146 vars->flow_ctrl = phy->req_flow_ctrl;
2321 0xA819, 0x0000); 3147 else if (phy->req_line_speed != SPEED_AUTO_NEG)
2322 bnx2x_cl45_write(bp, port, 3148 vars->flow_ctrl = params->req_fc_auto_adv;
2323 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3149 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
2324 ext_phy_addr, MDIO_PMA_DEVAD, 3150 ret = 1;
2325 0xA81A, 0xc200); 3151 bnx2x_cl45_read(bp, phy,
2326 bnx2x_cl45_write(bp, port, 3152 MDIO_AN_DEVAD,
2327 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3153 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
2328 ext_phy_addr, MDIO_PMA_DEVAD, 3154 bnx2x_cl45_read(bp, phy,
2329 0xA817, 0x000A); 3155 MDIO_AN_DEVAD,
2330 for (cnt = 0; cnt < 100; cnt++) { 3156 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
2331 bnx2x_cl45_read(bp, port, 3157 pause_result = (ld_pause &
2332 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3158 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
2333 ext_phy_addr, 3159 pause_result |= (lp_pause &
2334 MDIO_PMA_DEVAD, 3160 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
2335 0xA818, 3161 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
2336 &val); 3162 pause_result);
2337 if (val & 1) 3163 bnx2x_pause_resolve(vars, pause_result);
2338 break;
2339 udelay(5);
2340 } 3164 }
2341 if (cnt == 100) { 3165 return ret;
2342 DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(2)\n"); 3166}
2343 bnx2x_save_spirom_version(bp, port, 3167
2344 shmem_base, 0); 3168static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
3169 struct bnx2x_phy *phy,
3170 struct link_vars *vars)
3171{
3172 u16 val;
3173 bnx2x_cl45_read(bp, phy,
3174 MDIO_AN_DEVAD,
3175 MDIO_AN_REG_STATUS, &val);
3176 bnx2x_cl45_read(bp, phy,
3177 MDIO_AN_DEVAD,
3178 MDIO_AN_REG_STATUS, &val);
3179 if (val & (1<<5))
3180 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
3181 if ((val & (1<<0)) == 0)
3182 vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
3183}
3184
3185/******************************************************************/
3186/* common BCM8073/BCM8727 PHY SECTION */
3187/******************************************************************/
3188static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
3189 struct link_params *params,
3190 struct link_vars *vars)
3191{
3192 struct bnx2x *bp = params->bp;
3193 if (phy->req_line_speed == SPEED_10 ||
3194 phy->req_line_speed == SPEED_100) {
3195 vars->flow_ctrl = phy->req_flow_ctrl;
2345 return; 3196 return;
2346 } 3197 }
2347 3198
2348 /* lower 16 bits of the register SPI_FW_STATUS */ 3199 if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
2349 bnx2x_cl45_read(bp, port, 3200 (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
2350 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3201 u16 pause_result;
2351 ext_phy_addr, 3202 u16 ld_pause; /* local */
2352 MDIO_PMA_DEVAD, 3203 u16 lp_pause; /* link partner */
2353 0xA81B, 3204 bnx2x_cl45_read(bp, phy,
2354 &fw_ver1); 3205 MDIO_AN_DEVAD,
2355 /* upper 16 bits of register SPI_FW_STATUS */ 3206 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
2356 bnx2x_cl45_read(bp, port, 3207
2357 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3208 bnx2x_cl45_read(bp, phy,
2358 ext_phy_addr, 3209 MDIO_AN_DEVAD,
2359 MDIO_PMA_DEVAD, 3210 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
2360 0xA81C, 3211 pause_result = (ld_pause &
2361 &fw_ver2); 3212 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
3213 pause_result |= (lp_pause &
3214 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
2362 3215
2363 bnx2x_save_spirom_version(bp, port, 3216 bnx2x_pause_resolve(vars, pause_result);
2364 shmem_base, (fw_ver2<<16) | fw_ver1); 3217 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
3218 pause_result);
3219 }
2365} 3220}
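
Both resolve paths above end in bnx2x_pause_resolve() on a packed pause_result nibble; the packing depends on the MDIO register layout, but the underlying rule is IEEE 802.3 Table 28B-3. A sketch that takes the four advertisement bits explicitly instead of the packed nibble:

#include <stdbool.h>

struct fc { bool tx; bool rx; };

static struct fc resolve_pause(bool ld_pause, bool ld_asym,
			       bool lp_pause, bool lp_asym)
{
	struct fc fc = { false, false };

	if (ld_pause && lp_pause) { /* both symmetric: pause both ways */
		fc.tx = fc.rx = true;
	} else if (ld_asym && lp_pause && lp_asym) {
		fc.tx = true; /* we send PAUSE frames only */
	} else if (ld_pause && ld_asym && lp_asym) {
		fc.rx = true; /* we honor PAUSE frames only */
	}
	return fc;
}
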
2366 3221
2367static void bnx2x_bcm8072_external_rom_boot(struct link_params *params) 3222static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3223 struct bnx2x_phy *phy,
3224 u8 port)
2368{ 3225{
2369 struct bnx2x *bp = params->bp; 3226 /* Boot port from external ROM */
2370 u8 port = params->port; 3227 /* EDC grst */
2371 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 3228 bnx2x_cl45_write(bp, phy,
2372 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 3229 MDIO_PMA_DEVAD,
3230 MDIO_PMA_REG_GEN_CTRL,
3231 0x0001);
2373 3232
2374 /* Need to wait 200ms after reset */ 3233 /* ucode reboot and rst */
2375 msleep(200); 3234 bnx2x_cl45_write(bp, phy,
2376 /* Boot port from external ROM 3235 MDIO_PMA_DEVAD,
2377 * Set ser_boot_ctl bit in the MISC_CTRL1 register 3236 MDIO_PMA_REG_GEN_CTRL,
2378 */ 3237 0x008c);
2379 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3238
2380 MDIO_PMA_DEVAD, 3239 bnx2x_cl45_write(bp, phy,
2381 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 3240 MDIO_PMA_DEVAD,
3241 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2382 3242
2383 /* Reset internal microprocessor */ 3243 /* Reset internal microprocessor */
2384 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3244 bnx2x_cl45_write(bp, phy,
2385 MDIO_PMA_DEVAD, 3245 MDIO_PMA_DEVAD,
2386 MDIO_PMA_REG_GEN_CTRL, 3246 MDIO_PMA_REG_GEN_CTRL,
2387 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 3247 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2388 /* set micro reset = 0 */ 3248
2389 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3249 /* Release srst bit */
2390 MDIO_PMA_DEVAD, 3250 bnx2x_cl45_write(bp, phy,
2391 MDIO_PMA_REG_GEN_CTRL, 3251 MDIO_PMA_DEVAD,
2392 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 3252 MDIO_PMA_REG_GEN_CTRL,
2393 /* Reset internal microprocessor */ 3253 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2394 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3254
2395 MDIO_PMA_DEVAD, 3255 /* wait for 120ms for code download via SPI port */
2396 MDIO_PMA_REG_GEN_CTRL, 3256 msleep(120);
2397 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2398 /* wait for 100ms for code download via SPI port */
2399 msleep(100);
2400 3257
2401 /* Clear ser_boot_ctl bit */ 3258 /* Clear ser_boot_ctl bit */
2402 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3259 bnx2x_cl45_write(bp, phy,
2403 MDIO_PMA_DEVAD, 3260 MDIO_PMA_DEVAD,
2404 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 3261 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2405 /* Wait 100ms */ 3262 bnx2x_save_bcm_spirom_ver(bp, phy, port);
2406 msleep(100); 3263}
3264
3265static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
3266 struct bnx2x_phy *phy)
3267{
3268 u16 val;
3269 bnx2x_cl45_read(bp, phy,
3270 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
2407 3271
2408 bnx2x_save_bcm_spirom_ver(bp, port, 3272 if (val == 0) {
2409 ext_phy_type, 3273 /* Mustn't set low power mode in 8073 A0 */
2410 ext_phy_addr, 3274 return;
2411 params->shmem_base); 3275 }
3276
3277 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3278 bnx2x_cl45_read(bp, phy,
3279 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3280 val &= ~(1<<13);
3281 bnx2x_cl45_write(bp, phy,
3282 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3283
3284 /* PLL controls */
3285 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
3286 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
3287 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
3288 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
3289 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
3290
3291 /* Tx Controls */
3292 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3293 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
3294 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
3295
3296 /* Rx Controls */
3297 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3298 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
3299 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
3300
3301 /* Enable PLL sequencer (use read-modify-write to set bit 13) */
3302 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3303 val |= (1<<13);
3304 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
2412} 3305}
 
-static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
+/******************************************************************/
+/*			BCM8073 PHY SECTION			  */
+/******************************************************************/
+static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
 {
 	/* This is only required for 8073A1, version 102 only */
-
-	struct bnx2x *bp = params->bp;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
 	u16 val;
 
 	/* Read 8073 HW revision */
-	bnx2x_cl45_read(bp, params->port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-		       ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_8073_CHIP_REV, &val);
 
@@ -2431,9 +3322,7 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
 		return 0;
 	}
 
-	bnx2x_cl45_read(bp, params->port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-		       ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_ROM_VER2, &val);
 
@@ -2444,15 +3333,11 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
 	return 1;
 }
 
-static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
+static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
 {
-	struct bnx2x *bp = params->bp;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
 	u16 val, cnt, cnt1;
 
-	bnx2x_cl45_read(bp, params->port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-		       ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_8073_CHIP_REV, &val);
 
@@ -2466,9 +3351,7 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
 	   poll Dev1, Reg $C820: */
 
 	for (cnt = 0; cnt < 1000; cnt++) {
-		bnx2x_cl45_read(bp, params->port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			       ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
 			       &val);
@@ -2485,9 +3368,7 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
 	   XAUI workaround has completed),
 	   then continue on with system initialization.*/
 	for (cnt1 = 0; cnt1 < 1000; cnt1++) {
-		bnx2x_cl45_read(bp, params->port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			       ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_8073_XAUI_WA, &val);
 		if (val & (1<<15)) {
@@ -2505,143 +3386,385 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
 	return -EINVAL;
 }
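The XAUI workaround is a classic bounded poll: read a status register up to 1000 times, succeed as soon as the expected bit appears, and fall through to -EINVAL when the budget is exhausted. A self-contained sketch of that control flow, with read_status() as an invented stand-in for the MDIO read (not a real driver call):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an MDIO status read; here it pretends the hardware
 * raises bit 15 on the 5th poll. */
static uint16_t read_status(void)
{
	static int polls;
	return (++polls >= 5) ? (uint16_t)(1 << 15) : 0;
}

static int poll_for_bit15(void)
{
	int cnt;

	for (cnt = 0; cnt < 1000; cnt++) {
		if (read_status() & (1 << 15)) {
			printf("ready after %d polls\n", cnt + 1);
			return 0;
		}
	}
	return -EINVAL;	/* budget exhausted, report failure */
}

int main(void)
{
	return poll_for_bit15() ? 1 : 0;
}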
 
-static void bnx2x_bcm8073_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
-						    u8 ext_phy_addr,
-						    u32 ext_phy_type,
-						    u32 shmem_base)
-{
-	/* Boot port from external ROM */
-	/* EDC grst */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       0x0001);
-
-	/* ucode reboot and rst */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       0x008c);
-
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
-
-	/* Reset internal microprocessor */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
-
-	/* Release srst bit */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
-
-	/* wait for 100ms for code download via SPI port */
-	msleep(100);
-
-	/* Clear ser_boot_ctl bit */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
-
-	bnx2x_save_bcm_spirom_ver(bp, port,
-				ext_phy_type,
-				ext_phy_addr,
-				shmem_base);
-}
-
-static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
-					    u8 ext_phy_addr,
-					    u32 shmem_base)
-{
-	bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
-					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-					shmem_base);
-}
-
-static void bnx2x_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
-					    u8 ext_phy_addr,
-					    u32 shmem_base)
-{
-	bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
-					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-					shmem_base);
-
-}
-
-static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
-{
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
-	/* Need to wait 100ms after reset */
-	msleep(100);
-
-	/* Micro controller re-boot */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       0x018B);
-
-	/* Set soft reset */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
-
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
-
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
-
-	/* wait for 150ms for microcode load */
-	msleep(150);
-
-	/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
-
-	msleep(200);
-	bnx2x_save_bcm_spirom_ver(bp, port,
-				  ext_phy_type,
-				  ext_phy_addr,
-				  params->shmem_base);
-}
+static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	/* Force KR or KX */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+				      struct bnx2x_phy *phy,
+				      struct link_vars *vars)
+{
+	u16 cl37_val;
+	struct bnx2x *bp = params->bp;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+	cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	if ((vars->ieee_fc &
+	     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+	     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+	     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+	     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+	}
+	DP(NETIF_MSG_LINK,
+	   "Ext phy AN advertise cl37 0x%x\n", cl37_val);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+	msleep(500);
+}
+
+static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0, tmp1;
+	u8 gpio_port;
+	DP(NETIF_MSG_LINK, "Init 8073\n");
+
+	gpio_port = params->port;
+	/* Restore normal power mode */
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+	/* enable LASI */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2));
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x0004);
+
+	bnx2x_8073_set_pause_cl37(params, phy, vars);
+
+	bnx2x_8073_set_xaui_low_power_mode(bp, phy);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
+
+	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+
+	/* Enable CL37 BAM */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD,
+			MDIO_AN_REG_8073_BAM, &val);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD,
+			 MDIO_AN_REG_8073_BAM, val | 1);
+
+	if (params->loopback_mode == LOOPBACK_EXT) {
+		bnx2x_807x_force_10G(bp, phy);
+		DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+		return 0;
+	} else {
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+	}
+	if (phy->req_line_speed != SPEED_AUTO_NEG) {
+		if (phy->req_line_speed == SPEED_10000) {
+			val = (1<<7);
+		} else if (phy->req_line_speed == SPEED_2500) {
+			val = (1<<5);
+			/* Note that 2.5G works only
+			   when used with 1G advertisement */
+		} else
+			val = (1<<5);
+	} else {
+		val = 0;
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+			val |= (1<<7);
+
+		/* Note that 2.5G works only when
+		   used with 1G advertisement */
+		if (phy->speed_cap_mask &
+		    (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+		     PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+			val |= (1<<5);
+		DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+	if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+	     (phy->req_line_speed == SPEED_AUTO_NEG)) ||
+	    (phy->req_line_speed == SPEED_2500)) {
+		u16 phy_ver;
+		/* Allow 2.5G for A1 and above */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+				&phy_ver);
+		DP(NETIF_MSG_LINK, "Add 2.5G\n");
+		if (phy_ver > 0)
+			tmp1 |= 1;
+		else
+			tmp1 &= 0xfffe;
+	} else {
+		DP(NETIF_MSG_LINK, "Disable 2.5G\n");
+		tmp1 &= 0xfffe;
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+	/* Add support for CL37 (passive mode) II */
+
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+			 (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+				  0x20 : 0x40)));
+
+	/* Add support for CL37 (passive mode) III */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+
+	/* The SNR will improve about 2 dB by changing
+	   BW and FEE main tap. The rest of the commands are executed
+	   after link is up */
+	if (bnx2x_8073_is_snr_needed(bp, phy))
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+				 0xFB0C);
+
+	/* Enable FEC (Forward Error Correction) Request in the AN */
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+	tmp1 |= (1<<15);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+
+	/* Restart autoneg */
+	msleep(500);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+	DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+	   ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+	return 0;
+}
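In the autoneg branch above, the advertisement word is built from the configured speed-capability mask: bit 7 advertises 10G, bit 5 advertises 1G, and 2.5G only works when the 1G bit is advertised as well. A compact sketch of that mapping, using invented capability flag values in place of the real shmem encoding:

#include <stdint.h>
#include <stdio.h>

/* Capability flags -- illustrative values, not the real shmem encoding */
#define CAP_1G   (1u << 0)
#define CAP_2_5G (1u << 1)
#define CAP_10G  (1u << 2)

/* Map a speed-capability mask to the AN advertisement bits used above. */
static uint16_t adv_bits(uint32_t cap)
{
	uint16_t val = 0;

	if (cap & CAP_10G)
		val |= (1 << 7);	/* advertise 10G */
	if (cap & (CAP_1G | CAP_2_5G))
		val |= (1 << 5);	/* 2.5G rides on the 1G bit */
	return val;
}

int main(void)
{
	printf("adv = 0x%04x\n", adv_bits(CAP_1G | CAP_10G)); /* 0x00a0 */
	return 0;
}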
+
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up = 0;
+	u16 val1, val2;
+	u16 link_status = 0;
+	u16 an1000_status = 0;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+
+	DP(NETIF_MSG_LINK, "8073 LASI status 0x%x\n", val1);
+
+	/* clear the interrupt LASI status register */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+	/* Clear MSG-OUT */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+	/* Check the LASI */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+
+	DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+	/* Check the link status */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+	DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	link_up = ((val1 & 4) == 4);
+	DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+	if (link_up &&
+	    ((phy->req_line_speed != SPEED_10000))) {
+		if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+			return 0;
+	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+	/* Check the link status on 1.1.2 */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+	   "an_link_status=0x%x\n", val2, val1, an1000_status);
+
+	link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+	if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+		/* The SNR will improve about 2 dB by
+		   changing the BW and FEE main tap. */
+		/* The 1st write to change FFE main
+		   tap is set before restart AN */
+		/* Change PLL Bandwidth in EDC
+		   register */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+				 0x26BC);
+
+		/* Change CDR Bandwidth in EDC register */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+				 0x0333);
+	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+			&link_status);
+
+	/* Bits 0..2 --> speed detected, bits 13..15 --> link is down */
+	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+		   params->port);
+	} else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_2500;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
+		   params->port);
+	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_1000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+		   params->port);
+	} else {
+		link_up = 0;
+		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+		   params->port);
+	}
+
+	if (link_up) {
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+		bnx2x_8073_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
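The speed decode at the end of the function reads one status word in which bits 0..2 report the detected speed and bits 13..15 flag the corresponding link as down. A standalone sketch of that decoding, taking the register layout from the driver's own comment:

#include <stdint.h>
#include <stdio.h>

/* Decode the 8073 speed/link status word as the code above does. */
static int decode_speed(uint16_t link_status)
{
	if ((link_status & (1 << 2)) && !(link_status & (1 << 15)))
		return 10000;
	if ((link_status & (1 << 1)) && !(link_status & (1 << 14)))
		return 2500;
	if ((link_status & (1 << 0)) && !(link_status & (1 << 13)))
		return 1000;
	return 0;	/* link down */
}

int main(void)
{
	printf("%d\n", decode_speed(0x0004));	/* 10000 */
	printf("%d\n", decode_speed(0x8004));	/* 0: 10G bit set but down */
	return 0;
}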
+
+static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 gpio_port;
+	gpio_port = params->port;
+	DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
+	   gpio_port);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
+}
+
+/******************************************************************/
+/*			BCM8705 PHY SECTION			  */
+/******************************************************************/
+static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "init 8705\n");
+	/* Restore normal power mode */
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+	bnx2x_wait_reset_complete(bp, phy);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+	/* BCM8705 doesn't have microcode, hence the 0 */
+	bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+	return 0;
+}
+
+static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	u8 link_up = 0;
+	u16 val1, rx_sd;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "read status 8705\n");
+	bnx2x_cl45_read(bp, phy,
+			MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, 0xc809, &val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, 0xc809, &val1);
+
+	DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
+	link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
+	if (link_up) {
+		vars->line_speed = SPEED_10000;
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
 
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
-				      u32 ext_phy_type, u8 ext_phy_addr,
-				      u8 tx_en)
+/******************************************************************/
+/*			SFP+ module Section			  */
+/******************************************************************/
+static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u8 port,
+				      u8 tx_en)
 {
 	u16 val;
 
 	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
 	   tx_en, port);
 	/* Disable/Enable transmitter (TX laser of the SFP+ module) */
-	bnx2x_cl45_read(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_PHY_IDENTIFIER,
 		       &val);
@@ -2651,58 +3774,42 @@ static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
 	else
 		val |= (1<<15);
 
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_PHY_IDENTIFIER,
 		       val);
 }
 
-static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
+static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					    struct link_params *params,
 					    u16 addr, u8 byte_cnt, u8 *o_buf)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val = 0;
 	u16 i;
-	u8 port = params->port;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
 	if (byte_cnt > 16) {
 		DP(NETIF_MSG_LINK, "Reading from eeprom is"
			    " limited to 0xf\n");
 		return -EINVAL;
 	}
 	/* Set the read command byte count */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+	bnx2x_cl45_write(bp, phy,
+		       MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
 		       (byte_cnt | 0xa000));
 
 	/* Set the read command address */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+	bnx2x_cl45_write(bp, phy,
+		       MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
 		       addr);
 
 	/* Activate read command */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+	bnx2x_cl45_write(bp, phy,
+		       MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 		       0x2c0f);
 
 	/* Wait up to 500us for command complete status */
 	for (i = 0; i < 100; i++) {
-		bnx2x_cl45_read(bp, port,
-			       ext_phy_type,
-			       ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2721,18 +3828,14 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
 
 	/* Read the buffer */
 	for (i = 0; i < byte_cnt; i++) {
-		bnx2x_cl45_read(bp, port,
-			       ext_phy_type,
-			       ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
 		o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
 	}
 
 	for (i = 0; i < 100; i++) {
-		bnx2x_cl45_read(bp, port,
-			       ext_phy_type,
-			       ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2743,14 +3846,12 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
 	return -EINVAL;
 }
 
-static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
+static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					    struct link_params *params,
 					    u16 addr, u8 byte_cnt, u8 *o_buf)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val, i;
-	u8 port = params->port;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
 
 	if (byte_cnt > 16) {
 		DP(NETIF_MSG_LINK, "Reading from eeprom is"
@@ -2759,40 +3860,30 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
 	}
 
 	/* Need to read from 1.8000 to clear it */
-	bnx2x_cl45_read(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-		       ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 		       &val);
 
 	/* Set the read command byte count */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
 		       ((byte_cnt < 2) ? 2 : byte_cnt));
 
 	/* Set the read command address */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
 		       addr);
 	/* Set the destination address */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       0x8004,
 		       MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
 
 	/* Activate read command */
-	bnx2x_cl45_write(bp, port,
-		       ext_phy_type,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 		       0x8002);
@@ -2802,9 +3893,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
 
 	/* Wait up to 500us for command complete status */
 	for (i = 0; i < 100; i++) {
-		bnx2x_cl45_read(bp, port,
-			       ext_phy_type,
-			       ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2823,18 +3912,14 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
 
 	/* Read the buffer */
 	for (i = 0; i < byte_cnt; i++) {
-		bnx2x_cl45_read(bp, port,
-			       ext_phy_type,
-			       ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
 		o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
 	}
 
 	for (i = 0; i < 100; i++) {
-		bnx2x_cl45_read(bp, port,
-			       ext_phy_type,
-			       ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2846,21 +3931,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
 	return -EINVAL;
 }
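Both EEPROM readers follow the same two-wire command sequence: program the byte count, program the address, fire the read command, poll for completion, then copy the data buffer out. A toy model of that sequence; the struct, field names, and the fake "device" are invented for illustration and are not the driver's API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct twi_dev {
	uint8_t eeprom[256];
	uint8_t byte_cnt, addr;
	int busy_polls;		/* polls remaining until "complete" */
};

static int twi_read(struct twi_dev *d, uint8_t addr, uint8_t cnt, uint8_t *out)
{
	int i;

	if (cnt > 16)
		return -EINVAL;		/* same limit as the driver */
	d->byte_cnt = cnt;		/* 1. set byte count */
	d->addr = addr;			/* 2. set address */
	d->busy_polls = 3;		/* 3. activate command */
	for (i = 0; i < 100; i++)	/* 4. poll for completion */
		if (d->busy_polls-- <= 0)
			break;
	if (i == 100)
		return -EINVAL;
	memcpy(out, &d->eeprom[addr], cnt);	/* 5. read buffer */
	return 0;
}

int main(void)
{
	struct twi_dev d = { .eeprom = { [20] = 0x03 } };
	uint8_t buf[1];

	if (twi_read(&d, 20, 1, buf) == 0)
		printf("eeprom[20] = 0x%02x\n", buf[0]);
	return 0;
}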
 
-u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				struct link_params *params, u16 addr,
 				u8 byte_cnt, u8 *o_buf)
 {
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
-	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
-		return bnx2x_8726_read_sfp_module_eeprom(params, addr,
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
+		return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
 							 byte_cnt, o_buf);
-	else if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
-		return bnx2x_8727_read_sfp_module_eeprom(params, addr,
+	else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
+		return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
 							 byte_cnt, o_buf);
 	return -EINVAL;
 }
 
-static u8 bnx2x_get_edc_mode(struct link_params *params,
+static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+			     struct link_params *params,
 			     u16 *edc_mode)
 {
 	struct bnx2x *bp = params->bp;
@@ -2868,10 +3953,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
 	*edc_mode = EDC_MODE_LIMITING;
 
 	/* First check for copper cable */
-	if (bnx2x_read_sfp_module_eeprom(params,
-					 SFP_EEPROM_CON_TYPE_ADDR,
-					 1,
-					 &val) != 0) {
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 SFP_EEPROM_CON_TYPE_ADDR,
+					 1,
+					 &val) != 0) {
 		DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
 		return -EINVAL;
 	}
@@ -2883,7 +3969,8 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
 
 	/* Check whether it is an active cable (includes SFP+ module)
 	   or a passive cable */
-	if (bnx2x_read_sfp_module_eeprom(params,
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
 					 SFP_EEPROM_FC_TX_TECH_ADDR,
 					 1,
 					 &copper_module_type) !=
@@ -2923,10 +4010,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
 
 	if (check_limiting_mode) {
 		u8 options[SFP_EEPROM_OPTIONS_SIZE];
-		if (bnx2x_read_sfp_module_eeprom(params,
-						 SFP_EEPROM_OPTIONS_ADDR,
-						 SFP_EEPROM_OPTIONS_SIZE,
-						 options) != 0) {
+		if (bnx2x_read_sfp_module_eeprom(phy,
+						 params,
+						 SFP_EEPROM_OPTIONS_ADDR,
+						 SFP_EEPROM_OPTIONS_SIZE,
+						 options) != 0) {
			DP(NETIF_MSG_LINK, "Failed to read Option"
				" field from module EEPROM\n");
			return -EINVAL;
@@ -2939,17 +4027,17 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
 	DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
 	return 0;
 }
-
 /* This function reads the relevant field from the module (SFP+),
    and verifies it is compliant with this board */
-static u8 bnx2x_verify_sfp_module(struct link_params *params)
+static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+				  struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	u32 val;
-	u32 fw_resp;
+	u32 val, cmd;
+	u32 fw_resp, fw_cmd_param;
 	char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
 	char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
-
+	phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
 	val = REG_RD(bp, params->shmem_base +
 		     offsetof(struct shmem_region, dev_info.
 			      port_feature_config[params->port].config));
@@ -2959,29 +4047,43 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
 		return 0;
 	}
 
-	/* Ask the FW to validate the module */
-	if (!(params->feature_config_flags &
-	      FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY)) {
+	if (params->feature_config_flags &
+	    FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+		/* Use specific phy request */
+		cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+	} else if (params->feature_config_flags &
+		   FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+		/* Use first phy request only in case of non-dual media */
+		if (DUAL_MEDIA(params)) {
+			DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
+			   "verification\n");
+			return -EINVAL;
+		}
+		cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+	} else {
+		/* No support in OPT MDL detection */
 		DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
 		   "verification\n");
 		return -EINVAL;
 	}
-
-	fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_VRFY_OPT_MDL);
+	fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+	fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
 	if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
 		DP(NETIF_MSG_LINK, "Approved module\n");
 		return 0;
 	}
 
 	/* format the warning message */
-	if (bnx2x_read_sfp_module_eeprom(params,
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
 					 SFP_EEPROM_VENDOR_NAME_ADDR,
 					 SFP_EEPROM_VENDOR_NAME_SIZE,
 					 (u8 *)vendor_name))
 		vendor_name[0] = '\0';
 	else
 		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
-	if (bnx2x_read_sfp_module_eeprom(params,
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
 					 SFP_EEPROM_PART_NO_ADDR,
 					 SFP_EEPROM_PART_NO_SIZE,
 					 (u8 *)vendor_pn))
@@ -2989,22 +4091,78 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
 	else
 		vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
 
-	netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n",
+	netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
+			     " Port %d from %s part number %s\n",
 		    params->port, vendor_name, vendor_pn);
+	phy->flags |= FLAGS_SFP_NOT_APPROVED;
 	return -EINVAL;
 }
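The rewritten verify path picks one of two firmware mailbox commands from the bootcode feature flags: a per-phy request when dual-phy verification is supported, otherwise the legacy first-phy request, and only on non-dual-media boards. A compact sketch of that decision, with invented flag and command values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag/command values only */
#define BC_SUPPORTS_DUAL_PHY_VRFY (1u << 0)
#define BC_SUPPORTS_OPT_MDL_VRFY  (1u << 1)
enum vrfy_cmd { VRFY_NONE, VRFY_SPECIFIC_PHY, VRFY_FIRST_PHY };

static enum vrfy_cmd pick_vrfy_cmd(uint32_t flags, int dual_media)
{
	if (flags & BC_SUPPORTS_DUAL_PHY_VRFY)
		return VRFY_SPECIFIC_PHY;	/* per-phy request */
	if ((flags & BC_SUPPORTS_OPT_MDL_VRFY) && !dual_media)
		return VRFY_FIRST_PHY;		/* legacy, single media only */
	return VRFY_NONE;			/* no FW support */
}

int main(void)
{
	printf("%d\n", pick_vrfy_cmd(BC_SUPPORTS_OPT_MDL_VRFY, 1));  /* 0 */
	printf("%d\n", pick_vrfy_cmd(BC_SUPPORTS_DUAL_PHY_VRFY, 1)); /* 1 */
	return 0;
}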
 
-static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
-					  u16 edc_mode)
+static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+						struct link_params *params)
+
 {
+	u8 val;
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
+	u16 timeout;
+	/* Initialization time after hot-plug may take up to 300ms for
+	   some phy types (e.g. JDSU) */
+	for (timeout = 0; timeout < 60; timeout++) {
+		if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
+		    == 0) {
+			DP(NETIF_MSG_LINK, "SFP+ module initialization "
+			   "took %d ms\n", timeout * 5);
+			return 0;
+		}
+		msleep(5);
+	}
+	return -EINVAL;
+}
+
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+				    struct bnx2x_phy *phy,
+				    u8 is_power_up) {
+	/* Make sure GPIOs are not used for LED mode */
+	u16 val;
+	/*
+	 * In the GPIO register, bit 4 is used to determine if the GPIOs are
+	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
+	 * output.
+	 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
+	 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
+	 * where the 1st bit is the over-current (input only), and 2nd bit is
+	 * for power (output only)
+	 */
+
+	/*
+	 * In case the NOC feature is disabled and power is up, set GPIO
+	 * control as input to enable listening of over-current indication
+	 */
+	if (phy->flags & FLAGS_NOC)
+		return;
+	if (!(phy->flags &
+	      FLAGS_NOC) && is_power_up)
+		val = (1<<4);
+	else
+		/*
+		 * Set GPIO control to OUTPUT, and set the power bit
+		 * according to is_power_up
+		 */
+		val = ((!(is_power_up)) << 1);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_GPIO_CTRL,
+			 val);
+}
+
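Per the comment block above, the 8727 GPIO control word either selects INPUT mode (bit 4 set) so over-current can be observed while powered up, or OUTPUT mode with the power bit (bit 1) driven from the inverse of is_power_up. A standalone sketch of that encoding; the macro names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define GPIO_INPUT_MODE (1u << 4)	/* bit 4: GPIOs act as inputs */

static uint16_t gpio_ctrl_val(int is_power_up)
{
	if (is_power_up)
		return GPIO_INPUT_MODE;	/* input mode: watch over-current */
	/* output mode (bit 4 clear): drive power bit from !is_power_up */
	return (uint16_t)((!is_power_up) << 1);
}

int main(void)
{
	printf("power-up:   0x%04x\n", gpio_ctrl_val(1)); /* 0x0010 */
	printf("power-down: 0x%04x\n", gpio_ctrl_val(0)); /* 0x0002 */
	return 0;
}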
+static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+				       struct bnx2x_phy *phy,
+				       u16 edc_mode)
+{
 	u16 cur_limiting_mode;
 
-	bnx2x_cl45_read(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
-		       ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_ROM_VER2,
 		       &cur_limiting_mode);
@@ -3014,12 +4172,10 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
 	if (edc_mode == EDC_MODE_LIMITING) {
 		DP(NETIF_MSG_LINK,
 		   "Setting LIMITING MODE\n");
-		bnx2x_cl45_write(bp, port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
-			       ext_phy_addr,
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_ROM_VER2,
-			       EDC_MODE_LIMITING);
+		bnx2x_cl45_write(bp, phy,
+			       MDIO_PMA_DEVAD,
+			       MDIO_PMA_REG_ROM_VER2,
+			       EDC_MODE_LIMITING);
 	} else { /* LRM mode (default) */
 
 		DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
@@ -3030,27 +4186,19 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
 	if (cur_limiting_mode != EDC_MODE_LIMITING)
 		return 0;
 
-	bnx2x_cl45_write(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_LRM_MODE,
 		       0);
-	bnx2x_cl45_write(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_ROM_VER2,
 		       0x128);
-	bnx2x_cl45_write(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_MISC_CTRL0,
 		       0x4008);
-	bnx2x_cl45_write(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_LRM_MODE,
 		       0xaaaa);
@@ -3058,46 +4206,33 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
 	return 0;
 }
 
-static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
+static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+				       struct bnx2x_phy *phy,
 				       u16 edc_mode)
 {
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
 	u16 phy_identifier;
 	u16 rom_ver2_val;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
-	bnx2x_cl45_read(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-		       ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_PHY_IDENTIFIER,
 		       &phy_identifier);
 
-	bnx2x_cl45_write(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_PHY_IDENTIFIER,
 		       (phy_identifier & ~(1<<9)));
 
-	bnx2x_cl45_read(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-		       ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_ROM_VER2,
 		       &rom_ver2_val);
 	/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
-	bnx2x_cl45_write(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_ROM_VER2,
 		       (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
 
-	bnx2x_cl45_write(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-		       ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_PHY_IDENTIFIER,
 		       (phy_identifier | (1<<9)));
@@ -3105,72 +4240,34 @@ static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
 	return 0;
 }
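The ROM_VER2 update above uses a common byte-field composition: keep the high byte of the current register value and replace only the low byte with the EDC mode. A one-function sketch of that masking:

#include <stdint.h>
#include <stdio.h>

/* Keep the MSB of rom_ver2, substitute the LSB with edc_mode. */
static uint16_t merge_edc_mode(uint16_t rom_ver2, uint16_t edc_mode)
{
	return (uint16_t)((rom_ver2 & 0xff00) | (edc_mode & 0x00ff));
}

int main(void)
{
	/* e.g. 0x1234 with EDC mode 0x55 -> 0x1255 */
	printf("0x%04x\n", merge_edc_mode(0x1234, 0x0055));
	return 0;
}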
 
-
-static u8 bnx2x_wait_for_sfp_module_initialized(struct link_params *params)
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
 {
-	u8 val;
 	struct bnx2x *bp = params->bp;
-	u16 timeout;
-	/* Initialization time after hot-plug may take up to 300ms for some
-	   phys type ( e.g. JDSU ) */
-	for (timeout = 0; timeout < 60; timeout++) {
-		if (bnx2x_read_sfp_module_eeprom(params, 1, 1, &val)
-		    == 0) {
-			DP(NETIF_MSG_LINK, "SFP+ module initialization "
-				"took %d ms\n", timeout * 5);
-			return 0;
-		}
-		msleep(5);
-	}
-	return -EINVAL;
-}
-
-static void bnx2x_8727_power_module(struct bnx2x *bp,
-				    struct link_params *params,
-				    u8 ext_phy_addr, u8 is_power_up) {
-	/* Make sure GPIOs are not using for LED mode */
-	u16 val;
-	u8 port = params->port;
-	/*
-	 * In the GPIO register, bit 4 is use to detemine if the GPIOs are
-	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
-	 * output
-	 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
-	 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
-	 * where the 1st bit is the over-current(only input), and 2nd bit is
-	 * for power( only output )
-	 */
-
-	/*
-	 * In case of NOC feature is disabled and power is up, set GPIO control
-	 * as input to enable listening of over-current indication
-	 */
-
-	if (!(params->feature_config_flags &
-	      FEATURE_CONFIG_BCM8727_NOC) && is_power_up)
-		val = (1<<4);
-	else
-		/*
-		 * Set GPIO control to OUTPUT, and set the power bit
-		 * to according to the is_power_up
-		 */
-		val = ((!(is_power_up)) << 1);
-
-	bnx2x_cl45_write(bp, port,
-			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-			ext_phy_addr,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8727_GPIO_CTRL,
-			val);
 
+	switch (action) {
+	case DISABLE_TX:
+		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		break;
+	case ENABLE_TX:
+		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+			bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+		   action);
+		return;
+	}
 }
 
-static u8 bnx2x_sfp_module_detection(struct link_params *params)
+static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+				     struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
 	u16 edc_mode;
 	u8 rc = 0;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+
 	u32 val = REG_RD(bp, params->shmem_base +
 			 offsetof(struct shmem_region, dev_info.
 				  port_feature_config[params->port].config));
@@ -3178,10 +4275,10 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
 	DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
 	   params->port);
 
-	if (bnx2x_get_edc_mode(params, &edc_mode) != 0) {
+	if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
 		DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
 		return -EINVAL;
-	} else if (bnx2x_verify_sfp_module(params) !=
+	} else if (bnx2x_verify_sfp_module(phy, params) !=
 		   0) {
 		/* check SFP+ module compatibility */
 		DP(NETIF_MSG_LINK, "Module verification failed!!\n");
@@ -3190,13 +4287,12 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
 			       MISC_REGISTERS_GPIO_HIGH,
 			       params->port);
-		if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
+		if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
 		    ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
 			/* Shutdown SFP+ module */
 			DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
-			bnx2x_8727_power_module(bp, params,
-						ext_phy_addr, 0);
+			bnx2x_8727_power_module(bp, phy, 0);
 			return rc;
 		}
 	} else {
@@ -3208,15 +4304,15 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
 	}
 
 	/* power up the SFP module */
-	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
-		bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
+		bnx2x_8727_power_module(bp, phy, 1);
 
 	/* Check and set limiting mode / LRM mode on 8726.
 	   On 8727 it is done automatically */
-	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
-		bnx2x_bcm8726_set_limiting_mode(params, edc_mode);
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
+		bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
 	else
-		bnx2x_bcm8727_set_limiting_mode(params, edc_mode);
+		bnx2x_8727_set_limiting_mode(bp, phy, edc_mode);
 	/*
 	 * Enable transmit for this module if the module is approved, or
 	 * if unapproved modules should also enable the Tx laser
@@ -3224,11 +4320,9 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
 	if (rc == 0 ||
 	    (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
 	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-		bnx2x_sfp_set_transmitter(bp, params->port,
-					  ext_phy_type, ext_phy_addr, 1);
+		bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
 	else
-		bnx2x_sfp_set_transmitter(bp, params->port,
-					  ext_phy_type, ext_phy_addr, 0);
+		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
 
 	return rc;
 }
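The Tx-laser decision at the end of module detection reduces to one predicate: transmit stays enabled when verification succeeded (rc == 0) or when the configured enforcement is anything other than "disable Tx laser". A sketch with illustrative enforcement values standing in for the real shmem encoding:

#include <stdint.h>
#include <stdio.h>

#define ENFRCMNT_MASK             0x3	/* illustrative values */
#define ENFRCMNT_DISABLE_TX_LASER 0x1

static int tx_enabled(int rc, uint32_t feat_cfg)
{
	return rc == 0 ||
	       (feat_cfg & ENFRCMNT_MASK) != ENFRCMNT_DISABLE_TX_LASER;
}

int main(void)
{
	printf("%d\n", tx_enabled(-22, ENFRCMNT_DISABLE_TX_LASER)); /* 0 */
	printf("%d\n", tx_enabled(-22, 0));			    /* 1 */
	return 0;
}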
@@ -3236,6 +4330,7 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
 void bnx2x_handle_module_detect_int(struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy = &params->phy[EXT_PHY1];
 	u32 gpio_val;
 	u8 port = params->port;
 
@@ -3245,1349 +4340,587 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
 	   params->port);
 
 	/* Get current gpio val reflecting module plugged in / out */
 	gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
 
 	/* Call the handling function in case module is detected */
 	if (gpio_val == 0) {
 
 		bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
 				   port);
 
-		if (bnx2x_wait_for_sfp_module_initialized(params) ==
-		    0)
-			bnx2x_sfp_module_detection(params);
+		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+			bnx2x_sfp_module_detection(phy, params);
 		else
 			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
 	} else {
-		u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
-		u32 ext_phy_type =
-			XGXS_EXT_PHY_TYPE(params->ext_phy_config);
 		u32 val = REG_RD(bp, params->shmem_base +
 				 offsetof(struct shmem_region, dev_info.
 					  port_feature_config[params->port].
 					  config));
 
 		bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
 				   port);
 		/* Module was plugged out. */
 		/* Disable transmit for this module */
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(bp, params->port,
-						  ext_phy_type, ext_phy_addr, 0);
+			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
 	}
 }
 
-static void bnx2x_bcm807x_force_10G(struct link_params *params)
-{
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
-	/* Force KR or KX */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_CTRL,
-		       0x2040);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_10G_CTRL2,
-		       0x000b);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_BCM_CTRL,
-		       0x0000);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_AN_DEVAD,
-		       MDIO_AN_REG_CTRL,
-		       0x0000);
-}
-
-static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
-{
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-	u16 val;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
-	bnx2x_cl45_read(bp, params->port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-		       ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_8073_CHIP_REV, &val);
-
-	if (val == 0) {
-		/* Mustn't set low power mode in 8073 A0 */
-		return;
-	}
-
-	/* Disable PLL sequencer (use read-modify-write to clear bit 13) */
-	bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD,
-		       MDIO_XS_PLL_SEQUENCER, &val);
-	val &= ~(1<<13);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
-	/* PLL controls */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x805E, 0x1077);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x805D, 0x0000);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x805C, 0x030B);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x805B, 0x1240);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x805A, 0x2490);
-
-	/* Tx Controls */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x80A7, 0x0C74);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x80A6, 0x9041);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x80A5, 0x4640);
-
-	/* Rx Controls */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x80FE, 0x01C4);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x80FD, 0x9249);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, 0x80FC, 0x2015);
-
-	/* Enable PLL sequencer (use read-modify-write to set bit 13) */
-	bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD,
-		       MDIO_XS_PLL_SEQUENCER, &val);
-	val |= (1<<13);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-}
+/******************************************************************/
+/*		common BCM8706/BCM8726 PHY SECTION		  */
+/******************************************************************/
+static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	u8 link_up = 0;
+	u16 val1, val2, rx_sd, pcs_status;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
+	/* Clear RX Alarm */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+	/* clear LASI indication */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+	DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+	DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+	   " link_status 0x%x\n", rx_sd, pcs_status, val2);
+	/* link is up if both bit 0 of pmd_rx_sd and
+	 * bit 0 of pcs_status are set, or if the autoneg bit
+	 * 1 is set
+	 */
+	link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+	if (link_up) {
+		if (val2 & (1<<1))
+			vars->line_speed = SPEED_1000;
+		else
+			vars->line_speed = SPEED_10000;
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
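The link-up rule spelled out in the comment above combines two sources: bit 0 of both the PMD signal-detect and PCS status registers (10G), or bit 1 of the AN link status (1G via autoneg). A sketch of that predicate for illustration, with the speed selection matching the code's ordering:

#include <stdint.h>
#include <stdio.h>

static int link_speed(uint16_t rx_sd, uint16_t pcs_status, uint16_t an_status)
{
	if (an_status & (1 << 1))
		return 1000;		/* 1G via autoneg */
	if (rx_sd & pcs_status & 0x1)
		return 10000;		/* 10G: both bit-0 flags set */
	return 0;			/* link down */
}

int main(void)
{
	printf("%d\n", link_speed(0x1, 0x1, 0x0));	/* 10000 */
	printf("%d\n", link_speed(0x0, 0x0, 0x2));	/* 1000 */
	printf("%d\n", link_speed(0x1, 0x0, 0x0));	/* 0 */
	return 0;
}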
 
-static void bnx2x_8073_set_pause_cl37(struct link_params *params,
-				      struct link_vars *vars)
-{
-	struct bnx2x *bp = params->bp;
-	u16 cl37_val;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
-	bnx2x_cl45_read(bp, params->port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_AN_DEVAD,
-		       MDIO_AN_REG_CL37_FC_LD, &cl37_val);
-
-	cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
-	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
-
-	if ((vars->ieee_fc &
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
-		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
-	}
-	if ((vars->ieee_fc &
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
-		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
-	}
-	if ((vars->ieee_fc &
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
-		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
-	}
-	DP(NETIF_MSG_LINK,
-	   "Ext phy AN advertize cl37 0x%x\n", cl37_val);
-
-	bnx2x_cl45_write(bp, params->port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_AN_DEVAD,
-		       MDIO_AN_REG_CL37_FC_LD, cl37_val);
-	msleep(500);
-}
+/******************************************************************/
+/*			BCM8706 PHY SECTION			  */
+/******************************************************************/
+static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	u16 cnt, val;
+	struct bnx2x *bp = params->bp;
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+	bnx2x_wait_reset_complete(bp, phy);
+
+	/* Wait until fw is loaded */
+	for (cnt = 0; cnt < 100; cnt++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+		if (val)
+			break;
+		msleep(10);
+	}
+	DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		u8 i;
+		u16 reg;
+		for (i = 0; i < 4; i++) {
+			reg = MDIO_XS_8706_REG_BANK_RX0 +
+			      i*(MDIO_XS_8706_REG_BANK_RX1 -
+				 MDIO_XS_8706_REG_BANK_RX0);
+			bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
+			/* Clear first 3 bits of the control */
+			val &= ~0x7;
+			/* Set control bits according to configuration */
+			val |= (phy->rx_preemphasis[i] & 0x7);
+			DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
+			   " reg 0x%x <-- val 0x%x\n", reg, val);
+			bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
+		}
+	}
+	/* Force speed */
+	if (phy->req_line_speed == SPEED_10000) {
+		DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+	} else {
+		/* Force 1Gbps using autoneg with 1G advertisement */
+
+		/* Allow CL37 through CL73 */
+		DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+		/* Enable Full-Duplex advertisement on CL37 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+		/* Enable CL37 AN */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+		/* 1G support */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
+
+		/* Enable clause 73 AN */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+				 0x0400);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+				 0x0004);
+	}
+	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+	return 0;
+}
+
+static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	return bnx2x_8706_8726_read_status(phy, params, vars);
+}
 
-static void bnx2x_ext_phy_set_pause(struct link_params *params,
-				    struct link_vars *vars)
-{
-	struct bnx2x *bp = params->bp;
-	u16 val;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
-	/* read modify write pause advertizing */
-	bnx2x_cl45_read(bp, params->port,
-		       ext_phy_type,
-		       ext_phy_addr,
-		       MDIO_AN_DEVAD,
-		       MDIO_AN_REG_ADV_PAUSE, &val);
+/******************************************************************/
+/*			BCM8726 PHY SECTION			  */
+/******************************************************************/
+static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
3431
3432 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
3433
3434 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3435
3436 if ((vars->ieee_fc &
3437 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3438 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3439 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3440 }
3441 if ((vars->ieee_fc &
3442 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3443 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3444 val |=
3445 MDIO_AN_REG_ADV_PAUSE_PAUSE;
3446 }
3447 DP(NETIF_MSG_LINK,
3448 "Ext phy AN advertize 0x%x\n", val);
3449 bnx2x_cl45_write(bp, params->port,
3450 ext_phy_type,
3451 ext_phy_addr,
3452 MDIO_AN_DEVAD,
3453 MDIO_AN_REG_ADV_PAUSE, val);
3454} 4520}
-static void bnx2x_set_preemphasis(struct link_params *params)
+
+static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
+					 struct link_params *params)
 {
-	u16 bank, i = 0;
 	struct bnx2x *bp = params->bp;
+	/* Need to wait 100ms after reset */
+	msleep(100);
 
-	for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
-	      bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
-		CL45_WR_OVER_CL22(bp, params->port,
-				  params->phy_addr,
-				  bank,
-				  MDIO_RX0_RX_EQ_BOOST,
-				  params->xgxs_config_rx[i]);
-	}
-
-	for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
-	      bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
-		CL45_WR_OVER_CL22(bp, params->port,
-				  params->phy_addr,
-				  bank,
-				  MDIO_TX0_TX_DRIVER,
-				  params->xgxs_config_tx[i]);
-	}
-}
+	/* Micro controller re-boot */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
 
+	/* Set soft reset */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
-static void bnx2x_8481_set_led4(struct link_params *params,
-				u32 ext_phy_type, u8 ext_phy_addr)
-{
-	struct bnx2x *bp = params->bp;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
-	/* PHYC_CTL_LED_CTL */
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
+	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LINK_SIGNAL, 0xa482);
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
-	/* Unmask LED4 for 10G link */
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
+	/* wait for 150ms for microcode load */
+	msleep(150);
+
+	/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
+	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_SIGNAL_MASK, (1<<6));
-	/* 'Interrupt Mask' */
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
-			 MDIO_AN_DEVAD,
-			 0xFFFB, 0xFFFD);
-}
-static void bnx2x_8481_set_legacy_led_mode(struct link_params *params,
-					   u32 ext_phy_type, u8 ext_phy_addr)
-{
-	struct bnx2x *bp = params->bp;
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 
-	/* LED1 (10G Link): Disable LED1 when 10/100/1000 link */
-	/* LED2 (1G/100/10 Link): Enable LED2 when 10/100/1000 link) */
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
-			 MDIO_AN_DEVAD,
-			 MDIO_AN_REG_8481_LEGACY_SHADOW,
-			 (1<<15) | (0xd << 10) | (0xc<<4) | 0xe);
+	msleep(200);
+	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
 }
 
-static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
-					u32 ext_phy_type, u8 ext_phy_addr)
+static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val1;
-
-	/* LED1 (10G Link) */
-	/* Enable continuse based on source 7(10G-link) */
-	bnx2x_cl45_read(bp, params->port,
-			ext_phy_type,
-			ext_phy_addr,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8481_LINK_SIGNAL,
-			&val1);
-	/* Set bit 2 to 0, and bits [1:0] to 10 */
-	val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
-	val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
-
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LINK_SIGNAL,
-			 val1);
-
-	/* Unmask LED1 for 10G link */
-	bnx2x_cl45_read(bp, params->port,
-			ext_phy_type,
-			ext_phy_addr,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8481_LED1_MASK,
-			&val1);
-	/* Set bit 2 to 0, and bits [1:0] to 10 */
-	val1 |= (1<<7);
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED1_MASK,
-			 val1);
-
-	/* LED2 (1G/100/10G Link) */
-	/* Mask LED2 for 10G link */
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED2_MASK,
-			 0);
-
-	/* Unmask LED3 for 10G link */
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED3_MASK,
-			 0x6);
-	bnx2x_cl45_write(bp, params->port,
-			 ext_phy_type,
-			 ext_phy_addr,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED3_BLINK,
-			 0);
+	u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
+	if (link_up) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				&val1);
+		if (val1 & (1<<15)) {
+			DP(NETIF_MSG_LINK, "Tx is disabled\n");
+			link_up = 0;
+			vars->line_speed = 0;
+		}
+	}
+	return link_up;
 }
 
 
-static void bnx2x_init_internal_phy(struct link_params *params,
-				    struct link_vars *vars,
-				    u8 enable_cl73)
+static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
+	u32 val;
+	u32 swap_val, swap_override, aeu_gpio_mask, offset;
+	DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
-	if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
-		if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
-		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
-		    (params->feature_config_flags &
-		     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
-			bnx2x_set_preemphasis(params);
-
-		/* forced speed requested? */
-		if (vars->line_speed != SPEED_AUTO_NEG ||
-		    ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
-		      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
-		     params->loopback_mode == LOOPBACK_EXT)) {
-			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
-
-			/* disable autoneg */
-			bnx2x_set_autoneg(params, vars, 0);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	bnx2x_wait_reset_complete(bp, phy);
+
+	bnx2x_8726_external_rom_boot(phy, params);
+
+	/* Need to call module detected on initialization since
+	   the module detection triggered by actual module
+	   insertion might occur before driver is loaded, and when
+	   driver is loaded, it reset all registers, including the
+	   transmitter */
+	bnx2x_sfp_module_detection(phy, params);
+
+	if (phy->req_line_speed == SPEED_1000) {
+		DP(NETIF_MSG_LINK, "Setting 1G force\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+				 0x400);
+	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+		   (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+		   ((phy->speed_cap_mask &
+		     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+		DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+		/* Set Flow control */
+		bnx2x_ext_phy_set_pause(params, phy, vars);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+		/* Enable RX-ALARM control to receive
+		   interrupt for 1G speed change */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+				 0x400);
+
+	} else { /* Default 10G. Set only LASI control */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+	}
 
-			/* program speed and duplex */
-			bnx2x_program_serdes(params, vars);
+	/* Set TX PreEmphasis if needed */
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
+			 "TX_CTRL2 0x%x\n",
+			 phy->tx_preemphasis[0],
+			 phy->tx_preemphasis[1]);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8726_TX_CTRL1,
+				 phy->tx_preemphasis[0]);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8726_TX_CTRL2,
+				 phy->tx_preemphasis[1]);
+	}
 
-		} else { /* AN_mode */
-			DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+	/* Set GPIO3 to trigger SFP+ module insertion/removal */
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+		       MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
 
-			/* AN enabled */
-			bnx2x_set_brcm_cl37_advertisment(params);
+	/* The GPIO should be swapped if the swap register is set and active */
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 
-			/* program duplex & pause advertisement (for aneg) */
-			bnx2x_set_ieee_aneg_advertisment(params,
-							 vars->ieee_fc);
+	/* Select function upon port-swap configuration */
+	if (params->port == 0) {
+		offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+		aeu_gpio_mask = (swap_val && swap_override) ?
+			AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
+			AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
+	} else {
+		offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+		aeu_gpio_mask = (swap_val && swap_override) ?
+			AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
+			AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
+	}
+	val = REG_RD(bp, offset);
+	/* add GPIO3 to group */
+	val |= aeu_gpio_mask;
+	REG_WR(bp, offset, val);
+	return 0;
 
-			/* enable autoneg */
-			bnx2x_set_autoneg(params, vars, enable_cl73);
+}
 
-			/* enable and restart AN */
-			bnx2x_restart_autoneg(params, enable_cl73);
-		}
+static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
+	/* Set serial boot control for external load */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
 
-	} else { /* SGMII mode */
-		DP(NETIF_MSG_LINK, "SGMII\n");
+/******************************************************************/
+/*			BCM8727 PHY SECTION			  */
+/******************************************************************/
 
-		bnx2x_initialize_sgmii_process(params, vars);
+static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
+				    struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 led_mode_bitmask = 0;
+	u16 gpio_pins_bitmask = 0;
+	u16 val;
+	/* Only NOC flavor requires to set the LED specifically */
+	if (!(phy->flags & FLAGS_NOC))
+		return;
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		led_mode_bitmask = 0;
+		gpio_pins_bitmask = 0x03;
+		break;
+	case LED_MODE_ON:
+		led_mode_bitmask = 0;
+		gpio_pins_bitmask = 0x02;
+		break;
+	case LED_MODE_OPER:
+		led_mode_bitmask = 0x60;
+		gpio_pins_bitmask = 0x11;
+		break;
 	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+			&val);
+	val &= 0xff8f;
+	val |= led_mode_bitmask;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+			 val);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8727_GPIO_CTRL,
+			&val);
+	val &= 0xffe0;
+	val |= gpio_pins_bitmask;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_GPIO_CTRL,
+			 val);
+}
+static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params) {
+	u32 swap_val, swap_override;
+	u8 port;
+	/**
+	 * The PHY reset is controlled by GPIO 1. Fake the port number
+	 * to cancel the swap done in set_gpio()
+	 */
+	struct bnx2x *bp = params->bp;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	port = (swap_val && swap_override) ^ 1;
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 }
 
-static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
+static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
 {
+	u16 tmp1, val, mod_abs;
+	u16 rx_alarm_ctrl_val;
+	u16 lasi_ctrl_val;
 	struct bnx2x *bp = params->bp;
-	u32 ext_phy_type;
-	u8 ext_phy_addr;
-	u16 cnt;
-	u16 ctrl = 0;
-	u16 val = 0;
-	u8 rc = 0;
-
-	if (vars->phy_flags & PHY_XGXS_FLAG) {
-		ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
-		ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-		/* Make sure that the soft reset is off (expect for the 8072:
-		 * due to the lock, it will be done inside the specific
-		 * handling)
+	/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+	bnx2x_wait_reset_complete(bp, phy);
+	rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
+	lasi_ctrl_val = 0x0004;
+
+	DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
+	/* enable LASI */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+			 rx_alarm_ctrl_val);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
+
+	/* Initially configure MOD_ABS to interrupt when
+	   module is presence( bit 8) */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+	/* Set EDC off by setting OPTXLOS signal input to low
+	   (bit 9).
+	   When the EDC is off it locks onto a reference clock and
+	   avoids becoming 'lost'.*/
+	mod_abs &= ~(1<<8);
+	if (!(phy->flags & FLAGS_NOC))
+		mod_abs &= ~(1<<9);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+
+	/* Make MOD_ABS give interrupt on change */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+			&val);
+	val |= (1<<12);
+	if (phy->flags & FLAGS_NOC)
+		val |= (3<<5);
+
+	/**
+	 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+	 * status which reflect SFP+ module over-current
+	 */
+	if (!(phy->flags & FLAGS_NOC))
+		val &= 0xff8f; /* Reset bits 4-6 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
+
+	bnx2x_8727_power_module(bp, phy, 1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
+
+	/* Set option 1G speed */
+	if (phy->req_line_speed == SPEED_1000) {
+		DP(NETIF_MSG_LINK, "Setting 1G force\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+		DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+		/**
+		 * Power down the XAUI until link is up in case of dual-media
+		 * and 1G
 		 */
-	if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
-	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
-	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
-	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
-	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
-		/* Wait for soft reset to get cleared upto 1 sec */
-		for (cnt = 0; cnt < 1000; cnt++) {
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_CTRL, &ctrl);
-			if (!(ctrl & (1<<15)))
-				break;
-			msleep(1);
-		}
-		DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n",
-		   ctrl, cnt);
-	}
-
-	switch (ext_phy_type) {
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-		break;
-
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-		DP(NETIF_MSG_LINK, "XGXS 8705\n");
-
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_MISC_CTRL,
-				 0x8288);
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_PHY_IDENTIFIER,
-				 0x7fbf);
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_CMU_PLL_BYPASS,
-				 0x0100);
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_WIS_DEVAD,
-				 MDIO_WIS_REG_LASI_CNTL, 0x1);
-
-		/* BCM8705 doesn't have microcode, hence the 0 */
-		bnx2x_save_spirom_version(bp, params->port,
-					  params->shmem_base, 0);
-		break;
-
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-		/* Wait until fw is loaded */
-		for (cnt = 0; cnt < 100; cnt++) {
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr, MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_ROM_VER1, &val);
-			if (val)
-				break;
-			msleep(10);
-		}
-		DP(NETIF_MSG_LINK, "XGXS 8706 is initialized "
-		   "after %d ms\n", cnt);
-		if ((params->feature_config_flags &
-		     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
-			u8 i;
-			u16 reg;
-			for (i = 0; i < 4; i++) {
-				reg = MDIO_XS_8706_REG_BANK_RX0 +
-					i*(MDIO_XS_8706_REG_BANK_RX1 -
-					   MDIO_XS_8706_REG_BANK_RX0);
-				bnx2x_cl45_read(bp, params->port,
-						ext_phy_type,
-						ext_phy_addr,
-						MDIO_XS_DEVAD,
-						reg, &val);
-				/* Clear first 3 bits of the control */
-				val &= ~0x7;
-				/* Set control bits according to
-				   configuation */
-				val |= (params->xgxs_config_rx[i] &
-					0x7);
-				DP(NETIF_MSG_LINK, "Setting RX"
-				   "Equalizer to BCM8706 reg 0x%x"
-				   " <-- val 0x%x\n", reg, val);
-				bnx2x_cl45_write(bp, params->port,
-						 ext_phy_type,
-						 ext_phy_addr,
-						 MDIO_XS_DEVAD,
-						 reg, val);
-			}
-		}
-		/* Force speed */
-		if (params->req_line_speed == SPEED_10000) {
-			DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
-
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_DIGITAL_CTRL,
-					 0x400);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_LASI_CTRL, 1);
-		} else {
-			/* Force 1Gbps using autoneg with 1G
-			   advertisment */
-
-			/* Allow CL37 through CL73 */
-			DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CL37_CL73,
-					 0x040c);
-
-			/* Enable Full-Duplex advertisment on CL37 */
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CL37_FC_LP,
-					 0x0020);
-			/* Enable CL37 AN */
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CL37_AN,
-					 0x1000);
-			/* 1G support */
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_ADV, (1<<5));
-
-			/* Enable clause 73 AN */
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CTRL,
-					 0x1200);
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_RX_ALARM_CTRL,
-					 0x0400);
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_LASI_CTRL, 0x0004);
-
-		}
-		bnx2x_save_bcm_spirom_ver(bp, params->port,
-					  ext_phy_type,
-					  ext_phy_addr,
-					  params->shmem_base);
-		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
-		bnx2x_bcm8726_external_rom_boot(params);
-
-		/* Need to call module detected on initialization since
-		   the module detection triggered by actual module
-		   insertion might occur before driver is loaded, and when
-		   driver is loaded, it reset all registers, including the
-		   transmitter */
-		bnx2x_sfp_module_detection(params);
-
-		/* Set Flow control */
-		bnx2x_ext_phy_set_pause(params, vars);
-		if (params->req_line_speed == SPEED_1000) {
-			DP(NETIF_MSG_LINK, "Setting 1G force\n");
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_CTRL, 0x40);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_10G_CTRL2, 0xD);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_LASI_CTRL, 0x5);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_RX_ALARM_CTRL,
-					 0x400);
-		} else if ((params->req_line_speed ==
-			    SPEED_AUTO_NEG) &&
-			   ((params->speed_cap_mask &
-			     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
-			DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 MDIO_AN_REG_ADV, 0x20);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CL37_CL73, 0x040c);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CL37_FC_LD, 0x0020);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CL37_AN, 0x1000);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CTRL, 0x1200);
-
-			/* Enable RX-ALARM control to receive
-			   interrupt for 1G speed change */
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_LASI_CTRL, 0x4);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_RX_ALARM_CTRL,
-					 0x400);
-
-		} else { /* Default 10G. Set only LASI control */
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_LASI_CTRL, 1);
-		}
-
-		/* Set TX PreEmphasis if needed */
-		if ((params->feature_config_flags &
-		     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
-			DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
-			   "TX_CTRL2 0x%x\n",
-			   params->xgxs_config_tx[0],
-			   params->xgxs_config_tx[1]);
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_8726_TX_CTRL1,
-					 params->xgxs_config_tx[0]);
-
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_8726_TX_CTRL2,
-					 params->xgxs_config_tx[1]);
-		}
-		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-	{
-		u16 tmp1;
-		u16 rx_alarm_ctrl_val;
-		u16 lasi_ctrl_val;
-		if (ext_phy_type ==
-		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
-			rx_alarm_ctrl_val = 0x400;
-			lasi_ctrl_val = 0x0004;
-		} else {
-			rx_alarm_ctrl_val = (1<<2);
-			lasi_ctrl_val = 0x0004;
-		}
-
-		/* enable LASI */
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_RX_ALARM_CTRL,
-				 rx_alarm_ctrl_val);
-
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_LASI_CTRL,
-				 lasi_ctrl_val);
-
-		bnx2x_8073_set_pause_cl37(params, vars);
-
-		if (ext_phy_type ==
-		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)
-			bnx2x_bcm8072_external_rom_boot(params);
-		else
-			/* In case of 8073 with long xaui lines,
-			   don't set the 8073 xaui low power*/
-			bnx2x_bcm8073_set_xaui_low_power_mode(params);
-
-		bnx2x_cl45_read(bp, params->port,
-				ext_phy_type,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_M8051_MSGOUT_REG,
-				&tmp1);
-
-		bnx2x_cl45_read(bp, params->port,
-				ext_phy_type,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_RX_ALARM, &tmp1);
-
-		DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1):"
-		   "0x%x\n", tmp1);
-
-		/* If this is forced speed, set to KR or KX
-		 * (all other are not supported)
-		 */
-		if (params->loopback_mode == LOOPBACK_EXT) {
-			bnx2x_bcm807x_force_10G(params);
-			DP(NETIF_MSG_LINK,
-			   "Forced speed 10G on 807X\n");
-			break;
-		} else {
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type, ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_BCM_CTRL,
-					 0x0002);
-		}
-		if (params->req_line_speed != SPEED_AUTO_NEG) {
-			if (params->req_line_speed == SPEED_10000) {
-				val = (1<<7);
-			} else if (params->req_line_speed ==
-				   SPEED_2500) {
-				val = (1<<5);
-				/* Note that 2.5G works only
-				   when used with 1G advertisment */
-			} else
-				val = (1<<5);
-		} else {
-
-			val = 0;
-			if (params->speed_cap_mask &
-			    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
-				val |= (1<<7);
-
-			/* Note that 2.5G works only when
-			   used with 1G advertisment */
-			if (params->speed_cap_mask &
-			    (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
-			     PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
-				val |= (1<<5);
-			DP(NETIF_MSG_LINK,
-			   "807x autoneg val = 0x%x\n", val);
-		}
-
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_AN_DEVAD,
-				 MDIO_AN_REG_ADV, val);
-		if (ext_phy_type ==
-		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_AN_DEVAD,
-					MDIO_AN_REG_8073_2_5G, &tmp1);
-
-			if (((params->speed_cap_mask &
-			      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
-			     (params->req_line_speed ==
-			      SPEED_AUTO_NEG)) ||
-			    (params->req_line_speed ==
-			     SPEED_2500)) {
-				u16 phy_ver;
-				/* Allow 2.5G for A1 and above */
-				bnx2x_cl45_read(bp, params->port,
-						PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-						ext_phy_addr,
+		if (DUAL_MEDIA(params)) {
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8727_PCS_GP, &val);
+			val |= (3<<10);
+			bnx2x_cl45_write(bp, phy,
 					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8073_CHIP_REV, &phy_ver);
-				DP(NETIF_MSG_LINK, "Add 2.5G\n");
-				if (phy_ver > 0)
-					tmp1 |= 1;
-				else
-					tmp1 &= 0xfffe;
-			} else {
-				DP(NETIF_MSG_LINK, "Disable 2.5G\n");
-				tmp1 &= 0xfffe;
-			}
-
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_8073_2_5G, tmp1);
-		}
-
-		/* Add support for CL37 (passive mode) II */
-
-		bnx2x_cl45_read(bp, params->port,
-				ext_phy_type,
-				ext_phy_addr,
-				MDIO_AN_DEVAD,
-				MDIO_AN_REG_CL37_FC_LD,
-				&tmp1);
-
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_AN_DEVAD,
-				 MDIO_AN_REG_CL37_FC_LD, (tmp1 |
-				 ((params->req_duplex == DUPLEX_FULL) ?
-				 0x20 : 0x40)));
-
-		/* Add support for CL37 (passive mode) III */
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_AN_DEVAD,
-				 MDIO_AN_REG_CL37_AN, 0x1000);
-
-		if (ext_phy_type ==
-		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-			/* The SNR will improve about 2db by changing
-			   BW and FEE main tap. Rest commands are executed
-			   after link is up*/
-			/*Change FFE main cursor to 5 in EDC register*/
-			if (bnx2x_8073_is_snr_needed(params))
-				bnx2x_cl45_write(bp, params->port,
-						 ext_phy_type,
-						 ext_phy_addr,
-						 MDIO_PMA_DEVAD,
-						 MDIO_PMA_REG_EDC_FFE_MAIN,
-						 0xFB0C);
-
-			/* Enable FEC (Forware Error Correction)
-			   Request in the AN */
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_AN_DEVAD,
-					MDIO_AN_REG_ADV2, &tmp1);
-
-			tmp1 |= (1<<15);
-
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_ADV2, tmp1);
-
-		}
-
-		bnx2x_ext_phy_set_pause(params, vars);
-
-		/* Restart autoneg */
-		msleep(500);
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_AN_DEVAD,
-				 MDIO_AN_REG_CTRL, 0x1200);
-		DP(NETIF_MSG_LINK, "807x Autoneg Restart: "
-		   "Advertise 1G=%x, 10G=%x\n",
-		   ((val & (1<<5)) > 0),
-		   ((val & (1<<7)) > 0));
-		break;
-	}
-
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-	{
-		u16 tmp1;
-		u16 rx_alarm_ctrl_val;
-		u16 lasi_ctrl_val;
-
-		/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
-
-		u16 mod_abs;
-		rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
-		lasi_ctrl_val = 0x0004;
-
-		DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
-		/* enable LASI */
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_RX_ALARM_CTRL,
-				 rx_alarm_ctrl_val);
-
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_LASI_CTRL,
-				 lasi_ctrl_val);
-
-		/* Initially configure MOD_ABS to interrupt when
-		   module is presence( bit 8) */
-		bnx2x_cl45_read(bp, params->port,
-				ext_phy_type,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-		/* Set EDC off by setting OPTXLOS signal input to low
-		   (bit 9).
-		   When the EDC is off it locks onto a reference clock and
-		   avoids becoming 'lost'.*/
-		mod_abs &= ~((1<<8) | (1<<9));
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
-
-		/* Make MOD_ABS give interrupt on change */
-		bnx2x_cl45_read(bp, params->port,
-				ext_phy_type,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_8727_PCS_OPT_CTRL,
-				&val);
-		val |= (1<<12);
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
-				 val);
-
-		/* Set 8727 GPIOs to input to allow reading from the
-		   8727 GPIO0 status which reflect SFP+ module
-		   over-current */
-
-		bnx2x_cl45_read(bp, params->port,
-				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_8727_PCS_OPT_CTRL,
-				&val);
-		val &= 0xff8f; /* Reset bits 4-6 */
-		bnx2x_cl45_write(bp, params->port,
-				 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
-				 val);
-
-		bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
-		bnx2x_bcm8073_set_xaui_low_power_mode(params);
-
-		bnx2x_cl45_read(bp, params->port,
-				ext_phy_type,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_M8051_MSGOUT_REG,
-				&tmp1);
-
-		bnx2x_cl45_read(bp, params->port,
-				ext_phy_type,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_RX_ALARM, &tmp1);
-
-		/* Set option 1G speed */
-		if (params->req_line_speed == SPEED_1000) {
-
-			DP(NETIF_MSG_LINK, "Setting 1G force\n");
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_CTRL, 0x40);
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_10G_CTRL2, 0xD);
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_10G_CTRL2, &tmp1);
-			DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-
-		} else if ((params->req_line_speed ==
-			    SPEED_AUTO_NEG) &&
-			   ((params->speed_cap_mask &
-			     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
-
-			DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 MDIO_PMA_REG_8727_MISC_CTRL, 0);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CL37_AN, 0x1300);
-		} else {
-			/* Since the 8727 has only single reset pin,
-			   need to set the 10G registers although it is
-			   default */
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 MDIO_AN_REG_CTRL, 0x0020);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_AN_DEVAD,
-					 0x7, 0x0100);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_CTRL, 0x2040);
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-					 ext_phy_addr, MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_10G_CTRL2, 0x0008);
-		}
-
-		/* Set 2-wire transfer rate of SFP+ module EEPROM
-		 * to 100Khz since some DACs(direct attached cables) do
-		 * not work at 400Khz.
-		 */
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
-				 0xa001);
-
-		/* Set TX PreEmphasis if needed */
-		if ((params->feature_config_flags &
-		     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
-			DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
-			   "TX_CTRL2 0x%x\n",
-			   params->xgxs_config_tx[0],
-			   params->xgxs_config_tx[1]);
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_8727_TX_CTRL1,
-					 params->xgxs_config_tx[0]);
-
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_8727_TX_CTRL2,
-					 params->xgxs_config_tx[1]);
-		}
-
-		break;
-	}
-
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-	{
-		u16 fw_ver1, fw_ver2;
-		DP(NETIF_MSG_LINK,
-		   "Setting the SFX7101 LASI indication\n");
-
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_LASI_CTRL, 0x1);
-		DP(NETIF_MSG_LINK,
-		   "Setting the SFX7101 LED to blink on traffic\n");
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
-
-		bnx2x_ext_phy_set_pause(params, vars);
-		/* Restart autoneg */
-		bnx2x_cl45_read(bp, params->port,
-				ext_phy_type,
-				ext_phy_addr,
-				MDIO_AN_DEVAD,
-				MDIO_AN_REG_CTRL, &val);
-		val |= 0x200;
-		bnx2x_cl45_write(bp, params->port,
-				 ext_phy_type,
-				 ext_phy_addr,
-				 MDIO_AN_DEVAD,
-				 MDIO_AN_REG_CTRL, val);
-
-		/* Save spirom version */
-		bnx2x_cl45_read(bp, params->port, ext_phy_type,
-				ext_phy_addr, MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_7101_VER1, &fw_ver1);
-
-		bnx2x_cl45_read(bp, params->port, ext_phy_type,
-				ext_phy_addr, MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_7101_VER2, &fw_ver2);
-
-		bnx2x_save_spirom_version(params->bp, params->port,
-					  params->shmem_base,
-					  (u32)(fw_ver1<<16 | fw_ver2));
-		break;
+					MDIO_PMA_REG_8727_PCS_GP, val);
 	}
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
-		/* This phy uses the NIG latch mechanism since link
-		   indication arrives through its LED4 and not via
-		   its LASI signal, so we get steady signal
-		   instead of clear on read */
-		bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
-			      1 << NIG_LATCH_BC_ENABLE_MI_INT);
-
-		bnx2x_cl45_write(bp, params->port,
-				 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
-				 ext_phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_CTRL, 0x0000);
-
-		bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
-		if (params->req_line_speed == SPEED_AUTO_NEG) {
-
-			u16 autoneg_val, an_1000_val, an_10_100_val;
-			/* set 1000 speed advertisement */
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_AN_DEVAD,
-					MDIO_AN_REG_8481_1000T_CTRL,
-					&an_1000_val);
-
-			if (params->speed_cap_mask &
-			    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
-				an_1000_val |= (1<<8);
-				if (params->req_duplex == DUPLEX_FULL)
-					an_1000_val |= (1<<9);
-				DP(NETIF_MSG_LINK, "Advertising 1G\n");
-			} else
-				an_1000_val &= ~((1<<8) | (1<<9));
-
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_8481_1000T_CTRL,
-					 an_1000_val);
-
-			/* set 100 speed advertisement */
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_AN_DEVAD,
-					MDIO_AN_REG_8481_LEGACY_AN_ADV,
-					&an_10_100_val);
-
-			if (params->speed_cap_mask &
-			    (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-			     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
-				an_10_100_val |= (1<<7);
-				if (params->req_duplex == DUPLEX_FULL)
-					an_10_100_val |= (1<<8);
-				DP(NETIF_MSG_LINK,
-				   "Advertising 100M\n");
-			} else
-				an_10_100_val &= ~((1<<7) | (1<<8));
-
-			/* set 10 speed advertisement */
-			if (params->speed_cap_mask &
-			    (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
-			     PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
-				an_10_100_val |= (1<<5);
-				if (params->req_duplex == DUPLEX_FULL)
-					an_10_100_val |= (1<<6);
-				DP(NETIF_MSG_LINK, "Advertising 10M\n");
-			}
-			else
-				an_10_100_val &= ~((1<<5) | (1<<6));
-
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_8481_LEGACY_AN_ADV,
-					 an_10_100_val);
-
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_AN_DEVAD,
-					MDIO_AN_REG_8481_LEGACY_MII_CTRL,
-					&autoneg_val);
-
-			/* Disable forced speed */
-			autoneg_val &= ~(1<<6|1<<13);
-
-			/* Enable autoneg and restart autoneg
-			   for legacy speeds */
-			autoneg_val |= (1<<9|1<<12);
-
-			if (params->req_duplex == DUPLEX_FULL)
-				autoneg_val |= (1<<8);
-			else
-				autoneg_val &= ~(1<<8);
-
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
-					 autoneg_val);
-
-			if (params->speed_cap_mask &
-			    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
-				DP(NETIF_MSG_LINK, "Advertising 10G\n");
-				/* Restart autoneg for 10G*/
-
-				bnx2x_cl45_write(bp, params->port,
-						 ext_phy_type,
-						 ext_phy_addr,
-						 MDIO_AN_DEVAD,
-						 MDIO_AN_REG_CTRL, 0x3200);
-			}
-		} else {
-			/* Force speed */
-			u16 autoneg_ctrl, pma_ctrl;
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_AN_DEVAD,
-					MDIO_AN_REG_8481_LEGACY_MII_CTRL,
-					&autoneg_ctrl);
-
-			/* Disable autoneg */
-			autoneg_ctrl &= ~(1<<12);
-
-			/* Set 1000 force */
-			switch (params->req_line_speed) {
-			case SPEED_10000:
-				DP(NETIF_MSG_LINK,
-				   "Unable to set 10G force !\n");
-				break;
-			case SPEED_1000:
-				bnx2x_cl45_read(bp, params->port,
-						ext_phy_type,
-						ext_phy_addr,
-						MDIO_PMA_DEVAD,
-						MDIO_PMA_REG_CTRL,
-						&pma_ctrl);
-				autoneg_ctrl &= ~(1<<13);
-				autoneg_ctrl |= (1<<6);
-				pma_ctrl &= ~(1<<13);
-				pma_ctrl |= (1<<6);
-				DP(NETIF_MSG_LINK,
-				   "Setting 1000M force\n");
-				bnx2x_cl45_write(bp, params->port,
-						 ext_phy_type,
-						 ext_phy_addr,
-						 MDIO_PMA_DEVAD,
-						 MDIO_PMA_REG_CTRL,
-						 pma_ctrl);
-				break;
-			case SPEED_100:
-				autoneg_ctrl |= (1<<13);
-				autoneg_ctrl &= ~(1<<6);
-				DP(NETIF_MSG_LINK,
-				   "Setting 100M force\n");
-				break;
-			case SPEED_10:
-				autoneg_ctrl &= ~(1<<13);
-				autoneg_ctrl &= ~(1<<6);
-				DP(NETIF_MSG_LINK,
-				   "Setting 10M force\n");
-				break;
-			}
-
-			/* Duplex mode */
-			if (params->req_duplex == DUPLEX_FULL) {
-				autoneg_ctrl |= (1<<8);
-				DP(NETIF_MSG_LINK,
-				   "Setting full duplex\n");
-			} else
-				autoneg_ctrl &= ~(1<<8);
-
-			/* Update autoneg ctrl and pma ctrl */
-			bnx2x_cl45_write(bp, params->port,
-					 ext_phy_type,
-					 ext_phy_addr,
-					 MDIO_AN_DEVAD,
-					 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
-					 autoneg_ctrl);
-		}
-
-		/* Save spirom version */
-		bnx2x_save_8481_spirom_version(bp, params->port,
-					       ext_phy_addr,
-					       params->shmem_base);
-		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
-		DP(NETIF_MSG_LINK,
-		   "XGXS PHY Failure detected 0x%x\n",
-		   params->ext_phy_config);
-		rc = -EINVAL;
-		break;
-	default:
-		DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
-		   params->ext_phy_config);
-		rc = -EINVAL;
-		break;
-	}
-
-	} else { /* SerDes */
-
-	ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
-	switch (ext_phy_type) {
-	case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
-		DP(NETIF_MSG_LINK, "SerDes Direct\n");
-		break;
-
-	case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
-		DP(NETIF_MSG_LINK, "SerDes 5482\n");
-		break;
+	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+		   ((phy->speed_cap_mask &
+		     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+		   ((phy->speed_cap_mask &
+		     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+		DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+	} else {
+		/**
+		 * Since the 8727 has only single reset pin, need to set the 10G
+		 * registers although it is default
+		 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+				 0x0020);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+				 0x0008);
+	}
 
-	default:
-		DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
-		   params->ext_phy_config);
-		break;
-	}
+	/* Set 2-wire transfer rate of SFP+ module EEPROM
+	 * to 100Khz since some DACs(direct attached cables) do
+	 * not work at 400Khz.
+	 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+			 0xa001);
+
+	/* Set TX PreEmphasis if needed */
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+			   phy->tx_preemphasis[0],
+			   phy->tx_preemphasis[1]);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+				 phy->tx_preemphasis[0]);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+				 phy->tx_preemphasis[1]);
 	}
-	return rc;
+
+	return 0;
 }
 
-static void bnx2x_8727_handle_mod_abs(struct link_params *params)
+static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+				      struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
 	u16 mod_abs, rx_alarm_status;
-	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
 	u32 val = REG_RD(bp, params->shmem_base +
			 offsetof(struct shmem_region, dev_info.
				  port_feature_config[params->port].
				  config));
-	bnx2x_cl45_read(bp, params->port,
-			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-			ext_phy_addr,
+	bnx2x_cl45_read(bp, phy,
			MDIO_PMA_DEVAD,
			MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
	if (mod_abs & (1<<8)) {
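The bulk of this diff is one mechanical refactor: every clause-45 MDIO accessor used to take a (port, ext_phy_type, ext_phy_addr) triple on each call, and now receives a single struct bnx2x_phy that also carries per-PHY state such as flags and preemphasis values. The standalone sketch below models only that signature migration; the struct layout and helper names are illustrative stand-ins, not the driver's real bnx2x_link.h definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct bnx2x_phy: the real one carries
 * many more fields (req_line_speed, speed_cap_mask, tx_preemphasis...). */
struct phy {
	unsigned int addr;   /* was the ext_phy_addr argument */
	unsigned int type;   /* was the ext_phy_type argument */
	unsigned int flags;  /* e.g. a FLAGS_NOC-style bit */
};

/* Old shape: the caller repeats the addressing triple on every access. */
static void cl45_write_old(int port, unsigned int type, unsigned int addr,
			   unsigned int devad, unsigned int reg,
			   unsigned int val)
{
	printf("port%d phy(type=0x%x, addr=%u) %u.0x%04x <- 0x%04x\n",
	       port, type, addr, devad, reg, val);
}

/* New shape: the phy object carries the addressing, so call sites
 * shrink and per-PHY quirks (flags) travel with the object. */
static void cl45_write_new(const struct phy *phy,
			   unsigned int devad, unsigned int reg,
			   unsigned int val)
{
	printf("phy(type=0x%x, addr=%u) %u.0x%04x <- 0x%04x\n",
	       phy->type, phy->addr, devad, reg, val);
}

int main(void)
{
	struct phy p = { .addr = 1, .type = 0x8727, .flags = 0 };

	cl45_write_old(0, 0x8727, 1, 1, 0x9000, 0x0001);
	cl45_write_new(&p, 1, 0x9000, 0x0001);
	return 0;
}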
@@ -4602,18 +4935,16 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
		   (bit 9).
		   When the EDC is off it locks onto a reference clock and
		   avoids becoming 'lost'.*/
-		mod_abs &= ~((1<<8)|(1<<9));
-		bnx2x_cl45_write(bp, params->port,
-				 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-				 ext_phy_addr,
+		mod_abs &= ~(1<<8);
+		if (!(phy->flags & FLAGS_NOC))
+			mod_abs &= ~(1<<9);
+		bnx2x_cl45_write(bp, phy,
				 MDIO_PMA_DEVAD,
				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
		/* Clear RX alarm since it stays up as long as
		   the mod_abs wasn't changed */
-		bnx2x_cl45_read(bp, params->port,
-				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-				ext_phy_addr,
+		bnx2x_cl45_read(bp, phy,
				MDIO_PMA_DEVAD,
				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
 
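This hunk and the next one perform the same read-modify-write on the PHY-identifier register from opposite directions: one branch clears bit 8 (and, except on NOC boards, bit 9) while the other sets them, so the MOD_ABS interrupt always fires on the next change of module presence, and the OPRXLOS/EDC behavior follows the comments in the hunks. A standalone model of that bit discipline, with the register simulated in memory and bit names taken from those comments:

#include <stdint.h>
#include <stdio.h>

#define MOD_ABS_EDGE_SELECT  (1 << 8)  /* which presence edge interrupts */
#define OPRXLOS_DEFAULT_POL  (1 << 9)  /* default OPRXLOS polarity */

static uint16_t phy_id_reg = 0xffff;   /* simulated PHY-identifier register */

/* Branch taken while no module is seated: clear bit 8 so insertion
 * interrupts, and (non-NOC only) force OPRXLOS low so the EDC locks
 * onto its reference clock instead of hunting for a lost signal. */
static void arm_for_insertion(int flags_noc)
{
	uint16_t v = phy_id_reg;        /* read   */
	v &= ~MOD_ABS_EDGE_SELECT;      /* modify */
	if (!flags_noc)
		v &= ~OPRXLOS_DEFAULT_POL;
	phy_id_reg = v;                 /* write  */
}

/* Branch taken once a module is seated: set bit 8 so removal
 * interrupts, and restore the default OPRXLOS polarity. */
static void arm_for_removal(int flags_noc)
{
	uint16_t v = phy_id_reg;
	v |= MOD_ABS_EDGE_SELECT;
	if (!flags_noc)
		v |= OPRXLOS_DEFAULT_POL;
	phy_id_reg = v;
}

int main(void)
{
	arm_for_insertion(0);
	printf("armed for insertion: 0x%04x\n", (unsigned)phy_id_reg);
	arm_for_removal(0);
	printf("armed for removal:   0x%04x\n", (unsigned)phy_id_reg);
	return 0;
}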
@@ -4630,33 +4961,28 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
		   2. Restore the default polarity of the OPRXLOS signal and
		   this signal will then correctly indicate the presence or
		   absence of the Rx signal. (bit 9) */
-		mod_abs |= ((1<<8)|(1<<9));
-		bnx2x_cl45_write(bp, params->port,
-				 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-				 ext_phy_addr,
+		mod_abs |= (1<<8);
+		if (!(phy->flags & FLAGS_NOC))
+			mod_abs |= (1<<9);
+		bnx2x_cl45_write(bp, phy,
				 MDIO_PMA_DEVAD,
				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
		/* Clear RX alarm since it stays up as long as
		   the mod_abs wasn't changed. This is need to be done
		   before calling the module detection, otherwise it will clear
		   the link update alarm */
-		bnx2x_cl45_read(bp, params->port,
-				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
 
 
		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(bp, params->port,
-					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-					ext_phy_addr, 0);
+			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
 
-		if (bnx2x_wait_for_sfp_module_initialized(params)
-		    == 0)
-			bnx2x_sfp_module_detection(params);
+		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+			bnx2x_sfp_module_detection(phy, params);
		else
			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
	}
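Before shutting the SFP+ transmitter off, the hunk above decodes the shared-memory option-module enforcement field with a mask-then-compare idiom, matching the whole multi-bit field rather than testing a single bit. A small standalone illustration of that idiom; the field layout and names below are invented for the example, while the real masks live in the driver's bnx2x_hsi.h:

#include <stdint.h>
#include <stdio.h>

/* Invented layout: a two-bit enforcement field inside a config word. */
#define ENFRCMNT_MASK        0x3u
#define ENFRCMNT_NO_ACTION   0x0u
#define ENFRCMNT_DISABLE_TX  0x1u
#define ENFRCMNT_POWER_DOWN  0x2u

static void handle_unapproved_module(uint32_t config)
{
	/* Mask first, then compare against the full encoded value, so a
	 * field value like 0x2 is not mistaken for "bit 0x1 is clear". */
	if ((config & ENFRCMNT_MASK) == ENFRCMNT_DISABLE_TX)
		printf("disable SFP+ transmitter\n");
	else
		printf("leave transmitter alone\n");
}

int main(void)
{
	handle_unapproved_module(ENFRCMNT_DISABLE_TX);
	handle_unapproved_module(ENFRCMNT_POWER_DOWN);
	return 0;
}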
@@ -4667,1298 +4993,1711 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
	   module plugged in/out */
 }
 
+static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
 
-static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
-				   struct link_vars *vars,
-				   u8 is_mi_int)
 {
 	struct bnx2x *bp = params->bp;
-	u32 ext_phy_type;
-	u8 ext_phy_addr;
-	u16 val1 = 0, val2;
-	u16 rx_sd, pcs_status;
-	u8 ext_phy_link_up = 0;
-	u8 port = params->port;
+	u8 link_up = 0;
+	u16 link_status = 0;
+	u16 rx_alarm_status, lasi_ctrl, val1;
+
+	/* If PHY is not initialized, do not check link status */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+			&lasi_ctrl);
+	if (!lasi_ctrl)
+		return 0;
 
-	if (vars->phy_flags & PHY_XGXS_FLAG) {
-		ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-		ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-			DP(NETIF_MSG_LINK, "XGXS Direct\n");
-			ext_phy_link_up = 1;
-			break;
+	/* Check the LASI */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
+			&rx_alarm_status);
+	vars->line_speed = 0;
+	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
 
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-			DP(NETIF_MSG_LINK, "XGXS 8705\n");
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_WIS_DEVAD,
-					MDIO_WIS_REG_LASI_STATUS, &val1);
-			DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
-
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_WIS_DEVAD,
-					MDIO_WIS_REG_LASI_STATUS, &val1);
-			DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
-
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_RX_SD, &rx_sd);
-
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					1,
-					0xc809, &val1);
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					1,
-					0xc809, &val1);
-
-			DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
-			ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) &&
-					   ((val1 & (1<<8)) == 0));
-			if (ext_phy_link_up)
-				vars->line_speed = SPEED_10000;
-			break;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
 
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-			DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
-			/* Clear RX Alarm*/
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
-					&val2);
-			/* clear LASI indication*/
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
-					&val1);
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
-					&val2);
-			DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x-->"
-			   "0x%x\n", val1, val2);
-
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD,
-					&rx_sd);
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS,
-					&pcs_status);
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
-					&val2);
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-					ext_phy_addr,
-					MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
-					&val2);
-
-			DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x"
-			   " pcs_status 0x%x 1Gbps link_status 0x%x\n",
-			   rx_sd, pcs_status, val2);
-			/* link is up if both bit 0 of pmd_rx_sd and
-			 * bit 0 of pcs_status are set, or if the autoneg bit
-			   1 is set
-			 */
-			ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
-					   (val2 & (1<<1)));
-			if (ext_phy_link_up) {
-				if (ext_phy_type ==
-				    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
-					/* If transmitter is disabled,
-					   ignore false link up indication */
-					bnx2x_cl45_read(bp, params->port,
-							ext_phy_type,
-							ext_phy_addr,
-							MDIO_PMA_DEVAD,
-							MDIO_PMA_REG_PHY_IDENTIFIER,
-							&val1);
-					if (val1 & (1<<15)) {
-						DP(NETIF_MSG_LINK, "Tx is "
-						   "disabled\n");
-						ext_phy_link_up = 0;
-						break;
-					}
-				}
-				if (val2 & (1<<1))
-					vars->line_speed = SPEED_1000;
-				else
-					vars->line_speed = SPEED_10000;
-			}
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-		{
-			u16 link_status = 0;
-			u16 rx_alarm_status;
-			/* Check the LASI */
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
-
-			DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
-			   rx_alarm_status);
-
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_LASI_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
 
-			DP(NETIF_MSG_LINK,
-			   "8727 LASI status 0x%x\n",
-			   val1);
+	/* Clear MSG-OUT */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-			/* Clear MSG-OUT */
-			bnx2x_cl45_read(bp, params->port,
-					ext_phy_type,
-					ext_phy_addr,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_M8051_MSGOUT_REG,
-					&val1);
+	/**
+	 * If a module is present and there is need to check
+	 * for over current
+	 */
+	if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
+		/* Check over-current using 8727 GPIO0 input*/
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+				&val1);
+
+		if ((val1 & (1<<8)) == 0) {
+			DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
+				       " on port %d\n", params->port);
+			netdev_err(bp->dev, "Error: Power fault on Port %d has"
+					    " been detected and the power to "
+					    "that SFP+ module has been removed"
+					    " to prevent failure of the card."
+					    " Please remove the SFP+ module and"
+					    " restart the system to clear this"
+					    " error.\n",
+				   params->port);
 
			/*
-			 * If a module is present and there is need to check
-			 * for over current
+			 * Disable all RX_ALARMs except for
+			 * mod_abs
			 */
-			if (!(params->feature_config_flags &
-			      FEATURE_CONFIG_BCM8727_NOC) &&
-			    !(rx_alarm_status & (1<<5))) {
-				/* Check over-current using 8727 GPIO0 input*/
-				bnx2x_cl45_read(bp, params->port,
-						ext_phy_type,
-						ext_phy_addr,
-						MDIO_PMA_DEVAD,
-						MDIO_PMA_REG_8727_GPIO_CTRL,
-						&val1);
-
-				if ((val1 & (1<<8)) == 0) {
-					DP(NETIF_MSG_LINK, "8727 Power fault"
-					   " has been detected on "
-					   "port %d\n",
-					   params->port);
-					netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
-						   params->port);
-					/*
-					 * Disable all RX_ALARMs except for
-					 * mod_abs
-					 */
-					bnx2x_cl45_write(bp, params->port,
-							 ext_phy_type,
-							 ext_phy_addr,
-							 MDIO_PMA_DEVAD,
-							 MDIO_PMA_REG_RX_ALARM_CTRL,
-							 (1<<5));
-
-					bnx2x_cl45_read(bp, params->port,
-							ext_phy_type,
-							ext_phy_addr,
-							MDIO_PMA_DEVAD,
-							MDIO_PMA_REG_PHY_IDENTIFIER,
-							&val1);
-					/* Wait for module_absent_event */
-					val1 |= (1<<8);
-					bnx2x_cl45_write(bp, params->port,
-							 ext_phy_type,
-							 ext_phy_addr,
-							 MDIO_PMA_DEVAD,
-							 MDIO_PMA_REG_PHY_IDENTIFIER,
-							 val1);
-					/* Clear RX alarm */
-					bnx2x_cl45_read(bp, params->port,
-							ext_phy_type,
-							ext_phy_addr,
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
4881 MDIO_PMA_DEVAD,
4882 MDIO_PMA_REG_RX_ALARM,
4883 &rx_alarm_status);
4884 break;
4885 }
4886 } /* Over current check */
4887
4888 /* When module absent bit is set, check module */
4889 if (rx_alarm_status & (1<<5)) {
4890 bnx2x_8727_handle_mod_abs(params);
4891 /* Enable all mod_abs and link detection bits */
4892 bnx2x_cl45_write(bp, params->port,
4893 ext_phy_type,
4894 ext_phy_addr,
4895 MDIO_PMA_DEVAD,
4896 MDIO_PMA_REG_RX_ALARM_CTRL,
4897 ((1<<5) | (1<<2)));
4898 }
4899
4900 /* If transmitter is disabled,
4901 ignore false link up indication */
4902 bnx2x_cl45_read(bp, params->port,
4903 ext_phy_type,
4904 ext_phy_addr,
4905 MDIO_PMA_DEVAD,
4906 MDIO_PMA_REG_PHY_IDENTIFIER,
4907 &val1);
4908 if (val1 & (1<<15)) {
4909 DP(NETIF_MSG_LINK, "Tx is disabled\n");
4910 ext_phy_link_up = 0;
4911 break;
4912 }
4913 5058
4914 bnx2x_cl45_read(bp, params->port, 5059 bnx2x_cl45_read(bp, phy,
4915 ext_phy_type, 5060 MDIO_PMA_DEVAD,
4916 ext_phy_addr, 5061 MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
4917 MDIO_PMA_DEVAD, 5062 /* Wait for module_absent_event */
4918 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 5063 val1 |= (1<<8);
4919 &link_status); 5064 bnx2x_cl45_write(bp, phy,
4920 5065 MDIO_PMA_DEVAD,
4921 /* Bits 0..2 --> speed detected, 5066 MDIO_PMA_REG_PHY_IDENTIFIER, val1);
4922 bits 13..15--> link is down */ 5067 /* Clear RX alarm */
4923 if ((link_status & (1<<2)) && 5068 bnx2x_cl45_read(bp, phy,
4924 (!(link_status & (1<<15)))) { 5069 MDIO_PMA_DEVAD,
4925 ext_phy_link_up = 1; 5070 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4926 vars->line_speed = SPEED_10000; 5071 return 0;
4927 } else if ((link_status & (1<<0)) &&
4928 (!(link_status & (1<<13)))) {
4929 ext_phy_link_up = 1;
4930 vars->line_speed = SPEED_1000;
4931 DP(NETIF_MSG_LINK,
4932 "port %x: External link"
4933 " up in 1G\n", params->port);
4934 } else {
4935 ext_phy_link_up = 0;
4936 DP(NETIF_MSG_LINK,
4937 "port %x: External link"
4938 " is down\n", params->port);
4939 }
4940 break;
4941 } 5072 }
5073 } /* Over current check */
5074
5075 /* When module absent bit is set, check module */
5076 if (rx_alarm_status & (1<<5)) {
5077 bnx2x_8727_handle_mod_abs(phy, params);
5078 /* Enable all mod_abs and link detection bits */
5079 bnx2x_cl45_write(bp, phy,
5080 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
5081 ((1<<5) | (1<<2)));
5082 }
5083 DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
5084 bnx2x_8727_specific_func(phy, params, ENABLE_TX);
5085 /* If transmitter is disabled, ignore false link up indication */
5086 bnx2x_cl45_read(bp, phy,
5087 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
5088 if (val1 & (1<<15)) {
5089 DP(NETIF_MSG_LINK, "Tx is disabled\n");
5090 return 0;
5091 }
4942 5092
4943 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 5093 bnx2x_cl45_read(bp, phy,
4944 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 5094 MDIO_PMA_DEVAD,
4945 { 5095 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
4946 u16 link_status = 0;
4947 u16 an1000_status = 0;
4948
4949 if (ext_phy_type ==
4950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
4951 bnx2x_cl45_read(bp, params->port,
4952 ext_phy_type,
4953 ext_phy_addr,
4954 MDIO_PCS_DEVAD,
4955 MDIO_PCS_REG_LASI_STATUS, &val1);
4956 bnx2x_cl45_read(bp, params->port,
4957 ext_phy_type,
4958 ext_phy_addr,
4959 MDIO_PCS_DEVAD,
4960 MDIO_PCS_REG_LASI_STATUS, &val2);
4961 DP(NETIF_MSG_LINK,
4962 "870x LASI status 0x%x->0x%x\n",
4963 val1, val2);
4964 } else {
4965 /* In 8073, port1 is directed through emac0 and
4966 * port0 is directed through emac1
4967 */
4968 bnx2x_cl45_read(bp, params->port,
4969 ext_phy_type,
4970 ext_phy_addr,
4971 MDIO_PMA_DEVAD,
4972 MDIO_PMA_REG_LASI_STATUS, &val1);
4973
4974 DP(NETIF_MSG_LINK,
4975 "8703 LASI status 0x%x\n",
4976 val1);
4977 }
4978 5096
4979 /* clear the interrupt LASI status register */ 5097 /* Bits 0..2 --> speed detected,
4980 bnx2x_cl45_read(bp, params->port, 5098 bits 13..15--> link is down */
4981 ext_phy_type, 5099 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
4982 ext_phy_addr, 5100 link_up = 1;
4983 MDIO_PCS_DEVAD, 5101 vars->line_speed = SPEED_10000;
4984 MDIO_PCS_REG_STATUS, &val2); 5102 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
4985 bnx2x_cl45_read(bp, params->port, 5103 link_up = 1;
4986 ext_phy_type, 5104 vars->line_speed = SPEED_1000;
4987 ext_phy_addr, 5105 DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
4988 MDIO_PCS_DEVAD, 5106 params->port);
4989 MDIO_PCS_REG_STATUS, &val1); 5107 } else {
4990 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", 5108 link_up = 0;
4991 val2, val1); 5109 DP(NETIF_MSG_LINK, "port %x: External link is down\n",
4992 /* Clear MSG-OUT */ 5110 params->port);
4993 bnx2x_cl45_read(bp, params->port, 5111 }
4994 ext_phy_type, 5112 if (link_up)
4995 ext_phy_addr, 5113 bnx2x_ext_phy_resolve_fc(phy, params, vars);
4996 MDIO_PMA_DEVAD, 5114
4997 MDIO_PMA_REG_M8051_MSGOUT_REG, 5115 if ((DUAL_MEDIA(params)) &&
4998 &val1); 5116 (phy->req_line_speed == SPEED_1000)) {
4999 5117 bnx2x_cl45_read(bp, phy,
5000 /* Check the LASI */ 5118 MDIO_PMA_DEVAD,
5001 bnx2x_cl45_read(bp, params->port, 5119 MDIO_PMA_REG_8727_PCS_GP, &val1);
5002 ext_phy_type, 5120 /**
5003 ext_phy_addr, 5121 * In case of dual-media board and 1G, power up the XAUI side,
5004 MDIO_PMA_DEVAD, 5122 * otherwise power it down. For 10G it is done automatically
5005 MDIO_PMA_REG_RX_ALARM, &val2); 5123 */
5006 5124 if (link_up)
5007 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); 5125 val1 &= ~(3<<10);
5008 5126 else
5009 /* Check the link status */ 5127 val1 |= (3<<10);
5010 bnx2x_cl45_read(bp, params->port, 5128 bnx2x_cl45_write(bp, phy,
5011 ext_phy_type, 5129 MDIO_PMA_DEVAD,
5012 ext_phy_addr, 5130 MDIO_PMA_REG_8727_PCS_GP, val1);
5013 MDIO_PCS_DEVAD, 5131 }
5014 MDIO_PCS_REG_STATUS, &val2); 5132 return link_up;
5015 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2); 5133}
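The status paths above both funnel into one register decode: MDIO_PMA_REG_8073_SPEED_LINK_STATUS reports the detected speed in bits 0..2 and a per-speed link-down indication in bits 13..15. A minimal standalone sketch of that decode follows; the helper name, return convention and plain integer speed values are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Bits 0..2 --> speed detected, bits 13..15 --> link is down */
static int decode_speed_link_status(uint16_t link_status, int *speed)
{
	if ((link_status & (1 << 2)) && !(link_status & (1 << 15))) {
		*speed = 10000;
		return 1;
	}
	if ((link_status & (1 << 1)) && !(link_status & (1 << 14))) {
		*speed = 2500;
		return 1;
	}
	if ((link_status & (1 << 0)) && !(link_status & (1 << 13))) {
		*speed = 1000;
		return 1;
	}
	return 0;	/* no speed detected, or link flagged down */
}

int main(void)
{
	int speed = 0;
	int up = decode_speed_link_status(0x0004, &speed); /* 10G, link ok */
	printf("link_up=%d speed=%dM\n", up, speed);
	return 0;
}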
5016
5017 bnx2x_cl45_read(bp, params->port,
5018 ext_phy_type,
5019 ext_phy_addr,
5020 MDIO_PMA_DEVAD,
5021 MDIO_PMA_REG_STATUS, &val2);
5022 bnx2x_cl45_read(bp, params->port,
5023 ext_phy_type,
5024 ext_phy_addr,
5025 MDIO_PMA_DEVAD,
5026 MDIO_PMA_REG_STATUS, &val1);
5027 ext_phy_link_up = ((val1 & 4) == 4);
5028 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
5029 if (ext_phy_type ==
5030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
5031
5032 if (ext_phy_link_up &&
5033 ((params->req_line_speed !=
5034 SPEED_10000))) {
5035 if (bnx2x_bcm8073_xaui_wa(params)
5036 != 0) {
5037 ext_phy_link_up = 0;
5038 break;
5039 }
5040 }
5041 bnx2x_cl45_read(bp, params->port,
5042 ext_phy_type,
5043 ext_phy_addr,
5044 MDIO_AN_DEVAD,
5045 MDIO_AN_REG_LINK_STATUS,
5046 &an1000_status);
5047 bnx2x_cl45_read(bp, params->port,
5048 ext_phy_type,
5049 ext_phy_addr,
5050 MDIO_AN_DEVAD,
5051 MDIO_AN_REG_LINK_STATUS,
5052 &an1000_status);
5053
5054 /* Check the link status on 1.1.2 */
5055 bnx2x_cl45_read(bp, params->port,
5056 ext_phy_type,
5057 ext_phy_addr,
5058 MDIO_PMA_DEVAD,
5059 MDIO_PMA_REG_STATUS, &val2);
5060 bnx2x_cl45_read(bp, params->port,
5061 ext_phy_type,
5062 ext_phy_addr,
5063 MDIO_PMA_DEVAD,
5064 MDIO_PMA_REG_STATUS, &val1);
5065 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
5066 "an_link_status=0x%x\n",
5067 val2, val1, an1000_status);
5068
5069 ext_phy_link_up = (((val1 & 4) == 4) ||
5070 (an1000_status & (1<<1)));
5071 if (ext_phy_link_up &&
5072 bnx2x_8073_is_snr_needed(params)) {
5073 /* The SNR will improve about 2dB by
5074 changing the BW and FFE main tap. */
5075
5076 /* The 1st write to change FFE main
5077 tap is set before restart AN */
5078 /* Change PLL Bandwidth in EDC
5079 register */
5080 bnx2x_cl45_write(bp, port, ext_phy_type,
5081 ext_phy_addr,
5082 MDIO_PMA_DEVAD,
5083 MDIO_PMA_REG_PLL_BANDWIDTH,
5084 0x26BC);
5085
5086 /* Change CDR Bandwidth in EDC
5087 register */
5088 bnx2x_cl45_write(bp, port, ext_phy_type,
5089 ext_phy_addr,
5090 MDIO_PMA_DEVAD,
5091 MDIO_PMA_REG_CDR_BANDWIDTH,
5092 0x0333);
5093 }
5094 bnx2x_cl45_read(bp, params->port,
5095 ext_phy_type,
5096 ext_phy_addr,
5097 MDIO_PMA_DEVAD,
5098 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
5099 &link_status);
5100
5101 /* Bits 0..2 --> speed detected,
5102 bits 13..15--> link is down */
5103 if ((link_status & (1<<2)) &&
5104 (!(link_status & (1<<15)))) {
5105 ext_phy_link_up = 1;
5106 vars->line_speed = SPEED_10000;
5107 DP(NETIF_MSG_LINK,
5108 "port %x: External link"
5109 " up in 10G\n", params->port);
5110 } else if ((link_status & (1<<1)) &&
5111 (!(link_status & (1<<14)))) {
5112 ext_phy_link_up = 1;
5113 vars->line_speed = SPEED_2500;
5114 DP(NETIF_MSG_LINK,
5115 "port %x: External link"
5116 " up in 2.5G\n", params->port);
5117 } else if ((link_status & (1<<0)) &&
5118 (!(link_status & (1<<13)))) {
5119 ext_phy_link_up = 1;
5120 vars->line_speed = SPEED_1000;
5121 DP(NETIF_MSG_LINK,
5122 "port %x: External link"
5123 " up in 1G\n", params->port);
5124 } else {
5125 ext_phy_link_up = 0;
5126 DP(NETIF_MSG_LINK,
5127 "port %x: External link"
5128 " is down\n", params->port);
5129 }
5130 } else {
5131 /* See if 1G link is up for the 8072 */
5132 bnx2x_cl45_read(bp, params->port,
5133 ext_phy_type,
5134 ext_phy_addr,
5135 MDIO_AN_DEVAD,
5136 MDIO_AN_REG_LINK_STATUS,
5137 &an1000_status);
5138 bnx2x_cl45_read(bp, params->port,
5139 ext_phy_type,
5140 ext_phy_addr,
5141 MDIO_AN_DEVAD,
5142 MDIO_AN_REG_LINK_STATUS,
5143 &an1000_status);
5144 if (an1000_status & (1<<1)) {
5145 ext_phy_link_up = 1;
5146 vars->line_speed = SPEED_1000;
5147 DP(NETIF_MSG_LINK,
5148 "port %x: External link"
5149 " up in 1G\n", params->port);
5150 } else if (ext_phy_link_up) {
5151 ext_phy_link_up = 1;
5152 vars->line_speed = SPEED_10000;
5153 DP(NETIF_MSG_LINK,
5154 "port %x: External link"
5155 " up in 10G\n", params->port);
5156 }
5157 }
5158 5134
5135static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
5136 struct link_params *params)
5137{
5138 struct bnx2x *bp = params->bp;
5139 /* Disable Transmitter */
5140 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
5141 /* Clear LASI */
5142 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
5159 5143
5160 break; 5144}
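The hunks above repeat one mechanical transformation: the old clause-45 accessors took the port, external PHY type and PHY address as loose arguments, while the new ones take a struct bnx2x_phy descriptor carrying the same data. A toy sketch of that refactoring pattern follows; all types and the stubbed bus access are illustrative stand-ins, not driver code.

#include <stdint.h>

/* illustrative stand-in for the per-PHY descriptor */
struct demo_phy {
	uint8_t  addr;	/* MDIO bus address */
	uint32_t type;	/* PORT_HW_CFG_..._TYPE_* style identifier */
	uint8_t  port;
};

/* stub standing in for the pre-refactor accessor */
static int legacy_cl45_read(uint8_t port, uint32_t type, uint8_t addr,
			    uint8_t devad, uint16_t reg, uint16_t *val)
{
	(void)port; (void)type; (void)addr; (void)devad; (void)reg;
	*val = 0;	/* a real implementation would drive the MDIO bus */
	return 0;
}

/* new-style accessor: callers pass the PHY descriptor, nothing else */
static int demo_cl45_read(struct demo_phy *phy, uint8_t devad,
			  uint16_t reg, uint16_t *val)
{
	return legacy_cl45_read(phy->port, phy->type, phy->addr,
				devad, reg, val);
}

int main(void)
{
	struct demo_phy phy = { .addr = 1, .type = 0, .port = 0 };
	uint16_t val;
	return demo_cl45_read(&phy, 1 /* PMA */, 0x9003, &val);
}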
5161 }
5162 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5163 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5164 ext_phy_addr,
5165 MDIO_PMA_DEVAD,
5166 MDIO_PMA_REG_LASI_STATUS, &val2);
5167 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5168 ext_phy_addr,
5169 MDIO_PMA_DEVAD,
5170 MDIO_PMA_REG_LASI_STATUS, &val1);
5171 DP(NETIF_MSG_LINK,
5172 "10G-base-T LASI status 0x%x->0x%x\n",
5173 val2, val1);
5174 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5175 ext_phy_addr,
5176 MDIO_PMA_DEVAD,
5177 MDIO_PMA_REG_STATUS, &val2);
5178 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5179 ext_phy_addr,
5180 MDIO_PMA_DEVAD,
5181 MDIO_PMA_REG_STATUS, &val1);
5182 DP(NETIF_MSG_LINK,
5183 "10G-base-T PMA status 0x%x->0x%x\n",
5184 val2, val1);
5185 ext_phy_link_up = ((val1 & 4) == 4);
5186 /* if link is up
5187 * print the AN outcome of the SFX7101 PHY
5188 */
5189 if (ext_phy_link_up) {
5190 bnx2x_cl45_read(bp, params->port,
5191 ext_phy_type,
5192 ext_phy_addr,
5193 MDIO_AN_DEVAD,
5194 MDIO_AN_REG_MASTER_STATUS,
5195 &val2);
5196 vars->line_speed = SPEED_10000;
5197 DP(NETIF_MSG_LINK,
5198 "SFX7101 AN status 0x%x->Master=%x\n",
5199 val2,
5200 (val2 & (1<<14)));
5201 }
5202 break;
5203 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5204 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5205 /* Check 10G-BaseT link status */
5206 /* Check PMD signal ok */
5207 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5208 ext_phy_addr,
5209 MDIO_AN_DEVAD,
5210 0xFFFA,
5211 &val1);
5212 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5213 ext_phy_addr,
5214 MDIO_PMA_DEVAD,
5215 MDIO_PMA_REG_8481_PMD_SIGNAL,
5216 &val2);
5217 DP(NETIF_MSG_LINK, "PMD_SIGNAL 1.a811 = 0x%x\n", val2);
5218
5219 /* Check link 10G */
5220 if (val2 & (1<<11)) {
5221 vars->line_speed = SPEED_10000;
5222 ext_phy_link_up = 1;
5223 bnx2x_8481_set_10G_led_mode(params,
5224 ext_phy_type,
5225 ext_phy_addr);
5226 } else { /* Check Legacy speed link */
5227 u16 legacy_status, legacy_speed;
5228
5229 /* Enable expansion register 0x42
5230 (Operation mode status) */
5231 bnx2x_cl45_write(bp, params->port,
5232 ext_phy_type,
5233 ext_phy_addr,
5234 MDIO_AN_DEVAD,
5235 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS,
5236 0xf42);
5237
5238 /* Get legacy speed operation status */
5239 bnx2x_cl45_read(bp, params->port,
5240 ext_phy_type,
5241 ext_phy_addr,
5242 MDIO_AN_DEVAD,
5243 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
5244 &legacy_status);
5245
5246 DP(NETIF_MSG_LINK, "Legacy speed status"
5247 " = 0x%x\n", legacy_status);
5248 ext_phy_link_up = ((legacy_status & (1<<11))
5249 == (1<<11));
5250 if (ext_phy_link_up) {
5251 legacy_speed = (legacy_status & (3<<9));
5252 if (legacy_speed == (0<<9))
5253 vars->line_speed = SPEED_10;
5254 else if (legacy_speed == (1<<9))
5255 vars->line_speed =
5256 SPEED_100;
5257 else if (legacy_speed == (2<<9))
5258 vars->line_speed =
5259 SPEED_1000;
5260 else /* Should not happen */
5261 vars->line_speed = 0;
5262
5263 if (legacy_status & (1<<8))
5264 vars->duplex = DUPLEX_FULL;
5265 else
5266 vars->duplex = DUPLEX_HALF;
5267
5268 DP(NETIF_MSG_LINK, "Link is up "
5269 "in %dMbps, is_duplex_full"
5270 "= %d\n",
5271 vars->line_speed,
5272 (vars->duplex == DUPLEX_FULL));
5273 bnx2x_8481_set_legacy_led_mode(params,
5274 ext_phy_type,
5275 ext_phy_addr);
5276 }
5277 }
5278 break;
5279 default:
5280 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
5281 params->ext_phy_config);
5282 ext_phy_link_up = 0;
5283 break;
5284 }
5285 /* Set SGMII mode for external phy */
5286 if (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5287 if (vars->line_speed < SPEED_1000)
5288 vars->phy_flags |= PHY_SGMII_FLAG;
5289 else
5290 vars->phy_flags &= ~PHY_SGMII_FLAG;
5291 }
5292 5145
5293 } else { /* SerDes */ 5146/******************************************************************/
5294 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 5147/* BCM8481/BCM84823/BCM84833 PHY SECTION */
5295 switch (ext_phy_type) { 5148/******************************************************************/
5296 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 5149static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5297 DP(NETIF_MSG_LINK, "SerDes Direct\n"); 5150 struct link_params *params)
5298 ext_phy_link_up = 1; 5151{
5299 break; 5152 u16 val, fw_ver1, fw_ver2, cnt;
5153 struct bnx2x *bp = params->bp;
5300 5154
5301 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 5155 /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
5302 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 5156 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
5303 ext_phy_link_up = 1; 5157 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
5158 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
5159 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
5160 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
5161 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
5162
5163 for (cnt = 0; cnt < 100; cnt++) {
5164 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
5165 if (val & 1)
5304 break; 5166 break;
5167 udelay(5);
5168 }
5169 if (cnt == 100) {
5170 DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
5171 bnx2x_save_spirom_version(bp, params->port, 0,
5172 phy->ver_addr);
5173 return;
5174 }
5305 5175
5306 default: 5176
5307 DP(NETIF_MSG_LINK, 5177 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
5308 "BAD SerDes ext_phy_config 0x%x\n", 5178 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
5309 params->ext_phy_config); 5179 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
5310 ext_phy_link_up = 0; 5180 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
5181 for (cnt = 0; cnt < 100; cnt++) {
5182 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
5183 if (val & 1)
5311 break; 5184 break;
5312 } 5185 udelay(5);
5313 } 5186 }
5187 if (cnt == 100) {
5188 DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
5189 bnx2x_save_spirom_version(bp, params->port, 0,
5190 phy->ver_addr);
5191 return;
5192 }
5193
5194 /* lower 16 bits of the register SPI_FW_STATUS */
5195 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
5196 /* upper 16 bits of register SPI_FW_STATUS */
5197 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
5314 5198
5315 return ext_phy_link_up; 5199 bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
5200 phy->ver_addr);
5316} 5201}
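bnx2x_save_848xx_spirom_version() above reads a 32-bit firmware-status word through the MDIO2ARM indirection: the target address goes into registers 0xA819/0xA81A, a command into 0xA817, bit 0 of 0xA818 is polled for completion, and the data comes back through 0xA81B (low) and 0xA81C (high). A self-contained sketch of that sequence against a toy register file; the register numbers and the 0x000A read command are taken from the hunk, everything else is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

/* toy MDIO register file standing in for the clause-45 bus */
static uint16_t regs[0x10000];
static uint16_t mdio_read(uint16_t reg)          { return regs[reg]; }
static void mdio_write(uint16_t reg, uint16_t v) { regs[reg] = v; }

static int mdio2arm_read32(uint32_t arm_addr, uint32_t *out)
{
	int cnt;

	/* load the 32-bit ARM-side address, low half then high half */
	mdio_write(0xA819, arm_addr & 0xFFFF);
	mdio_write(0xA81A, arm_addr >> 16);
	mdio_write(0xA817, 0x000A);		/* issue the "read" command */
	for (cnt = 0; cnt < 100; cnt++)
		if (mdio_read(0xA818) & 1)	/* poll the done bit */
			break;
	if (cnt == 100)
		return -1;	/* command never completed; the driver
				 * udelay(5)s between polls */
	/* data comes back split across two 16-bit registers */
	*out = ((uint32_t)mdio_read(0xA81C) << 16) | mdio_read(0xA81B);
	return 0;
}

int main(void)
{
	uint32_t v;
	/* emulate a completed command with SPI_FW_STATUS = 0x00010002 */
	regs[0xA818] = 1; regs[0xA81B] = 0x0002; regs[0xA81C] = 0x0001;
	if (mdio2arm_read32(0xc2000000, &v) == 0)
		printf("SPI_FW_STATUS = 0x%08x\n", (unsigned)v);
	return 0;
}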
5317 5202
5318static void bnx2x_link_int_enable(struct link_params *params) 5203static void bnx2x_848xx_set_led(struct bnx2x *bp,
5204 struct bnx2x_phy *phy)
5319{ 5205{
5320 u8 port = params->port; 5206 u16 val;
5321 u32 ext_phy_type;
5322 u32 mask;
5323 struct bnx2x *bp = params->bp;
5324 5207
5325 /* setting the status to report on link up 5208 /* PHYC_CTL_LED_CTL */
5326 for either XGXS or SerDes */ 5209 bnx2x_cl45_read(bp, phy,
5210 MDIO_PMA_DEVAD,
5211 MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
5212 val &= 0xFE00;
5213 val |= 0x0092;
5214
5215 bnx2x_cl45_write(bp, phy,
5216 MDIO_PMA_DEVAD,
5217 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
5218
5219 bnx2x_cl45_write(bp, phy,
5220 MDIO_PMA_DEVAD,
5221 MDIO_PMA_REG_8481_LED1_MASK,
5222 0x80);
5223
5224 bnx2x_cl45_write(bp, phy,
5225 MDIO_PMA_DEVAD,
5226 MDIO_PMA_REG_8481_LED2_MASK,
5227 0x18);
5228
5229 bnx2x_cl45_write(bp, phy,
5230 MDIO_PMA_DEVAD,
5231 MDIO_PMA_REG_8481_LED3_MASK,
5232 0x0040);
5327 5233
5328 if (params->switch_cfg == SWITCH_CFG_10G) { 5234 /* 'Interrupt Mask' */
5329 mask = (NIG_MASK_XGXS0_LINK10G | 5235 bnx2x_cl45_write(bp, phy,
5330 NIG_MASK_XGXS0_LINK_STATUS); 5236 MDIO_AN_DEVAD,
5331 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); 5237 0xFFFB, 0xFFFD);
5332 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 5238}
5333 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
5334 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
5335 (ext_phy_type !=
5336 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
5337 mask |= NIG_MASK_MI_INT;
5338 DP(NETIF_MSG_LINK, "enabled external phy int\n");
5339 }
5340 5239
5341 } else { /* SerDes */ 5240static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
5342 mask = NIG_MASK_SERDES0_LINK_STATUS; 5241 struct link_params *params,
5343 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n"); 5242 struct link_vars *vars)
5344 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 5243{
5345 if ((ext_phy_type != 5244 struct bnx2x *bp = params->bp;
5346 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) && 5245 u16 autoneg_val, an_1000_val, an_10_100_val;
5347 (ext_phy_type != 5246 bnx2x_wait_reset_complete(bp, phy);
5348 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) { 5247 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
5349 mask |= NIG_MASK_MI_INT; 5248 1 << NIG_LATCH_BC_ENABLE_MI_INT);
5350 DP(NETIF_MSG_LINK, "enabled external phy int\n"); 5249
5351 } 5250 bnx2x_cl45_write(bp, phy,
5251 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
5252
5253 bnx2x_848xx_set_led(bp, phy);
5254
5255 /* set 1000 speed advertisement */
5256 bnx2x_cl45_read(bp, phy,
5257 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
5258 &an_1000_val);
5259
5260 bnx2x_ext_phy_set_pause(params, phy, vars);
5261 bnx2x_cl45_read(bp, phy,
5262 MDIO_AN_DEVAD,
5263 MDIO_AN_REG_8481_LEGACY_AN_ADV,
5264 &an_10_100_val);
5265 bnx2x_cl45_read(bp, phy,
5266 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
5267 &autoneg_val);
5268 /* Disable forced speed */
5269 autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
5270 an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
5271
5272 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
5273 (phy->speed_cap_mask &
5274 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
5275 (phy->req_line_speed == SPEED_1000)) {
5276 an_1000_val |= (1<<8);
5277 autoneg_val |= (1<<9 | 1<<12);
5278 if (phy->req_duplex == DUPLEX_FULL)
5279 an_1000_val |= (1<<9);
5280 DP(NETIF_MSG_LINK, "Advertising 1G\n");
5281 } else
5282 an_1000_val &= ~((1<<8) | (1<<9));
5283
5284 bnx2x_cl45_write(bp, phy,
5285 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
5286 an_1000_val);
5287
5288 /* set 100 speed advertisement */
5289 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
5290 (phy->speed_cap_mask &
5291 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
5292 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
5293 an_10_100_val |= (1<<7);
5294 /* Enable autoneg and restart autoneg for legacy speeds */
5295 autoneg_val |= (1<<9 | 1<<12);
5296
5297 if (phy->req_duplex == DUPLEX_FULL)
5298 an_10_100_val |= (1<<8);
5299 DP(NETIF_MSG_LINK, "Advertising 100M\n");
5300 }
5301 /* set 10 speed advertisement */
5302 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
5303 (phy->speed_cap_mask &
5304 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
5305 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
5306 an_10_100_val |= (1<<5);
5307 autoneg_val |= (1<<9 | 1<<12);
5308 if (phy->req_duplex == DUPLEX_FULL)
5309 an_10_100_val |= (1<<6);
5310 DP(NETIF_MSG_LINK, "Advertising 10M\n");
5352 } 5311 }
5353 bnx2x_bits_en(bp,
5354 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
5355 mask);
5356 5312
5357 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port, 5313 /* Only 10/100 are allowed to work in FORCE mode */
5358 (params->switch_cfg == SWITCH_CFG_10G), 5314 if (phy->req_line_speed == SPEED_100) {
5359 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); 5315 autoneg_val |= (1<<13);
5360 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n", 5316 /* Enable AUTO-MDIX when autoneg is disabled */
5361 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), 5317 bnx2x_cl45_write(bp, phy,
5362 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), 5318 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
5363 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c)); 5319 (1<<15 | 1<<9 | 7<<0));
5364 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", 5320 DP(NETIF_MSG_LINK, "Setting 100M force\n");
5365 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 5321 }
5366 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 5322 if (phy->req_line_speed == SPEED_10) {
5367} 5323 /* Enable AUTO-MDIX when autoneg is disabled */
5324 bnx2x_cl45_write(bp, phy,
5325 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
5326 (1<<15 | 1<<9 | 7<<0));
5327 DP(NETIF_MSG_LINK, "Setting 10M force\n");
5328 }
5368 5329
5369static void bnx2x_8481_rearm_latch_signal(struct bnx2x *bp, u8 port, 5330 bnx2x_cl45_write(bp, phy,
5370 u8 is_mi_int) 5331 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
5371{ 5332 an_10_100_val);
5372 u32 latch_status = 0, is_mi_int_status; 5333
5373 /* Disable the MI INT ( external phy int ) 5334 if (phy->req_duplex == DUPLEX_FULL)
5374 * by writing 1 to the status register. Link down indication 5335 autoneg_val |= (1<<8);
5375 * is high-active-signal, so in this case we need to write the 5336
5376 * status to clear the XOR 5337 bnx2x_cl45_write(bp, phy,
5377 */ 5338 MDIO_AN_DEVAD,
5378 /* Read Latched signals */ 5339 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
5379 latch_status = REG_RD(bp, 5340
5380 NIG_REG_LATCH_STATUS_0 + port*8); 5341 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
5381 is_mi_int_status = REG_RD(bp, 5342 (phy->speed_cap_mask &
5382 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4); 5343 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
5383 DP(NETIF_MSG_LINK, "original_signal = 0x%x, nig_status = 0x%x," 5344 (phy->req_line_speed == SPEED_10000)) {
5384 "latch_status = 0x%x\n", 5345 DP(NETIF_MSG_LINK, "Advertising 10G\n");
5385 is_mi_int, is_mi_int_status, latch_status); 5346 /* Restart autoneg for 10G*/
5386 /* Handle only those with latched-signal=up.*/ 5347
5387 if (latch_status & 1) { 5348 bnx2x_cl45_write(bp, phy,
5388 /* For all latched-signal=up,Write original_signal to status */ 5349 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
5389 if (is_mi_int) 5350 0x3200);
5390 bnx2x_bits_en(bp, 5351 } else if (phy->req_line_speed != SPEED_10 &&
5391 NIG_REG_STATUS_INTERRUPT_PORT0 5352 phy->req_line_speed != SPEED_100) {
5392 + port*4, 5353 bnx2x_cl45_write(bp, phy,
5393 NIG_STATUS_EMAC0_MI_INT); 5354 MDIO_AN_DEVAD,
5394 else 5355 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
5395 bnx2x_bits_dis(bp, 5356 1);
5396 NIG_REG_STATUS_INTERRUPT_PORT0
5397 + port*4,
5398 NIG_STATUS_EMAC0_MI_INT);
5399 /* For all latched-signal=up : Re-Arm Latch signals */
5400 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
5401 (latch_status & 0xfffe) | (latch_status & 1));
5402 } 5357 }
5358 /* Save spirom version */
5359 bnx2x_save_848xx_spirom_version(phy, params);
5360
5361 return 0;
5403} 5362}
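bnx2x_848xx_cmn_config_init() above is mostly bit arithmetic on two words: it first strips every forced-speed bit from the legacy MII control value and the 10/100 advertisement value, then ORs in the bits for each speed it wants to advertise, re-enabling and restarting autoneg via bits 9 and 12. A worked example of that arithmetic; the starting values are chosen arbitrarily and the bit meanings are inferred from the hunk itself.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t autoneg_val   = 0x3100;	/* example MII control value   */
	uint16_t an_10_100_val = 0x01E1;	/* example 10/100 advert value */

	/* drop forced-speed configuration first, as the function does */
	autoneg_val &= ~((1 << 6) | (1 << 8) | (1 << 9) |
			 (1 << 12) | (1 << 13));
	an_10_100_val &= ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8));

	/* advertise 100M full: bit 7 selects 100M, bit 8 full duplex;
	 * bits 9 and 12 re-enable and restart autoneg */
	an_10_100_val |= (1 << 7) | (1 << 8);
	autoneg_val |= (1 << 9) | (1 << 12);

	printf("mii_ctrl=0x%04x an_adv=0x%04x\n",
	       (unsigned)autoneg_val, (unsigned)an_10_100_val);
	return 0;
}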
5404/* 5363
5405 * link management 5364static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
5406 */ 5365 struct link_params *params,
5407static void bnx2x_link_int_ack(struct link_params *params, 5366 struct link_vars *vars)
5408 struct link_vars *vars, u8 is_10g,
5409 u8 is_mi_int)
5410{ 5367{
5411 struct bnx2x *bp = params->bp; 5368 struct bnx2x *bp = params->bp;
5412 u8 port = params->port; 5369 /* Restore normal power mode*/
5370 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
5371 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5413 5372
5414 /* first reset all status 5373 /* HW reset */
5415 * we assume only one line will be change at a time */ 5374 bnx2x_ext_phy_hw_reset(bp, params->port);
5416 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5417 (NIG_STATUS_XGXS0_LINK10G |
5418 NIG_STATUS_XGXS0_LINK_STATUS |
5419 NIG_STATUS_SERDES0_LINK_STATUS));
5420 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5421 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
5422 (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5423 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
5424 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
5425 }
5426 if (vars->phy_link_up) {
5427 if (is_10g) {
5428 /* Disable the 10G link interrupt
5429 * by writing 1 to the status register
5430 */
5431 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
5432 bnx2x_bits_en(bp,
5433 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5434 NIG_STATUS_XGXS0_LINK10G);
5435 5375
5436 } else if (params->switch_cfg == SWITCH_CFG_10G) { 5376 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
5437 /* Disable the link interrupt 5377 return bnx2x_848xx_cmn_config_init(phy, params, vars);
5438 * by writing 1 to the relevant lane 5378}
5439 * in the status register
5440 */
5441 u32 ser_lane = ((params->lane_config &
5442 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
5443 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
5444 5379
5445 DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n", 5380static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
5446 vars->line_speed); 5381 struct link_params *params,
5447 bnx2x_bits_en(bp, 5382 struct link_vars *vars)
5448 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 5383{
5449 ((1 << ser_lane) << 5384 struct bnx2x *bp = params->bp;
5450 NIG_STATUS_XGXS0_LINK_STATUS_SIZE)); 5385 u8 port = params->port, initialize = 1;
5386 u16 val;
5387 u16 temp;
5388 u32 actual_phy_selection;
5389 u8 rc = 0;
5451 5390
5452 } else { /* SerDes */ 5391 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
5453 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
5454 /* Disable the link interrupt
5455 * by writing 1 to the status register
5456 */
5457 bnx2x_bits_en(bp,
5458 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5459 NIG_STATUS_SERDES0_LINK_STATUS);
5460 }
5461 5392
5462 } else { /* link_down */ 5393 msleep(1);
5394 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5395 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
5396 port);
5397 msleep(200); /* 100 is not enough */
5398
5399 /* BCM84823 requires that XGXS links up first @ 10G for normal
5400 behavior */
5401 temp = vars->line_speed;
5402 vars->line_speed = SPEED_10000;
5403 bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
5404 bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
5405 vars->line_speed = temp;
5406
5407 /* Set dual-media configuration according to configuration */
5408
5409 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
5410 MDIO_CTL_REG_84823_MEDIA, &val);
5411 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
5412 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
5413 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
5414 MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
5415 MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
5416 val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI |
5417 MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L;
5418
5419 actual_phy_selection = bnx2x_phy_selection(params);
5420
5421 switch (actual_phy_selection) {
5422 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
5423 /* Do nothing. Essentially this is like the priority copper */
5424 break;
5425 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
5426 val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
5427 break;
5428 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
5429 val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
5430 break;
5431 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
5432 /* Do nothing here. The first PHY won't be initialized at all */
5433 break;
5434 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
5435 val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
5436 initialize = 0;
5437 break;
5463 } 5438 }
5439 if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
5440 val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
5441
5442 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
5443 MDIO_CTL_REG_84823_MEDIA, val);
5444 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
5445 params->multi_phy_config, val);
5446
5447 if (initialize)
5448 rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
5449 else
5450 bnx2x_save_848xx_spirom_version(phy, params);
5451 return rc;
5464} 5452}
5465 5453
5466static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len) 5454static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
5455 struct link_params *params,
5456 struct link_vars *vars)
5467{ 5457{
5468 u8 *str_ptr = str; 5458 struct bnx2x *bp = params->bp;
5469 u32 mask = 0xf0000000; 5459 u16 val, val1, val2;
5470 u8 shift = 8*4; 5460 u8 link_up = 0;
5471 u8 digit; 5461
5472 if (len < 10) { 5462 /* Check 10G-BaseT link status */
5473 /* Need more than 10chars for this format */ 5463 /* Check PMD signal ok */
5474 *str_ptr = '\0'; 5464 bnx2x_cl45_read(bp, phy,
5475 return -EINVAL; 5465 MDIO_AN_DEVAD, 0xFFFA, &val1);
5476 } 5466 bnx2x_cl45_read(bp, phy,
5477 while (shift > 0) { 5467 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
5468 &val2);
5469 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
5470
5471 /* Check link 10G */
5472 if (val2 & (1<<11)) {
5473 vars->line_speed = SPEED_10000;
5474 link_up = 1;
5475 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
5476 } else { /* Check Legacy speed link */
5477 u16 legacy_status, legacy_speed;
5478
5479 /* Enable expansion register 0x42 (Operation mode status) */
5480 bnx2x_cl45_write(bp, phy,
5481 MDIO_AN_DEVAD,
5482 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
5483
5484 /* Get legacy speed operation status */
5485 bnx2x_cl45_read(bp, phy,
5486 MDIO_AN_DEVAD,
5487 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
5488 &legacy_status);
5489
5490 DP(NETIF_MSG_LINK, "Legacy speed status"
5491 " = 0x%x\n", legacy_status);
5492 link_up = ((legacy_status & (1<<11)) == (1<<11));
5493 if (link_up) {
5494 legacy_speed = (legacy_status & (3<<9));
5495 if (legacy_speed == (0<<9))
5496 vars->line_speed = SPEED_10;
5497 else if (legacy_speed == (1<<9))
5498 vars->line_speed = SPEED_100;
5499 else if (legacy_speed == (2<<9))
5500 vars->line_speed = SPEED_1000;
5501 else /* Should not happen */
5502 vars->line_speed = 0;
5478 5503
5479 shift -= 4; 5504 if (legacy_status & (1<<8))
5480 digit = ((num & mask) >> shift); 5505 vars->duplex = DUPLEX_FULL;
5481 if (digit < 0xa) 5506 else
5482 *str_ptr = digit + '0'; 5507 vars->duplex = DUPLEX_HALF;
5483 else 5508
5484 *str_ptr = digit - 0xa + 'a'; 5509 DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
5485 str_ptr++; 5510 " is_duplex_full= %d\n", vars->line_speed,
5486 mask = mask >> 4; 5511 (vars->duplex == DUPLEX_FULL));
5487 if (shift == 4*4) { 5512 /* Check legacy speed AN resolution */
5488 *str_ptr = ':'; 5513 bnx2x_cl45_read(bp, phy,
5489 str_ptr++; 5514 MDIO_AN_DEVAD,
5515 MDIO_AN_REG_8481_LEGACY_MII_STATUS,
5516 &val);
5517 if (val & (1<<5))
5518 vars->link_status |=
5519 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
5520 bnx2x_cl45_read(bp, phy,
5521 MDIO_AN_DEVAD,
5522 MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
5523 &val);
5524 if ((val & (1<<0)) == 0)
5525 vars->link_status |=
5526 LINK_STATUS_PARALLEL_DETECTION_USED;
5490 } 5527 }
5491 } 5528 }
5492 *str_ptr = '\0'; 5529 if (link_up) {
5493 return 0; 5530 DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
5531 vars->line_speed);
5532 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5533 }
5534
5535 return link_up;
5494} 5536}
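Both 848xx status paths fall back to the same legacy decode when the 10G bit is clear: expansion register 0x42 is selected, and the returned word carries link in bit 11, a two-bit speed code in bits 10:9 (00=10M, 01=100M, 10=1000M) and duplex in bit 8. A standalone sketch of that decode; the function name and output format are illustrative.

#include <stdint.h>
#include <stdio.h>

static void decode_legacy_status(uint16_t legacy_status)
{
	/* speed code 3 should not happen; the driver reports speed 0 */
	static const int speed_tab[4] = { 10, 100, 1000, 0 };
	int link_up     = !!(legacy_status & (1 << 11));
	int speed       = speed_tab[(legacy_status >> 9) & 3];
	int duplex_full = !!(legacy_status & (1 << 8));

	printf("link=%d speed=%dMbps full_duplex=%d\n",
	       link_up, speed, duplex_full);
}

int main(void)
{
	decode_legacy_status(0x0D00);	/* link up, 1000M, full duplex */
	return 0;
}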
5495 5537
5496u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, 5538static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
5497 u8 *version, u16 len)
5498{ 5539{
5499 struct bnx2x *bp; 5540 u8 status = 0;
5500 u32 ext_phy_type = 0; 5541 u32 spirom_ver;
5501 u32 spirom_ver = 0; 5542 spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
5502 u8 status; 5543 status = bnx2x_format_ver(spirom_ver, str, len);
5544 return status;
5545}
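bnx2x_848xx_format_ver() above repacks the raw version word before handing it to the generic formatter: bits 11:7 become the upper half of a 32-bit value and bits 6:0 the lower half. A worked example with an arbitrary input:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw_ver = 0x0185;	/* example: 3 in bits 11:7, 5 in 6:0 */
	uint32_t spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 |
			      (raw_ver & 0x7F);

	/* prints 0x0185 -> 0x00030005 */
	printf("0x%04x -> 0x%08x\n", (unsigned)raw_ver, (unsigned)spirom_ver);
	return 0;
}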
5503 5546
5504 if (version == NULL || params == NULL) 5547static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
5505 return -EINVAL; 5548 struct link_params *params)
5506 bp = params->bp; 5549{
5550 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
5551 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
5552 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
5553 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
5554}
5507 5555
5508 spirom_ver = REG_RD(bp, params->shmem_base + 5556static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
5509 offsetof(struct shmem_region, 5557 struct link_params *params)
5510 port_mb[params->port].ext_phy_fw_version)); 5558{
5559 bnx2x_cl45_write(params->bp, phy,
5560 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
5561 bnx2x_cl45_write(params->bp, phy,
5562 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
5563}
5511 5564
5512 status = 0; 5565static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
5513 /* reset the returned value to zero */ 5566 struct link_params *params)
5514 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 5567{
5515 switch (ext_phy_type) { 5568 struct bnx2x *bp = params->bp;
5516 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 5569 u8 port = params->port;
5570 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5571 MISC_REGISTERS_GPIO_OUTPUT_LOW,
5572 port);
5573}
5517 5574
5518 if (len < 5) 5575static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
5519 return -EINVAL; 5576 struct link_params *params, u8 mode)
5577{
5578 struct bnx2x *bp = params->bp;
5579 u16 val;
5520 5580
5521 version[0] = (spirom_ver & 0xFF); 5581 switch (mode) {
5522 version[1] = (spirom_ver & 0xFF00) >> 8; 5582 case LED_MODE_OFF:
5523 version[2] = (spirom_ver & 0xFF0000) >> 16;
5524 version[3] = (spirom_ver & 0xFF000000) >> 24;
5525 version[4] = '\0';
5526 5583
5527 break; 5584 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port);
5528 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5529 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5531 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5532 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5533 status = bnx2x_format_ver(spirom_ver, version, len);
5534 break;
5535 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5536 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5537 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
5538 (spirom_ver & 0x7F);
5539 status = bnx2x_format_ver(spirom_ver, version, len);
5540 break;
5541 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5542 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5543 version[0] = '\0';
5544 break;
5545 5585
5546 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 5586 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
5547 DP(NETIF_MSG_LINK, "bnx2x_get_ext_phy_fw_version:" 5587 SHARED_HW_CFG_LED_EXTPHY1) {
5548 " type is FAILURE!\n"); 5588
5549 status = -EINVAL; 5589 /* Set LED masks */
5590 bnx2x_cl45_write(bp, phy,
5591 MDIO_PMA_DEVAD,
5592 MDIO_PMA_REG_8481_LED1_MASK,
5593 0x0);
5594
5595 bnx2x_cl45_write(bp, phy,
5596 MDIO_PMA_DEVAD,
5597 MDIO_PMA_REG_8481_LED2_MASK,
5598 0x0);
5599
5600 bnx2x_cl45_write(bp, phy,
5601 MDIO_PMA_DEVAD,
5602 MDIO_PMA_REG_8481_LED3_MASK,
5603 0x0);
5604
5605 bnx2x_cl45_write(bp, phy,
5606 MDIO_PMA_DEVAD,
5607 MDIO_PMA_REG_8481_LED5_MASK,
5608 0x0);
5609
5610 } else {
5611 bnx2x_cl45_write(bp, phy,
5612 MDIO_PMA_DEVAD,
5613 MDIO_PMA_REG_8481_LED1_MASK,
5614 0x0);
5615 }
5550 break; 5616 break;
5617 case LED_MODE_FRONT_PANEL_OFF:
5551 5618
5552 default: 5619 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
5620 params->port);
5621
5622 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
5623 SHARED_HW_CFG_LED_EXTPHY1) {
5624
5625 /* Set LED masks */
5626 bnx2x_cl45_write(bp, phy,
5627 MDIO_PMA_DEVAD,
5628 MDIO_PMA_REG_8481_LED1_MASK,
5629 0x0);
5630
5631 bnx2x_cl45_write(bp, phy,
5632 MDIO_PMA_DEVAD,
5633 MDIO_PMA_REG_8481_LED2_MASK,
5634 0x0);
5635
5636 bnx2x_cl45_write(bp, phy,
5637 MDIO_PMA_DEVAD,
5638 MDIO_PMA_REG_8481_LED3_MASK,
5639 0x0);
5640
5641 bnx2x_cl45_write(bp, phy,
5642 MDIO_PMA_DEVAD,
5643 MDIO_PMA_REG_8481_LED5_MASK,
5644 0x20);
5645
5646 } else {
5647 bnx2x_cl45_write(bp, phy,
5648 MDIO_PMA_DEVAD,
5649 MDIO_PMA_REG_8481_LED1_MASK,
5650 0x0);
5651 }
5553 break; 5652 break;
5554 } 5653 case LED_MODE_ON:
5555 return status;
5556}
5557 5654
5558static void bnx2x_set_xgxs_loopback(struct link_params *params, 5655 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port);
5559 struct link_vars *vars,
5560 u8 is_10g)
5561{
5562 u8 port = params->port;
5563 struct bnx2x *bp = params->bp;
5564 5656
5565 if (is_10g) { 5657 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
5566 u32 md_devad; 5658 SHARED_HW_CFG_LED_EXTPHY1) {
5659 /* Set control reg */
5660 bnx2x_cl45_read(bp, phy,
5661 MDIO_PMA_DEVAD,
5662 MDIO_PMA_REG_8481_LINK_SIGNAL,
5663 &val);
5664 val &= 0x8000;
5665 val |= 0x2492;
5567 5666
5568 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); 5667 bnx2x_cl45_write(bp, phy,
5668 MDIO_PMA_DEVAD,
5669 MDIO_PMA_REG_8481_LINK_SIGNAL,
5670 val);
5569 5671
5570 /* change the uni_phy_addr in the nig */ 5672 /* Set LED masks */
5571 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + 5673 bnx2x_cl45_write(bp, phy,
5572 port*0x18)); 5674 MDIO_PMA_DEVAD,
5675 MDIO_PMA_REG_8481_LED1_MASK,
5676 0x0);
5573 5677
5574 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); 5678 bnx2x_cl45_write(bp, phy,
5679 MDIO_PMA_DEVAD,
5680 MDIO_PMA_REG_8481_LED2_MASK,
5681 0x20);
5575 5682
5576 bnx2x_cl45_write(bp, port, 0, 5683 bnx2x_cl45_write(bp, phy,
5577 params->phy_addr, 5684 MDIO_PMA_DEVAD,
5578 5, 5685 MDIO_PMA_REG_8481_LED3_MASK,
5579 (MDIO_REG_BANK_AER_BLOCK + 5686 0x20);
5580 (MDIO_AER_BLOCK_AER_REG & 0xf)),
5581 0x2800);
5582 5687
5583 bnx2x_cl45_write(bp, port, 0, 5688 bnx2x_cl45_write(bp, phy,
5584 params->phy_addr, 5689 MDIO_PMA_DEVAD,
5585 5, 5690 MDIO_PMA_REG_8481_LED5_MASK,
5586 (MDIO_REG_BANK_CL73_IEEEB0 + 5691 0x0);
5587 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 5692 } else {
5588 0x6041); 5693 bnx2x_cl45_write(bp, phy,
5589 msleep(200); 5694 MDIO_PMA_DEVAD,
5590 /* set aer mmd back */ 5695 MDIO_PMA_REG_8481_LED1_MASK,
5591 bnx2x_set_aer_mmd(params, vars); 5696 0x20);
5697 }
5698 break;
5592 5699
5593 /* and md_devad */ 5700 case LED_MODE_OPER:
5594 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
5595 md_devad);
5596 5701
5597 } else { 5702 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port);
5598 u16 mii_control;
5599 5703
5600 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); 5704 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
5705 SHARED_HW_CFG_LED_EXTPHY1) {
5601 5706
5602 CL45_RD_OVER_CL22(bp, port, 5707 /* Set control reg */
5603 params->phy_addr, 5708 bnx2x_cl45_read(bp, phy,
5604 MDIO_REG_BANK_COMBO_IEEE0, 5709 MDIO_PMA_DEVAD,
5605 MDIO_COMBO_IEEE0_MII_CONTROL, 5710 MDIO_PMA_REG_8481_LINK_SIGNAL,
5606 &mii_control); 5711 &val);
5712
5713 if (!((val &
5714 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
5715 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
5716 DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
5717 bnx2x_cl45_write(bp, phy,
5718 MDIO_PMA_DEVAD,
5719 MDIO_PMA_REG_8481_LINK_SIGNAL,
5720 0xa492);
5721 }
5607 5722
5608 CL45_WR_OVER_CL22(bp, port, 5723 /* Set LED masks */
5609 params->phy_addr, 5724 bnx2x_cl45_write(bp, phy,
5610 MDIO_REG_BANK_COMBO_IEEE0, 5725 MDIO_PMA_DEVAD,
5611 MDIO_COMBO_IEEE0_MII_CONTROL, 5726 MDIO_PMA_REG_8481_LED1_MASK,
5612 (mii_control | 5727 0x10);
5613 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK)); 5728
5729 bnx2x_cl45_write(bp, phy,
5730 MDIO_PMA_DEVAD,
5731 MDIO_PMA_REG_8481_LED2_MASK,
5732 0x80);
5733
5734 bnx2x_cl45_write(bp, phy,
5735 MDIO_PMA_DEVAD,
5736 MDIO_PMA_REG_8481_LED3_MASK,
5737 0x98);
5738
5739 bnx2x_cl45_write(bp, phy,
5740 MDIO_PMA_DEVAD,
5741 MDIO_PMA_REG_8481_LED5_MASK,
5742 0x40);
5743
5744 } else {
5745 bnx2x_cl45_write(bp, phy,
5746 MDIO_PMA_DEVAD,
5747 MDIO_PMA_REG_8481_LED1_MASK,
5748 0x80);
5749 }
5750 break;
5614 } 5751 }
5615} 5752}
5753/******************************************************************/
5754/* SFX7101 PHY SECTION */
5755/******************************************************************/
5756static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
5757 struct link_params *params)
5758{
5759 struct bnx2x *bp = params->bp;
5760 /* SFX7101_XGXS_TEST1 */
5761 bnx2x_cl45_write(bp, phy,
5762 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
5763}
5616 5764
5617 5765static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
5618static void bnx2x_ext_phy_loopback(struct link_params *params) 5766 struct link_params *params,
5767 struct link_vars *vars)
5619{ 5768{
5769 u16 fw_ver1, fw_ver2, val;
5620 struct bnx2x *bp = params->bp; 5770 struct bnx2x *bp = params->bp;
5621 u8 ext_phy_addr; 5771 DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
5622 u32 ext_phy_type;
5623 5772
5624 if (params->switch_cfg == SWITCH_CFG_10G) { 5773 /* Restore normal power mode*/
5625 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 5774 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
5626 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 5775 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5627 /* CL37 Autoneg Enabled */ 5776 /* HW reset */
5628 switch (ext_phy_type) { 5777 bnx2x_ext_phy_hw_reset(bp, params->port);
5629 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 5778 bnx2x_wait_reset_complete(bp, phy);
5630 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN: 5779
5631 DP(NETIF_MSG_LINK, 5780 bnx2x_cl45_write(bp, phy,
5632 "ext_phy_loopback: We should not get here\n"); 5781 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
5633 break; 5782 DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
5634 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 5783 bnx2x_cl45_write(bp, phy,
5635 DP(NETIF_MSG_LINK, "ext_phy_loopback: 8705\n"); 5784 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
5636 break; 5785
5637 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 5786 bnx2x_ext_phy_set_pause(params, phy, vars);
5638 DP(NETIF_MSG_LINK, "ext_phy_loopback: 8706\n"); 5787 /* Restart autoneg */
5639 break; 5788 bnx2x_cl45_read(bp, phy,
5640 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 5789 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
5641 DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n"); 5790 val |= 0x200;
5642 bnx2x_cl45_write(bp, params->port, ext_phy_type, 5791 bnx2x_cl45_write(bp, phy,
5643 ext_phy_addr, 5792 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
5644 MDIO_PMA_DEVAD, 5793
5645 MDIO_PMA_REG_CTRL, 5794 /* Save spirom version */
5646 0x0001); 5795 bnx2x_cl45_read(bp, phy,
5647 break; 5796 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
5648 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 5797
5649 /* SFX7101_XGXS_TEST1 */ 5798 bnx2x_cl45_read(bp, phy,
5650 bnx2x_cl45_write(bp, params->port, ext_phy_type, 5799 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
5651 ext_phy_addr, 5800 bnx2x_save_spirom_version(bp, params->port,
5652 MDIO_XS_DEVAD, 5801 (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
5653 MDIO_XS_SFX7101_XGXS_TEST1, 5802 return 0;
5654 0x100); 5803}
5655 DP(NETIF_MSG_LINK,
5656 "ext_phy_loopback: set ext phy loopback\n");
5657 break;
5658 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5659 5804
5660 break; 5805static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
5661 } /* switch external PHY type */ 5806 struct link_params *params,
5662 } else { 5807 struct link_vars *vars)
5663 /* serdes */ 5808{
5664 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 5809 struct bnx2x *bp = params->bp;
5665 ext_phy_addr = (params->ext_phy_config & 5810 u8 link_up;
5666 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) 5811 u16 val1, val2;
5667 >> PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT; 5812 bnx2x_cl45_read(bp, phy,
5813 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
5814 bnx2x_cl45_read(bp, phy,
5815 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
5816 DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
5817 val2, val1);
5818 bnx2x_cl45_read(bp, phy,
5819 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
5820 bnx2x_cl45_read(bp, phy,
5821 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
5822 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
5823 val2, val1);
5824 link_up = ((val1 & 4) == 4);
5825 /* if link is up
5826 * print the AN outcome of the SFX7101 PHY
5827 */
5828 if (link_up) {
5829 bnx2x_cl45_read(bp, phy,
5830 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
5831 &val2);
5832 vars->line_speed = SPEED_10000;
5833 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
5834 val2, (val2 & (1<<14)));
5835 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
5836 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5668 } 5837 }
5838 return link_up;
5669} 5839}
5670 5840
5671 5841
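bnx2x_7101_read_status() above reads the LASI and PMA status registers twice back to back and prints the pair as "0x%x->0x%x"; the same pattern recurs in many hunks. That is consistent with latching status bits, where the first read returns and clears the latched event and the second read reflects the live state. A toy model of the pattern; the latching behaviour is emulated, not a real register.

#include <stdint.h>
#include <stdio.h>

static uint16_t lasi_latch = 0x0001;	/* toy latched-event register */

/* models a latch-on-read register: first access returns the event bit
 * and clears it, later accesses read back the live (clear) state */
static uint16_t mdio_read_latched(void)
{
	uint16_t v = lasi_latch;
	lasi_latch = 0;
	return v;
}

int main(void)
{
	uint16_t latched = mdio_read_latched();	/* drains the event */
	uint16_t current = mdio_read_latched();	/* live state       */
	printf("LASI status 0x%x->0x%x\n",
	       (unsigned)latched, (unsigned)current);
	return 0;
}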
5672/* 5842static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
5673 *------------------------------------------------------------------------
5674 * bnx2x_override_led_value -
5675 *
5676 * Override the led value of the requested led
5677 *
5678 *------------------------------------------------------------------------
5679 */
5680u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
5681 u32 led_idx, u32 value)
5682{ 5843{
5683 u32 reg_val; 5844 if (*len < 5)
5845 return -EINVAL;
5846 str[0] = (spirom_ver & 0xFF);
5847 str[1] = (spirom_ver & 0xFF00) >> 8;
5848 str[2] = (spirom_ver & 0xFF0000) >> 16;
5849 str[3] = (spirom_ver & 0xFF000000) >> 24;
5850 str[4] = '\0';
5851 *len -= 5;
5852 return 0;
5853}
5684 5854
5685 /* If port 0 then use EMAC0, else use EMAC1*/ 5855void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
5686 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 5856{
5857 u16 val, cnt;
5687 5858
5688 DP(NETIF_MSG_LINK, 5859 bnx2x_cl45_read(bp, phy,
5689 "bnx2x_override_led_value() port %x led_idx %d value %d\n", 5860 MDIO_PMA_DEVAD,
5690 port, led_idx, value); 5861 MDIO_PMA_REG_7101_RESET, &val);
5691 5862
5692 switch (led_idx) { 5863 for (cnt = 0; cnt < 10; cnt++) {
5693 case 0: /* 10MB led */ 5864 msleep(50);
5694 /* Read the current value of the LED register in 5865 /* Writes a self-clearing reset */
5695 the EMAC block */ 5866 bnx2x_cl45_write(bp, phy,
5696 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED); 5867 MDIO_PMA_DEVAD,
5697 /* Set the OVERRIDE bit to 1 */ 5868 MDIO_PMA_REG_7101_RESET,
5698 reg_val |= EMAC_LED_OVERRIDE; 5869 (val | (1<<15)));
5699 /* If value is 1, set the 10M_OVERRIDE bit, 5870 /* Wait for clear */
5700 otherwise reset it.*/ 5871 bnx2x_cl45_read(bp, phy,
5701 reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) : 5872 MDIO_PMA_DEVAD,
5702 (reg_val & ~EMAC_LED_10MB_OVERRIDE); 5873 MDIO_PMA_REG_7101_RESET, &val);
5703 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val); 5874
5704 break; 5875 if ((val & (1<<15)) == 0)
5705 case 1: /*100MB led */ 5876 break;
5706 /*Read the current value of the LED register in 5877 }
5707 the EMAC block */ 5878}
5708 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED); 5879
5709 /* Set the OVERRIDE bit to 1 */ 5880static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
5710 reg_val |= EMAC_LED_OVERRIDE; 5881 struct link_params *params) {
5711 /* If value is 1, set the 100M_OVERRIDE bit, 5882 /* Low power mode is controlled by GPIO 2 */
5712 otherwise reset it.*/ 5883 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
5713 reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) : 5884 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
5714 (reg_val & ~EMAC_LED_100MB_OVERRIDE); 5885 /* The PHY reset is controlled by GPIO 1 */
5715 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val); 5886 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
5887 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
5888}
5889
5890static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
5891 struct link_params *params, u8 mode)
5892{
5893 u16 val = 0;
5894 struct bnx2x *bp = params->bp;
5895 switch (mode) {
5896 case LED_MODE_FRONT_PANEL_OFF:
5897 case LED_MODE_OFF:
5898 val = 2;
5716 break; 5899 break;
5717 case 2: /* 1000MB led */ 5900 case LED_MODE_ON:
5718 /* Read the current value of the LED register in the 5901 val = 1;
5719 EMAC block */
5720 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5721 /* Set the OVERRIDE bit to 1 */
5722 reg_val |= EMAC_LED_OVERRIDE;
5723 /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
5724 reset it. */
5725 reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
5726 (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
5727 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5728 break; 5902 break;
5729 case 3: /* 2500MB led */ 5903 case LED_MODE_OPER:
5730 /* Read the current value of the LED register in the 5904 val = 0;
5731 EMAC block*/
5732 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5733 /* Set the OVERRIDE bit to 1 */
5734 reg_val |= EMAC_LED_OVERRIDE;
5735 /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
5736 reset it.*/
5737 reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
5738 (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
5739 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5740 break; 5905 break;
5741 case 4: /*10G led */ 5906 }
5742 if (port == 0) { 5907 bnx2x_cl45_write(bp, phy,
5743 REG_WR(bp, NIG_REG_LED_10G_P0, 5908 MDIO_PMA_DEVAD,
5744 value); 5909 MDIO_PMA_REG_7107_LINK_LED_CNTL,
5910 val);
5911}
5912
5913/******************************************************************/
5914/* STATIC PHY DECLARATION */
5915/******************************************************************/
5916
5917static struct bnx2x_phy phy_null = {
5918 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
5919 .addr = 0,
5920 .flags = FLAGS_INIT_XGXS_FIRST,
5921 .def_md_devad = 0,
5922 .reserved = 0,
5923 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
5924 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
5925 .mdio_ctrl = 0,
5926 .supported = 0,
5927 .media_type = ETH_PHY_NOT_PRESENT,
5928 .ver_addr = 0,
5929 .req_flow_ctrl = 0,
5930 .req_line_speed = 0,
5931 .speed_cap_mask = 0,
5932 .req_duplex = 0,
5933 .rsrv = 0,
5934 .config_init = (config_init_t)NULL,
5935 .read_status = (read_status_t)NULL,
5936 .link_reset = (link_reset_t)NULL,
5937 .config_loopback = (config_loopback_t)NULL,
5938 .format_fw_ver = (format_fw_ver_t)NULL,
5939 .hw_reset = (hw_reset_t)NULL,
5940 .set_link_led = (set_link_led_t)NULL,
5941 .phy_specific_func = (phy_specific_func_t)NULL
5942};
5943
5944static struct bnx2x_phy phy_serdes = {
5945 .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
5946 .addr = 0xff,
5947 .flags = 0,
5948 .def_md_devad = 0,
5949 .reserved = 0,
5950 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
5951 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
5952 .mdio_ctrl = 0,
5953 .supported = (SUPPORTED_10baseT_Half |
5954 SUPPORTED_10baseT_Full |
5955 SUPPORTED_100baseT_Half |
5956 SUPPORTED_100baseT_Full |
5957 SUPPORTED_1000baseT_Full |
5958 SUPPORTED_2500baseX_Full |
5959 SUPPORTED_TP |
5960 SUPPORTED_Autoneg |
5961 SUPPORTED_Pause |
5962 SUPPORTED_Asym_Pause),
5963 .media_type = ETH_PHY_UNSPECIFIED,
5964 .ver_addr = 0,
5965 .req_flow_ctrl = 0,
5966 .req_line_speed = 0,
5967 .speed_cap_mask = 0,
5968 .req_duplex = 0,
5969 .rsrv = 0,
5970 .config_init = (config_init_t)bnx2x_init_serdes,
5971 .read_status = (read_status_t)bnx2x_link_settings_status,
5972 .link_reset = (link_reset_t)bnx2x_int_link_reset,
5973 .config_loopback = (config_loopback_t)NULL,
5974 .format_fw_ver = (format_fw_ver_t)NULL,
5975 .hw_reset = (hw_reset_t)NULL,
5976 .set_link_led = (set_link_led_t)NULL,
5977 .phy_specific_func = (phy_specific_func_t)NULL
5978};
5979
5980static struct bnx2x_phy phy_xgxs = {
5981 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
5982 .addr = 0xff,
5983 .flags = 0,
5984 .def_md_devad = 0,
5985 .reserved = 0,
5986 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
5987 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
5988 .mdio_ctrl = 0,
5989 .supported = (SUPPORTED_10baseT_Half |
5990 SUPPORTED_10baseT_Full |
5991 SUPPORTED_100baseT_Half |
5992 SUPPORTED_100baseT_Full |
5993 SUPPORTED_1000baseT_Full |
5994 SUPPORTED_2500baseX_Full |
5995 SUPPORTED_10000baseT_Full |
5996 SUPPORTED_FIBRE |
5997 SUPPORTED_Autoneg |
5998 SUPPORTED_Pause |
5999 SUPPORTED_Asym_Pause),
6000 .media_type = ETH_PHY_UNSPECIFIED,
6001 .ver_addr = 0,
6002 .req_flow_ctrl = 0,
6003 .req_line_speed = 0,
6004 .speed_cap_mask = 0,
6005 .req_duplex = 0,
6006 .rsrv = 0,
6007 .config_init = (config_init_t)bnx2x_init_xgxs,
6008 .read_status = (read_status_t)bnx2x_link_settings_status,
6009 .link_reset = (link_reset_t)bnx2x_int_link_reset,
6010 .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
6011 .format_fw_ver = (format_fw_ver_t)NULL,
6012 .hw_reset = (hw_reset_t)NULL,
6013 .set_link_led = (set_link_led_t)NULL,
6014 .phy_specific_func = (phy_specific_func_t)NULL
6015};
6016
6017static struct bnx2x_phy phy_7101 = {
6018 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
6019 .addr = 0xff,
6020 .flags = FLAGS_FAN_FAILURE_DET_REQ,
6021 .def_md_devad = 0,
6022 .reserved = 0,
6023 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6024 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6025 .mdio_ctrl = 0,
6026 .supported = (SUPPORTED_10000baseT_Full |
6027 SUPPORTED_TP |
6028 SUPPORTED_Autoneg |
6029 SUPPORTED_Pause |
6030 SUPPORTED_Asym_Pause),
6031 .media_type = ETH_PHY_BASE_T,
6032 .ver_addr = 0,
6033 .req_flow_ctrl = 0,
6034 .req_line_speed = 0,
6035 .speed_cap_mask = 0,
6036 .req_duplex = 0,
6037 .rsrv = 0,
6038 .config_init = (config_init_t)bnx2x_7101_config_init,
6039 .read_status = (read_status_t)bnx2x_7101_read_status,
6040 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
6041 .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
6042 .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
6043 .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
6044 .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
6045 .phy_specific_func = (phy_specific_func_t)NULL
6046};
6047static struct bnx2x_phy phy_8073 = {
6048 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6049 .addr = 0xff,
6050 .flags = FLAGS_HW_LOCK_REQUIRED,
6051 .def_md_devad = 0,
6052 .reserved = 0,
6053 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6054 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6055 .mdio_ctrl = 0,
6056 .supported = (SUPPORTED_10000baseT_Full |
6057 SUPPORTED_2500baseX_Full |
6058 SUPPORTED_1000baseT_Full |
6059 SUPPORTED_FIBRE |
6060 SUPPORTED_Autoneg |
6061 SUPPORTED_Pause |
6062 SUPPORTED_Asym_Pause),
6063 .media_type = ETH_PHY_UNSPECIFIED,
6064 .ver_addr = 0,
6065 .req_flow_ctrl = 0,
6066 .req_line_speed = 0,
6067 .speed_cap_mask = 0,
6068 .req_duplex = 0,
6069 .rsrv = 0,
6070 .config_init = (config_init_t)bnx2x_8073_config_init,
6071 .read_status = (read_status_t)bnx2x_8073_read_status,
6072 .link_reset = (link_reset_t)bnx2x_8073_link_reset,
6073 .config_loopback = (config_loopback_t)NULL,
6074 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
6075 .hw_reset = (hw_reset_t)NULL,
6076 .set_link_led = (set_link_led_t)NULL,
6077 .phy_specific_func = (phy_specific_func_t)NULL
6078};
6079static struct bnx2x_phy phy_8705 = {
6080 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
6081 .addr = 0xff,
6082 .flags = FLAGS_INIT_XGXS_FIRST,
6083 .def_md_devad = 0,
6084 .reserved = 0,
6085 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6086 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6087 .mdio_ctrl = 0,
6088 .supported = (SUPPORTED_10000baseT_Full |
6089 SUPPORTED_FIBRE |
6090 SUPPORTED_Pause |
6091 SUPPORTED_Asym_Pause),
6092 .media_type = ETH_PHY_XFP_FIBER,
6093 .ver_addr = 0,
6094 .req_flow_ctrl = 0,
6095 .req_line_speed = 0,
6096 .speed_cap_mask = 0,
6097 .req_duplex = 0,
6098 .rsrv = 0,
6099 .config_init = (config_init_t)bnx2x_8705_config_init,
6100 .read_status = (read_status_t)bnx2x_8705_read_status,
6101 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
6102 .config_loopback = (config_loopback_t)NULL,
6103 .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
6104 .hw_reset = (hw_reset_t)NULL,
6105 .set_link_led = (set_link_led_t)NULL,
6106 .phy_specific_func = (phy_specific_func_t)NULL
6107};
6108static struct bnx2x_phy phy_8706 = {
6109 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
6110 .addr = 0xff,
6111 .flags = FLAGS_INIT_XGXS_FIRST,
6112 .def_md_devad = 0,
6113 .reserved = 0,
6114 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6115 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6116 .mdio_ctrl = 0,
6117 .supported = (SUPPORTED_10000baseT_Full |
6118 SUPPORTED_1000baseT_Full |
6119 SUPPORTED_FIBRE |
6120 SUPPORTED_Pause |
6121 SUPPORTED_Asym_Pause),
6122 .media_type = ETH_PHY_SFP_FIBER,
6123 .ver_addr = 0,
6124 .req_flow_ctrl = 0,
6125 .req_line_speed = 0,
6126 .speed_cap_mask = 0,
6127 .req_duplex = 0,
6128 .rsrv = 0,
6129 .config_init = (config_init_t)bnx2x_8706_config_init,
6130 .read_status = (read_status_t)bnx2x_8706_read_status,
6131 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
6132 .config_loopback = (config_loopback_t)NULL,
6133 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
6134 .hw_reset = (hw_reset_t)NULL,
6135 .set_link_led = (set_link_led_t)NULL,
6136 .phy_specific_func = (phy_specific_func_t)NULL
6137};
6138
6139static struct bnx2x_phy phy_8726 = {
6140 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
6141 .addr = 0xff,
6142 .flags = (FLAGS_HW_LOCK_REQUIRED |
6143 FLAGS_INIT_XGXS_FIRST),
6144 .def_md_devad = 0,
6145 .reserved = 0,
6146 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6147 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6148 .mdio_ctrl = 0,
6149 .supported = (SUPPORTED_10000baseT_Full |
6150 SUPPORTED_1000baseT_Full |
6151 SUPPORTED_Autoneg |
6152 SUPPORTED_FIBRE |
6153 SUPPORTED_Pause |
6154 SUPPORTED_Asym_Pause),
6155 .media_type = ETH_PHY_SFP_FIBER,
6156 .ver_addr = 0,
6157 .req_flow_ctrl = 0,
6158 .req_line_speed = 0,
6159 .speed_cap_mask = 0,
6160 .req_duplex = 0,
6161 .rsrv = 0,
6162 .config_init = (config_init_t)bnx2x_8726_config_init,
6163 .read_status = (read_status_t)bnx2x_8726_read_status,
6164 .link_reset = (link_reset_t)bnx2x_8726_link_reset,
6165 .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
6166 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
6167 .hw_reset = (hw_reset_t)NULL,
6168 .set_link_led = (set_link_led_t)NULL,
6169 .phy_specific_func = (phy_specific_func_t)NULL
6170};
6171
6172static struct bnx2x_phy phy_8727 = {
6173 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6174 .addr = 0xff,
6175 .flags = FLAGS_FAN_FAILURE_DET_REQ,
6176 .def_md_devad = 0,
6177 .reserved = 0,
6178 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6179 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6180 .mdio_ctrl = 0,
6181 .supported = (SUPPORTED_10000baseT_Full |
6182 SUPPORTED_1000baseT_Full |
6183 SUPPORTED_FIBRE |
6184 SUPPORTED_Pause |
6185 SUPPORTED_Asym_Pause),
6186 .media_type = ETH_PHY_SFP_FIBER,
6187 .ver_addr = 0,
6188 .req_flow_ctrl = 0,
6189 .req_line_speed = 0,
6190 .speed_cap_mask = 0,
6191 .req_duplex = 0,
6192 .rsrv = 0,
6193 .config_init = (config_init_t)bnx2x_8727_config_init,
6194 .read_status = (read_status_t)bnx2x_8727_read_status,
6195 .link_reset = (link_reset_t)bnx2x_8727_link_reset,
6196 .config_loopback = (config_loopback_t)NULL,
6197 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
6198 .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
6199 .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
6200 .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
6201};
6202static struct bnx2x_phy phy_8481 = {
6203 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6204 .addr = 0xff,
6205 .flags = FLAGS_FAN_FAILURE_DET_REQ |
6206 FLAGS_REARM_LATCH_SIGNAL,
6207 .def_md_devad = 0,
6208 .reserved = 0,
6209 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6210 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6211 .mdio_ctrl = 0,
6212 .supported = (SUPPORTED_10baseT_Half |
6213 SUPPORTED_10baseT_Full |
6214 SUPPORTED_100baseT_Half |
6215 SUPPORTED_100baseT_Full |
6216 SUPPORTED_1000baseT_Full |
6217 SUPPORTED_10000baseT_Full |
6218 SUPPORTED_TP |
6219 SUPPORTED_Autoneg |
6220 SUPPORTED_Pause |
6221 SUPPORTED_Asym_Pause),
6222 .media_type = ETH_PHY_BASE_T,
6223 .ver_addr = 0,
6224 .req_flow_ctrl = 0,
6225 .req_line_speed = 0,
6226 .speed_cap_mask = 0,
6227 .req_duplex = 0,
6228 .rsrv = 0,
6229 .config_init = (config_init_t)bnx2x_8481_config_init,
6230 .read_status = (read_status_t)bnx2x_848xx_read_status,
6231 .link_reset = (link_reset_t)bnx2x_8481_link_reset,
6232 .config_loopback = (config_loopback_t)NULL,
6233 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
6234 .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
6235 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
6236 .phy_specific_func = (phy_specific_func_t)NULL
6237};
6238
6239static struct bnx2x_phy phy_84823 = {
6240 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
6241 .addr = 0xff,
6242 .flags = FLAGS_FAN_FAILURE_DET_REQ |
6243 FLAGS_REARM_LATCH_SIGNAL,
6244 .def_md_devad = 0,
6245 .reserved = 0,
6246 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6247 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6248 .mdio_ctrl = 0,
6249 .supported = (SUPPORTED_10baseT_Half |
6250 SUPPORTED_10baseT_Full |
6251 SUPPORTED_100baseT_Half |
6252 SUPPORTED_100baseT_Full |
6253 SUPPORTED_1000baseT_Full |
6254 SUPPORTED_10000baseT_Full |
6255 SUPPORTED_TP |
6256 SUPPORTED_Autoneg |
6257 SUPPORTED_Pause |
6258 SUPPORTED_Asym_Pause),
6259 .media_type = ETH_PHY_BASE_T,
6260 .ver_addr = 0,
6261 .req_flow_ctrl = 0,
6262 .req_line_speed = 0,
6263 .speed_cap_mask = 0,
6264 .req_duplex = 0,
6265 .rsrv = 0,
6266 .config_init = (config_init_t)bnx2x_848x3_config_init,
6267 .read_status = (read_status_t)bnx2x_848xx_read_status,
6268 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
6269 .config_loopback = (config_loopback_t)NULL,
6270 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
6271 .hw_reset = (hw_reset_t)NULL,
6272 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
6273 .phy_specific_func = (phy_specific_func_t)NULL
6274};
6275
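
Editorial sketch: each static struct above is a complete template that the populate functions below copy wholesale into params->phy[], after which the link code dispatches through the function pointers and skips any hook left NULL (as phy_null does for all of them). A minimal standalone mimic of that guard-then-dispatch pattern, using hypothetical types rather than the driver's own:

#include <stddef.h>

/* Table-of-callbacks pattern: hooks may be NULL, so every
 * call site guards the pointer before dispatching.
 */
struct phy_ops {
	int (*config_init)(void *ctx);
	int (*link_reset)(void *ctx);
};

static int dispatch_config_init(const struct phy_ops *ops, void *ctx)
{
	if (ops->config_init == NULL)
		return 0;	/* hook not provided; nothing to do */
	return ops->config_init(ctx);
}
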
6276/*****************************************************************/
6277/* */
6278/* Populate the phy according. Main function: bnx2x_populate_phy */
6279/* */
6280/*****************************************************************/
6281
6282static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
6283 struct bnx2x_phy *phy, u8 port,
6284 u8 phy_index)
6285{
6286 /* Get the 4 lanes xgxs config rx and tx */
6287 u32 rx = 0, tx = 0, i;
6288 for (i = 0; i < 2; i++) {
6289 /**
6290 * INT_PHY and EXT_PHY1 share the same value location in the
6291 	 * shmem. When num_phys is greater than 1, then this value
6292 * applies only to EXT_PHY1
6293 */
6294 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
6295 rx = REG_RD(bp, shmem_base +
6296 offsetof(struct shmem_region,
6297 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
6298
6299 tx = REG_RD(bp, shmem_base +
6300 offsetof(struct shmem_region,
6301 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
5745 		} else {
5746 			REG_WR(bp, NIG_REG_LED_10G_P1,
5747 				value);
5748 		}
6302 		} else {
6303 			rx = REG_RD(bp, shmem_base +
6304 				    offsetof(struct shmem_region,
6305 			dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
6306 
6307 			tx = REG_RD(bp, shmem_base +
6308 				    offsetof(struct shmem_region,
6309 			dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
6310 		}
6311
6312 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
6313 phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
6314
6315 phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
6316 phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
6317 }
6318}
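
Editorial note: each 32-bit shmem word read above packs two 16-bit lane values, high half first, so the two-iteration loop fills all four rx/tx preemphasis entries from two reads per direction. A standalone sketch of the same unpacking, with hypothetical names outside the driver:

#include <stdint.h>

/* Unpack two 16-bit preemphasis values per 32-bit shmem word:
 * word i covers lanes (i<<1) and (i<<1)+1.
 */
static void unpack_preemphasis(uint16_t pre[4], const uint32_t word[2])
{
	int i;
	for (i = 0; i < 2; i++) {
		pre[i << 1]       = (word[i] >> 16) & 0xffff;
		pre[(i << 1) + 1] = word[i] & 0xffff;
	}
}
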
6319
6320static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
6321 u8 phy_index, u8 port)
6322{
6323 u32 ext_phy_config = 0;
6324 switch (phy_index) {
6325 case EXT_PHY1:
6326 ext_phy_config = REG_RD(bp, shmem_base +
6327 offsetof(struct shmem_region,
6328 dev_info.port_hw_config[port].external_phy_config));
5749 		break;
5750 	case 5: /* TRAFFIC led */
5751 		/* Find if the traffic control is via BMAC or EMAC */
5752 		if (port == 0)
5753 			reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
5754 		else
5755 			reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
5756 
5757 		/* Override the traffic led in the EMAC:*/
5758 		if (reg_val == 1) {
5759 			/* Read the current value of the LED register in
5760 			the EMAC block */
5761 			reg_val = REG_RD(bp, emac_base +
5762 					     EMAC_REG_EMAC_LED);
5763 			/* Set the TRAFFIC_OVERRIDE bit to 1 */
5764 			reg_val |= EMAC_LED_OVERRIDE;
5765 			/* If value is 1, set the TRAFFIC bit, otherwise
5766 			reset it.*/
5767 			reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
5768 				(reg_val & ~EMAC_LED_TRAFFIC);
5769 			REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5770 		} else { /* Override the traffic led in the BMAC: */
5771 			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5772 				   + port*4, 1);
5773 			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
5774 				   value);
5775 		}
5776 		break;
5777 	default:
5778 		DP(NETIF_MSG_LINK,
5779 			"bnx2x_override_led_value() unknown led index %d "
5780 			"(should be 0-5)\n", led_idx);
5781 		return -EINVAL;
5782 	}
5783 
5784 	return 0;
5785 }
5786 
5787 
5788 u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
5789 {
5790 	u8 port = params->port;
5791 	u16 hw_led_mode = params->hw_led_mode;
5792 	u8 rc = 0;
5793 	u32 tmp;
5794 	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5795 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5796 	struct bnx2x *bp = params->bp;
5797 	DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
5798 	DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
5799 		 speed, hw_led_mode);
5800 	switch (mode) {
5801 	case LED_MODE_OFF:
5802 		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
5803 		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5804 			   SHARED_HW_CFG_LED_MAC1);
5805 
5806 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5807 		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
5808 		break;
5809 
5810 	case LED_MODE_OPER:
5811 		if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5812 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5813 			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5814 		} else {
5815 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5816 				   hw_led_mode);
5817 		}
5818 
5819 		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
5820 			   port*4, 0);
5821 		/* Set blinking rate to ~15.9Hz */
5822 		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
5823 			   LED_BLINK_RATE_VAL);
5824 		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
5825 			   port*4, 1);
5826 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5827 		EMAC_WR(bp, EMAC_REG_EMAC_LED,
5828 			    (tmp & (~EMAC_LED_OVERRIDE)));
5829 
5830 		if (CHIP_IS_E1(bp) &&
5831 		    ((speed == SPEED_2500) ||
5832 		     (speed == SPEED_1000) ||
5833 		     (speed == SPEED_100) ||
5834 		     (speed == SPEED_10))) {
5835 			/* On Everest 1 Ax chip versions for speeds less than
5836 			10G LED scheme is different */
5837 			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5838 				   + port*4, 1);
5839 			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
5840 				   port*4, 0);
5841 			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
5842 				   port*4, 1);
5843 		}
5844 		break;
5845 
5846 	default:
5847 		rc = -EINVAL;
5848 		DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
5849 			 mode);
5850 		break;
5851 	}
5852 	return rc;
5853 
5854 }
6329 		break;
6330 	case EXT_PHY2:
6331 		ext_phy_config = REG_RD(bp, shmem_base +
6332 			      offsetof(struct shmem_region,
6333 		dev_info.port_hw_config[port].external_phy_config2));
6334 		break;
6335 	default:
6336 		DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
6337 		return -EINVAL;
6338 	}
6339 
6340 	return ext_phy_config;
6341 }
6342 static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
6343 				 struct bnx2x_phy *phy)
6344 {
6345 	u32 phy_addr;
6346 	u32 chip_id;
6347 	u32 switch_cfg = (REG_RD(bp, shmem_base +
6348 				       offsetof(struct shmem_region,
6349 			dev_info.port_feature_config[port].link_config)) &
6350 			  PORT_FEATURE_CONNECTED_SWITCH_MASK);
6351 	chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
6352 	switch (switch_cfg) {
6353 	case SWITCH_CFG_1G:
6354 		phy_addr = REG_RD(bp,
6355 				  NIG_REG_SERDES0_CTRL_PHY_ADDR +
6356 				  port * 0x10);
6357 		*phy = phy_serdes;
6358 		break;
6359 	case SWITCH_CFG_10G:
6360 		phy_addr = REG_RD(bp,
6361 				  NIG_REG_XGXS0_CTRL_PHY_ADDR +
6362 				  port * 0x18);
6363 		*phy = phy_xgxs;
6364 		break;
6365 	default:
6366 		DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
6367 		return -EINVAL;
6368 	}
6369 	phy->addr = (u8)phy_addr;
6370 	phy->mdio_ctrl = bnx2x_get_emac_base(bp,
6371 					     SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
6372 					     port);
6373 	phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
6374 
6375 	DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
6376 		   port, phy->addr, phy->mdio_ctrl);
6377 
6378 	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
6379 	return 0;
6380 }
6381 
6382 static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
6383 				 u8 phy_index,
6384 				 u32 shmem_base,
6385 				 u32 shmem2_base,
6386 				 u8 port,
6387 				 struct bnx2x_phy *phy)
6388 {
6389 	u32 ext_phy_config, phy_type, config2;
6390 	u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
6391 	ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
6392 						  phy_index, port);
6393 	phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6394 	/* Select the phy type */
6395 	switch (phy_type) {
6396 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6397 		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
6398 		*phy = phy_8073;
6399 		break;
6400 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6401 		*phy = phy_8705;
6402 		break;
6403 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6404 		*phy = phy_8706;
6405 		break;
6406 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6407 		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
6408 		*phy = phy_8726;
6409 		break;
6410 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
6411 		/* BCM8727_NOC => BCM8727 no over current */
6412 		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
6413 		*phy = phy_8727;
6414 		phy->flags |= FLAGS_NOC;
6415 		break;
6416 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6417 		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
6418 		*phy = phy_8727;
6419 		break;
6420 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6421 		*phy = phy_8481;
6422 		break;
6423 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6424 		*phy = phy_84823;
6425 		break;
6426 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6427 		*phy = phy_7101;
6428 		break;
6429 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6430 		*phy = phy_null;
6431 		return -EINVAL;
6432 	default:
6433 		*phy = phy_null;
6434 		return 0;
6435 	}
6436 
6437 	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
6438 	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
6439 
6440 	/**
6441 	 * The shmem address of the phy version is located in different
6442 	 * structures. In case this structure is too old, do not set
6443 	 * the address.
6444 	 */
6445 	config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
6446 					dev_info.shared_hw_config.config2));
6447 	if (phy_index == EXT_PHY1) {
6448 		phy->ver_addr = shmem_base + offsetof(struct shmem_region,
6449 				port_mb[port].ext_phy_fw_version);
6450 
6451 		/* Check specific mdc mdio settings */
6452 		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
6453 			mdc_mdio_access = config2 &
6454 			SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
6455 	} else {
6456 		u32 size = REG_RD(bp, shmem2_base);
6457 
6458 		if (size >
6459 		    offsetof(struct shmem2_region, ext_phy_fw_version2)) {
6460 			phy->ver_addr = shmem2_base +
6461 				offsetof(struct shmem2_region,
6462 					 ext_phy_fw_version2[port]);
6463 		}
6464 		/* Check specific mdc mdio settings */
6465 		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
6466 			mdc_mdio_access = (config2 &
6467 			SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
6468 			(SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
6469 			 SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
6470 	}
6471 	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
6472 
6473 	/**
6474 	 * In case the mdc/mdio access of the external phy differs from the
6475 	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
6476 	 * to prevent one port from interfering with another port's CL45 operations.
6477 	 */
6478 	if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
6479 		phy->flags |= FLAGS_HW_LOCK_REQUIRED;
6480 	DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
6481 		   phy_type, port, phy_index);
6482 	DP(NETIF_MSG_LINK, "             addr=0x%x, mdio_ctl=0x%x\n",
6483 		   phy->addr, phy->mdio_ctrl);
6484 	return 0;
6485 }
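
Editorial sketch: the shmem2 size check above appears to treat the first 32-bit word of shmem2 as the region's size and only trusts ext_phy_fw_version2 when the region is large enough to contain it; that reading of the first word is an assumption inferred from the size-vs-offsetof comparison, not stated by the driver. Condensed as a standalone predicate with hypothetical names:

#include <stdint.h>

/* A shmem2 field is only trusted when the region (whose size is
 * assumed to live in its first word) is big enough to contain it.
 */
static int shmem2_has_field(uint32_t shmem2_size, uint32_t field_offset)
{
	return shmem2_size > field_offset;
}
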
5855 
5856 u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars)
5857 {
5858 	struct bnx2x *bp = params->bp;
5859 	u16 gp_status = 0;
5860 
5861 	CL45_RD_OVER_CL22(bp, params->port,
5862 			      params->phy_addr,
5863 			      MDIO_REG_BANK_GP_STATUS,
5864 			      MDIO_GP_STATUS_TOP_AN_STATUS1,
5865 			      &gp_status);
5866 	/* link is up only if both local phy and external phy are up */
5867 	if ((gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) &&
5868 	    bnx2x_ext_phy_is_link_up(params, vars, 1))
5869 		return 0;
5870 
5871 	return -ESRCH;
5872 }
5873 
5874 static u8 bnx2x_link_initialize(struct link_params *params,
5875 				struct link_vars *vars)
5876 {
5877 	struct bnx2x *bp = params->bp;
5878 	u8 port = params->port;
5879 	u8 rc = 0;
5880 	u8 non_ext_phy;
5881 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5882 
5883 	/* Activate the external PHY */
5884 	bnx2x_ext_phy_reset(params, vars);
5885 
5886 	bnx2x_set_aer_mmd(params, vars);
5887 
5888 	if (vars->phy_flags & PHY_XGXS_FLAG)
5889 		bnx2x_set_master_ln(params);
5890 
5891 	rc = bnx2x_reset_unicore(params);
5892 	/* reset the SerDes and wait for reset bit return low */
5893 	if (rc != 0)
5894 		return rc;
5895 
5896 	bnx2x_set_aer_mmd(params, vars);
5897 
5898 	/* setting the masterLn_def again after the reset */
5899 	if (vars->phy_flags & PHY_XGXS_FLAG) {
5900 		bnx2x_set_master_ln(params);
5901 		bnx2x_set_swap_lanes(params);
5902 	}
5903 
5904 	if (vars->phy_flags & PHY_XGXS_FLAG) {
5905 		if ((params->req_line_speed &&
5906 		     ((params->req_line_speed == SPEED_100) ||
5907 		      (params->req_line_speed == SPEED_10))) ||
5908 		    (!params->req_line_speed &&
5909 		     (params->speed_cap_mask >=
5910 		       PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
5911 		     (params->speed_cap_mask <
5912 		       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
5913 		     )) {
5914 			vars->phy_flags |= PHY_SGMII_FLAG;
5915 		} else {
5916 			vars->phy_flags &= ~PHY_SGMII_FLAG;
5917 		}
5918 	}
5919 	/* In case of external phy existence, the line speed would be the
5920 	 line speed linked up by the external phy. In case it is direct only,
5921 	 then the line_speed during initialization will be equal to the
5922 	 req_line_speed*/
5923 	vars->line_speed = params->req_line_speed;
5924 
5925 	bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
5926 
5927 	/* init ext phy and enable link state int */
5928 	non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
5929 		       (params->loopback_mode == LOOPBACK_XGXS_10));
5930 
5931 	if (non_ext_phy ||
5932 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5933 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
5934 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
5935 	    (params->loopback_mode == LOOPBACK_EXT_PHY)) {
5936 		if (params->req_line_speed == SPEED_AUTO_NEG)
5937 			bnx2x_set_parallel_detection(params, vars->phy_flags);
5938 		bnx2x_init_internal_phy(params, vars, non_ext_phy);
5939 	}
5940 
5941 	if (!non_ext_phy)
5942 		rc |= bnx2x_ext_phy_init(params, vars);
5943 
5944 	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5945 		     (NIG_STATUS_XGXS0_LINK10G |
5946 		      NIG_STATUS_XGXS0_LINK_STATUS |
5947 		      NIG_STATUS_SERDES0_LINK_STATUS));
5948 
5949 	return rc;
5950 
5951 }
6486 
6487 static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
6488 			     u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
6489 {
6490 	u8 status = 0;
6491 	phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
6492 	if (phy_index == INT_PHY)
6493 		return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
6494 	status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
6495 					port, phy);
6496 	return status;
6497 }
6498 
6499 static void bnx2x_phy_def_cfg(struct link_params *params,
6500 			      struct bnx2x_phy *phy,
6501 			      u8 phy_index)
6502 {
6503 	struct bnx2x *bp = params->bp;
6504 	u32 link_config;
6505 	/* Populate the default phy configuration for MF mode */
6506 	if (phy_index == EXT_PHY2) {
6507 		link_config = REG_RD(bp, params->shmem_base +
6508 					 offsetof(struct shmem_region, dev_info.
6509 			port_feature_config[params->port].link_config2));
6510 		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
6511 					offsetof(struct shmem_region, dev_info.
6512 			port_hw_config[params->port].speed_capability_mask2));
6513 	} else {
6514 		link_config = REG_RD(bp, params->shmem_base +
6515 				offsetof(struct shmem_region, dev_info.
6516 				port_feature_config[params->port].link_config));
6517 		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
6518 					offsetof(struct shmem_region, dev_info.
6519 			port_hw_config[params->port].speed_capability_mask));
6520 	}
6521 	DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
6522 		       " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
6523 
6524 	phy->req_duplex = DUPLEX_FULL;
6525 	switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6526 	case PORT_FEATURE_LINK_SPEED_10M_HALF:
6527 		phy->req_duplex = DUPLEX_HALF;
6528 	case PORT_FEATURE_LINK_SPEED_10M_FULL:
6529 		phy->req_line_speed = SPEED_10;
6530 		break;
6531 	case PORT_FEATURE_LINK_SPEED_100M_HALF:
6532 		phy->req_duplex = DUPLEX_HALF;
6533 	case PORT_FEATURE_LINK_SPEED_100M_FULL:
6534 		phy->req_line_speed = SPEED_100;
6535 		break;
6536 	case PORT_FEATURE_LINK_SPEED_1G:
6537 		phy->req_line_speed = SPEED_1000;
6538 		break;
6539 	case PORT_FEATURE_LINK_SPEED_2_5G:
6540 		phy->req_line_speed = SPEED_2500;
6541 		break;
6542 	case PORT_FEATURE_LINK_SPEED_10G_CX4:
6543 		phy->req_line_speed = SPEED_10000;
6544 		break;
6545 	default:
6546 		phy->req_line_speed = SPEED_AUTO_NEG;
6547 		break;
6548 	}
6549 
6550 	switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
6551 	case PORT_FEATURE_FLOW_CONTROL_AUTO:
6552 		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
6553 		break;
6554 	case PORT_FEATURE_FLOW_CONTROL_TX:
6555 		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
6556 		break;
6557 	case PORT_FEATURE_FLOW_CONTROL_RX:
6558 		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
6559 		break;
6560 	case PORT_FEATURE_FLOW_CONTROL_BOTH:
6561 		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
6562 		break;
6563 	default:
6564 		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6565 		break;
6566 	}
6567 }
6568 
6569 u32 bnx2x_phy_selection(struct link_params *params)
6570 {
6571 	u32 phy_config_swapped, prio_cfg;
6572 	u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
6573 
6574 	phy_config_swapped = params->multi_phy_config &
6575 		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
6576 
6577 	prio_cfg = params->multi_phy_config &
6578 			PORT_HW_CFG_PHY_SELECTION_MASK;
6579 
6580 	if (phy_config_swapped) {
6581 		switch (prio_cfg) {
6582 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
6583 			return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
6584 			break;
6585 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
6586 			return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
6587 			break;
6588 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
6589 			return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
6590 			break;
6591 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
6592 			return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
6593 			break;
6594 		}
6595 	} else
6596 		return_cfg = prio_cfg;
6597 
6598 	return return_cfg;
6599 }
6600 
6601 
6602 u8 bnx2x_phy_probe(struct link_params *params)
6603 {
6604 	u8 phy_index, actual_phy_idx, link_cfg_idx;
6605 	u32 phy_config_swapped;
6606 	struct bnx2x *bp = params->bp;
6607 	struct bnx2x_phy *phy;
6608 	params->num_phys = 0;
6609 	DP(NETIF_MSG_LINK, "Begin phy probe\n");
6610 	phy_config_swapped = params->multi_phy_config &
6611 		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
6612 
6613 	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
6614 	      phy_index++) {
6615 		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
6616 		actual_phy_idx = phy_index;
6617 		if (phy_config_swapped) {
6618 			if (phy_index == EXT_PHY1)
6619 				actual_phy_idx = EXT_PHY2;
6620 			else if (phy_index == EXT_PHY2)
6621 				actual_phy_idx = EXT_PHY1;
6622 		}
6623 		DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
6624 			       " actual_phy_idx %x\n", phy_config_swapped,
6625 			   phy_index, actual_phy_idx);
6626 		phy = &params->phy[actual_phy_idx];
6627 		if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
6628 				       params->shmem2_base, params->port,
6629 				       phy) != 0) {
6630 			params->num_phys = 0;
6631 			DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
6632 				   phy_index);
6633 			for (phy_index = INT_PHY;
6634 			      phy_index < MAX_PHYS;
6635 			      phy_index++)
6636 				*phy = phy_null;
6637 			return -EINVAL;
6638 		}
6639 		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
6640 			break;
6641 
6642 		bnx2x_phy_def_cfg(params, phy, phy_index);
6643 		params->num_phys++;
6644 	}
6645 
6646 	DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
6647 	return 0;
6648 }
6649 
6650 u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
6651 {
6652 	if (phy_idx < params->num_phys)
6653 		return params->phy[phy_idx].supported;
6654 	return 0;
6655 }
6656 
6657 static void set_phy_vars(struct link_params *params)
6658 {
6659 	struct bnx2x *bp = params->bp;
6660 	u8 actual_phy_idx, phy_index, link_cfg_idx;
6661 	u8 phy_config_swapped = params->multi_phy_config &
6662 			PORT_HW_CFG_PHY_SWAPPED_ENABLED;
6663 	for (phy_index = INT_PHY; phy_index < params->num_phys;
6664 	      phy_index++) {
6665 		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
6666 		actual_phy_idx = phy_index;
6667 		if (phy_config_swapped) {
6668 			if (phy_index == EXT_PHY1)
6669 				actual_phy_idx = EXT_PHY2;
6670 			else if (phy_index == EXT_PHY2)
6671 				actual_phy_idx = EXT_PHY1;
6672 		}
6673 		params->phy[actual_phy_idx].req_flow_ctrl =
6674 			params->req_flow_ctrl[link_cfg_idx];
6675 
6676 		params->phy[actual_phy_idx].req_line_speed =
6677 			params->req_line_speed[link_cfg_idx];
6678 
6679 		params->phy[actual_phy_idx].speed_cap_mask =
6680 			params->speed_cap_mask[link_cfg_idx];
6681 
6682 		params->phy[actual_phy_idx].req_duplex =
6683 			params->req_duplex[link_cfg_idx];
6684 
6685 		DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
6686 			   " speed_cap_mask %x\n",
6687 			   params->phy[actual_phy_idx].req_flow_ctrl,
6688 			   params->phy[actual_phy_idx].req_line_speed,
6689 			   params->phy[actual_phy_idx].speed_cap_mask);
6690 	}
6691 }
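
Editorial sketch: bnx2x_phy_probe() and set_phy_vars() above open-code the same index translation — when PORT_HW_CFG_PHY_SWAPPED_ENABLED is set, configuration slot EXT_PHY1 is applied to the phy stored at EXT_PHY2 and vice versa, while INT_PHY is never remapped. Expressed as a standalone helper (the driver does not define such a function):

enum { INT_PHY, EXT_PHY1, EXT_PHY2, MAX_PHYS };

/* Map a configuration phy index to the actual params->phy[] slot. */
static unsigned char actual_phy_idx(int swapped, unsigned char phy_index)
{
	if (swapped && phy_index == EXT_PHY1)
		return EXT_PHY2;
	if (swapped && phy_index == EXT_PHY2)
		return EXT_PHY1;
	return phy_index;	/* INT_PHY and non-swapped cases */
}
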
5952 6692 
5953 
5954 6693 u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
5955 6694 {
5956 6695 	struct bnx2x *bp = params->bp;
5957 	u32 val;
5958 
5959 6696 	DP(NETIF_MSG_LINK, "Phy Initialization started\n");
5960 	DP(NETIF_MSG_LINK, "req_speed %d, req_flowctrl %d\n",
5961 		 params->req_line_speed, params->req_flow_ctrl);
6697 	DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
6698 		   params->req_line_speed[0], params->req_flow_ctrl[0]);
6699 	DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
6700 		   params->req_line_speed[1], params->req_flow_ctrl[1]);
5962 6701 	vars->link_status = 0;
5963 6702 	vars->phy_link_up = 0;
5964 6703 	vars->link_up = 0;
@@ -5966,11 +6705,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
5966 6705 	vars->duplex = DUPLEX_FULL;
5967 6706 	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5968 6707 	vars->mac_type = MAC_TYPE_NONE;
5969 
5970 	if (params->switch_cfg == SWITCH_CFG_1G)
5971 		vars->phy_flags = PHY_SERDES_FLAG;
5972 	else
5973 		vars->phy_flags = PHY_XGXS_FLAG;
6708 	vars->phy_flags = 0;
5974 6709 
5975 6710 	/* disable attentions */
5976 6711 	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -5981,6 +6716,13 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
5981 6716 
5982 6717 	bnx2x_emac_init(params, vars);
5983 6718 
6719 	if (params->num_phys == 0) {
6720 		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
6721 		return -EINVAL;
6722 	}
6723 	set_phy_vars(params);
6724 
6725 	DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
5984 6726 	if (CHIP_REV_IS_FPGA(bp)) {
5985 6727 
5986 6728 		vars->link_up = 1;
@@ -6040,7 +6782,8 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6040 6782 
6041 6783 		vars->phy_flags = PHY_XGXS_FLAG;
6042 6784 
6043 	bnx2x_phy_deassert(params, vars->phy_flags);
6785 	bnx2x_xgxs_deassert(params);
6786 
6044 6787 	/* set bmac loopback */
6045 6788 	bnx2x_bmac_enable(params, vars, 1);
6046 6789 
@@ -6057,80 +6800,66 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6057 6800 
6058 6801 		vars->phy_flags = PHY_XGXS_FLAG;
6059 6802 
6060 	bnx2x_phy_deassert(params, vars->phy_flags);
6803 	bnx2x_xgxs_deassert(params);
6061 6804 	/* set bmac loopback */
6062 6805 		bnx2x_emac_enable(params, vars, 1);
6063 	bnx2x_emac_program(params, vars->line_speed,
6064 			   vars->duplex);
6806 		bnx2x_emac_program(params, vars);
6065 6807 		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6066 6808 			    params->port*4, 0);
6067 6809 
6068 	} else if ((params->loopback_mode == LOOPBACK_XGXS_10) ||
6810 	} else if ((params->loopback_mode == LOOPBACK_XGXS) ||
6069 6811 		   (params->loopback_mode == LOOPBACK_EXT_PHY)) {
6070 6812 
6071 6813 		vars->link_up = 1;
6072 		vars->line_speed = SPEED_10000;
6073 		vars->duplex = DUPLEX_FULL;
6074 6814 		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6815 		vars->duplex = DUPLEX_FULL;
6816 		if (params->req_line_speed[0] == SPEED_1000) {
6817 			vars->line_speed = SPEED_1000;
6818 			vars->mac_type = MAC_TYPE_EMAC;
6819 		} else {
6820 			vars->line_speed = SPEED_10000;
6821 			vars->mac_type = MAC_TYPE_BMAC;
6822 		}
6075 6823 
6076 		vars->phy_flags = PHY_XGXS_FLAG;
6077 
6078 		val = REG_RD(bp,
6079 			 NIG_REG_XGXS0_CTRL_PHY_ADDR+
6080 			 params->port*0x18);
6081 		params->phy_addr = (u8)val;
6082 
6083 		bnx2x_phy_deassert(params, vars->phy_flags);
6824 		bnx2x_xgxs_deassert(params);
6084 6825 		bnx2x_link_initialize(params, vars);
6085 6826 
6086 		vars->mac_type = MAC_TYPE_BMAC;
6087 
6827 		if (params->req_line_speed[0] == SPEED_1000) {
6828 			bnx2x_emac_program(params, vars);
6829 			bnx2x_emac_enable(params, vars, 0);
6830 		} else
6088 6831 		bnx2x_bmac_enable(params, vars, 0);
6089 6832 
6090 		if (params->loopback_mode == LOOPBACK_XGXS_10) {
6833 		if (params->loopback_mode == LOOPBACK_XGXS) {
6091 6834 			/* set 10G XGXS loopback */
6092 			bnx2x_set_xgxs_loopback(params, vars, 1);
6835 			params->phy[INT_PHY].config_loopback(
6836 				&params->phy[INT_PHY],
6837 				params);
6838 
6093 6839 		} else {
6094 6840 			/* set external phy loopback */
6095 			bnx2x_ext_phy_loopback(params);
6841 			u8 phy_index;
6842 			for (phy_index = EXT_PHY1;
6843 			      phy_index < params->num_phys; phy_index++) {
6844 				if (params->phy[phy_index].config_loopback)
6845 					params->phy[phy_index].config_loopback(
6846 						&params->phy[phy_index],
6847 						params);
6848 			}
6096 6849 		}
6850 
6097 6851 		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6098 6852 			    params->port*4, 0);
6099 6853 
6100 		bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
6854 		bnx2x_set_led(params, vars,
6855 			      LED_MODE_OPER, vars->line_speed);
6101 6856 	} else
6102 6857 	/* No loopback */
6103 6858 	{
6104 		bnx2x_phy_deassert(params, vars->phy_flags);
6105 		switch (params->switch_cfg) {
6106 		case SWITCH_CFG_1G:
6107 			vars->phy_flags |= PHY_SERDES_FLAG;
6108 			if ((params->ext_phy_config &
6109 			     PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) ==
6110 			     PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482) {
6111 				vars->phy_flags |= PHY_SGMII_FLAG;
6112 			}
6113 
6114 			val = REG_RD(bp,
6115 				 NIG_REG_SERDES0_CTRL_PHY_ADDR+
6116 				 params->port*0x10);
6117 
6118 			params->phy_addr = (u8)val;
6119 
6120 			break;
6121 		case SWITCH_CFG_10G:
6122 			vars->phy_flags |= PHY_XGXS_FLAG;
6123 			val = REG_RD(bp,
6124 				 NIG_REG_XGXS0_CTRL_PHY_ADDR+
6125 				 params->port*0x18);
6126 			params->phy_addr = (u8)val;
6127 
6128 			break;
6129 		default:
6130 			DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
6131 			return -EINVAL;
6132 		}
6133 		DP(NETIF_MSG_LINK, "Phy address = 0x%x\n", params->phy_addr);
6859 		if (params->switch_cfg == SWITCH_CFG_10G)
6860 			bnx2x_xgxs_deassert(params);
6861 		else
6862 			bnx2x_serdes_deassert(bp, params->port);
6134 6863 
6135 6864 		bnx2x_link_initialize(params, vars);
6136 6865 		msleep(30);
@@ -6138,29 +6867,11 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6138 6867 	}
6139 6868 	return 0;
6140 6869 }
6141
6142static void bnx2x_8726_reset_phy(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
6143{
6144 DP(NETIF_MSG_LINK, "bnx2x_8726_reset_phy port %d\n", port);
6145
6146 /* Set serial boot control for external load */
6147 bnx2x_cl45_write(bp, port,
6148 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, ext_phy_addr,
6149 MDIO_PMA_DEVAD,
6150 MDIO_PMA_REG_GEN_CTRL, 0x0001);
6151}
6152
6153 6870 u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6154 6871 		  u8 reset_ext_phy)
6155 6872 {
6156 6873 	struct bnx2x *bp = params->bp;
6157 	u32 ext_phy_config = params->ext_phy_config;
6158 	u8 port = params->port;
6159 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6160 	u32 val = REG_RD(bp, params->shmem_base +
6161 			     offsetof(struct shmem_region, dev_info.
6162 				      port_feature_config[params->port].
6163 				      config));
6874 	u8 phy_index, port = params->port;
6164 6875 	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6165 6876 	/* disable attentions */
6166 6877 	vars->link_status = 0;
@@ -6189,73 +6900,21 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6189 6900 	 * Hold it as vars low
6190 6901 	 */
6191 6902 	 /* clear link led */
6192 	bnx2x_set_led(params, LED_MODE_OFF, 0);
6903 	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
6193 	if (reset_ext_phy) {
6194 		switch (ext_phy_type) {
6195 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6196 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6197 			break;
6198 6904 
6199 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6200 		{
6201 
6202 			/* Disable Transmitter */
6203 			u8 ext_phy_addr =
6204 				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6205 			if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
6206 			    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
6207 				bnx2x_sfp_set_transmitter(bp, port,
6208 					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6209 					ext_phy_addr, 0);
6210 			break;
6211 		}
6212 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6213 			DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
6214 				   "low power mode\n",
6215 				   port);
6216 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6217 				       MISC_REGISTERS_GPIO_OUTPUT_LOW,
6218 				       port);
6219 			break;
6220 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6221 		{
6222 			u8 ext_phy_addr =
6223 				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6224 			/* Set soft reset */
6225 			bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
6226 			break;
6227 		}
6228 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6229 		{
6230 			u8 ext_phy_addr =
6231 				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6232 			bnx2x_cl45_write(bp, port,
6233 				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6234 				       ext_phy_addr,
6235 				       MDIO_AN_DEVAD,
6236 				       MDIO_AN_REG_CTRL, 0x0000);
6237 			bnx2x_cl45_write(bp, port,
6238 				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6239 				       ext_phy_addr,
6240 				       MDIO_PMA_DEVAD,
6241 				       MDIO_PMA_REG_CTRL, 1);
6242 			break;
6243 		}
6244 		default:
6245 			/* HW reset */
6246 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6247 				       MISC_REGISTERS_GPIO_OUTPUT_LOW,
6248 				       port);
6249 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6250 				       MISC_REGISTERS_GPIO_OUTPUT_LOW,
6251 				       port);
6252 			DP(NETIF_MSG_LINK, "reset external PHY\n");
6905 	if (reset_ext_phy) {
6906 		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6907 		      phy_index++) {
6908 			if (params->phy[phy_index].link_reset)
6909 				params->phy[phy_index].link_reset(
6910 					&params->phy[phy_index],
6911 					params);
6253 6912 		}
6254 6913 	}
6255 	/* reset the SerDes/XGXS */
6256 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
6257 	       (0x1ff << (port*16)));
6258 6914 
6915 	if (params->phy[INT_PHY].link_reset)
6916 		params->phy[INT_PHY].link_reset(
6917 			&params->phy[INT_PHY], params);
6259 6918 	/* reset BigMac */
6260 6919 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6261 6920 	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -6269,183 +6928,25 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6269 6928 	return 0;
6270 6929 }
6271 6930 
6272 static u8 bnx2x_update_link_down(struct link_params *params,
6273 				 struct link_vars *vars)
6931 /****************************************************************************/
6932 /*				Common function				    */
6933 /****************************************************************************/
6934 static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base, u8 phy_index)
6274 6935 {
6275 	struct bnx2x *bp = params->bp;
6276 	u8 port = params->port;
6936 	struct bnx2x_phy phy[PORT_MAX];
6937 	struct bnx2x_phy *phy_blk[PORT_MAX];
6277
6278 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6279 bnx2x_set_led(params, LED_MODE_OFF, 0);
6280
6281 /* indicate no mac active */
6282 vars->mac_type = MAC_TYPE_NONE;
6283
6284 /* update shared memory */
6285 vars->link_status = 0;
6286 vars->line_speed = 0;
6287 bnx2x_update_mng(params, vars->link_status);
6288
6289 /* activate nig drain */
6290 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6291
6292 /* disable emac */
6293 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6294
6295 msleep(10);
6296
6297 /* reset BigMac */
6298 bnx2x_bmac_rx_disable(bp, params->port);
6299 REG_WR(bp, GRCBASE_MISC +
6300 MISC_REGISTERS_RESET_REG_2_CLEAR,
6301 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6302 return 0;
6303}
6304
6305static u8 bnx2x_update_link_up(struct link_params *params,
6306 struct link_vars *vars,
6307 u8 link_10g, u32 gp_status)
6308{
6309 struct bnx2x *bp = params->bp;
6310 u8 port = params->port;
6311 u8 rc = 0;
6312
6313 vars->link_status |= LINK_STATUS_LINK_UP;
6314 if (link_10g) {
6315 bnx2x_bmac_enable(params, vars, 0);
6316 bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
6317 } else {
6318 rc = bnx2x_emac_program(params, vars->line_speed,
6319 vars->duplex);
6320
6321 bnx2x_emac_enable(params, vars, 0);
6322
6323 /* AN complete? */
6324 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
6325 if (!(vars->phy_flags &
6326 PHY_SGMII_FLAG))
6327 bnx2x_set_gmii_tx_driver(params);
6328 }
6329 }
6330
6331 /* PBF - link up */
6332 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
6333 vars->line_speed);
6334
6335 /* disable drain */
6336 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
6337
6338 /* update shared memory */
6339 bnx2x_update_mng(params, vars->link_status);
6340 msleep(20);
6341 return rc;
6342}
6343/* This function should called upon link interrupt */
6344/* In case vars->link_up, driver needs to
6345 1. Update the pbf
6346 2. Disable drain
6347 3. Update the shared memory
6348 4. Indicate link up
6349 5. Set LEDs
6350 Otherwise,
6351 1. Update shared memory
6352 2. Reset BigMac
6353 3. Report link down
6354 4. Unset LEDs
6355*/
6356u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6357{
6358 struct bnx2x *bp = params->bp;
6359 u8 port = params->port;
6360 u16 gp_status;
6361 u8 link_10g;
6362 u8 ext_phy_link_up, rc = 0;
6363 u32 ext_phy_type;
6364 u8 is_mi_int = 0;
6365
6366 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
6367 port, (vars->phy_flags & PHY_XGXS_FLAG),
6368 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
6369
6370 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
6371 port*0x18) > 0);
6372 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
6373 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
6374 is_mi_int,
6375 REG_RD(bp,
6376 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
6377
6378 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
6379 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
6380 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
6381
6382 /* disable emac */
6383 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6384
6385 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
6386
6387 /* Check external link change only for non-direct */
6388 ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars, is_mi_int);
6389
6390 /* Read gp_status */
6391 CL45_RD_OVER_CL22(bp, port, params->phy_addr,
6392 MDIO_REG_BANK_GP_STATUS,
6393 MDIO_GP_STATUS_TOP_AN_STATUS1,
6394 &gp_status);
6395
6396 rc = bnx2x_link_settings_status(params, vars, gp_status,
6397 ext_phy_link_up);
6398 if (rc != 0)
6399 return rc;
6400
6401 /* anything 10 and over uses the bmac */
6402 link_10g = ((vars->line_speed == SPEED_10000) ||
6403 (vars->line_speed == SPEED_12000) ||
6404 (vars->line_speed == SPEED_12500) ||
6405 (vars->line_speed == SPEED_13000) ||
6406 (vars->line_speed == SPEED_15000) ||
6407 (vars->line_speed == SPEED_16000));
6408
6409 bnx2x_link_int_ack(params, vars, link_10g, is_mi_int);
6410
6411 /* In case external phy link is up, and internal link is down
6412 ( not initialized yet probably after link initialization, it needs
6413 to be initialized.
6414 Note that after link down-up as result of cable plug,
6415 the xgxs link would probably become up again without the need to
6416 initialize it*/
6417
6418 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
6419 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
6420 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
6421 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
6422 (ext_phy_link_up && !vars->phy_link_up))
6423 bnx2x_init_internal_phy(params, vars, 0);
6424
6425 /* link is up only if both local phy and external phy are up */
6426 vars->link_up = (ext_phy_link_up && vars->phy_link_up);
6427
6428 if (vars->link_up)
6429 rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
6430 else
6431 rc = bnx2x_update_link_down(params, vars);
6432
6433 return rc;
6434}
6435
6436static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6437{
6438 u8 ext_phy_addr[PORT_MAX];
6439 6938 	u16 val;
6440 6939 	s8 port;
6441 6940 
6442 6941 	/* PART1 - Reset both phys */
6443 6942 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6444 6943 		/* Extract the ext phy address for the port */
6445 		u32 ext_phy_config = REG_RD(bp, shmem_base +
6446 					offsetof(struct shmem_region,
6447 			dev_info.port_hw_config[port].external_phy_config));
6448 
6944 		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
6945 				       port, &phy[port]) !=
6946 		    0) {
6947 			DP(NETIF_MSG_LINK, "populate_phy failed\n");
6948 			return -EINVAL;
6949 		}
6449 6950 		/* disable attentions */
6450 6951 		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6451 6952 			     (NIG_MASK_XGXS0_LINK_STATUS |
@@ -6453,17 +6954,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6453 6954 			      NIG_MASK_SERDES0_LINK_STATUS |
6454 6955 			      NIG_MASK_MI_INT));
6455 6956 
6456 		ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
6457 
6458 6957 		/* Need to take the phy out of low power mode in order
6459 6958 			to access its registers */
6460 6959 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6461 6960 			       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
6462 6961 
6463 6962 		/* Reset the phy */
6464 		bnx2x_cl45_write(bp, port,
6465 			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6466 			       ext_phy_addr[port],
6963 		bnx2x_cl45_write(bp, &phy[port],
6467 6964 			       MDIO_PMA_DEVAD,
6468 6965 			       MDIO_PMA_REG_CTRL,
6469 6966 			       1<<15);
@@ -6472,15 +6969,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6472 6969 	/* Add delay of 150ms after reset */
6473 6970 	msleep(150);
6474 6971 
6972 if (phy[PORT_0].addr & 0x1) {
6973 phy_blk[PORT_0] = &(phy[PORT_1]);
6974 phy_blk[PORT_1] = &(phy[PORT_0]);
6975 } else {
6976 phy_blk[PORT_0] = &(phy[PORT_0]);
6977 phy_blk[PORT_1] = &(phy[PORT_1]);
6978 }
6979
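
Editorial sketch: the phy_blk[] indirection above (used by both the 8073 and 8727 common-init paths) re-pairs the two probed phy structs with the physical ports — an odd MDIO address on the phy probed for port 0 is taken to mean the pair is swapped, so the pointers are crossed before the per-port firmware download. The same rule as a standalone helper with hypothetical types:

/* Order the phy pointers so that phy_blk[p] matches physical port p. */
enum { PORT_0, PORT_1, PORT_MAX };

struct phy { unsigned char addr; };

static void order_phy_blocks(struct phy phy[PORT_MAX],
			     struct phy *phy_blk[PORT_MAX])
{
	int swapped = phy[PORT_0].addr & 0x1;	/* odd addr => swapped pair */

	phy_blk[PORT_0] = &phy[swapped ? PORT_1 : PORT_0];
	phy_blk[PORT_1] = &phy[swapped ? PORT_0 : PORT_1];
}
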
6475 6980 	/* PART2 - Download firmware to both phys */
6476 6981 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6477 6982 		u16 fw_ver1;
6478 6983 
6479 		bnx2x_bcm8073_external_rom_boot(bp, port,
6480 			ext_phy_addr[port], shmem_base);
6984 		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
6985 						  port);
6481 6986 
6482 		bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6483 			      ext_phy_addr[port],
6987 		bnx2x_cl45_read(bp, phy_blk[port],
6484 6988 			      MDIO_PMA_DEVAD,
6485 6989 			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
6486 6990 		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
@@ -6492,16 +6996,12 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6492 6996 		}
6493 6997 
6494 6998 		/* Only set bit 10 = 1 (Tx power down) */
6495 		bnx2x_cl45_read(bp, port,
6496 			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6497 			      ext_phy_addr[port],
6999 		bnx2x_cl45_read(bp, phy_blk[port],
6498 7000 			      MDIO_PMA_DEVAD,
6499 7001 			      MDIO_PMA_REG_TX_POWER_DOWN, &val);
6500 7002 
6501 7003 		/* Phase1 of TX_POWER_DOWN reset */
6502 		bnx2x_cl45_write(bp, port,
6503 			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6504 			       ext_phy_addr[port],
7004 		bnx2x_cl45_write(bp, phy_blk[port],
6505 7005 			       MDIO_PMA_DEVAD,
6506 7006 			       MDIO_PMA_REG_TX_POWER_DOWN,
6507 7007 			       (val | 1<<10));
@@ -6515,28 +7015,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6515 7015 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6516 7016 		/* Phase2 of POWER_DOWN_RESET */
6517 7017 		/* Release bit 10 (Release Tx power down) */
6518 		bnx2x_cl45_read(bp, port,
6519 			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6520 			      ext_phy_addr[port],
7018 		bnx2x_cl45_read(bp, phy_blk[port],
6521 7019 			      MDIO_PMA_DEVAD,
6522 7020 			      MDIO_PMA_REG_TX_POWER_DOWN, &val);
6523 7021 
6524 		bnx2x_cl45_write(bp, port,
6525 			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6526 			       ext_phy_addr[port],
7022 		bnx2x_cl45_write(bp, phy_blk[port],
6527 7023 			       MDIO_PMA_DEVAD,
6528 7024 			       MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
6529 7025 		msleep(15);
6530 7026 
6531 7027 		/* Read modify write the SPI-ROM version select register */
6532 		bnx2x_cl45_read(bp, port,
6533 			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6534 			      ext_phy_addr[port],
7028 		bnx2x_cl45_read(bp, phy_blk[port],
6535 7029 			      MDIO_PMA_DEVAD,
6536 7030 			      MDIO_PMA_REG_EDC_FFE_MAIN, &val);
6537 		bnx2x_cl45_write(bp, port,
6538 			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6539 			       ext_phy_addr[port],
7031 		bnx2x_cl45_write(bp, phy_blk[port],
6540 7032 			       MDIO_PMA_DEVAD,
6541 7033 			       MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
6542 7034 
@@ -6545,33 +7037,74 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6545 7037 			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6546 7038 	}
6547 7039 	return 0;
6548 
6549 7040 }
6550 7041 
6551 static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
7042 static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7043 u32 shmem2_base, u8 phy_index)
7044{
7045 u32 val;
7046 s8 port;
7047 struct bnx2x_phy phy;
7048 /* Use port1 because of the static port-swap */
7049 /* Enable the module detection interrupt */
7050 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
7051 val |= ((1<<MISC_REGISTERS_GPIO_3)|
7052 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
7053 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
7054
7055 bnx2x_ext_phy_hw_reset(bp, 1);
7056 msleep(5);
7057 for (port = 0; port < PORT_MAX; port++) {
7058 /* Extract the ext phy address for the port */
7059 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
7060 port, &phy) !=
7061 0) {
7062 DP(NETIF_MSG_LINK, "populate phy failed\n");
7063 return -EINVAL;
7064 }
7065
7066 /* Reset phy*/
7067 bnx2x_cl45_write(bp, &phy,
7068 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
7069
7070
7071 /* Set fault module detected LED on */
7072 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
7073 MISC_REGISTERS_GPIO_HIGH,
7074 port);
7075 }
7076
7077 return 0;
7078}
7079static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7080 u32 shmem2_base, u8 phy_index)
6552 7081 {
6553 	u8 ext_phy_addr[PORT_MAX];
6554 	s8 port, first_port, i;
7082 	s8 port;
6555 7083 	u32 swap_val, swap_override;
7084 	struct bnx2x_phy phy[PORT_MAX];
7085 	struct bnx2x_phy *phy_blk[PORT_MAX];
6556 7086 	DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n");
6557 7087 	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6558 7088 	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6559 7089 
6560 	bnx2x_ext_phy_hw_reset(bp, 1 ^ (swap_val && swap_override));
6561 	msleep(5);
6562 
6563 	if (swap_val && swap_override)
6564 		first_port = PORT_0;
6565 	else
6566 		first_port = PORT_1;
7090 	port = 1;
7091 
7092 	bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
7093 
7094 	/* Calculate the port based on port swap */
7095 	port ^= (swap_val && swap_override);
7096 
7097 	msleep(5);
6567 7098 
6568 7099 	/* PART1 - Reset both phys */
6569 	for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
7100 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6570 7101 		/* Extract the ext phy address for the port */
6571 		u32 ext_phy_config = REG_RD(bp, shmem_base +
6572 					offsetof(struct shmem_region,
6573 			dev_info.port_hw_config[port].external_phy_config));
6574 
7102 		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
7103 				       port, &phy[port]) !=
7104 		    0) {
7105 			DP(NETIF_MSG_LINK, "populate phy failed\n");
7106 			return -EINVAL;
7107 		}
6575 7108 		/* disable attentions */
6576 7109 		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6577 7110 			     (NIG_MASK_XGXS0_LINK_STATUS |
@@ -6579,12 +7112,9 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6579 7112 			      NIG_MASK_SERDES0_LINK_STATUS |
6580 7113 			      NIG_MASK_MI_INT));
6581 7114 
6582 		ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
6583 7115 
6584 7116 		/* Reset the phy */
6585 		bnx2x_cl45_write(bp, port,
6586 			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6587 			       ext_phy_addr[port],
7117 		bnx2x_cl45_write(bp, &phy[port],
6588 7118 			       MDIO_PMA_DEVAD,
6589 7119 			       MDIO_PMA_REG_CTRL,
6590 7120 			       1<<15);
@@ -6592,16 +7122,20 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6592 7122 
6593 7123 		/* Add delay of 150ms after reset */
6594 7124 		msleep(150);
6595 
7125 	if (phy[PORT_0].addr & 0x1) {
7126 		phy_blk[PORT_0] = &(phy[PORT_1]);
7127 		phy_blk[PORT_1] = &(phy[PORT_0]);
7128 	} else {
7129 		phy_blk[PORT_0] = &(phy[PORT_0]);
7130 		phy_blk[PORT_1] = &(phy[PORT_1]);
7131 	}
6596 7132 	/* PART2 - Download firmware to both phys */
6597 	for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
7133 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6598 7134 		u16 fw_ver1;
6599 7135 
6600 		bnx2x_bcm8727_external_rom_boot(bp, port,
6601 			ext_phy_addr[port], shmem_base);
6602 
6603 		bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6604 			      ext_phy_addr[port],
7136 		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7137 						  port);
7138 		bnx2x_cl45_read(bp, phy_blk[port],
6605 7139 			      MDIO_PMA_DEVAD,
6606 7140 			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
6607 7141 		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
@@ -6616,82 +7150,32 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6616 return 0; 7150 return 0;
6617} 7151}
6618 7152
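The phy_blk[] assignment above keys the firmware-download order off bit 0 of the port-0 MDIO address. A minimal stand-alone sketch of that ordering rule, with simplified types and illustrative addresses only (not part of the patch):

#include <stdio.h>

struct phy { unsigned char addr; };

int main(void)
{
	struct phy phy[2] = { { .addr = 0x3 }, { .addr = 0x2 } };
	struct phy *blk[2];

	if (phy[0].addr & 0x1) {	/* odd address on port 0: swap order */
		blk[0] = &phy[1];
		blk[1] = &phy[0];
	} else {			/* even address: keep natural order */
		blk[0] = &phy[0];
		blk[1] = &phy[1];
	}
	printf("boot order: 0x%x then 0x%x\n", blk[0]->addr, blk[1]->addr);
	return 0;
}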
-
-static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
-	u8 ext_phy_addr;
-	u32 val;
-	s8 port;
-
-	/* Use port1 because of the static port-swap */
-	/* Enable the module detection interrupt */
-	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
-	val |= ((1<<MISC_REGISTERS_GPIO_3)|
-		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
-	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
-
-	bnx2x_ext_phy_hw_reset(bp, 1);
-	msleep(5);
-	for (port = 0; port < PORT_MAX; port++) {
-		/* Extract the ext phy address for the port */
-		u32 ext_phy_config = REG_RD(bp, shmem_base +
-					offsetof(struct shmem_region,
-			dev_info.port_hw_config[port].external_phy_config));
-
-		ext_phy_addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
-		DP(NETIF_MSG_LINK, "8726_common_init : ext_phy_addr = 0x%x\n",
-		   ext_phy_addr);
-
-		bnx2x_8726_reset_phy(bp, port, ext_phy_addr);
-
-		/* Set fault module detected LED on */
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-			       MISC_REGISTERS_GPIO_HIGH,
-			       port);
-	}
-
-	return 0;
-}
-
-
-static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
-	/* HW reset */
-	bnx2x_ext_phy_hw_reset(bp, 1);
-	return 0;
-}
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base,
+				    u32 shmem2_base, u8 phy_index,
+				    u32 ext_phy_type)
 {
 	u8 rc = 0;
-	u32 ext_phy_type;
-
-	DP(NETIF_MSG_LINK, "Begin common phy init\n");
-
-	/* Read the ext_phy_type for arbitrary port(0) */
-	ext_phy_type = XGXS_EXT_PHY_TYPE(
-			REG_RD(bp, shmem_base +
-			   offsetof(struct shmem_region,
-			dev_info.port_hw_config[0].external_phy_config)));
 
 	switch (ext_phy_type) {
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-	{
-		rc = bnx2x_8073_common_init_phy(bp, shmem_base);
+		rc = bnx2x_8073_common_init_phy(bp, shmem_base,
+						shmem2_base, phy_index);
 		break;
-	}
 
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
-		rc = bnx2x_8727_common_init_phy(bp, shmem_base);
+		rc = bnx2x_8727_common_init_phy(bp, shmem_base,
+						shmem2_base, phy_index);
 		break;
 
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
 		/* GPIO1 affects both ports, so there's need to pull
 		   it for single port alone */
-		rc = bnx2x_8726_common_init_phy(bp, shmem_base);
+		rc = bnx2x_8726_common_init_phy(bp, shmem_base,
+						shmem2_base, phy_index);
 		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
-		rc = bnx2x_84823_common_init_phy(bp, shmem_base);
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		rc = -EINVAL;
 		break;
 	default:
 		DP(NETIF_MSG_LINK,
@@ -6703,33 +7187,80 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 	return rc;
 }
 
-void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base,
+			 u32 shmem2_base)
 {
-	u16 val, cnt;
+	u8 rc = 0;
+	u8 phy_index;
+	u32 ext_phy_type, ext_phy_config;
+	DP(NETIF_MSG_LINK, "Begin common phy init\n");
 
-	bnx2x_cl45_read(bp, port,
-			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
-			phy_addr,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_7101_RESET, &val);
+	if (CHIP_REV_IS_EMUL(bp))
+		return 0;
 
-	for (cnt = 0; cnt < 10; cnt++) {
-		msleep(50);
-		/* Writes a self-clearing reset */
-		bnx2x_cl45_write(bp, port,
-				 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
-				 phy_addr,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_7101_RESET,
-				 (val | (1<<15)));
-		/* Wait for clear */
-		bnx2x_cl45_read(bp, port,
-				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
-				phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_7101_RESET, &val);
-
-		if ((val & (1<<15)) == 0)
-			break;
+	/* Read the ext_phy_type for arbitrary port(0) */
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		ext_phy_config = bnx2x_get_ext_phy_config(bp,
+							  shmem_base,
+							  phy_index, 0);
+		ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+		rc |= bnx2x_ext_phy_common_init(bp, shmem_base,
+						shmem2_base,
+						phy_index, ext_phy_type);
+	}
+	return rc;
+}
+
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
+{
+	u8 phy_index;
+	struct bnx2x_phy phy;
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       0, &phy) != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+
+		if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
+			return 1;
+	}
+	return 0;
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+			     u32 shmem_base,
+			     u32 shmem2_base,
+			     u8 port)
+{
+	u8 phy_index, fan_failure_det_req = 0;
+	struct bnx2x_phy phy;
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy)
+		    != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+		fan_failure_det_req |= (phy.flags &
+					FLAGS_FAN_FAILURE_DET_REQ);
+	}
+	return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+	u8 phy_index;
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (params->phy[phy_index].hw_reset) {
+			params->phy[phy_index].hw_reset(
+				&params->phy[phy_index],
+				params);
+			params->phy[phy_index] = phy_null;
+		}
 	}
 }
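bnx2x_hw_lock_required() and bnx2x_fan_failure_det_req() above share one pattern: populate each phy in turn and aggregate a flag across all of them. A stand-alone sketch of that aggregation loop, with stand-in constants rather than the driver's real definitions:

#include <stdio.h>

#define MAX_PHYS			3
#define FLAGS_FAN_FAILURE_DET_REQ	(1 << 2)

struct phy { unsigned char flags; };

static unsigned char aggregate_flag(const struct phy *phys, unsigned char flag)
{
	unsigned char acc = 0;
	int i;

	for (i = 0; i < MAX_PHYS; i++)	/* OR the flag over every phy */
		acc |= phys[i].flags & flag;
	return acc;
}

int main(void)
{
	struct phy phys[MAX_PHYS] = { { 0 }, { FLAGS_FAN_FAILURE_DET_REQ }, { 0 } };

	printf("fan detection required: %d\n",
	       aggregate_flag(phys, FLAGS_FAN_FAILURE_DET_REQ) ? 1 : 0);
	return 0;
}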
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 40c2981de8ed..e98ea3d19471 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2010 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
@@ -46,9 +46,137 @@
 #define SFP_EEPROM_PART_NO_ADDR		0x28
 #define SFP_EEPROM_PART_NO_SIZE		16
 #define PWR_FLT_ERR_MSG_LEN			250
+
+#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
+		((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
+		(((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+		 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
+		((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define SINGLE_MEDIA_DIRECT(params)	(params->num_phys == 1)
+/* Single Media board contains single external phy */
+#define SINGLE_MEDIA(params)		(params->num_phys == 2)
+/* Dual Media board contains two external phy with different media */
+#define DUAL_MEDIA(params)		(params->num_phys == 3)
+#define FW_PARAM_MDIO_CTRL_OFFSET	16
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+		(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
 /***********************************************************/
 /*                         Structs                         */
 /***********************************************************/
+#define INT_PHY		0
+#define EXT_PHY1	1
+#define EXT_PHY2	2
+#define MAX_PHYS	3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx)	((_phy_idx == INT_PHY) ? \
+					 0 : (_phy_idx - 1))
+/***********************************************************/
+/*                     bnx2x_phy struct                    */
+/* Defines the required arguments and function per phy     */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+			     struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+				  struct link_params *params);
+typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+			       struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+				    struct link_params *params, u32 action);
+
+struct bnx2x_phy {
+	u32 type;
+
+	/* Loaded during init */
+	u8 addr;
+
+	u8 flags;
+	/* Require HW lock */
+#define FLAGS_HW_LOCK_REQUIRED		(1<<0)
+	/* No Over-Current detection */
+#define FLAGS_NOC			(1<<1)
+	/* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ	(1<<2)
+	/* Initialize first the XGXS and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST		(1<<3)
+#define FLAGS_REARM_LATCH_SIGNAL	(1<<6)
+#define FLAGS_SFP_NOT_APPROVED		(1<<7)
+
+	u8 def_md_devad;
+	u8 reserved;
+	/* preemphasis values for the rx side */
+	u16 rx_preemphasis[4];
+
+	/* preemphasis values for the tx side */
+	u16 tx_preemphasis[4];
+
+	/* EMAC address for access MDIO */
+	u32 mdio_ctrl;
+
+	u32 supported;
+
+	u32 media_type;
+#define	ETH_PHY_UNSPECIFIED	0x0
+#define	ETH_PHY_SFP_FIBER	0x1
+#define	ETH_PHY_XFP_FIBER	0x2
+#define	ETH_PHY_DA_TWINAX	0x3
+#define	ETH_PHY_BASE_T		0x4
+#define	ETH_PHY_NOT_PRESENT	0xff
+
+	/* The address in which version is located*/
+	u32 ver_addr;
+
+	u16 req_flow_ctrl;
+
+	u16 req_line_speed;
+
+	u32 speed_cap_mask;
+
+	u16 req_duplex;
+	u16 rsrv;
+	/* Called per phy/port init, and it configures LASI, speed, autoneg,
+	   duplex, flow control negotiation, etc. */
+	config_init_t config_init;
+
+	/* Called due to interrupt. It determines the link, speed */
+	read_status_t read_status;
+
+	/* Called when driver is unloading. Should reset the phy */
+	link_reset_t link_reset;
+
+	/* Set the loopback configuration for the phy */
+	config_loopback_t config_loopback;
+
+	/* Format the given raw number into str up to len */
+	format_fw_ver_t format_fw_ver;
+
+	/* Reset the phy (both ports) */
+	hw_reset_t hw_reset;
+
+	/* Set link led mode (on/off/oper)*/
+	set_link_led_t set_link_led;
+
+	/* PHY Specific tasks */
+	phy_specific_func_t phy_specific_func;
+#define DISABLE_TX	1
+#define ENABLE_TX	2
+};
+
 /* Inputs parameters to the CLC */
 struct link_params {
 
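The LINK_CONFIG_IDX() macro above folds the internal phy and EXT_PHY1 onto configuration slot 0 and gives EXT_PHY2 slot 1. A stand-alone sketch of that mapping; the macros are copied from the hunk, the demo speeds are hypothetical:

#include <stdio.h>

#define INT_PHY		0
#define EXT_PHY1	1
#define EXT_PHY2	2
#define MAX_PHYS	3
#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? 0 : (_phy_idx - 1))

int main(void)
{
	unsigned int req_line_speed[LINK_CONFIG_SIZE] = { 10000, 1000 };
	int i;

	for (i = INT_PHY; i < MAX_PHYS; i++)	/* INT_PHY and EXT_PHY1 share slot 0 */
		printf("phy %d -> cfg slot %d -> speed %u\n",
		       i, LINK_CONFIG_IDX(i), req_line_speed[LINK_CONFIG_IDX(i)]);
	return 0;
}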
@@ -59,56 +187,50 @@ struct link_params {
 #define LOOPBACK_NONE		0
 #define LOOPBACK_EMAC		1
 #define LOOPBACK_BMAC		2
-#define LOOPBACK_XGXS_10	3
+#define LOOPBACK_XGXS		3
 #define LOOPBACK_EXT_PHY	4
 #define LOOPBACK_EXT		5
 
-	u16 req_duplex;
-	u16 req_flow_ctrl;
-	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
-				req_flow_ctrl is set to AUTO */
-	u16 req_line_speed; /* Also determine AutoNeg */
-
 	/* Device parameters */
 	u8 mac_addr[6];
 
+	u16 req_duplex[LINK_CONFIG_SIZE];
+	u16 req_flow_ctrl[LINK_CONFIG_SIZE];
+
+	u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
 	/* shmem parameters */
 	u32 shmem_base;
-	u32 speed_cap_mask;
+	u32 shmem2_base;
+	u32 speed_cap_mask[LINK_CONFIG_SIZE];
 	u32 switch_cfg;
 #define SWITCH_CFG_1G		PORT_FEATURE_CON_SWITCH_1G_SWITCH
 #define SWITCH_CFG_10G		PORT_FEATURE_CON_SWITCH_10G_SWITCH
 #define SWITCH_CFG_AUTO_DETECT	PORT_FEATURE_CON_SWITCH_AUTO_DETECT
 
-	u16 hw_led_mode; /* part of the hw_config read from the shmem */
-
-	/* phy_addr populated by the phy_init function */
-	u8 phy_addr;
-	/*u8 reserved1;*/
-
 	u32 lane_config;
-	u32 ext_phy_config;
-#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
-		((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
-#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
-		(((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
-		 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
-#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
-		((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
 
 	/* Phy register parameter */
 	u32 chip_id;
 
-	u16 xgxs_config_rx[4]; /* preemphasis values for the rx side */
-	u16 xgxs_config_tx[4]; /* preemphasis values for the tx side */
-
 	u32 feature_config_flags;
 #define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED	(1<<0)
 #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
-#define FEATURE_CONFIG_BCM8727_NOC			(1<<3)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
+	/* Will be populated during common init */
+	struct bnx2x_phy phy[MAX_PHYS];
+
+	/* Will be populated during common init */
+	u8 num_phys;
+
+	u8 rsrv;
+	u16 hw_led_mode; /* part of the hw_config read from the shmem */
+	u32 multi_phy_config;
 
 	/* Device pointer passed to all callback functions */
 	struct bnx2x *bp;
+	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+				req_flow_ctrl is set to AUTO */
 };
 
 /* Output parameters */
@@ -129,12 +251,6 @@ struct link_vars {
 	u16 flow_ctrl;
 	u16 ieee_fc;
 
-	u32 autoneg;
-#define AUTO_NEG_DISABLED			0x0
-#define AUTO_NEG_ENABLED			0x1
-#define AUTO_NEG_COMPLETE			0x2
-#define AUTO_NEG_PARALLEL_DETECTION_USED	0x3
-
 	/* The same definitions as the shmem parameter */
 	u32 link_status;
 };
@@ -142,8 +258,6 @@ struct link_vars {
 /***********************************************************/
 /*                         Functions                       */
 /***********************************************************/
-
-/* Initialize the phy */
 u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
 
 /* Reset the link. Should be called when driver or interface goes down
@@ -155,17 +269,21 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 /* bnx2x_link_update should be called upon link interrupt */
 u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
 
-/* use the following cl45 functions to read/write from external_phy
+/* use the following phy functions to read/write from external_phy
   In order to use it to read/write internal phy registers, use
   DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
-  Use ext_phy_type of 0 in case of cl22 over cl45
   the register */
-u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
-		   u8 phy_addr, u8 devad, u16 reg, u16 *ret_val);
+u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+		  u8 devad, u16 reg, u16 *ret_val);
+
+u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+		   u8 devad, u16 reg, u16 val);
 
-u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
-		    u8 phy_addr, u8 devad, u16 reg, u16 val);
+u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+		   u8 devad, u16 reg, u16 *ret_val);
 
+u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+		    u8 devad, u16 reg, u16 val);
 /* Reads the link_status from the shmem,
    and update the link vars accordingly */
 void bnx2x_link_status_update(struct link_params *input,
@@ -178,9 +296,12 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
    Basically, the CLC takes care of the led for the link, but in case one needs
    to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
    blink the led, and LED_MODE_OFF to set the led off.*/
-u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
-#define LED_MODE_OFF	0
-#define LED_MODE_OPER	2
+u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
+		 u8 mode, u32 speed);
+#define LED_MODE_OFF			0
+#define LED_MODE_ON			1
+#define LED_MODE_OPER			2
+#define LED_MODE_FRONT_PANEL_OFF	3
 
 u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
 
@@ -190,17 +311,38 @@ void bnx2x_handle_module_detect_int(struct link_params *params);
 
 /* Get the actual link status. In case it returns 0, link is up,
 	otherwise link is down*/
-u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
+u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
+		   u8 is_serdes);
 
 /* One-time initialization for external phy after power up */
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base);
 
 /* Reset the external PHY using GPIO */
 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
 
-void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr);
+/* Reset the external of SFX7101 */
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
-u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				struct link_params *params, u16 addr,
 				u8 byte_cnt, u8 *o_buf);
 
+void bnx2x_hw_reset_phy(struct link_params *params);
+
+/* Checks if HW lock is required for this phy/board type */
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
+			  u32 shmem2_base);
+
+/* Returns the aggregative supported attributes of the phys on board */
+u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
+
+/* Check swap bit and adjust PHY order */
+u32 bnx2x_phy_selection(struct link_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+u8 bnx2x_phy_probe(struct link_params *params);
+/* Checks if fan failure detection is required on one of the phys on board */
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
+			     u32 shmem2_base, u8 port);
+
 #endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f8c3f08e4ce7..7ba3a6d96fd5 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -781,7 +781,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
 		DP(NETIF_MSG_HW,
 		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
 		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
-		return -EINVAL;
+		return false;
 	}
 
 	if (func <= 5)
@@ -1227,26 +1227,66 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 	return 0;
 }
 
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = 0;
+	if (bp->link_vars.link_up) {
+		sel_phy_idx = EXT_PHY1;
+		/* In case link is SERDES, check if the EXT_PHY2 is the one */
+		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+			sel_phy_idx = EXT_PHY2;
+	} else {
+
+		switch (bnx2x_phy_selection(&bp->link_params)) {
+		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+			sel_phy_idx = EXT_PHY1;
+			break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+			sel_phy_idx = EXT_PHY2;
+			break;
+		}
+	}
+	/*
+	 * The selected active PHY is always reported after swapping (in case
+	 * PHY swapping is enabled). So when swapping is enabled, we need to
+	 * reverse the configuration.
+	 */
+
+	if (bp->link_params.multi_phy_config &
+	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+		if (sel_phy_idx == EXT_PHY1)
+			sel_phy_idx = EXT_PHY2;
+		else if (sel_phy_idx == EXT_PHY2)
+			sel_phy_idx = EXT_PHY1;
+	}
+	return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
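bnx2x_get_link_cfg_idx() above mirrors the chosen external phy index when PHY swapping is enabled, since phy selection reports the post-swap order. A sketch of just that reversal as a pure function; the helper name is hypothetical:

#include <stdio.h>

#define EXT_PHY1 1
#define EXT_PHY2 2

/* Mirror the active phy index when swapping is enabled */
static int unswap_phy_idx(int sel_phy_idx, int swapped)
{
	if (!swapped)
		return sel_phy_idx;
	return (sel_phy_idx == EXT_PHY1) ? EXT_PHY2 : EXT_PHY1;
}

int main(void)
{
	printf("%d %d\n", unswap_phy_idx(EXT_PHY1, 1), unswap_phy_idx(EXT_PHY2, 0));
	return 0;
}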
 void bnx2x_calc_fc_adv(struct bnx2x *bp)
 {
+	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
 	switch (bp->link_vars.ieee_fc &
 		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
-		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
+		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
 					  ADVERTISED_Pause);
 		break;
 
 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
-		bp->port.advertising |= (ADVERTISED_Asym_Pause |
+		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
 					 ADVERTISED_Pause);
 		break;
 
 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
-		bp->port.advertising |= ADVERTISED_Asym_Pause;
+		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
 		break;
 
 	default:
-		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
+		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
 					  ADVERTISED_Pause);
 		break;
 	}
@@ -1257,7 +1297,8 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 {
 	if (!BP_NOMCP(bp)) {
 		u8 rc;
-
+		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
+		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
 		/* Initialize link parameters structure variables */
 		/* It is recommended to turn off RX FC for jumbo frames
 		   for better performance */
@@ -1268,8 +1309,10 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 
 		bnx2x_acquire_phy_lock(bp);
 
-		if (load_mode == LOAD_DIAG)
-			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
+		if (load_mode == LOAD_DIAG) {
+			bp->link_params.loopback_mode = LOOPBACK_XGXS;
+			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
+		}
 
 		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 
@@ -1281,7 +1324,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 			bnx2x_link_report(bp);
 		}
-
+		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
 		return rc;
 	}
 	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
@@ -1292,6 +1335,7 @@ void bnx2x_link_set(struct bnx2x *bp)
 {
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 		bnx2x_release_phy_lock(bp);
 
@@ -1310,13 +1354,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
 		BNX2X_ERR("Bootcode is missing - can not reset link\n");
 }
 
-u8 bnx2x_link_test(struct bnx2x *bp)
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
 {
 	u8 rc = 0;
 
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
-		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+				     is_serdes);
 		bnx2x_release_phy_lock(bp);
 	} else
 		BNX2X_ERR("Bootcode is missing - can not test link\n");
@@ -1585,7 +1630,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
  */
 
 /* send the MCP a request, block until there is a reply */
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
 {
 	int func = BP_FUNC(bp);
 	u32 seq = ++bp->fw_seq;
@@ -1594,6 +1639,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
 
 	mutex_lock(&bp->fw_mb_mutex);
+	SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
 	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
 	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
 
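The new param argument above is published through drv_mb_param before the sequenced command lands in drv_mb_header, because writing the header is what hands the mailbox to the firmware. A simplified sketch of that ordering, with a plain struct in place of the driver's SHMEM accessors:

#include <stdio.h>

struct mbox { unsigned int drv_mb_param, drv_mb_header; };

static void fw_command(struct mbox *mb, unsigned int command,
		       unsigned int seq, unsigned int param)
{
	mb->drv_mb_param = param;		/* 1: stage the argument */
	mb->drv_mb_header = command | seq;	/* 2: then trigger the command */
}

int main(void)
{
	struct mbox mb = { 0, 0 };

	fw_command(&mb, 0x10000000, 0x42, 0);
	printf("param=0x%x header=0x%x\n", mb.drv_mb_param, mb.drv_mb_header);
	return 0;
}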
@@ -1715,9 +1761,9 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 
 	/* Report results to MCP */
 	if (dcc_event)
-		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
 	else
-		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 }
 
 /* must be called under the spq lock */
@@ -1959,12 +2005,16 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 static inline void bnx2x_fan_failure(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
-
+	u32 ext_phy_config;
 	/* mark the failure */
-	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
-	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+	ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+
+	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
 	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
-		 bp->link_params.ext_phy_config);
+		 ext_phy_config);
 
 	/* log the failure */
 	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
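The rework above turns the fan-failure mark into a read-modify-write of the shared-memory word itself instead of patching a cached copy in link_params. A stand-alone sketch of that field update; the mask values are stand-ins for the PORT_HW_CFG_* constants:

#include <stdio.h>

#define TYPE_MASK	0x0000ff00	/* stand-in for ..._EXT_PHY_TYPE_MASK */
#define TYPE_FAILURE	0x0000fd00	/* stand-in for ..._EXT_PHY_TYPE_FAILURE */

int main(void)
{
	unsigned int ext_phy_config = 0x12345678;	/* value read from shmem */

	ext_phy_config &= ~TYPE_MASK;	/* clear the phy-type field */
	ext_phy_config |= TYPE_FAILURE;	/* mark the failure */
	printf("write back 0x%08x\n", ext_phy_config);
	return 0;
}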
@@ -1976,7 +2026,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 {
 	int port = BP_PORT(bp);
 	int reg_offset;
-	u32 val, swap_val, swap_override;
+	u32 val;
 
 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -1990,30 +2040,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		/* Fan failure attention */
-		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-			/* Low power mode is controlled by GPIO 2 */
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-			/* The PHY reset is controlled by GPIO 1 */
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-			/* The PHY reset is controlled by GPIO 1 */
-			/* fake the port number to cancel the swap done in
-			   set_gpio() */
-			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-			port = (swap_val && swap_override) ^ 1;
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-			break;
-
-		default:
-			break;
-		}
+		bnx2x_hw_reset_phy(&bp->link_params);
 		bnx2x_fan_failure(bp);
 	}
 
@@ -3803,10 +3830,9 @@ static const struct {
 
 static void enable_blocks_parity(struct bnx2x *bp)
 {
-	int i, mask_arr_len =
-		sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
+	int i;
 
-	for (i = 0; i < mask_arr_len; i++)
+	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
 		REG_WR(bp, bnx2x_parity_mask[i].addr,
 			bnx2x_parity_mask[i].mask);
 }
@@ -3862,17 +3888,12 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
 	 */
 	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
 		for (port = PORT_0; port < PORT_MAX; port++) {
-			u32 phy_type =
-				SHMEM_RD(bp, dev_info.port_hw_config[port].
-					 external_phy_config) &
-				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
 			is_required |=
-				((phy_type ==
-				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
-				 (phy_type ==
-				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
-				 (phy_type ==
-				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
+				bnx2x_fan_failure_det_req(
+					bp,
+					bp->common.shmem_base,
+					bp->common.shmem2_base,
+					port);
 		}
 
 	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
@@ -4139,17 +4160,9 @@ static int bnx2x_init_common(struct bnx2x *bp)
 		return -EBUSY;
 	}
 
-	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-		bp->port.need_hw_lock = 1;
-		break;
-
-	default:
-		break;
-	}
+	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+						       bp->common.shmem_base,
+						       bp->common.shmem2_base);
 
 	bnx2x_setup_fan_failure_detection(bp);
 
@@ -4162,7 +4175,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
-		bnx2x_common_init_phy(bp, bp->common.shmem_base);
+		bnx2x_common_init_phy(bp, bp->common.shmem_base,
+				      bp->common.shmem2_base);
 		bnx2x_release_phy_lock(bp);
 	} else
 		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
@@ -4297,60 +4311,17 @@ static int bnx2x_init_port(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
 	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-
-	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		{
-		u32 swap_val, swap_override, aeu_gpio_mask, offset;
-
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
-
-		/* The GPIO should be swapped if the swap register is
-		   set and active */
-		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-
-		/* Select function upon port-swap configuration */
-		if (port == 0) {
-			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
-			aeu_gpio_mask = (swap_val && swap_override) ?
-				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
-				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
-		} else {
-			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
-			aeu_gpio_mask = (swap_val && swap_override) ?
-				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
-				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
-		}
-		val = REG_RD(bp, offset);
-		/* add GPIO3 to group */
-		val |= aeu_gpio_mask;
-		REG_WR(bp, offset, val);
-		}
-		bp->port.need_hw_lock = 1;
-		break;
-
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-		bp->port.need_hw_lock = 1;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-		/* add SPIO 5 to group 0 */
-		{
+	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+						       bp->common.shmem_base,
+						       bp->common.shmem2_base);
+	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
+				      bp->common.shmem2_base, port)) {
 		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 		val = REG_RD(bp, reg_addr);
 		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
 		REG_WR(bp, reg_addr, val);
-		}
-		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-		bp->port.need_hw_lock = 1;
-		break;
-	default:
-		break;
 	}
-
 	bnx2x__link_reset(bp);
 
 	return 0;
@@ -4480,7 +4451,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
 	/* Reset PCIE errors for debug */
 	REG_WR(bp, 0x2114, 0xffffffff);
 	REG_WR(bp, 0x2120, 0xffffffff);
-
+	bnx2x_phy_probe(&bp->link_params);
 	return 0;
 }
 
@@ -5302,7 +5273,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
 
 unload_error:
 	if (!BP_NOMCP(bp))
-		reset_code = bnx2x_fw_command(bp, reset_code);
+		reset_code = bnx2x_fw_command(bp, reset_code, 0);
 	else {
 		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
@@ -5327,7 +5298,7 @@ unload_error:
 
 	/* Report UNLOAD_DONE to MCP */
 	if (!BP_NOMCP(bp))
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 
 }
 
@@ -5892,13 +5863,14 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		bp->fw_seq =
 		       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 			DRV_MSG_SEQ_NUMBER_MASK);
-		reset_code = bnx2x_fw_command(bp, reset_code);
+		reset_code = bnx2x_fw_command(bp, reset_code, 0);
 
 		/* if UNDI is loaded on the other port */
 		if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 
 			/* send "DONE" for previous unload */
-			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+			bnx2x_fw_command(bp,
+					 DRV_MSG_CODE_UNLOAD_DONE, 0);
 
 			/* unload UNDI on port 1 */
 			bp->func = 1;
@@ -5907,7 +5879,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 				DRV_MSG_SEQ_NUMBER_MASK);
 			reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
-			bnx2x_fw_command(bp, reset_code);
+			bnx2x_fw_command(bp, reset_code, 0);
 		}
 
 		/* now it's safe to release the lock */
@@ -5949,7 +5921,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
 
 		/* send unload done to the MCP */
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 
 		/* restore our func and fw_seq */
 		bp->func = func;
@@ -5997,6 +5969,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
 	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
 	bp->link_params.shmem_base = bp->common.shmem_base;
+	bp->link_params.shmem2_base = bp->common.shmem2_base;
 	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
 		       bp->common.shmem_base, bp->common.shmem2_base);
 
@@ -6039,8 +6012,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6039 "please upgrade BC\n", BNX2X_BC_VER, val); 6012 "please upgrade BC\n", BNX2X_BC_VER, val);
6040 } 6013 }
6041 bp->link_params.feature_config_flags |= 6014 bp->link_params.feature_config_flags |=
6042 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? 6015 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
6043 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 6016 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6017 bp->link_params.feature_config_flags |=
6018 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
6019 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
6044 6020
6045 if (BP_E1HVN(bp) == 0) { 6021 if (BP_E1HVN(bp) == 0) {
6046 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 6022 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
@@ -6064,194 +6040,55 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
 						    u32 switch_cfg)
 {
-	int port = BP_PORT(bp);
-	u32 ext_phy_type;
-
-	switch (switch_cfg) {
-	case SWITCH_CFG_1G:
-		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
-
-		ext_phy_type =
-			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10baseT_Half |
-					       SUPPORTED_10baseT_Full |
-					       SUPPORTED_100baseT_Half |
-					       SUPPORTED_100baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_2500baseX_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10baseT_Half |
-					       SUPPORTED_10baseT_Full |
-					       SUPPORTED_100baseT_Half |
-					       SUPPORTED_100baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
+	int cfg_size = 0, idx, port = BP_PORT(bp);
+
+	/* Aggregation of supported attributes of all external phys */
+	bp->port.supported[0] = 0;
+	bp->port.supported[1] = 0;
+	switch (bp->link_params.num_phys) {
+	case 1:
+		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+		cfg_size = 1;
+		break;
+	case 2:
+		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+		cfg_size = 1;
+		break;
+	case 3:
+		if (bp->link_params.multi_phy_config &
+		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+			bp->port.supported[1] =
+				bp->link_params.phy[EXT_PHY1].supported;
+			bp->port.supported[0] =
+				bp->link_params.phy[EXT_PHY2].supported;
+		} else {
+			bp->port.supported[0] =
+				bp->link_params.phy[EXT_PHY1].supported;
+			bp->port.supported[1] =
+				bp->link_params.phy[EXT_PHY2].supported;
+		}
+		cfg_size = 2;
+		break;
+	}
 
-		default:
-			BNX2X_ERR("NVRAM config error. "
-				  "BAD SerDes ext_phy_config 0x%x\n",
-				  bp->link_params.ext_phy_config);
+	if (!(bp->port.supported[0] || bp->port.supported[1])) {
+		BNX2X_ERR("NVRAM config error. BAD phy config."
+			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
+			  SHMEM_RD(bp,
+			   dev_info.port_hw_config[port].external_phy_config),
+			  SHMEM_RD(bp,
+			   dev_info.port_hw_config[port].external_phy_config2));
 		return;
 	}
 
+	switch (switch_cfg) {
+	case SWITCH_CFG_1G:
 		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
 					   port*0x10);
 		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
 		break;
 
 	case SWITCH_CFG_10G:
-		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
-
-		ext_phy_type =
-			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10baseT_Half |
-					       SUPPORTED_10baseT_Full |
-					       SUPPORTED_100baseT_Half |
-					       SUPPORTED_100baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_2500baseX_Full |
-					       SUPPORTED_10000baseT_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_2500baseX_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10baseT_Half |
-					       SUPPORTED_10baseT_Full |
-					       SUPPORTED_100baseT_Half |
-					       SUPPORTED_100baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_10000baseT_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
-			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
-				  bp->link_params.ext_phy_config);
-			break;
-
-		default:
-			BNX2X_ERR("NVRAM config error. "
-				  "BAD XGXS ext_phy_config 0x%x\n",
-				  bp->link_params.ext_phy_config);
-			return;
-		}
-
 		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
 					   port*0x18);
 		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
@@ -6260,164 +6097,183 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
 
 	default:
 		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
-			  bp->port.link_config);
+			  bp->port.link_config[0]);
 		return;
 	}
-	bp->link_params.phy_addr = bp->port.phy_addr;
-
-	/* mask what we support according to speed_cap_mask */
-	if (!(bp->link_params.speed_cap_mask &
-	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
-		bp->port.supported &= ~SUPPORTED_10baseT_Half;
-
-	if (!(bp->link_params.speed_cap_mask &
-	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
-		bp->port.supported &= ~SUPPORTED_10baseT_Full;
-
-	if (!(bp->link_params.speed_cap_mask &
-	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
-		bp->port.supported &= ~SUPPORTED_100baseT_Half;
-
-	if (!(bp->link_params.speed_cap_mask &
-	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
-		bp->port.supported &= ~SUPPORTED_100baseT_Full;
-
-	if (!(bp->link_params.speed_cap_mask &
-	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
-		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
-					SUPPORTED_1000baseT_Full);
-
-	if (!(bp->link_params.speed_cap_mask &
-	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
-		bp->port.supported &= ~SUPPORTED_2500baseX_Full;
-
-	if (!(bp->link_params.speed_cap_mask &
-	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
-		bp->port.supported &= ~SUPPORTED_10000baseT_Full;
+	/* mask what we support according to speed_cap_mask per configuration */
+	for (idx = 0; idx < cfg_size; idx++) {
+		if (!(bp->link_params.speed_cap_mask[idx] &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
+						     SUPPORTED_1000baseT_Full);
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+
+	}
 
-	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
+	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+		       bp->port.supported[1]);
 }
 
 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
 {
-	bp->link_params.req_duplex = DUPLEX_FULL;
-
-	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+	u32 link_config, idx, cfg_size = 0;
+	bp->port.advertising[0] = 0;
+	bp->port.advertising[1] = 0;
+	switch (bp->link_params.num_phys) {
+	case 1:
+	case 2:
+		cfg_size = 1;
+		break;
+	case 3:
+		cfg_size = 2;
+		break;
+	}
+	for (idx = 0; idx < cfg_size; idx++) {
+		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+		link_config = bp->port.link_config[idx];
+		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
 		case PORT_FEATURE_LINK_SPEED_AUTO:
-			if (bp->port.supported & SUPPORTED_Autoneg) {
-				bp->link_params.req_line_speed = SPEED_AUTO_NEG;
-				bp->port.advertising = bp->port.supported;
+			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_AUTO_NEG;
+				bp->port.advertising[idx] |=
+					bp->port.supported[idx];
 			} else {
-				u32 ext_phy_type =
-				    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
-				if ((ext_phy_type ==
-				     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
-				    (ext_phy_type ==
-				     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
-					/* force 10G, no AN */
-					bp->link_params.req_line_speed = SPEED_10000;
-					bp->port.advertising =
-						(ADVERTISED_10000baseT_Full |
-						 ADVERTISED_FIBRE);
-					break;
-				}
-				BNX2X_ERR("NVRAM config error. "
-					  "Invalid link_config 0x%x"
-					  "  Autoneg not supported\n",
-					  bp->port.link_config);
-				return;
+				/* force 10G, no AN */
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10000baseT_Full |
+					 ADVERTISED_FIBRE);
+				continue;
 			}
 			break;
 
 		case PORT_FEATURE_LINK_SPEED_10M_FULL:
-			if (bp->port.supported & SUPPORTED_10baseT_Full) {
-				bp->link_params.req_line_speed = SPEED_10;
-				bp->port.advertising = (ADVERTISED_10baseT_Full |
+			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10baseT_Full |
 							ADVERTISED_TP);
 			} else {
 				BNX2X_ERROR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
 					    " speed_cap_mask 0x%x\n",
-					    bp->port.link_config,
-					    bp->link_params.speed_cap_mask);
+					    link_config,
+					    bp->link_params.speed_cap_mask[idx]);
 				return;
 			}
 			break;
 
 		case PORT_FEATURE_LINK_SPEED_10M_HALF:
-			if (bp->port.supported & SUPPORTED_10baseT_Half) {
-				bp->link_params.req_line_speed = SPEED_10;
-				bp->link_params.req_duplex = DUPLEX_HALF;
-				bp->port.advertising = (ADVERTISED_10baseT_Half |
+			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10;
+				bp->link_params.req_duplex[idx] =
+					DUPLEX_HALF;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10baseT_Half |
 							ADVERTISED_TP);
 			} else {
 				BNX2X_ERROR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
 					    " speed_cap_mask 0x%x\n",
-					    bp->port.link_config,
-					    bp->link_params.speed_cap_mask);
+					    link_config,
+					    bp->link_params.speed_cap_mask[idx]);
 				return;
 			}
 			break;
 
 		case PORT_FEATURE_LINK_SPEED_100M_FULL:
-			if (bp->port.supported & SUPPORTED_100baseT_Full) {
-				bp->link_params.req_line_speed = SPEED_100;
-				bp->port.advertising = (ADVERTISED_100baseT_Full |
+			if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_100;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_100baseT_Full |
 							ADVERTISED_TP);
 			} else {
 				BNX2X_ERROR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
 					    " speed_cap_mask 0x%x\n",
-					    bp->port.link_config,
-					    bp->link_params.speed_cap_mask);
+					    link_config,
+					    bp->link_params.speed_cap_mask[idx]);
 				return;
 			}
 			break;
 
 		case PORT_FEATURE_LINK_SPEED_100M_HALF:
-			if (bp->port.supported & SUPPORTED_100baseT_Half) {
-				bp->link_params.req_line_speed = SPEED_100;
-				bp->link_params.req_duplex = DUPLEX_HALF;
-				bp->port.advertising = (ADVERTISED_100baseT_Half |
+			if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
+				bp->link_params.req_line_speed[idx] = SPEED_100;
+				bp->link_params.req_duplex[idx] = DUPLEX_HALF;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_100baseT_Half |
 							ADVERTISED_TP);
 			} else {
 				BNX2X_ERROR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
 					    " speed_cap_mask 0x%x\n",
-					    bp->port.link_config,
-					    bp->link_params.speed_cap_mask);
+					    link_config,
+					    bp->link_params.speed_cap_mask[idx]);
 				return;
 			}
 			break;
 
 		case PORT_FEATURE_LINK_SPEED_1G:
-			if (bp->port.supported & SUPPORTED_1000baseT_Full) {
-				bp->link_params.req_line_speed = SPEED_1000;
-				bp->port.advertising = (ADVERTISED_1000baseT_Full |
+			if (bp->port.supported[idx] &
+			    SUPPORTED_1000baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_1000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_1000baseT_Full |
 							ADVERTISED_TP);
 			} else {
 				BNX2X_ERROR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
 					    " speed_cap_mask 0x%x\n",
-					    bp->port.link_config,
-					    bp->link_params.speed_cap_mask);
+					    link_config,
+					    bp->link_params.speed_cap_mask[idx]);
 				return;
 			}
 			break;
 
 		case PORT_FEATURE_LINK_SPEED_2_5G:
-			if (bp->port.supported & SUPPORTED_2500baseX_Full) {
-				bp->link_params.req_line_speed = SPEED_2500;
-				bp->port.advertising = (ADVERTISED_2500baseX_Full |
+			if (bp->port.supported[idx] &
+			    SUPPORTED_2500baseX_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_2500;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_2500baseX_Full |
 							ADVERTISED_TP);
 			} else {
 				BNX2X_ERROR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
6418 " speed_cap_mask 0x%x\n", 6274 " speed_cap_mask 0x%x\n",
6419 bp->port.link_config, 6275 link_config,
6420 bp->link_params.speed_cap_mask); 6276 bp->link_params.speed_cap_mask[idx]);
6421 return; 6277 return;
6422 } 6278 }
6423 break; 6279 break;
@@ -6425,16 +6281,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6425 case PORT_FEATURE_LINK_SPEED_10G_CX4: 6281 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6426 case PORT_FEATURE_LINK_SPEED_10G_KX4: 6282 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6427 case PORT_FEATURE_LINK_SPEED_10G_KR: 6283 case PORT_FEATURE_LINK_SPEED_10G_KR:
6428 if (bp->port.supported & SUPPORTED_10000baseT_Full) { 6284 if (bp->port.supported[idx] &
6429 bp->link_params.req_line_speed = SPEED_10000; 6285 SUPPORTED_10000baseT_Full) {
6430 bp->port.advertising = (ADVERTISED_10000baseT_Full | 6286 bp->link_params.req_line_speed[idx] =
6287 SPEED_10000;
6288 bp->port.advertising[idx] |=
6289 (ADVERTISED_10000baseT_Full |
6431 ADVERTISED_FIBRE); 6290 ADVERTISED_FIBRE);
6432 } else { 6291 } else {
6433 BNX2X_ERROR("NVRAM config error. " 6292 BNX2X_ERROR("NVRAM config error. "
6434 "Invalid link_config 0x%x" 6293 "Invalid link_config 0x%x"
6435 " speed_cap_mask 0x%x\n", 6294 " speed_cap_mask 0x%x\n",
6436 bp->port.link_config, 6295 link_config,
6437 bp->link_params.speed_cap_mask); 6296 bp->link_params.speed_cap_mask[idx]);
6438 return; 6297 return;
6439 } 6298 }
6440 break; 6299 break;
@@ -6442,23 +6301,28 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6442 default: 6301 default:
6443 BNX2X_ERROR("NVRAM config error. " 6302 BNX2X_ERROR("NVRAM config error. "
6444 "BAD link speed link_config 0x%x\n", 6303 "BAD link speed link_config 0x%x\n",
6445 bp->port.link_config); 6304 link_config);
6446 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 6305 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
6447 bp->port.advertising = bp->port.supported; 6306 bp->port.advertising[idx] = bp->port.supported[idx];
6448 break; 6307 break;
6449 } 6308 }
6450 6309
6451 bp->link_params.req_flow_ctrl = (bp->port.link_config & 6310 bp->link_params.req_flow_ctrl[idx] = (link_config &
6452 PORT_FEATURE_FLOW_CONTROL_MASK); 6311 PORT_FEATURE_FLOW_CONTROL_MASK);
6453 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && 6312 if ((bp->link_params.req_flow_ctrl[idx] ==
6454 !(bp->port.supported & SUPPORTED_Autoneg)) 6313 BNX2X_FLOW_CTRL_AUTO) &&
6455 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; 6314 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
6315 bp->link_params.req_flow_ctrl[idx] =
6316 BNX2X_FLOW_CTRL_NONE;
6317 }
6456 6318
6457 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" 6319 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
6458 " advertising 0x%x\n", 6320 " 0x%x advertising 0x%x\n",
6459 bp->link_params.req_line_speed, 6321 bp->link_params.req_line_speed[idx],
6460 bp->link_params.req_duplex, 6322 bp->link_params.req_duplex[idx],
6461 bp->link_params.req_flow_ctrl, bp->port.advertising); 6323 bp->link_params.req_flow_ctrl[idx],
6324 bp->port.advertising[idx]);
6325 }
6462} 6326}
6463 6327
6464static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 6328static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
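The whole hunk above applies one transformation: every link parameter that used to be a scalar (req_line_speed, req_duplex, req_flow_ctrl, advertising, supported, speed_cap_mask, link_config) becomes a small array indexed per configuration set, and num_phys decides whether one or two sets exist. A standalone C sketch of just that control flow, with invented constants and none of the driver's real types:

#include <stdio.h>

#define SPEED_AUTO_NEG 0
#define DUPLEX_FULL 1

struct link_params {
	int num_phys;
	int req_line_speed[2];
	int req_duplex[2];
};

static void settings_requested(struct link_params *lp)
{
	/* cases 1 and 2 use one config set, case 3 (dual media) uses two */
	int cfg_size = (lp->num_phys == 3) ? 2 : 1;

	for (int idx = 0; idx < cfg_size; idx++) {
		lp->req_duplex[idx] = DUPLEX_FULL;
		lp->req_line_speed[idx] = SPEED_AUTO_NEG;
		printf("cfg %d: speed %d duplex %d\n",
		       idx, lp->req_line_speed[idx], lp->req_duplex[idx]);
	}
}

int main(void)
{
	struct link_params lp = { .num_phys = 3 };

	settings_requested(&lp); /* two config sets on a dual-media board */
	return 0;
}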
@@ -6474,48 +6338,28 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6474 int port = BP_PORT(bp); 6338 int port = BP_PORT(bp);
6475 u32 val, val2; 6339 u32 val, val2;
6476 u32 config; 6340 u32 config;
6477 u16 i; 6341 u32 ext_phy_type, ext_phy_config;
6478 u32 ext_phy_type;
6479 6342
6480 bp->link_params.bp = bp; 6343 bp->link_params.bp = bp;
6481 bp->link_params.port = port; 6344 bp->link_params.port = port;
6482 6345
6483 bp->link_params.lane_config = 6346 bp->link_params.lane_config =
6484 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 6347 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6485 bp->link_params.ext_phy_config =
6486 SHMEM_RD(bp,
6487 dev_info.port_hw_config[port].external_phy_config);
6488 /* BCM8727_NOC => BCM8727 no over current */
6489 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6490 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6491 bp->link_params.ext_phy_config &=
6492 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6493 bp->link_params.ext_phy_config |=
6494 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6495 bp->link_params.feature_config_flags |=
6496 FEATURE_CONFIG_BCM8727_NOC;
6497 }
6498 6348
6499 bp->link_params.speed_cap_mask = 6349 bp->link_params.speed_cap_mask[0] =
6500 SHMEM_RD(bp, 6350 SHMEM_RD(bp,
6501 dev_info.port_hw_config[port].speed_capability_mask); 6351 dev_info.port_hw_config[port].speed_capability_mask);
6502 6352 bp->link_params.speed_cap_mask[1] =
6503 bp->port.link_config = 6353 SHMEM_RD(bp,
6354 dev_info.port_hw_config[port].speed_capability_mask2);
6355 bp->port.link_config[0] =
6504 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 6356 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6505 6357
6506 /* Get the 4 lanes xgxs config rx and tx */ 6358 bp->port.link_config[1] =
6507 for (i = 0; i < 2; i++) { 6359 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
6508 val = SHMEM_RD(bp,
6509 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6510 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6511 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6512
6513 val = SHMEM_RD(bp,
6514 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6515 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6516 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6517 }
6518 6360
6361 bp->link_params.multi_phy_config =
6362 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
6519 /* If the device is capable of WoL, set the default state according 6363 /* If the device is capable of WoL, set the default state according
6520 * to the HW 6364 * to the HW
6521 */ 6365 */
@@ -6523,14 +6367,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6523 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 6367 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6524 (config & PORT_FEATURE_WOL_ENABLED)); 6368 (config & PORT_FEATURE_WOL_ENABLED));
6525 6369
6526 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x" 6370 BNX2X_DEV_INFO("lane_config 0x%08x "
6527 " speed_cap_mask 0x%08x link_config 0x%08x\n", 6371 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
6528 bp->link_params.lane_config, 6372 bp->link_params.lane_config,
6529 bp->link_params.ext_phy_config, 6373 bp->link_params.speed_cap_mask[0],
6530 bp->link_params.speed_cap_mask, bp->port.link_config); 6374 bp->port.link_config[0]);
6531 6375
6532 bp->link_params.switch_cfg |= (bp->port.link_config & 6376 bp->link_params.switch_cfg = (bp->port.link_config[0] &
6533 PORT_FEATURE_CONNECTED_SWITCH_MASK); 6377 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6378 bnx2x_phy_probe(&bp->link_params);
6534 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 6379 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6535 6380
6536 bnx2x_link_settings_requested(bp); 6381 bnx2x_link_settings_requested(bp);
@@ -6539,14 +6384,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6539 * If connected directly, work with the internal PHY, otherwise, work 6384 * If connected directly, work with the internal PHY, otherwise, work
6540 * with the external PHY 6385 * with the external PHY
6541 */ 6386 */
6542 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 6387 ext_phy_config =
6388 SHMEM_RD(bp,
6389 dev_info.port_hw_config[port].external_phy_config);
6390 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6543 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 6391 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6544 bp->mdio.prtad = bp->link_params.phy_addr; 6392 bp->mdio.prtad = bp->port.phy_addr;
6545 6393
6546 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 6394 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6547 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 6395 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6548 bp->mdio.prtad = 6396 bp->mdio.prtad =
6549 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); 6397 XGXS_EXT_PHY_ADDR(ext_phy_config);
6550 6398
6551 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 6399 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6552 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 6400 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
@@ -6982,23 +6830,15 @@ static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6982 struct bnx2x *bp = netdev_priv(netdev); 6830 struct bnx2x *bp = netdev_priv(netdev);
6983 u16 value; 6831 u16 value;
6984 int rc; 6832 int rc;
6985 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6986 6833
6987 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 6834 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6988 prtad, devad, addr); 6835 prtad, devad, addr);
6989 6836
6990 if (prtad != bp->mdio.prtad) {
6991 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
6992 prtad, bp->mdio.prtad);
6993 return -EINVAL;
6994 }
6995
6996 /* The HW expects different devad if CL22 is used */ 6837 /* The HW expects different devad if CL22 is used */
6997 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 6838 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6998 6839
6999 bnx2x_acquire_phy_lock(bp); 6840 bnx2x_acquire_phy_lock(bp);
7000 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad, 6841 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
7001 devad, addr, &value);
7002 bnx2x_release_phy_lock(bp); 6842 bnx2x_release_phy_lock(bp);
7003 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 6843 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7004 6844
@@ -7012,24 +6852,16 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7012 u16 addr, u16 value) 6852 u16 addr, u16 value)
7013{ 6853{
7014 struct bnx2x *bp = netdev_priv(netdev); 6854 struct bnx2x *bp = netdev_priv(netdev);
7015 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7016 int rc; 6855 int rc;
7017 6856
7018 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x," 6857 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7019 " value 0x%x\n", prtad, devad, addr, value); 6858 " value 0x%x\n", prtad, devad, addr, value);
7020 6859
7021 if (prtad != bp->mdio.prtad) {
7022 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
7023 prtad, bp->mdio.prtad);
7024 return -EINVAL;
7025 }
7026
7027 /* The HW expects different devad if CL22 is used */ 6860 /* The HW expects different devad if CL22 is used */
7028 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 6861 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7029 6862
7030 bnx2x_acquire_phy_lock(bp); 6863 bnx2x_acquire_phy_lock(bp);
7031 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad, 6864 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
7032 devad, addr, value);
7033 bnx2x_release_phy_lock(bp); 6865 bnx2x_release_phy_lock(bp);
7034 return rc; 6866 return rc;
7035} 6867}
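Both MDIO entry points above now route through the new bnx2x_phy_read()/bnx2x_phy_write() helpers and drop the per-call prtad check, but they keep one line of shared logic: a clause-22 request (MDIO_DEVAD_NONE) is rewritten to a default device address before the clause-45 helper runs. A minimal model of just that fallback, with placeholder constant values:

#include <stdio.h>

#define MDIO_DEVAD_NONE      (-1)
#define DEFAULT_PHY_DEV_ADDR 3	/* placeholder, not the real value */

static int normalize_devad(int devad)
{
	/* the HW expects a concrete devad even for clause-22 access */
	return (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
}

int main(void)
{
	printf("%d %d\n", normalize_devad(MDIO_DEVAD_NONE), normalize_devad(7));
	return 0;
}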
@@ -7259,7 +7091,7 @@ static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7259 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; 7091 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7260} 7092}
7261 7093
7262static int __devinit bnx2x_check_firmware(struct bnx2x *bp) 7094static int bnx2x_check_firmware(struct bnx2x *bp)
7263{ 7095{
7264 const struct firmware *firmware = bp->firmware; 7096 const struct firmware *firmware = bp->firmware;
7265 struct bnx2x_fw_file_hdr *fw_hdr; 7097 struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7370,7 +7202,7 @@ do { \
7370 (u8 *)bp->arr, len); \ 7202 (u8 *)bp->arr, len); \
7371} while (0) 7203} while (0)
7372 7204
7373static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev) 7205int bnx2x_init_firmware(struct bnx2x *bp)
7374{ 7206{
7375 const char *fw_file_name; 7207 const char *fw_file_name;
7376 struct bnx2x_fw_file_hdr *fw_hdr; 7208 struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7381,21 +7213,21 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7381 else if (CHIP_IS_E1H(bp)) 7213 else if (CHIP_IS_E1H(bp))
7382 fw_file_name = FW_FILE_NAME_E1H; 7214 fw_file_name = FW_FILE_NAME_E1H;
7383 else { 7215 else {
7384 dev_err(dev, "Unsupported chip revision\n"); 7216 BNX2X_ERR("Unsupported chip revision\n");
7385 return -EINVAL; 7217 return -EINVAL;
7386 } 7218 }
7387 7219
7388 dev_info(dev, "Loading %s\n", fw_file_name); 7220 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7389 7221
7390 rc = request_firmware(&bp->firmware, fw_file_name, dev); 7222 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7391 if (rc) { 7223 if (rc) {
7392 dev_err(dev, "Can't load firmware file %s\n", fw_file_name); 7224 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7393 goto request_firmware_exit; 7225 goto request_firmware_exit;
7394 } 7226 }
7395 7227
7396 rc = bnx2x_check_firmware(bp); 7228 rc = bnx2x_check_firmware(bp);
7397 if (rc) { 7229 if (rc) {
7398 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name); 7230 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7399 goto request_firmware_exit; 7231 goto request_firmware_exit;
7400 } 7232 }
7401 7233
@@ -7473,13 +7305,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7473 if (rc) 7305 if (rc)
7474 goto init_one_exit; 7306 goto init_one_exit;
7475 7307
7476 /* Set init arrays */
7477 rc = bnx2x_init_firmware(bp, &pdev->dev);
7478 if (rc) {
7479 dev_err(&pdev->dev, "Error loading firmware\n");
7480 goto init_one_exit;
7481 }
7482
7483 rc = register_netdev(dev); 7308 rc = register_netdev(dev);
7484 if (rc) { 7309 if (rc) {
7485 dev_err(&pdev->dev, "Cannot register net device\n"); 7310 dev_err(&pdev->dev, "Cannot register net device\n");
@@ -7530,11 +7355,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7530 /* Make sure RESET task is not scheduled before continuing */ 7355 /* Make sure RESET task is not scheduled before continuing */
7531 cancel_delayed_work_sync(&bp->reset_task); 7356 cancel_delayed_work_sync(&bp->reset_task);
7532 7357
7533 kfree(bp->init_ops_offsets);
7534 kfree(bp->init_ops);
7535 kfree(bp->init_data);
7536 release_firmware(bp->firmware);
7537
7538 if (bp->regview) 7358 if (bp->regview)
7539 iounmap(bp->regview); 7359 iounmap(bp->regview);
7540 7360
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index a1f3bf0cd630..6be0d09ad3fd 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -4964,6 +4964,8 @@
4964#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001 4964#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
4965#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040 4965#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
4966#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14 4966#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
4967#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
4968#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
4967#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004 4969#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
4968#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018 4970#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
4969#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3 4971#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
@@ -5135,28 +5137,35 @@ The other bits are reserved and should be zero */
5135#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005 5137#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
5136#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007 5138#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
5137#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff 5139#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
5138#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
5139#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02 5140#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
5140#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05 5141#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
5141#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 5142#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
5142#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e 5143#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
5144#define MDIO_PMA_REG_8727_PCS_GP 0xc842
5145
5146#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
5143 5147
5144#define MDIO_PMA_REG_8073_CHIP_REV 0xc801 5148#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
5145#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820 5149#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
5146#define MDIO_PMA_REG_8073_XAUI_WA 0xc841 5150#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
5151#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
5147 5152
5148#define MDIO_PMA_REG_7101_RESET 0xc000 5153#define MDIO_PMA_REG_7101_RESET 0xc000
5149#define MDIO_PMA_REG_7107_LED_CNTL 0xc007 5154#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
5155#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
5150#define MDIO_PMA_REG_7101_VER1 0xc026 5156#define MDIO_PMA_REG_7101_VER1 0xc026
5151#define MDIO_PMA_REG_7101_VER2 0xc027 5157#define MDIO_PMA_REG_7101_VER2 0xc027
5152 5158
5153#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811 5159#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
5154#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c 5160#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
5155#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f 5161#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
5156#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 5162#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
5157#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834 5163#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
5158#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 5164#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
5159#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b 5165#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
5166#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
5167#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
5168#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
5160 5169
5161 5170
5162#define MDIO_WIS_DEVAD 0x2 5171#define MDIO_WIS_DEVAD 0x2
@@ -5188,6 +5197,8 @@ The other bits are reserved and should be zero */
5188#define MDIO_XS_8706_REG_BANK_RX3 0x80ec 5197#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
5189#define MDIO_XS_8706_REG_BANK_RXA 0x80fc 5198#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
5190 5199
5200#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
5201
5191#define MDIO_AN_DEVAD 0x7 5202#define MDIO_AN_DEVAD 0x7
5192/*ieee*/ 5203/*ieee*/
5193#define MDIO_AN_REG_CTRL 0x0000 5204#define MDIO_AN_REG_CTRL 0x0000
@@ -5210,14 +5221,40 @@ The other bits are reserved and should be zero */
5210#define MDIO_AN_REG_CL37_FC_LP 0xffe5 5221#define MDIO_AN_REG_CL37_FC_LP 0xffe5
5211 5222
5212#define MDIO_AN_REG_8073_2_5G 0x8329 5223#define MDIO_AN_REG_8073_2_5G 0x8329
5224#define MDIO_AN_REG_8073_BAM 0x8350
5213 5225
5226#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
5214#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 5227#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
5228#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
5215#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 5229#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
5230#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
5216#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 5231#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
5217#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 5232#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
5218#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 5233#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
5234#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
5219#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc 5235#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
5220 5236
5237/* BCM84823 only */
5238#define MDIO_CTL_DEVAD 0x1e
5239#define MDIO_CTL_REG_84823_MEDIA 0x401a
5240#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
5241 /* These pins configure the BCM84823 interface to MAC after reset. */
5242#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
5243#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
5244 /* These pins configure the BCM84823 interface to Line after reset. */
5245#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
5246#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
5247#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
 5248 /* When this pin is active high during reset, the 10GBASE-T core is
 5249 * powered down; when it is active low, the 10GBASE-T core is powered up.
 5250 */
5251#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
5252#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
5253#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
5254#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
5255#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
5256
5257
5221#define IGU_FUNC_BASE 0x0400 5258#define IGU_FUNC_BASE 0x0400
5222 5259
5223#define IGU_ADDR_MSIX 0x0000 5260#define IGU_ADDR_MSIX 0x0000
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index c74724461020..efa1403ebf82 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -969,6 +969,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
969{ 969{
970 struct bnx2x_eth_stats *estats = &bp->eth_stats; 970 struct bnx2x_eth_stats *estats = &bp->eth_stats;
971 struct net_device_stats *nstats = &bp->dev->stats; 971 struct net_device_stats *nstats = &bp->dev->stats;
972 unsigned long tmp;
972 int i; 973 int i;
973 974
974 nstats->rx_packets = 975 nstats->rx_packets =
@@ -985,10 +986,10 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
985 986
986 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 987 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
987 988
988 nstats->rx_dropped = estats->mac_discard; 989 tmp = estats->mac_discard;
989 for_each_queue(bp, i) 990 for_each_queue(bp, i)
990 nstats->rx_dropped += 991 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
991 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 992 nstats->rx_dropped = tmp;
992 993
993 nstats->tx_dropped = 0; 994 nstats->tx_dropped = 0;
994 995
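The bnx2x_stats.c change above accumulates the drop count into a local unsigned long and publishes it with a single store, instead of repeatedly read-modify-writing the live net_device_stats field while summing. A userspace sketch of the same pattern (queue counts invented):

#include <stdio.h>

struct fake_queue { unsigned int checksum_discard; };

int main(void)
{
	struct fake_queue fp[3] = { {2}, {0}, {5} };
	unsigned long rx_dropped;	/* the "published" counter */
	unsigned long tmp = 7;		/* stands in for mac_discard */

	for (int i = 0; i < 3; i++)
		tmp += fp[i].checksum_discard;
	rx_dropped = tmp;		/* single store at the end */

	printf("rx_dropped = %lu\n", rx_dropped);
	return 0;
}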
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index b1bdc909090f..312b9c8f4f3b 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -143,12 +143,12 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
143 np_clock = of_find_matching_node(NULL, mpc512x_clock_ids); 143 np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
144 if (!np_clock) { 144 if (!np_clock) {
145 dev_err(&ofdev->dev, "couldn't find clock node\n"); 145 dev_err(&ofdev->dev, "couldn't find clock node\n");
146 return -ENODEV; 146 return 0;
147 } 147 }
148 clockctl = of_iomap(np_clock, 0); 148 clockctl = of_iomap(np_clock, 0);
149 if (!clockctl) { 149 if (!clockctl) {
150 dev_err(&ofdev->dev, "couldn't map clock registers\n"); 150 dev_err(&ofdev->dev, "couldn't map clock registers\n");
151 return 0; 151 goto exit_put;
152 } 152 }
153 153
154 /* Determine the MSCAN device index from the physical address */ 154 /* Determine the MSCAN device index from the physical address */
@@ -233,9 +233,9 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
233 clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv); 233 clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
234 234
235exit_unmap: 235exit_unmap:
236 of_node_put(np_clock);
237 iounmap(clockctl); 236 iounmap(clockctl);
238 237exit_put:
238 of_node_put(np_clock);
239 return freq; 239 return freq;
240} 240}
241#else /* !CONFIG_PPC_MPC512x */ 241#else /* !CONFIG_PPC_MPC512x */
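The mpc512x_can_get_clock() fix above is the standard goto-unwind ordering: release resources in reverse order of acquisition, so the path that only holds the node reference skips the iounmap. A self-contained model with stand-in acquire/release functions (none of these are the real OF/iomap APIs):

#include <stdio.h>
#include <stdlib.h>

static void *acquire_node(void)   { return malloc(1); }
static void *map_regs(void *node) { (void)node; return malloc(1); }
static int read_freq(void *regs)  { (void)regs; return 33000000; }

static int get_clock(void)
{
	void *node, *regs;
	int freq = 0;

	node = acquire_node();
	if (!node)
		return 0;		/* nothing held yet */

	regs = map_regs(node);
	if (!regs)
		goto exit_put;		/* undo only the node reference */

	freq = read_freq(regs);
	free(regs);			/* iounmap() in the driver */
exit_put:
	free(node);			/* of_node_put() in the driver */
	return freq;
}

int main(void)
{
	printf("freq = %d\n", get_clock());
	return 0;
}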
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 28c88eeec757..32aaadc4734f 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2149,7 +2149,7 @@ end_copy_pkt:
2149 skb->csum = csum_unfold(~csum); 2149 skb->csum = csum_unfold(~csum);
2150 skb->ip_summed = CHECKSUM_COMPLETE; 2150 skb->ip_summed = CHECKSUM_COMPLETE;
2151 } else 2151 } else
2152 skb->ip_summed = CHECKSUM_NONE; 2152 skb_checksum_none_assert(skb);
2153 return len; 2153 return len;
2154} 2154}
2155 2155
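skb_checksum_none_assert(), adopted here and in several drivers later in this diff, replaces a redundant store with a debug-time check: a freshly received skb already defaults to CHECKSUM_NONE. Roughly its shape, paraphrased into plain C (consult include/linux/skbuff.h for the real helper):

#include <assert.h>

#define CHECKSUM_NONE 0

struct sk_buff_model { int ip_summed; };

static void skb_checksum_none_assert(struct sk_buff_model *skb)
{
#ifndef NDEBUG
	/* debug builds verify the invariant instead of re-storing it */
	assert(skb->ip_summed == CHECKSUM_NONE);
#endif
	(void)skb;
}

int main(void)
{
	struct sk_buff_model skb = { CHECKSUM_NONE };

	skb_checksum_none_assert(&skb);	/* passes: default is already NONE */
	return 0;
}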
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index f01cfdb995de..1950b9a20ecd 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1388,7 +1388,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1388 ++st->rx_cso_good; 1388 ++st->rx_cso_good;
1389 skb->ip_summed = CHECKSUM_UNNECESSARY; 1389 skb->ip_summed = CHECKSUM_UNNECESSARY;
1390 } else 1390 } else
1391 skb->ip_summed = CHECKSUM_NONE; 1391 skb_checksum_none_assert(skb);
1392 1392
1393 if (unlikely(adapter->vlan_grp && p->vlan_valid)) { 1393 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
1394 st->vlan_xtract++; 1394 st->vlan_xtract++;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 599d178df62d..63ebf76d2390 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -314,14 +314,12 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
314 return 0; 314 return 0;
315} 315}
316 316
317#if defined(CONFIG_CHELSIO_T1_1G)
318static const struct mdio_ops mi1_mdio_ops = { 317static const struct mdio_ops mi1_mdio_ops = {
319 .init = mi1_mdio_init, 318 .init = mi1_mdio_init,
320 .read = mi1_mdio_read, 319 .read = mi1_mdio_read,
321 .write = mi1_mdio_write, 320 .write = mi1_mdio_write,
322 .mode_support = MDIO_SUPPORTS_C22 321 .mode_support = MDIO_SUPPORTS_C22
323}; 322};
324#endif
325 323
326#endif 324#endif
327 325
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 09610323a948..2ab6a7c4ffc1 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1022,7 +1022,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1022 if (blks > cp->ethdev->ctx_tbl_len) 1022 if (blks > cp->ethdev->ctx_tbl_len)
1023 return -ENOMEM; 1023 return -ENOMEM;
1024 1024
1025 cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL); 1025 cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1026 if (cp->ctx_arr == NULL) 1026 if (cp->ctx_arr == NULL)
1027 return -ENOMEM; 1027 return -ENOMEM;
1028 1028
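The cnic change swaps kzalloc(blks * size) for kcalloc(blks, size) because kcalloc checks the multiplication for overflow before allocating. A userspace model of that guard (calloc performs the same check internally):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *checked_zalloc(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);
}

int main(void)
{
	void *ok  = checked_zalloc(16, 64);
	void *bad = checked_zalloc(SIZE_MAX, 2);

	printf("ok=%p bad=%p\n", ok, bad);	/* bad is (nil) */
	free(ok);
	return 0;
}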
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index e1f6156b3710..fec939f8f65f 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -38,7 +38,7 @@
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40#include <linux/clk.h> 40#include <linux/clk.h>
41#include <asm/gpio.h> 41#include <linux/gpio.h>
42#include <asm/atomic.h> 42#include <asm/atomic.h>
43 43
44MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 44MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
108#define CPMAC_RX_INT_CLEAR 0x019c 108#define CPMAC_RX_INT_CLEAR 0x019c
109#define CPMAC_MAC_INT_ENABLE 0x01a8 109#define CPMAC_MAC_INT_ENABLE 0x01a8
110#define CPMAC_MAC_INT_CLEAR 0x01ac 110#define CPMAC_MAC_INT_CLEAR 0x01ac
111#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) 111#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
112#define CPMAC_MAC_ADDR_MID 0x01d0 112#define CPMAC_MAC_ADDR_MID 0x01d0
113#define CPMAC_MAC_ADDR_HI 0x01d4 113#define CPMAC_MAC_ADDR_HI 0x01d4
114#define CPMAC_MAC_HASH_LO 0x01d8 114#define CPMAC_MAC_HASH_LO 0x01d8
@@ -227,7 +227,7 @@ static void cpmac_dump_regs(struct net_device *dev)
227 for (i = 0; i < CPMAC_REG_END; i += 4) { 227 for (i = 0; i < CPMAC_REG_END; i += 4) {
228 if (i % 16 == 0) { 228 if (i % 16 == 0) {
229 if (i) 229 if (i)
230 printk("\n"); 230 pr_cont("\n");
231 printk(KERN_DEBUG "%s: reg[%p]:", dev->name, 231 printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
232 priv->regs + i); 232 priv->regs + i);
233 } 233 }
@@ -262,7 +262,7 @@ static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
262 for (i = 0; i < skb->len; i++) { 262 for (i = 0; i < skb->len; i++) {
263 if (i % 16 == 0) { 263 if (i % 16 == 0) {
264 if (i) 264 if (i)
265 printk("\n"); 265 pr_cont("\n");
266 printk(KERN_DEBUG "%s: data[%p]:", dev->name, 266 printk(KERN_DEBUG "%s: data[%p]:", dev->name,
267 skb->data + i); 267 skb->data + i);
268 } 268 }
@@ -391,7 +391,7 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
391 if (likely(skb)) { 391 if (likely(skb)) {
392 skb_put(desc->skb, desc->datalen); 392 skb_put(desc->skb, desc->datalen);
393 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); 393 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
394 desc->skb->ip_summed = CHECKSUM_NONE; 394 skb_checksum_none_assert(desc->skb);
395 priv->dev->stats.rx_packets++; 395 priv->dev->stats.rx_packets++;
396 priv->dev->stats.rx_bytes += desc->datalen; 396 priv->dev->stats.rx_bytes += desc->datalen;
397 result = desc->skb; 397 result = desc->skb;
@@ -506,7 +506,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
506 "restart rx from a descriptor that's " 506 "restart rx from a descriptor that's "
507 "not free: %p\n", 507 "not free: %p\n",
508 priv->dev->name, restart); 508 priv->dev->name, restart);
509 goto fatal_error; 509 goto fatal_error;
510 } 510 }
511 511
512 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); 512 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
@@ -873,7 +873,8 @@ static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
873 return -EINVAL; 873 return -EINVAL;
874} 874}
875 875
876static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 876static void cpmac_get_ringparam(struct net_device *dev,
877 struct ethtool_ringparam *ring)
877{ 878{
878 struct cpmac_priv *priv = netdev_priv(dev); 879 struct cpmac_priv *priv = netdev_priv(dev);
879 880
@@ -888,7 +889,8 @@ static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam
888 ring->tx_pending = 1; 889 ring->tx_pending = 1;
889} 890}
890 891
891static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 892static int cpmac_set_ringparam(struct net_device *dev,
893 struct ethtool_ringparam *ring)
892{ 894{
893 struct cpmac_priv *priv = netdev_priv(dev); 895 struct cpmac_priv *priv = netdev_priv(dev);
894 896
@@ -1012,8 +1014,8 @@ static int cpmac_open(struct net_device *dev)
1012 1014
1013 priv->rx_head->prev->hw_next = (u32)0; 1015 priv->rx_head->prev->hw_next = (u32)0;
1014 1016
1015 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, 1017 res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
1016 dev->name, dev))) { 1018 if (res) {
1017 if (netif_msg_drv(priv)) 1019 if (netif_msg_drv(priv))
1018 printk(KERN_ERR "%s: failed to obtain irq\n", 1020 printk(KERN_ERR "%s: failed to obtain irq\n",
1019 dev->name); 1021 dev->name);
@@ -1133,7 +1135,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1133 } 1135 }
1134 1136
1135 if (phy_id == PHY_MAX_ADDR) { 1137 if (phy_id == PHY_MAX_ADDR) {
1136 dev_err(&pdev->dev, "no PHY present, falling back to switch on MDIO bus 0\n"); 1138 dev_err(&pdev->dev, "no PHY present, falling back "
1139 "to switch on MDIO bus 0\n");
1137 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1140 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
1138 phy_id = pdev->id; 1141 phy_id = pdev->id;
1139 } 1142 }
@@ -1169,9 +1172,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1169 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1172 priv->msg_enable = netif_msg_init(debug_level, 0xff);
1170 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr)); 1173 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
1171 1174
1172 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 1175 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
1176 mdio_bus_id, phy_id);
1173 1177
1174 priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, 1178 priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
1175 PHY_INTERFACE_MODE_MII); 1179 PHY_INTERFACE_MODE_MII);
1176 1180
1177 if (IS_ERR(priv->phy)) { 1181 if (IS_ERR(priv->phy)) {
@@ -1182,7 +1186,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1182 goto fail; 1186 goto fail;
1183 } 1187 }
1184 1188
1185 if ((rc = register_netdev(dev))) { 1189 rc = register_netdev(dev);
1190 if (rc) {
1186 printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, 1191 printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
1187 dev->name); 1192 dev->name);
1188 goto fail; 1193 goto fail;
@@ -1248,11 +1253,13 @@ int __devinit cpmac_init(void)
1248 1253
1249 cpmac_mii->reset(cpmac_mii); 1254 cpmac_mii->reset(cpmac_mii);
1250 1255
1251 for (i = 0; i < 300; i++) 1256 for (i = 0; i < 300; i++) {
1252 if ((mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE))) 1257 mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
1258 if (mask)
1253 break; 1259 break;
1254 else 1260 else
1255 msleep(10); 1261 msleep(10);
1262 }
1256 1263
1257 mask &= 0x7fffffff; 1264 mask &= 0x7fffffff;
1258 if (mask & (mask - 1)) { 1265 if (mask & (mask - 1)) {
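The cpmac hunks are checkpatch-style cleanups; the recurring one pulls assignments out of if conditions. A before/after model of the CPMAC_MDIO_ALIVE polling loop, with the register access and timing faked:

#include <stdio.h>

static unsigned int fake_mdio_alive(void)
{
	static int calls;
	return ++calls < 3 ? 0 : 0x4;	/* "PHY appears" on the third poll */
}

int main(void)
{
	unsigned int mask = 0;

	/* old style folded both steps together:
	 *   if ((mask = cpmac_read(...))) break;
	 * new style assigns first, then tests: */
	for (int i = 0; i < 300; i++) {
		mask = fake_mdio_alive();
		if (mask)
			break;
	}
	printf("alive mask = 0x%x\n", mask);
	return 0;
}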
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ad19585d960b..1ecf53dafe06 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1286,7 +1286,7 @@ irq_err:
1286/* 1286/*
1287 * Release resources when all the ports and offloading have been stopped. 1287 * Release resources when all the ports and offloading have been stopped.
1288 */ 1288 */
1289static void cxgb_down(struct adapter *adapter) 1289static void cxgb_down(struct adapter *adapter, int on_wq)
1290{ 1290{
1291 t3_sge_stop(adapter); 1291 t3_sge_stop(adapter);
1292 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */ 1292 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
@@ -1296,7 +1296,8 @@ static void cxgb_down(struct adapter *adapter)
1296 free_irq_resources(adapter); 1296 free_irq_resources(adapter);
1297 quiesce_rx(adapter); 1297 quiesce_rx(adapter);
1298 t3_sge_stop(adapter); 1298 t3_sge_stop(adapter);
1299 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */ 1299 if (!on_wq)
1300 flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1300} 1301}
1301 1302
1302static void schedule_chk_task(struct adapter *adap) 1303static void schedule_chk_task(struct adapter *adap)
@@ -1374,7 +1375,7 @@ static int offload_close(struct t3cdev *tdev)
1374 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); 1375 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1375 1376
1376 if (!adapter->open_device_map) 1377 if (!adapter->open_device_map)
1377 cxgb_down(adapter); 1378 cxgb_down(adapter, 0);
1378 1379
1379 cxgb3_offload_deactivate(adapter); 1380 cxgb3_offload_deactivate(adapter);
1380 return 0; 1381 return 0;
@@ -1409,7 +1410,7 @@ static int cxgb_open(struct net_device *dev)
1409 return 0; 1410 return 0;
1410} 1411}
1411 1412
1412static int cxgb_close(struct net_device *dev) 1413static int __cxgb_close(struct net_device *dev, int on_wq)
1413{ 1414{
1414 struct port_info *pi = netdev_priv(dev); 1415 struct port_info *pi = netdev_priv(dev);
1415 struct adapter *adapter = pi->adapter; 1416 struct adapter *adapter = pi->adapter;
@@ -1436,12 +1437,17 @@ static int cxgb_close(struct net_device *dev)
1436 cancel_delayed_work_sync(&adapter->adap_check_task); 1437 cancel_delayed_work_sync(&adapter->adap_check_task);
1437 1438
1438 if (!adapter->open_device_map) 1439 if (!adapter->open_device_map)
1439 cxgb_down(adapter); 1440 cxgb_down(adapter, on_wq);
1440 1441
1441 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id); 1442 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1442 return 0; 1443 return 0;
1443} 1444}
1444 1445
1446static int cxgb_close(struct net_device *dev)
1447{
1448 return __cxgb_close(dev, 0);
1449}
1450
1445static struct net_device_stats *cxgb_get_stats(struct net_device *dev) 1451static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1446{ 1452{
1447 struct port_info *pi = netdev_priv(dev); 1453 struct port_info *pi = netdev_priv(dev);
@@ -2862,7 +2868,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2862 spin_unlock(&adapter->work_lock); 2868 spin_unlock(&adapter->work_lock);
2863} 2869}
2864 2870
2865static int t3_adapter_error(struct adapter *adapter, int reset) 2871static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2866{ 2872{
2867 int i, ret = 0; 2873 int i, ret = 0;
2868 2874
@@ -2877,7 +2883,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
2877 struct net_device *netdev = adapter->port[i]; 2883 struct net_device *netdev = adapter->port[i];
2878 2884
2879 if (netif_running(netdev)) 2885 if (netif_running(netdev))
2880 cxgb_close(netdev); 2886 __cxgb_close(netdev, on_wq);
2881 } 2887 }
2882 2888
2883 /* Stop SGE timers */ 2889 /* Stop SGE timers */
@@ -2948,7 +2954,7 @@ static void fatal_error_task(struct work_struct *work)
2948 int err = 0; 2954 int err = 0;
2949 2955
2950 rtnl_lock(); 2956 rtnl_lock();
2951 err = t3_adapter_error(adapter, 1); 2957 err = t3_adapter_error(adapter, 1, 1);
2952 if (!err) 2958 if (!err)
2953 err = t3_reenable_adapter(adapter); 2959 err = t3_reenable_adapter(adapter);
2954 if (!err) 2960 if (!err)
@@ -2998,7 +3004,7 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2998 if (state == pci_channel_io_perm_failure) 3004 if (state == pci_channel_io_perm_failure)
2999 return PCI_ERS_RESULT_DISCONNECT; 3005 return PCI_ERS_RESULT_DISCONNECT;
3000 3006
3001 ret = t3_adapter_error(adapter, 0); 3007 ret = t3_adapter_error(adapter, 0, 0);
3002 3008
3003 /* Request a slot reset. */ 3009 /* Request a slot reset. */
3004 return PCI_ERS_RESULT_NEED_RESET; 3010 return PCI_ERS_RESULT_NEED_RESET;
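The on_wq plumbing through cxgb_down()/__cxgb_close() exists to dodge a self-deadlock: fatal_error_task() runs on cxgb3_wq, and flushing the workqueue you are currently executing on never completes. A toy model of just the decision, with no real workqueue mechanics:

#include <stdio.h>

static void flush_workqueue_model(void) { puts("flushed wq"); }

static void teardown(int on_wq)
{
	/* ... stop hardware, free IRQs ... */
	if (!on_wq)
		flush_workqueue_model();	/* safe from process context */
	else
		puts("skipped flush: running on the workqueue itself");
}

int main(void)
{
	teardown(0);	/* e.g. cxgb_close() from ->ndo_stop */
	teardown(1);	/* e.g. fatal_error_task() work item */
	return 0;
}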
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index cb42353c9fdd..6990f6c65221 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1997,6 +1997,10 @@
1997 1997
1998#define A_PL_RST 0x6f0 1998#define A_PL_RST 0x6f0
1999 1999
2000#define S_FATALPERREN 4
2001#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
2002#define F_FATALPERREN V_FATALPERREN(1U)
2003
2000#define S_CRSTWRM 1 2004#define S_CRSTWRM 1
2001#define V_CRSTWRM(x) ((x) << S_CRSTWRM) 2005#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
2002#define F_CRSTWRM V_CRSTWRM(1U) 2006#define F_CRSTWRM V_CRSTWRM(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 8ff96c6f6de5..c5a142bea5e9 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2022,7 +2022,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2022 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2022 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2023 skb->ip_summed = CHECKSUM_UNNECESSARY; 2023 skb->ip_summed = CHECKSUM_UNNECESSARY;
2024 } else 2024 } else
2025 skb->ip_summed = CHECKSUM_NONE; 2025 skb_checksum_none_assert(skb);
2026 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2026 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2027 2027
2028 if (unlikely(p->vlan_valid)) { 2028 if (unlikely(p->vlan_valid)) {
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 427c451be1a7..d307c9de59fb 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1408,6 +1408,7 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1408 fatal++; 1408 fatal++;
1409 CH_ALERT(adapter, "%s (0x%x)\n", 1409 CH_ALERT(adapter, "%s (0x%x)\n",
1410 acts->msg, status & acts->mask); 1410 acts->msg, status & acts->mask);
1411 status &= ~acts->mask;
1411 } else if (acts->msg) 1412 } else if (acts->msg)
1412 CH_WARN(adapter, "%s (0x%x)\n", 1413 CH_WARN(adapter, "%s (0x%x)\n",
1413 acts->msg, status & acts->mask); 1414 acts->msg, status & acts->mask);
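The one-liner added above clears each fatal condition from the working status as the intr_info table walk reports it, so a later, wider table entry cannot report the same bit a second time. A standalone model with an invented table:

#include <stdio.h>

struct intr_info { unsigned int mask; const char *msg; int fatal; };

int main(void)
{
	static const struct intr_info acts[] = {
		{ 0x0001, "parity error", 1 },
		{ 0x000f, "generic error", 0 },	/* overlaps the bit above */
		{ 0, NULL, 0 }
	};
	unsigned int status = 0x0003;

	for (const struct intr_info *a = acts; a->mask; a++) {
		if (!(status & a->mask))
			continue;
		printf("%s (0x%x)\n", a->msg, status & a->mask);
		if (a->fatal)
			status &= ~a->mask;	/* don't report it twice */
	}
	return 0;
}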
@@ -1843,11 +1844,10 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1843 t3_os_link_fault_handler(adap, idx); 1844 t3_os_link_fault_handler(adap, idx);
1844 } 1845 }
1845 1846
1846 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1847
1848 if (cause & XGM_INTR_FATAL) 1847 if (cause & XGM_INTR_FATAL)
1849 t3_fatal_err(adap); 1848 t3_fatal_err(adap);
1850 1849
1850 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1851 return cause != 0; 1851 return cause != 0;
1852} 1852}
1853 1853
@@ -3569,6 +3569,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
3569 t3_write_reg(adapter, A_PM1_TX_MODE, 0); 3569 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3570 chan_init_hw(adapter, adapter->params.chan_map); 3570 chan_init_hw(adapter, adapter->params.chan_map);
3571 t3_sge_init(adapter, &adapter->params.sge); 3571 t3_sge_init(adapter, &adapter->params.sge);
3572 t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3572 3573
3573 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter)); 3574 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3574 3575
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 6e562c0dad7d..3ece9f5069fa 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -463,6 +463,8 @@ struct sge {
463 u8 counter_val[SGE_NCOUNTERS]; 463 u8 counter_val[SGE_NCOUNTERS];
464 unsigned int starve_thres; 464 unsigned int starve_thres;
465 u8 idma_state[2]; 465 u8 idma_state[2];
466 unsigned int egr_start;
467 unsigned int ingr_start;
466 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ 468 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
467 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ 469 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
468 DECLARE_BITMAP(starving_fl, MAX_EGRQ); 470 DECLARE_BITMAP(starving_fl, MAX_EGRQ);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c327527fbbc8..75b9401fd484 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -175,16 +175,26 @@ enum {
175 175
176static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { 176static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
177 CH_DEVICE(0xa000, 0), /* PE10K */ 177 CH_DEVICE(0xa000, 0), /* PE10K */
178 CH_DEVICE(0x4001, 0), 178 CH_DEVICE(0x4001, -1),
179 CH_DEVICE(0x4002, 0), 179 CH_DEVICE(0x4002, -1),
180 CH_DEVICE(0x4003, 0), 180 CH_DEVICE(0x4003, -1),
181 CH_DEVICE(0x4004, 0), 181 CH_DEVICE(0x4004, -1),
182 CH_DEVICE(0x4005, 0), 182 CH_DEVICE(0x4005, -1),
183 CH_DEVICE(0x4006, 0), 183 CH_DEVICE(0x4006, -1),
184 CH_DEVICE(0x4007, 0), 184 CH_DEVICE(0x4007, -1),
185 CH_DEVICE(0x4008, 0), 185 CH_DEVICE(0x4008, -1),
186 CH_DEVICE(0x4009, 0), 186 CH_DEVICE(0x4009, -1),
187 CH_DEVICE(0x400a, 0), 187 CH_DEVICE(0x400a, -1),
188 CH_DEVICE(0x4401, 4),
189 CH_DEVICE(0x4402, 4),
190 CH_DEVICE(0x4403, 4),
191 CH_DEVICE(0x4404, 4),
192 CH_DEVICE(0x4405, 4),
193 CH_DEVICE(0x4406, 4),
194 CH_DEVICE(0x4407, 4),
195 CH_DEVICE(0x4408, 4),
196 CH_DEVICE(0x4409, 4),
197 CH_DEVICE(0x440a, 4),
188 { 0, } 198 { 0, }
189}; 199};
190 200
@@ -423,10 +433,11 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
423 if (likely(opcode == CPL_SGE_EGR_UPDATE)) { 433 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
424 const struct cpl_sge_egr_update *p = (void *)rsp; 434 const struct cpl_sge_egr_update *p = (void *)rsp;
425 unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); 435 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
426 struct sge_txq *txq = q->adap->sge.egr_map[qid]; 436 struct sge_txq *txq;
427 437
438 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
428 txq->restarts++; 439 txq->restarts++;
429 if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) { 440 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
430 struct sge_eth_txq *eq; 441 struct sge_eth_txq *eq;
431 442
432 eq = container_of(txq, struct sge_eth_txq, q); 443 eq = container_of(txq, struct sge_eth_txq, q);
@@ -658,6 +669,15 @@ static int setup_rss(struct adapter *adap)
658} 669}
659 670
660/* 671/*
672 * Return the channel of the ingress queue with the given qid.
673 */
674static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
675{
676 qid -= p->ingr_start;
677 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
678}
679
680/*
661 * Wait until all NAPI handlers are descheduled. 681 * Wait until all NAPI handlers are descheduled.
662 */ 682 */
663static void quiesce_rx(struct adapter *adap) 683static void quiesce_rx(struct adapter *adap)
@@ -1671,27 +1691,41 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1671 return 0; 1691 return 0;
1672} 1692}
1673 1693
1674/* 1694/**
1675 * Translate a physical EEPROM address to virtual. The first 1K is accessed 1695 * eeprom_ptov - translate a physical EEPROM address to virtual
1676 * through virtual addresses starting at 31K, the rest is accessed through 1696 * @phys_addr: the physical EEPROM address
1677 * virtual addresses starting at 0. This mapping is correct only for PF0. 1697 * @fn: the PCI function number
1698 * @sz: size of function-specific area
1699 *
1700 * Translate a physical EEPROM address to virtual. The first 1K is
1701 * accessed through virtual addresses starting at 31K, the rest is
1702 * accessed through virtual addresses starting at 0.
1703 *
1704 * The mapping is as follows:
1705 * [0..1K) -> [31K..32K)
1706 * [1K..1K+A) -> [31K-A..31K)
1707 * [1K+A..ES) -> [0..ES-A-1K)
1708 *
1709 * where A = @fn * @sz, and ES = EEPROM size.
1678 */ 1710 */
1679static int eeprom_ptov(unsigned int phys_addr) 1711static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1680{ 1712{
1713 fn *= sz;
1681 if (phys_addr < 1024) 1714 if (phys_addr < 1024)
1682 return phys_addr + (31 << 10); 1715 return phys_addr + (31 << 10);
1716 if (phys_addr < 1024 + fn)
1717 return 31744 - fn + phys_addr - 1024;
1683 if (phys_addr < EEPROMSIZE) 1718 if (phys_addr < EEPROMSIZE)
1684 return phys_addr - 1024; 1719 return phys_addr - 1024 - fn;
1685 return -EINVAL; 1720 return -EINVAL;
1686} 1721}
1687 1722
1688/* 1723/*
1689 * The next two routines implement eeprom read/write from physical addresses. 1724 * The next two routines implement eeprom read/write from physical addresses.
1690 * The physical->virtual translation is correct only for PF0.
1691 */ 1725 */
1692static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) 1726static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1693{ 1727{
1694 int vaddr = eeprom_ptov(phys_addr); 1728 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1695 1729
1696 if (vaddr >= 0) 1730 if (vaddr >= 0)
1697 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); 1731 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
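The new eeprom_ptov() is the one spot this series documents with a formula, so a worked example helps. Below is a standalone copy of the mapping with the constants from this diff (EEPROMSIZE 17408, EEPROMPFSIZE 1024) and a few probe values for PF1; the only liberty taken is returning -1 instead of -EINVAL:

#include <stdio.h>

#define EEPROMSIZE   17408
#define EEPROMPFSIZE 1024

static int eeprom_ptov(unsigned int phys_addr, unsigned int fn,
		       unsigned int sz)
{
	fn *= sz;				/* A = fn * sz */
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);	/* [0..1K)    -> [31K..32K)   */
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
						/* [1K..1K+A) -> [31K-A..31K) */
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;	/* [1K+A..ES) -> [0..ES-A-1K) */
	return -1;
}

int main(void)
{
	/* PF1: its private window is physical [1K..2K) */
	printf("%d\n", eeprom_ptov(0, 1, EEPROMPFSIZE));	/* 31744 */
	printf("%d\n", eeprom_ptov(1024, 1, EEPROMPFSIZE));	/* 30720 */
	printf("%d\n", eeprom_ptov(2048, 1, EEPROMPFSIZE));	/* 0     */
	return 0;
}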
@@ -1700,7 +1734,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1700 1734
1701static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) 1735static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1702{ 1736{
1703 int vaddr = eeprom_ptov(phys_addr); 1737 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1704 1738
1705 if (vaddr >= 0) 1739 if (vaddr >= 0)
1706 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); 1740 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -1743,6 +1777,14 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1743 aligned_offset = eeprom->offset & ~3; 1777 aligned_offset = eeprom->offset & ~3;
1744 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; 1778 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1745 1779
1780 if (adapter->fn > 0) {
1781 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1782
1783 if (aligned_offset < start ||
1784 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1785 return -EPERM;
1786 }
1787
1746 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { 1788 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1747 /* 1789 /*
1748 * RMW possibly needed for first or last words. 1790 * RMW possibly needed for first or last words.
@@ -2304,7 +2346,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2304 req->peer_port = htons(0); 2346 req->peer_port = htons(0);
2305 req->local_ip = sip; 2347 req->local_ip = sip;
2306 req->peer_ip = htonl(0); 2348 req->peer_ip = htonl(0);
2307 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; 2349 chan = rxq_to_chan(&adap->sge, queue);
2308 req->opt0 = cpu_to_be64(TX_CHAN(chan)); 2350 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2309 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 2351 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2310 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 2352 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -2346,7 +2388,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
2346 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); 2388 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
2347 req->peer_ip_hi = cpu_to_be64(0); 2389 req->peer_ip_hi = cpu_to_be64(0);
2348 req->peer_ip_lo = cpu_to_be64(0); 2390 req->peer_ip_lo = cpu_to_be64(0);
2349 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; 2391 chan = rxq_to_chan(&adap->sge, queue);
2350 req->opt0 = cpu_to_be64(TX_CHAN(chan)); 2392 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2351 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 2393 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2352 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 2394 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -3061,12 +3103,16 @@ static int adap_init0(struct adapter *adap)
3061 params[2] = FW_PARAM_PFVF(L2T_END); 3103 params[2] = FW_PARAM_PFVF(L2T_END);
3062 params[3] = FW_PARAM_PFVF(FILTER_START); 3104 params[3] = FW_PARAM_PFVF(FILTER_START);
3063 params[4] = FW_PARAM_PFVF(FILTER_END); 3105 params[4] = FW_PARAM_PFVF(FILTER_END);
3064 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val); 3106 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3107 params[6] = FW_PARAM_PFVF(EQ_START);
3108 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3065 if (ret < 0) 3109 if (ret < 0)
3066 goto bye; 3110 goto bye;
3067 port_vec = val[0]; 3111 port_vec = val[0];
3068 adap->tids.ftid_base = val[3]; 3112 adap->tids.ftid_base = val[3];
3069 adap->tids.nftids = val[4] - val[3] + 1; 3113 adap->tids.nftids = val[4] - val[3] + 1;
3114 adap->sge.ingr_start = val[5];
3115 adap->sge.egr_start = val[6];
3070 3116
3071 if (c.ofldcaps) { 3117 if (c.ofldcaps) {
3072 /* query offload-related parameters */ 3118 /* query offload-related parameters */
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index bf38cfc57565..9967f3debce7 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -557,7 +557,8 @@ out:	cred = q->avail - cred;
 
 	if (unlikely(fl_starving(q))) {
 		smp_wmb();
-		set_bit(q->cntxt_id, adap->sge.starving_fl);
+		set_bit(q->cntxt_id - adap->sge.egr_start,
+			adap->sge.starving_fl);
 	}
 
 	return cred;
@@ -974,7 +975,7 @@ out_free:	dev_kfree_skb(skb);
 	}
 
 	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
-			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
+			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
 	cpl->pack = htons(0);
 	cpl->len = htons(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1213,7 +1214,8 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
 {
 	q->mapping_err++;
 	q->q.stops++;
-	set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
+	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
+		q->adap->sge.txq_maperr);
 }
 
 /**
@@ -1603,7 +1605,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 			rxq->stats.rx_cso++;
 		}
 	} else
-		skb->ip_summed = CHECKSUM_NONE;
+		skb_checksum_none_assert(skb);
 
 	if (unlikely(pkt->vlan_ex)) {
 		struct vlan_group *grp = pi->vlan_grp;
@@ -1835,6 +1837,7 @@ static unsigned int process_intrq(struct adapter *adap)
 		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
 			unsigned int qid = ntohl(rc->pldbuflen_qid);
 
+			qid -= adap->sge.ingr_start;
 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
 		}
 
@@ -2050,14 +2053,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	/* set offset to -1 to distinguish ingress queues without FL */
 	iq->offset = fl ? 0 : -1;
 
-	adap->sge.ingr_map[iq->cntxt_id] = iq;
+	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
 
 	if (fl) {
 		fl->cntxt_id = ntohs(c.fl0id);
 		fl->avail = fl->pend_cred = 0;
 		fl->pidx = fl->cidx = 0;
 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
-		adap->sge.egr_map[fl->cntxt_id] = fl;
+		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
 	}
 	return 0;
@@ -2087,7 +2090,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
 	q->stops = q->restarts = 0;
 	q->stat = (void *)&q->desc[q->size];
 	q->cntxt_id = id;
-	adap->sge.egr_map[id] = q;
+	adap->sge.egr_map[id - adap->sge.egr_start] = q;
 }
 
 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
@@ -2259,7 +2262,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
 {
 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
 
-	adap->sge.ingr_map[rq->cntxt_id] = NULL;
+	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
 	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
 		   rq->cntxt_id, fl_id, 0xffff);
 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
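Taken together, the sge.c hunks above switch every ingr_map/egr_map access from absolute hardware context IDs to IDs rebased against the per-function start values queried in adap_init0(). A minimal sketch of the resulting convention; the two helpers below are illustrative, not part of the driver:

/* Hypothetical helpers showing the rebased-index convention: the
 * firmware hands this PF context IDs starting at ingr_start/egr_start,
 * so the lookup-table index is (absolute ID - start). */
static inline struct sge_rspq *ingr_lookup(struct adapter *adap,
					   unsigned int abs_qid)
{
	return adap->sge.ingr_map[abs_qid - adap->sge.ingr_start];
}

static inline void egr_store(struct adapter *adap, unsigned int abs_qid,
			     void *q)
{
	adap->sge.egr_map[abs_qid - adap->sge.egr_start] = q;
}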
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 10a055565776..c26b455f37de 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -42,6 +42,7 @@ enum {
42 MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ 42 MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
43 EEPROMSIZE = 17408, /* Serial EEPROM physical size */ 43 EEPROMSIZE = 17408, /* Serial EEPROM physical size */
44 EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ 44 EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
45 EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
45 RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ 46 RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
46 TCB_SIZE = 128, /* TCB size */ 47 TCB_SIZE = 128, /* TCB size */
47 NMTUS = 16, /* size of MTU table */ 48 NMTUS = 16, /* size of MTU table */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 0969f2fbc1b0..940584a8a640 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -487,6 +487,11 @@ enum fw_params_param_pfvf {
487 FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, 487 FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
488 FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26, 488 FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
489 FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27, 489 FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
490 FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
491 FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
492 FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
493 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
494 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
490}; 495};
491 496
492/* 497/*
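The new FW_PARAMS_PARAM_PFVF_* values are consumed through the same t4_query_params() path shown in the cxgb4_main.c hunk above. A hedged sketch of a stand-alone query for just the two start values, using the macros and call signature as they appear in this patch:

/* Sketch: ask the firmware for this PF's first ingress and egress
 * queue IDs. FW_PARAM_PFVF() is the driver's macro for encoding a
 * PFVF parameter word. */
u32 params[2], val[2];
int ret;

params[0] = FW_PARAM_PFVF(IQFLINT_START);
params[1] = FW_PARAM_PFVF(EQ_START);
ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, val);
if (ret < 0)
	return ret;
adap->sge.ingr_start = val[0];
adap->sge.egr_start = val[1];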
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index eb5a1c9cb2d3..f10864ddafbe 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1520,7 +1520,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1520 __skb_pull(skb, PKTSHIFT); 1520 __skb_pull(skb, PKTSHIFT);
1521 skb->protocol = eth_type_trans(skb, rspq->netdev); 1521 skb->protocol = eth_type_trans(skb, rspq->netdev);
1522 skb_record_rx_queue(skb, rspq->idx); 1522 skb_record_rx_queue(skb, rspq->idx);
1523 skb->dev->last_rx = jiffies; /* XXX removed 2.6.29 */
1524 pi = netdev_priv(skb->dev); 1523 pi = netdev_priv(skb->dev);
1525 rxq->stats.pkts++; 1524 rxq->stats.pkts++;
1526 1525
@@ -1535,7 +1534,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1535 } 1534 }
1536 rxq->stats.rx_cso++; 1535 rxq->stats.rx_cso++;
1537 } else 1536 } else
1538 skb->ip_summed = CHECKSUM_NONE; 1537 skb_checksum_none_assert(skb);
1539 1538
1540 if (unlikely(pkt->vlan_ex)) { 1539 if (unlikely(pkt->vlan_ex)) {
1541 struct vlan_group *grp = pi->vlan_grp; 1540 struct vlan_group *grp = pi->vlan_grp;
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index d7de376d7178..219eb5ad5c12 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -1255,7 +1255,7 @@ static int __devinit dec_lance_probe(struct device *bdev, const int type)
1255 */ 1255 */
1256 init_timer(&lp->multicast_timer); 1256 init_timer(&lp->multicast_timer);
1257 lp->multicast_timer.data = (unsigned long) dev; 1257 lp->multicast_timer.data = (unsigned long) dev;
1258 lp->multicast_timer.function = &lance_set_multicast_retry; 1258 lp->multicast_timer.function = lance_set_multicast_retry;
1259 1259
1260 ret = register_netdev(dev); 1260 ret = register_netdev(dev);
1261 if (ret) { 1261 if (ret) {
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index a2f238d20caa..e1a8216ff692 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -465,7 +465,7 @@ rio_open (struct net_device *dev)
465 init_timer (&np->timer); 465 init_timer (&np->timer);
466 np->timer.expires = jiffies + 1*HZ; 466 np->timer.expires = jiffies + 1*HZ;
467 np->timer.data = (unsigned long) dev; 467 np->timer.data = (unsigned long) dev;
468 np->timer.function = &rio_timer; 468 np->timer.function = rio_timer;
469 add_timer (&np->timer); 469 add_timer (&np->timer);
470 470
471 /* Start Tx/Rx */ 471 /* Start Tx/Rx */
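The declance and dl2k hunks above (and several similar ones further down) only drop a redundant '&': in C a function designator already decays to a pointer to the function, so both spellings assign the same value. For example:

/* Both assignments store the identical pointer; the series merely
 * standardizes on the form without '&'. */
static void rio_timer(unsigned long data)
{
	/* timer body */
}

static void setup_timer_example(struct timer_list *t)
{
	t->function = rio_timer;	/* preferred style */
	t->function = &rio_timer;	/* equivalent, redundant '&' */
}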
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 4fd6b2b4554b..9f6aeefa06bf 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1056,7 +1056,7 @@ dm9000_rx(struct net_device *dev)
1056 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0) 1056 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1057 skb->ip_summed = CHECKSUM_UNNECESSARY; 1057 skb->ip_summed = CHECKSUM_UNNECESSARY;
1058 else 1058 else
1059 skb->ip_summed = CHECKSUM_NONE; 1059 skb_checksum_none_assert(skb);
1060 } 1060 }
1061 netif_rx(skb); 1061 netif_rx(skb);
1062 dev->stats.rx_packets++; 1062 dev->stats.rx_packets++;
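skb_checksum_none_assert() was introduced in this release as a self-documenting replacement for open-coded skb->ip_summed = CHECKSUM_NONE stores on freshly received skbs. Its definition is approximately:

/* From include/linux/skbuff.h (approximate): a fresh skb already has
 * ip_summed == CHECKSUM_NONE, so in debug builds this asserts the
 * invariant instead of silently rewriting the field. */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}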
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5cc39ed289c6..8d9269d12a67 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -790,6 +790,70 @@ static const struct net_device_ops e1000_netdev_ops = {
790}; 790};
791 791
792/** 792/**
793 * e1000_init_hw_struct - initialize members of hw struct
794 * @adapter: board private struct
795 * @hw: structure used by e1000_hw.c
796 *
797 * Factors out initialization of the e1000_hw struct to its own function
798 * that can be called very early at init (just after struct allocation).
799 * Fields are initialized based on PCI device information and
800 * OS network device settings (MTU size).
801 * Returns negative error codes if MAC type setup fails.
802 */
803static int e1000_init_hw_struct(struct e1000_adapter *adapter,
804 struct e1000_hw *hw)
805{
806 struct pci_dev *pdev = adapter->pdev;
807
808 /* PCI config space info */
809 hw->vendor_id = pdev->vendor;
810 hw->device_id = pdev->device;
811 hw->subsystem_vendor_id = pdev->subsystem_vendor;
812 hw->subsystem_id = pdev->subsystem_device;
813 hw->revision_id = pdev->revision;
814
815 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
816
817 hw->max_frame_size = adapter->netdev->mtu +
818 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
819 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
820
821 /* identify the MAC */
822 if (e1000_set_mac_type(hw)) {
823 e_err(probe, "Unknown MAC Type\n");
824 return -EIO;
825 }
826
827 switch (hw->mac_type) {
828 default:
829 break;
830 case e1000_82541:
831 case e1000_82547:
832 case e1000_82541_rev_2:
833 case e1000_82547_rev_2:
834 hw->phy_init_script = 1;
835 break;
836 }
837
838 e1000_set_media_type(hw);
839 e1000_get_bus_info(hw);
840
841 hw->wait_autoneg_complete = false;
842 hw->tbi_compatibility_en = true;
843 hw->adaptive_ifs = true;
844
845 /* Copper options */
846
847 if (hw->media_type == e1000_media_type_copper) {
848 hw->mdix = AUTO_ALL_MODES;
849 hw->disable_polarity_correction = false;
850 hw->master_slave = E1000_MASTER_SLAVE;
851 }
852
853 return 0;
854}
855
856/**
793 * e1000_probe - Device Initialization Routine 857 * e1000_probe - Device Initialization Routine
794 * @pdev: PCI device information struct 858 * @pdev: PCI device information struct
795 * @ent: entry in e1000_pci_tbl 859 * @ent: entry in e1000_pci_tbl
@@ -826,22 +890,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
826 if (err) 890 if (err)
827 return err; 891 return err;
828 892
829 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
830 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
831 pci_using_dac = 1;
832 } else {
833 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
834 if (err) {
835 err = dma_set_coherent_mask(&pdev->dev,
836 DMA_BIT_MASK(32));
837 if (err) {
838 pr_err("No usable DMA config, aborting\n");
839 goto err_dma;
840 }
841 }
842 pci_using_dac = 0;
843 }
844
845 err = pci_request_selected_regions(pdev, bars, e1000_driver_name); 893 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
846 if (err) 894 if (err)
847 goto err_pci_reg; 895 goto err_pci_reg;
@@ -885,6 +933,32 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
885 } 933 }
886 } 934 }
887 935
936 /* make ready for any if (hw->...) below */
937 err = e1000_init_hw_struct(adapter, hw);
938 if (err)
939 goto err_sw_init;
940
941 /*
942 * there is a workaround being applied below that limits
943 * 64-bit DMA addresses to 64-bit hardware. There are some
944 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
945 */
946 pci_using_dac = 0;
947 if ((hw->bus_type == e1000_bus_type_pcix) &&
948 !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
949 /*
950 * according to DMA-API-HOWTO, coherent calls will always
951 * succeed if the set call did
952 */
953 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
954 pci_using_dac = 1;
955 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
956 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
957 } else {
958 pr_err("No usable DMA config, aborting\n");
959 goto err_dma;
960 }
961
888 netdev->netdev_ops = &e1000_netdev_ops; 962 netdev->netdev_ops = &e1000_netdev_ops;
889 e1000_set_ethtool_ops(netdev); 963 e1000_set_ethtool_ops(netdev);
890 netdev->watchdog_timeo = 5 * HZ; 964 netdev->watchdog_timeo = 5 * HZ;
@@ -959,18 +1033,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
959 if (!is_valid_ether_addr(netdev->perm_addr)) 1033 if (!is_valid_ether_addr(netdev->perm_addr))
960 e_err(probe, "Invalid MAC Address\n"); 1034 e_err(probe, "Invalid MAC Address\n");
961 1035
962 e1000_get_bus_info(hw);
963
964 init_timer(&adapter->tx_fifo_stall_timer); 1036 init_timer(&adapter->tx_fifo_stall_timer);
965 adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall; 1037 adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
966 adapter->tx_fifo_stall_timer.data = (unsigned long)adapter; 1038 adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
967 1039
968 init_timer(&adapter->watchdog_timer); 1040 init_timer(&adapter->watchdog_timer);
969 adapter->watchdog_timer.function = &e1000_watchdog; 1041 adapter->watchdog_timer.function = e1000_watchdog;
970 adapter->watchdog_timer.data = (unsigned long) adapter; 1042 adapter->watchdog_timer.data = (unsigned long) adapter;
971 1043
972 init_timer(&adapter->phy_info_timer); 1044 init_timer(&adapter->phy_info_timer);
973 adapter->phy_info_timer.function = &e1000_update_phy_info; 1045 adapter->phy_info_timer.function = e1000_update_phy_info;
974 adapter->phy_info_timer.data = (unsigned long)adapter; 1046 adapter->phy_info_timer.data = (unsigned long)adapter;
975 1047
976 INIT_WORK(&adapter->reset_task, e1000_reset_task); 1048 INIT_WORK(&adapter->reset_task, e1000_reset_task);
@@ -1072,6 +1144,7 @@ err_eeprom:
1072 iounmap(hw->flash_address); 1144 iounmap(hw->flash_address);
1073 kfree(adapter->tx_ring); 1145 kfree(adapter->tx_ring);
1074 kfree(adapter->rx_ring); 1146 kfree(adapter->rx_ring);
1147err_dma:
1075err_sw_init: 1148err_sw_init:
1076 iounmap(hw->hw_addr); 1149 iounmap(hw->hw_addr);
1077err_ioremap: 1150err_ioremap:
@@ -1079,7 +1152,6 @@ err_ioremap:
1079err_alloc_etherdev: 1152err_alloc_etherdev:
1080 pci_release_selected_regions(pdev, bars); 1153 pci_release_selected_regions(pdev, bars);
1081err_pci_reg: 1154err_pci_reg:
1082err_dma:
1083 pci_disable_device(pdev); 1155 pci_disable_device(pdev);
1084 return err; 1156 return err;
1085} 1157}
@@ -1131,62 +1203,12 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
1131 * @adapter: board private structure to initialize 1203 * @adapter: board private structure to initialize
1132 * 1204 *
1133 * e1000_sw_init initializes the Adapter private data structure. 1205 * e1000_sw_init initializes the Adapter private data structure.
1134 * Fields are initialized based on PCI device information and 1206 * e1000_init_hw_struct MUST be called before this function
1135 * OS network device settings (MTU size).
1136 **/ 1207 **/
1137 1208
1138static int __devinit e1000_sw_init(struct e1000_adapter *adapter) 1209static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1139{ 1210{
1140 struct e1000_hw *hw = &adapter->hw;
1141 struct net_device *netdev = adapter->netdev;
1142 struct pci_dev *pdev = adapter->pdev;
1143
1144 /* PCI config space info */
1145
1146 hw->vendor_id = pdev->vendor;
1147 hw->device_id = pdev->device;
1148 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1149 hw->subsystem_id = pdev->subsystem_device;
1150 hw->revision_id = pdev->revision;
1151
1152 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1153
1154 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1211 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1155 hw->max_frame_size = netdev->mtu +
1156 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1157 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
1158
1159 /* identify the MAC */
1160
1161 if (e1000_set_mac_type(hw)) {
1162 e_err(probe, "Unknown MAC Type\n");
1163 return -EIO;
1164 }
1165
1166 switch (hw->mac_type) {
1167 default:
1168 break;
1169 case e1000_82541:
1170 case e1000_82547:
1171 case e1000_82541_rev_2:
1172 case e1000_82547_rev_2:
1173 hw->phy_init_script = 1;
1174 break;
1175 }
1176
1177 e1000_set_media_type(hw);
1178
1179 hw->wait_autoneg_complete = false;
1180 hw->tbi_compatibility_en = true;
1181 hw->adaptive_ifs = true;
1182
1183 /* Copper options */
1184
1185 if (hw->media_type == e1000_media_type_copper) {
1186 hw->mdix = AUTO_ALL_MODES;
1187 hw->disable_polarity_correction = false;
1188 hw->master_slave = E1000_MASTER_SLAVE;
1189 }
1190 1212
1191 adapter->num_tx_queues = 1; 1213 adapter->num_tx_queues = 1;
1192 adapter->num_rx_queues = 1; 1214 adapter->num_rx_queues = 1;
@@ -3552,7 +3574,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3552 struct e1000_hw *hw = &adapter->hw; 3574 struct e1000_hw *hw = &adapter->hw;
3553 u16 status = (u16)status_err; 3575 u16 status = (u16)status_err;
3554 u8 errors = (u8)(status_err >> 24); 3576 u8 errors = (u8)(status_err >> 24);
3555 skb->ip_summed = CHECKSUM_NONE; 3577
3578 skb_checksum_none_assert(skb);
3556 3579
3557 /* 82543 or newer only */ 3580 /* 82543 or newer only */
3558 if (unlikely(hw->mac_type < e1000_82543)) return; 3581 if (unlikely(hw->mac_type < e1000_82543)) return;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2b8ef44bd2b1..c9b66f4727e4 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -475,7 +475,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
475{ 475{
476 u16 status = (u16)status_err; 476 u16 status = (u16)status_err;
477 u8 errors = (u8)(status_err >> 24); 477 u8 errors = (u8)(status_err >> 24);
478 skb->ip_summed = CHECKSUM_NONE; 478
479 skb_checksum_none_assert(skb);
479 480
480 /* Ignore Checksum bit is set */ 481 /* Ignore Checksum bit is set */
481 if (status & E1000_RXD_STAT_IXSM) 482 if (status & E1000_RXD_STAT_IXSM)
@@ -5745,11 +5746,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5745 } 5746 }
5746 5747
5747 init_timer(&adapter->watchdog_timer); 5748 init_timer(&adapter->watchdog_timer);
5748 adapter->watchdog_timer.function = &e1000_watchdog; 5749 adapter->watchdog_timer.function = e1000_watchdog;
5749 adapter->watchdog_timer.data = (unsigned long) adapter; 5750 adapter->watchdog_timer.data = (unsigned long) adapter;
5750 5751
5751 init_timer(&adapter->phy_info_timer); 5752 init_timer(&adapter->phy_info_timer);
5752 adapter->phy_info_timer.function = &e1000_update_phy_info; 5753 adapter->phy_info_timer.function = e1000_update_phy_info;
5753 adapter->phy_info_timer.data = (unsigned long) adapter; 5754 adapter->phy_info_timer.data = (unsigned long) adapter;
5754 5755
5755 INIT_WORK(&adapter->reset_task, e1000_reset_task); 5756 INIT_WORK(&adapter->reset_task, e1000_reset_task);
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 8d97f168f018..7c826319ee5a 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1457,11 +1457,11 @@ hardware_send_packet(struct net_device *dev, void *buf, short length)
1457 if (net_debug > 5) 1457 if (net_debug > 5)
1458 printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name); 1458 printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);
1459 1459
1460 /* determine how much of the transmit buffer space is available */ 1460 /* determine how much of the transmit buffer space is available */
1461 if (lp->tx_end > lp->tx_start) 1461 if (lp->tx_end > lp->tx_start)
1462 tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start); 1462 tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
1463 else if (lp->tx_end < lp->tx_start) 1463 else if (lp->tx_end < lp->tx_start)
1464 tx_available = lp->tx_start - lp->tx_end; 1464 tx_available = lp->tx_start - lp->tx_end;
1465 else tx_available = lp->xmt_ram; 1465 else tx_available = lp->xmt_ram;
1466 1466
1467 if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) { 1467 if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index a333b42111b8..043d99013056 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -180,7 +180,7 @@ static void ehea_update_firmware_handles(void)
180 num_portres * EHEA_NUM_PORTRES_FW_HANDLES; 180 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
181 181
182 if (num_fw_handles) { 182 if (num_fw_handles) {
183 arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL); 183 arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
184 if (!arr) 184 if (!arr)
185 goto out; /* Keep the existing array */ 185 goto out; /* Keep the existing array */
186 } else 186 } else
@@ -265,7 +265,7 @@ static void ehea_update_bcmc_registrations(void)
265 } 265 }
266 266
267 if (num_registrations) { 267 if (num_registrations) {
268 arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC); 268 arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
269 if (!arr) 269 if (!arr)
270 goto out; /* Keep the existing array */ 270 goto out; /* Keep the existing array */
271 } else 271 } else
@@ -3721,7 +3721,7 @@ int __init ehea_module_init(void)
3721 if (ret) 3721 if (ret)
3722 ehea_info("failed registering memory remove notifier"); 3722 ehea_info("failed registering memory remove notifier");
3723 3723
3724 ret = crash_shutdown_register(&ehea_crash_handler); 3724 ret = crash_shutdown_register(ehea_crash_handler);
3725 if (ret) 3725 if (ret)
3726 ehea_info("failed registering crash handler"); 3726 ehea_info("failed registering crash handler");
3727 3727
@@ -3746,7 +3746,7 @@ out3:
3746out2: 3746out2:
3747 unregister_memory_notifier(&ehea_mem_nb); 3747 unregister_memory_notifier(&ehea_mem_nb);
3748 unregister_reboot_notifier(&ehea_reboot_nb); 3748 unregister_reboot_notifier(&ehea_reboot_nb);
3749 crash_shutdown_unregister(&ehea_crash_handler); 3749 crash_shutdown_unregister(ehea_crash_handler);
3750out: 3750out:
3751 return ret; 3751 return ret;
3752} 3752}
@@ -3759,7 +3759,7 @@ static void __exit ehea_module_exit(void)
3759 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3759 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3760 ibmebus_unregister_driver(&ehea_driver); 3760 ibmebus_unregister_driver(&ehea_driver);
3761 unregister_reboot_notifier(&ehea_reboot_nb); 3761 unregister_reboot_notifier(&ehea_reboot_nb);
3762 ret = crash_shutdown_unregister(&ehea_crash_handler); 3762 ret = crash_shutdown_unregister(ehea_crash_handler);
3763 if (ret) 3763 if (ret)
3764 ehea_info("failed unregistering crash handler"); 3764 ehea_info("failed unregistering crash handler");
3765 unregister_memory_notifier(&ehea_mem_nb); 3765 unregister_memory_notifier(&ehea_mem_nb);
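The ehea kzalloc-to-kcalloc conversions are not purely cosmetic: kcalloc() checks the n * size multiplication for overflow before allocating, so an oversized request fails cleanly instead of producing a wrapped-around, undersized buffer. Roughly:

/* Approximate behavior of kcalloc(): */
static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > ULONG_MAX / size)
		return NULL;	/* n * size would overflow */
	return kzalloc(n * size, flags);
}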
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index f239aa8c6f4c..75869ed7226f 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "1.4.1.1" 35#define DRV_VERSION "1.4.1.2"
36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 9aab85366d21..711077a2e345 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -911,7 +911,9 @@ static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
 
 static int enic_set_mac_address(struct net_device *netdev, void *p)
 {
-	return -EOPNOTSUPP;
+	struct sockaddr *saddr = p;
+
+	return enic_set_mac_addr(netdev, (char *)saddr->sa_data);
 }
 
 static int enic_dev_packet_filter(struct enic *enic, int directed,
@@ -2152,17 +2154,6 @@ void enic_dev_deinit(struct enic *enic)
2152 enic_clear_intr_mode(enic); 2154 enic_clear_intr_mode(enic);
2153} 2155}
2154 2156
2155static int enic_dev_stats_clear(struct enic *enic)
2156{
2157 int err;
2158
2159 spin_lock(&enic->devcmd_lock);
2160 err = vnic_dev_stats_clear(enic->vdev);
2161 spin_unlock(&enic->devcmd_lock);
2162
2163 return err;
2164}
2165
2166int enic_dev_init(struct enic *enic) 2157int enic_dev_init(struct enic *enic)
2167{ 2158{
2168 struct device *dev = enic_get_dev(enic); 2159 struct device *dev = enic_get_dev(enic);
@@ -2205,10 +2196,6 @@ int enic_dev_init(struct enic *enic)
2205 2196
2206 enic_init_vnic_resources(enic); 2197 enic_init_vnic_resources(enic);
2207 2198
2208 /* Clear LIF stats
2209 */
2210 enic_dev_stats_clear(enic);
2211
2212 err = enic_set_rq_alloc_buf(enic); 2199 err = enic_set_rq_alloc_buf(enic);
2213 if (err) { 2200 if (err) {
2214 dev_err(dev, "Failed to set RQ buffer allocator, aborting\n"); 2201 dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
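Note that the new enic_set_mac_address() forwards sa_data to the hardware without validating it. A more defensive variant — an illustration, not what this patch does — would reject invalid unicast addresses first:

/* Hypothetical defensive variant using the standard helper from
 * <linux/etherdevice.h>. */
static int enic_set_mac_address_checked(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	return enic_set_mac_addr(netdev, (char *)saddr->sa_data);
}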
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 6a5b578a69e1..08d5d42da260 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -74,6 +74,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
74 struct vnic_dev_bar *bar, unsigned int num_bars) 74 struct vnic_dev_bar *bar, unsigned int num_bars)
75{ 75{
76 struct vnic_resource_header __iomem *rh; 76 struct vnic_resource_header __iomem *rh;
77 struct mgmt_barmap_hdr __iomem *mrh;
77 struct vnic_resource __iomem *r; 78 struct vnic_resource __iomem *r;
78 u8 type; 79 u8 type;
79 80
@@ -85,22 +86,32 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
 		return -EINVAL;
 	}
 
 	rh  = bar->vaddr;
+	mrh = bar->vaddr;
 	if (!rh) {
 		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
 		return -EINVAL;
 	}
 
-	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
-	    ioread32(&rh->version) != VNIC_RES_VERSION) {
-		pr_err("vNIC BAR0 res magic/version error "
-			"exp (%lx/%lx) curr (%x/%x)\n",
+	/* Check for mgmt vnic in addition to normal vnic */
+	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
+	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
+		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
+		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
+			pr_err("vNIC BAR0 res magic/version error "
+				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
 			VNIC_RES_MAGIC, VNIC_RES_VERSION,
+			MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
 			ioread32(&rh->magic), ioread32(&rh->version));
 		return -EINVAL;
+		}
 	}
 
-	r = (struct vnic_resource __iomem *)(rh + 1);
+	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
+		r = (struct vnic_resource __iomem *)(mrh + 1);
+	else
+		r = (struct vnic_resource __iomem *)(rh + 1);
+
 
 	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index 20661755df6b..9abb3d51dea1 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -238,6 +238,18 @@ enum vnic_devcmd_cmd {
238 * out: (u32)a0=status of proxied cmd 238 * out: (u32)a0=status of proxied cmd
239 * a1-a15=out args of proxied cmd */ 239 * a1-a15=out args of proxied cmd */
240 CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), 240 CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
241
242 /*
243 * As for BY_BDF except a0 is index of hvnlink subordinate vnic
244 * or SR-IOV virtual vnic */
245 CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
246
247 /*
248 * in: (u64)a0=paddr of buffer to put latest VIC VIF-CONFIG-INFO TLV in
249 * (u32)a1=length of buffer in a0
250 * out: (u64)a0=paddr of buffer with latest VIC VIF-CONFIG-INFO TLV
251 * (u32)a1=actual length of latest VIC VIF-CONFIG-INFO TLV */
252 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
241}; 253};
242 254
243/* flags for CMD_OPEN */ 255/* flags for CMD_OPEN */
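The comment block for CMD_CONFIG_INFO_GET doubles as its calling convention. A hedged sketch of issuing it through the driver's vnic_dev_cmd() path; config_pa and config_len are assumed caller-provided values, and buffer setup is elided:

/* Sketch: fetch the latest VIC VIF-CONFIG-INFO TLV. a0/a1 follow the
 * in/out register convention documented above; 'wait' is the usual
 * devcmd timeout in milliseconds. */
u64 a0 = config_pa;	/* DMA address of a caller-provided buffer */
u64 a1 = config_len;	/* size of that buffer */
int wait = 1000;
int err;

err = vnic_dev_cmd(vdev, CMD_CONFIG_INFO_GET, &a0, &a1, wait);
if (err)
	return err;
/* on success, a1 holds the actual length of the returned TLV */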
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 3b3291248956..e8740e3704e4 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -30,7 +30,7 @@ struct vnic_enet_config {
30 u32 wq_desc_count; 30 u32 wq_desc_count;
31 u32 rq_desc_count; 31 u32 rq_desc_count;
32 u16 mtu; 32 u16 mtu;
33 u16 intr_timer; 33 u16 intr_timer_deprecated;
34 u8 intr_timer_type; 34 u8 intr_timer_type;
35 u8 intr_mode; 35 u8 intr_mode;
36 char devname[16]; 36 char devname[16];
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
index 810287beff14..e0a73f1ca6f4 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/enic/vnic_resource.h
@@ -22,6 +22,11 @@
22 22
23#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ 23#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
24#define VNIC_RES_VERSION 0x00000000L 24#define VNIC_RES_VERSION 0x00000000L
25#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */
26#define MGMTVNIC_VERSION 0x00000000L
27
28/* The MAC address assigned to the CFG vNIC is fixed. */
29#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d }
25 30
26/* vNIC resource types */ 31/* vNIC resource types */
27enum vnic_res_type { 32enum vnic_res_type {
@@ -52,6 +57,14 @@ struct vnic_resource_header {
52 u32 version; 57 u32 version;
53}; 58};
54 59
60struct mgmt_barmap_hdr {
61 u32 magic; /* magic number */
62 u32 version; /* header format version */
63 u16 lif; /* loopback lif for mgmt frames */
64 u16 pci_slot; /* installed pci slot */
65 char serial[16]; /* card serial number */
66};
67
55struct vnic_resource { 68struct vnic_resource {
56 u8 type; 69 u8 type;
57 u8 bar; 70 u8 bar;
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index dbb2aca258b9..b236d7cbc137 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -77,8 +77,10 @@ void vnic_rq_free(struct vnic_rq *rq)
77 vnic_dev_free_desc_ring(vdev, &rq->ring); 77 vnic_dev_free_desc_ring(vdev, &rq->ring);
78 78
79 for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) { 79 for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
80 kfree(rq->bufs[i]); 80 if (rq->bufs[i]) {
81 rq->bufs[i] = NULL; 81 kfree(rq->bufs[i]);
82 rq->bufs[i] = NULL;
83 }
82 } 84 }
83 85
84 rq->ctrl = NULL; 86 rq->ctrl = NULL;
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index 197c9d24af82..4725b79de0ef 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -54,8 +54,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
 	if (!vp || !value)
 		return -EINVAL;
 
-	if (ntohl(vp->length) + sizeof(*tlv) + length >
-		VIC_PROVINFO_MAX_TLV_DATA)
+	if (ntohl(vp->length) + offsetof(struct vic_provinfo_tlv, value) +
+		length > VIC_PROVINFO_MAX_TLV_DATA)
 		return -ENOMEM;
 
 	tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
@@ -66,7 +66,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
 	memcpy(tlv->value, value, length);
 
 	vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
-	vp->length = htonl(ntohl(vp->length) + sizeof(*tlv) + length);
+	vp->length = htonl(ntohl(vp->length) +
+		offsetof(struct vic_provinfo_tlv, value) + length);
 
 	return 0;
 }
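The sizeof-to-offsetof change matters because the TLV struct ends in a flexible payload member: offsetof(..., value) is exactly the header length on the wire, while sizeof(*tlv) would also count any tail padding the compiler adds. Schematically (member types are an illustration, not the driver's exact definition):

/* Schematic TLV layout: */
struct tlv_sketch {
	u16 type;
	u16 length;
	u8 value[0];	/* payload starts here */
};
/* offsetof(struct tlv_sketch, value) == the header bytes actually
 * emitted; sizeof(struct tlv_sketch) may be larger if padded. */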
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index 122e33bcc578..4b2a6c6a569b 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -77,8 +77,10 @@ void vnic_wq_free(struct vnic_wq *wq)
77 vnic_dev_free_desc_ring(vdev, &wq->ring); 77 vnic_dev_free_desc_ring(vdev, &wq->ring);
78 78
79 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { 79 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
80 kfree(wq->bufs[i]); 80 if (wq->bufs[i]) {
81 wq->bufs[i] = NULL; 81 kfree(wq->bufs[i]);
82 wq->bufs[i] = NULL;
83 }
82 } 84 }
83 85
84 wq->ctrl = NULL; 86 wq->ctrl = NULL;
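Since kfree(NULL) is defined to be a no-op, the guards added in vnic_rq_free() and vnic_wq_free() do not change which buffers are freed; they only skip rewriting slots that are already NULL. The unguarded form is equally safe:

/* Equivalent unguarded form, relying on kfree(NULL) being a no-op: */
for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
	kfree(wq->bufs[i]);
	wq->bufs[i] = NULL;
}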
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 57c8ac0ef3f1..32543a300b81 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -758,7 +758,7 @@ static int epic_open(struct net_device *dev)
758 init_timer(&ep->timer); 758 init_timer(&ep->timer);
759 ep->timer.expires = jiffies + 3*HZ; 759 ep->timer.expires = jiffies + 3*HZ;
760 ep->timer.data = (unsigned long)dev; 760 ep->timer.data = (unsigned long)dev;
761 ep->timer.function = &epic_timer; /* timer handler */ 761 ep->timer.function = epic_timer; /* timer handler */
762 add_timer(&ep->timer); 762 add_timer(&ep->timer);
763 763
764 return 0; 764 return 0;
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6d653c459c1f..c5a2fe099a8d 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -806,11 +806,6 @@ static void ethoc_tx_timeout(struct net_device *dev)
806 ethoc_interrupt(dev->irq, dev); 806 ethoc_interrupt(dev->irq, dev);
807} 807}
808 808
809static struct net_device_stats *ethoc_stats(struct net_device *dev)
810{
811 return &dev->stats;
812}
813
814static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) 809static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
815{ 810{
816 struct ethoc *priv = netdev_priv(dev); 811 struct ethoc *priv = netdev_priv(dev);
@@ -863,7 +858,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
863 .ndo_set_multicast_list = ethoc_set_multicast_list, 858 .ndo_set_multicast_list = ethoc_set_multicast_list,
864 .ndo_change_mtu = ethoc_change_mtu, 859 .ndo_change_mtu = ethoc_change_mtu,
865 .ndo_tx_timeout = ethoc_tx_timeout, 860 .ndo_tx_timeout = ethoc_tx_timeout,
866 .ndo_get_stats = ethoc_stats,
867 .ndo_start_xmit = ethoc_start_xmit, 861 .ndo_start_xmit = ethoc_start_xmit,
868}; 862};
869 863
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d7e8f6b8f4cf..dd54abe2f710 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -915,14 +915,14 @@ static int netdev_open(struct net_device *dev)
915 init_timer(&np->timer); 915 init_timer(&np->timer);
916 np->timer.expires = RUN_AT(3 * HZ); 916 np->timer.expires = RUN_AT(3 * HZ);
917 np->timer.data = (unsigned long) dev; 917 np->timer.data = (unsigned long) dev;
918 np->timer.function = &netdev_timer; 918 np->timer.function = netdev_timer;
919 919
920 /* timer handler */ 920 /* timer handler */
921 add_timer(&np->timer); 921 add_timer(&np->timer);
922 922
923 init_timer(&np->reset_timer); 923 init_timer(&np->reset_timer);
924 np->reset_timer.data = (unsigned long) dev; 924 np->reset_timer.data = (unsigned long) dev;
925 np->reset_timer.function = &reset_timer; 925 np->reset_timer.function = reset_timer;
926 np->reset_timer_armed = 0; 926 np->reset_timer_armed = 0;
927 927
928 return 0; 928 return 0;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e3e10b4add9c..e9f5d030bc26 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -771,11 +771,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
771 771
772 772
773/* ethtool interface */ 773/* ethtool interface */
774static void mpc52xx_fec_get_drvinfo(struct net_device *dev,
775 struct ethtool_drvinfo *info)
776{
777 strcpy(info->driver, DRIVER_NAME);
778}
779 774
780static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 775static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
781{ 776{
@@ -810,7 +805,6 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
810} 805}
811 806
812static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { 807static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
813 .get_drvinfo = mpc52xx_fec_get_drvinfo,
814 .get_settings = mpc52xx_fec_get_settings, 808 .get_settings = mpc52xx_fec_get_settings,
815 .set_settings = mpc52xx_fec_set_settings, 809 .set_settings = mpc52xx_fec_set_settings,
816 .get_link = ethtool_op_get_link, 810 .get_link = ethtool_op_get_link,
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4da05b1b445c..6a44fe411589 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5440,13 +5440,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5440 5440
5441 init_timer(&np->oom_kick); 5441 init_timer(&np->oom_kick);
5442 np->oom_kick.data = (unsigned long) dev; 5442 np->oom_kick.data = (unsigned long) dev;
5443 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 5443 np->oom_kick.function = nv_do_rx_refill; /* timer handler */
5444 init_timer(&np->nic_poll); 5444 init_timer(&np->nic_poll);
5445 np->nic_poll.data = (unsigned long) dev; 5445 np->nic_poll.data = (unsigned long) dev;
5446 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 5446 np->nic_poll.function = nv_do_nic_poll; /* timer handler */
5447 init_timer(&np->stats_poll); 5447 init_timer(&np->stats_poll);
5448 np->stats_poll.data = (unsigned long) dev; 5448 np->stats_poll.data = (unsigned long) dev;
5449 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 5449 np->stats_poll.function = nv_do_stats_poll; /* timer handler */
5450 5450
5451 err = pci_enable_device(pci_dev); 5451 err = pci_enable_device(pci_dev);
5452 if (err) 5452 if (err)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index d6e3111959ab..d684f187de57 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1036,7 +1036,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev,
1036 ndev = alloc_etherdev(privsize); 1036 ndev = alloc_etherdev(privsize);
1037 if (!ndev) { 1037 if (!ndev) {
1038 ret = -ENOMEM; 1038 ret = -ENOMEM;
1039 goto out_free_fpi; 1039 goto out_put;
1040 } 1040 }
1041 1041
1042 SET_NETDEV_DEV(ndev, &ofdev->dev); 1042 SET_NETDEV_DEV(ndev, &ofdev->dev);
@@ -1099,6 +1099,7 @@ out_cleanup_data:
1099out_free_dev: 1099out_free_dev:
1100 free_netdev(ndev); 1100 free_netdev(ndev);
1101 dev_set_drvdata(&ofdev->dev, NULL); 1101 dev_set_drvdata(&ofdev->dev, NULL);
1102out_put:
1102 of_node_put(fpi->phy_node); 1103 of_node_put(fpi->phy_node);
1103out_free_fpi: 1104out_free_fpi:
1104 kfree(fpi); 1105 kfree(fpi);
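The fs_enet fix restores the usual kernel unwind discipline: a failure path must release everything acquired before the failing step and nothing after it. Here alloc_etherdev() fails after the phy_node reference was taken, so it must jump to the new out_put label (which drops that reference) rather than straight to out_free_fpi. The generic shape, with acquire_a/release_a/acquire_b as hypothetical stand-ins:

/* Hypothetical skeleton of ordered unwind: */
static int probe_sketch(void)
{
	void *a, *b;
	int err = -ENOMEM;

	a = acquire_a();		/* e.g. take a reference */
	if (!a)
		return err;

	b = acquire_b();		/* e.g. allocate the netdev */
	if (!b)
		goto out_release_a;	/* undo a, skip b's cleanup */

	return 0;

out_release_a:
	release_a(a);
	return err;
}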
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4f7c3f3ca234..f30adbf86bb2 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1859,7 +1859,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1859 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1859 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1860 dev->name, grp->interruptError); 1860 dev->name, grp->interruptError);
1861 1861
1862 goto err_irq_fail; 1862 goto err_irq_fail;
1863 } 1863 }
1864 1864
1865 if ((err = request_irq(grp->interruptTransmit, gfar_transmit, 1865 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
@@ -2048,7 +2048,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2048 u32 bufaddr; 2048 u32 bufaddr;
2049 unsigned long flags; 2049 unsigned long flags;
2050 unsigned int nr_frags, nr_txbds, length; 2050 unsigned int nr_frags, nr_txbds, length;
2051 union skb_shared_tx *shtx;
2052 2051
2053 /* 2052 /*
2054 * TOE=1 frames larger than 2500 bytes may see excess delays 2053 * TOE=1 frames larger than 2500 bytes may see excess delays
@@ -2069,10 +2068,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	txq = netdev_get_tx_queue(dev, rq);
 	base = tx_queue->tx_bd_base;
 	regs = tx_queue->grp->regs;
-	shtx = skb_tx(skb);
 
 	/* check if time stamp should be generated */
-	if (unlikely(shtx->hardware && priv->hwts_tx_en))
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+		     priv->hwts_tx_en))
 		do_tstamp = 1;
 
 	/* make space for additional header when fcb is needed */
@@ -2174,7 +2173,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2174 2173
2175 /* Setup tx hardware time stamping if requested */ 2174 /* Setup tx hardware time stamping if requested */
2176 if (unlikely(do_tstamp)) { 2175 if (unlikely(do_tstamp)) {
2177 shtx->in_progress = 1; 2176 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2178 if (fcb == NULL) 2177 if (fcb == NULL)
2179 fcb = gfar_add_fcb(skb); 2178 fcb = gfar_add_fcb(skb);
2180 fcb->ptp = 1; 2179 fcb->ptp = 1;
@@ -2446,7 +2445,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2446 int howmany = 0; 2445 int howmany = 0;
2447 u32 lstatus; 2446 u32 lstatus;
2448 size_t buflen; 2447 size_t buflen;
2449 union skb_shared_tx *shtx;
2450 2448
2451 rx_queue = priv->rx_queue[tx_queue->qindex]; 2449 rx_queue = priv->rx_queue[tx_queue->qindex];
2452 bdp = tx_queue->dirty_tx; 2450 bdp = tx_queue->dirty_tx;
@@ -2461,8 +2459,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		 * When time stamping, one additional TxBD must be freed.
 		 * Also, we need to dma_unmap_single() the TxPAL.
 		 */
-		shtx = skb_tx(skb);
-		if (unlikely(shtx->in_progress))
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
 			nr_txbds = frags + 2;
 		else
 			nr_txbds = frags + 1;
@@ -2476,7 +2473,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2476 (lstatus & BD_LENGTH_MASK)) 2473 (lstatus & BD_LENGTH_MASK))
2477 break; 2474 break;
2478 2475
2479 if (unlikely(shtx->in_progress)) { 2476 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2480 next = next_txbd(bdp, base, tx_ring_size); 2477 next = next_txbd(bdp, base, tx_ring_size);
2481 buflen = next->length + GMAC_FCB_LEN; 2478 buflen = next->length + GMAC_FCB_LEN;
2482 } else 2479 } else
@@ -2485,7 +2482,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2485 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2482 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2486 buflen, DMA_TO_DEVICE); 2483 buflen, DMA_TO_DEVICE);
2487 2484
2488 if (unlikely(shtx->in_progress)) { 2485 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2489 struct skb_shared_hwtstamps shhwtstamps; 2486 struct skb_shared_hwtstamps shhwtstamps;
2490 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); 2487 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2491 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2488 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -2657,7 +2654,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2657 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2654 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2658 skb->ip_summed = CHECKSUM_UNNECESSARY; 2655 skb->ip_summed = CHECKSUM_UNNECESSARY;
2659 else 2656 else
2660 skb->ip_summed = CHECKSUM_NONE; 2657 skb_checksum_none_assert(skb);
2661} 2658}
2662 2659
2663 2660
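The gianfar conversion tracks the 2.6.36 API change that folded the old union skb_shared_tx bitfields ('hardware', 'in_progress') into flag bits in skb_shinfo(skb)->tx_flags. A condensed sketch of the driver-side protocol, with hw_ns standing in for the timestamp value read from the device:

/* Transmit path: the stack requests a stamp, the driver marks it armed. */
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

/* Completion path: report the hardware stamp back to the stack. */
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
	struct skb_shared_hwtstamps shhwtstamps;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(hw_ns);	/* hw_ns: from device */
	skb_tstamp_tx(skb, &shhwtstamps);
}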
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index f15c64f1cd38..27d6960ce09e 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -893,7 +893,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
893 if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status)) 893 if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
894 skb->ip_summed = CHECKSUM_UNNECESSARY; 894 skb->ip_summed = CHECKSUM_UNNECESSARY;
895 else 895 else
896 skb->ip_summed = CHECKSUM_NONE; 896 skb_checksum_none_assert(skb);
897 897
898 skb->protocol = eth_type_trans(skb, dev); 898 skb->protocol = eth_type_trans(skb, dev);
899 dev->stats.rx_packets++; 899 dev->stats.rx_packets++;
@@ -1547,10 +1547,10 @@ static int __devinit greth_of_probe(struct platform_device *ofdev, const struct
 	dev->netdev_ops = &greth_netdev_ops;
 	dev->ethtool_ops = &greth_ethtool_ops;
 
-	if (register_netdev(dev)) {
+	err = register_netdev(dev);
+	if (err) {
 		if (netif_msg_probe(greth))
 			dev_err(greth->dev, "netdevice registration failed.\n");
-		err = -ENOMEM;
 		goto error5;
 	}
 
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 49aac7027fbb..9a6485892b3d 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1004,7 +1004,7 @@ static int hamachi_open(struct net_device *dev)
1004 init_timer(&hmp->timer); 1004 init_timer(&hmp->timer);
1005 hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */ 1005 hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1006 hmp->timer.data = (unsigned long)dev; 1006 hmp->timer.data = (unsigned long)dev;
1007 hmp->timer.function = &hamachi_timer; /* timer handler */ 1007 hmp->timer.function = hamachi_timer; /* timer handler */
1008 add_timer(&hmp->timer); 1008 add_timer(&hmp->timer);
1009 1009
1010 return 0; 1010 return 0;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 9f64c8637208..33655814448e 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1069,7 +1069,8 @@ static void scc_tx_done(struct scc_channel *scc)
1069 case KISS_DUPLEX_LINK: 1069 case KISS_DUPLEX_LINK:
1070 scc->stat.tx_state = TXS_IDLE2; 1070 scc->stat.tx_state = TXS_IDLE2;
1071 if (scc->kiss.idletime != TIMER_OFF) 1071 if (scc->kiss.idletime != TIMER_OFF)
1072 scc_start_tx_timer(scc, t_idle, scc->kiss.idletime*100); 1072 scc_start_tx_timer(scc, t_idle,
1073 scc->kiss.idletime*100);
1073 break; 1074 break;
1074 case KISS_DUPLEX_OPTIMA: 1075 case KISS_DUPLEX_OPTIMA:
1075 scc_notify(scc, HWEV_ALL_SENT); 1076 scc_notify(scc, HWEV_ALL_SENT);
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 86ececd3c658..d15d2f2ba78e 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -204,10 +204,10 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
204 ei_status.rx_start_page = HP_START_PG + TX_PAGES; 204 ei_status.rx_start_page = HP_START_PG + TX_PAGES;
205 ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG; 205 ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
206 206
207 ei_status.reset_8390 = &hp_reset_8390; 207 ei_status.reset_8390 = hp_reset_8390;
208 ei_status.get_8390_hdr = &hp_get_8390_hdr; 208 ei_status.get_8390_hdr = hp_get_8390_hdr;
209 ei_status.block_input = &hp_block_input; 209 ei_status.block_input = hp_block_input;
210 ei_status.block_output = &hp_block_output; 210 ei_status.block_output = hp_block_output;
211 hp_init_card(dev); 211 hp_init_card(dev);
212 212
213 retval = register_netdev(dev); 213 retval = register_netdev(dev);
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 07d8e5b634f3..c5ef62ceb840 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -155,10 +155,10 @@ static int __devinit hydra_init(struct zorro_dev *z)
155 155
156 ei_status.rx_start_page = start_page + TX_PAGES; 156 ei_status.rx_start_page = start_page + TX_PAGES;
157 157
158 ei_status.reset_8390 = &hydra_reset_8390; 158 ei_status.reset_8390 = hydra_reset_8390;
159 ei_status.block_input = &hydra_block_input; 159 ei_status.block_input = hydra_block_input;
160 ei_status.block_output = &hydra_block_output; 160 ei_status.block_output = hydra_block_output;
161 ei_status.get_8390_hdr = &hydra_get_8390_hdr; 161 ei_status.get_8390_hdr = hydra_get_8390_hdr;
162 ei_status.reg_offset = hydra_offsets; 162 ei_status.reg_offset = hydra_offsets;
163 163
164 dev->netdev_ops = &hydra_netdev_ops; 164 dev->netdev_ops = &hydra_netdev_ops;
@@ -173,9 +173,8 @@ static int __devinit hydra_init(struct zorro_dev *z)
 
 	zorro_set_drvdata(z, dev);
 
-	printk(KERN_INFO "%s: Hydra at 0x%08llx, address "
-	       "%pM (hydra.c " HYDRA_VERSION ")\n",
-	       dev->name, (unsigned long long)z->resource.start, dev->dev_addr);
+	pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n",
+		dev->name, &z->resource, dev->dev_addr);
 
 	return 0;
 }
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 294ccfb427cf..0037a696cd0a 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -602,7 +602,7 @@ static void irqrx_handler(struct net_device *dev)
602 /* set up skb fields */ 602 /* set up skb fields */
603 603
604 skb->protocol = eth_type_trans(skb, dev); 604 skb->protocol = eth_type_trans(skb, dev);
605 skb->ip_summed = CHECKSUM_NONE; 605 skb_checksum_none_assert(skb);
606 606
607 /* bookkeeping */ 607 /* bookkeeping */
608 dev->stats.rx_packets++; 608 dev->stats.rx_packets++;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 4734c939ad03..b3e157ed6776 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1,122 +1,84 @@
-/**************************************************************************/
-/*                                                                        */
-/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
-/* Copyright (C) 2003 IBM Corp.                                           */
-/* Originally written by Dave Larson (larson1@us.ibm.com)                 */
-/* Maintained by Santiago Leon (santil@us.ibm.com)                        */
-/*                                                                        */
-/* This program is free software; you can redistribute it and/or modify   */
-/* it under the terms of the GNU General Public License as published by   */
-/* the Free Software Foundation; either version 2 of the License, or      */
-/* (at your option) any later version.                                    */
-/*                                                                        */
-/* This program is distributed in the hope that it will be useful,        */
-/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
-/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
-/* GNU General Public License for more details.                           */
-/*                                                                        */
-/* You should have received a copy of the GNU General Public License      */
-/* along with this program; if not, write to the Free Software            */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307    */
-/* USA                                                                    */
-/*                                                                        */
-/* This module contains the implementation of a virtual ethernet device   */
-/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
-/* option of the RS/6000 Platform Architechture to interface with virtual */
-/* ethernet NICs that are presented to the partition by the hypervisor.   */
-/*                                                                        */
-/**************************************************************************/
-/*
-  TODO:
-  - add support for sysfs
-  - possibly remove procfs support
-*/
+/*
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ *          Santiago Leon <santil@linux.vnet.ibm.com>
+ *          Brian King <brking@linux.vnet.ibm.com>
+ *          Robert Jennings <rcj@linux.vnet.ibm.com>
+ *          Anton Blanchard <anton@au.ibm.com>
+ */
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/ioport.h>
 #include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/init.h>
-#include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/pm.h>
 #include <linux/ethtool.h>
-#include <linux/proc_fs.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/slab.h>
-#include <net/net_namespace.h>
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
 #include <asm/vio.h>
 #include <asm/iommu.h>
-#include <asm/uaccess.h>
 #include <asm/firmware.h>
-#include <linux/seq_file.h>
 
 #include "ibmveth.h"
 
65#undef DEBUG
66
67#define ibmveth_printk(fmt, args...) \
68 printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
69
70#define ibmveth_error_printk(fmt, args...) \
71 printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
72
73#ifdef DEBUG
74#define ibmveth_debug_printk_no_adapter(fmt, args...) \
75 printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
76#define ibmveth_debug_printk(fmt, args...) \
77 printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
78#define ibmveth_assert(expr) \
79 if(!(expr)) { \
80 printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
81 BUG(); \
82 }
83#else
84#define ibmveth_debug_printk_no_adapter(fmt, args...)
85#define ibmveth_debug_printk(fmt, args...)
86#define ibmveth_assert(expr)
87#endif
88
89static int ibmveth_open(struct net_device *dev);
90static int ibmveth_close(struct net_device *dev);
91static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
92static int ibmveth_poll(struct napi_struct *napi, int budget);
93static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
94static void ibmveth_set_multicast_list(struct net_device *dev);
95static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
96static void ibmveth_proc_register_driver(void);
97static void ibmveth_proc_unregister_driver(void);
98static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
99static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
100static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); 52static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
101static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); 53static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
102static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); 54static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
103static struct kobj_type ktype_veth_pool;
104 55
56static struct kobj_type ktype_veth_pool;
105 57
106#ifdef CONFIG_PROC_FS
107#define IBMVETH_PROC_DIR "ibmveth"
108static struct proc_dir_entry *ibmveth_proc_dir;
109#endif
110 58
 static const char ibmveth_driver_name[] = "ibmveth";
-static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.03"
+static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
+#define ibmveth_driver_version "1.04"
 
-MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
-MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
+MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ibmveth_driver_version);
 
68static unsigned int tx_copybreak __read_mostly = 128;
69module_param(tx_copybreak, uint, 0644);
70MODULE_PARM_DESC(tx_copybreak,
71 "Maximum size of packet that is copied to a new buffer on transmit");
72
73static unsigned int rx_copybreak __read_mostly = 128;
74module_param(rx_copybreak, uint, 0644);
75MODULE_PARM_DESC(rx_copybreak,
76 "Maximum size of packet that is copied to a new buffer on receive");
77
78static unsigned int rx_flush __read_mostly = 0;
79module_param(rx_flush, uint, 0644);
80MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
81
120struct ibmveth_stat { 82struct ibmveth_stat {
121 char name[ETH_GSTRING_LEN]; 83 char name[ETH_GSTRING_LEN];
122 int offset; 84 int offset;
@@ -128,12 +90,16 @@ struct ibmveth_stat {
128struct ibmveth_stat ibmveth_stats[] = { 90struct ibmveth_stat ibmveth_stats[] = {
129 { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, 91 { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
130 { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, 92 { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
131 { "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) }, 93 { "replenish_add_buff_failure",
132 { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) }, 94 IBMVETH_STAT_OFF(replenish_add_buff_failure) },
95 { "replenish_add_buff_success",
96 IBMVETH_STAT_OFF(replenish_add_buff_success) },
133 { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) }, 97 { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
134 { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) }, 98 { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
135 { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) }, 99 { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
136 { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) }, 100 { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
101 { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
102 { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
137}; 103};
138 104
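The two new fw_enabled_*_csum entries surface the firmware checksum support probed during checksum-offload setup. The stored offsets are consumed by the ethtool stats callback; a minimal sketch of that pattern, assuming the usual offsetof-based IBMVETH_STAT_OFF macro (the macro definition sits outside this hunk):

	/* sketch: copy each counter out of the adapter by stored offset */
	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = *(u64 *)((char *)adapter + ibmveth_stats[i].offset);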
139/* simple methods of getting data from the current rxq entry */ 105/* simple methods of getting data from the current rxq entry */
@@ -144,41 +110,44 @@ static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
144 110
145static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) 111static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
146{ 112{
147 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT; 113 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
114 IBMVETH_RXQ_TOGGLE_SHIFT;
148} 115}
149 116
150static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) 117static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
151{ 118{
152 return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle); 119 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
153} 120}
154 121
155static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) 122static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
156{ 123{
157 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID); 124 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
158} 125}
159 126
160static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) 127static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
161{ 128{
162 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK); 129 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
163} 130}
164 131
165static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) 132static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
166{ 133{
167 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); 134 return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
168} 135}
169 136
170static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) 137static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
171{ 138{
172 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD); 139 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
173} 140}
174 141
175/* setup the initial settings for a buffer pool */ 142/* setup the initial settings for a buffer pool */
176static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active) 143static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
144 u32 pool_index, u32 pool_size,
145 u32 buff_size, u32 pool_active)
177{ 146{
178 pool->size = pool_size; 147 pool->size = pool_size;
179 pool->index = pool_index; 148 pool->index = pool_index;
180 pool->buff_size = buff_size; 149 pool->buff_size = buff_size;
181 pool->threshold = pool_size / 2; 150 pool->threshold = pool_size * 7 / 8;
182 pool->active = pool_active; 151 pool->active = pool_active;
183} 152}
184 153
@@ -189,12 +158,11 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
189 158
190 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); 159 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
191 160
192 if(!pool->free_map) { 161 if (!pool->free_map)
193 return -1; 162 return -1;
194 }
195 163
196 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); 164 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
197 if(!pool->dma_addr) { 165 if (!pool->dma_addr) {
198 kfree(pool->free_map); 166 kfree(pool->free_map);
199 pool->free_map = NULL; 167 pool->free_map = NULL;
200 return -1; 168 return -1;
@@ -202,7 +170,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
202 170
203 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); 171 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
204 172
205 if(!pool->skbuff) { 173 if (!pool->skbuff) {
206 kfree(pool->dma_addr); 174 kfree(pool->dma_addr);
207 pool->dma_addr = NULL; 175 pool->dma_addr = NULL;
208 176
@@ -213,9 +181,8 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
213 181
214 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); 182 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
215 183
216 for(i = 0; i < pool->size; ++i) { 184 for (i = 0; i < pool->size; ++i)
217 pool->free_map[i] = i; 185 pool->free_map[i] = i;
218 }
219 186
220 atomic_set(&pool->available, 0); 187 atomic_set(&pool->available, 0);
221 pool->producer_index = 0; 188 pool->producer_index = 0;
@@ -224,10 +191,19 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
224 return 0; 191 return 0;
225} 192}
226 193
194static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
195{
196 unsigned long offset;
197
198 for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
199 asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
200}
201
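dcbfl is a local-flush variant of the PowerPC data-cache-block-flush instruction, so the loop above pushes every cache line backing the buffer out of the local data cache, one SMP_CACHE_BYTES-sized block per iteration. Flushing a receive buffer before it is handed to firmware keeps stale cache lines from shadowing freshly DMA'd packet data; the helper only runs when the rx_flush parameter is set.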
227/* replenish the buffers for a pool. note that we don't need to 202/* replenish the buffers for a pool. note that we don't need to
228 * skb_reserve these since they are used for incoming... 203 * skb_reserve these since they are used for incoming...
229 */ 204 */
230static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) 205static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
206 struct ibmveth_buff_pool *pool)
231{ 207{
232 u32 i; 208 u32 i;
233 u32 count = pool->size - atomic_read(&pool->available); 209 u32 count = pool->size - atomic_read(&pool->available);
@@ -240,23 +216,26 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
240 216
241 mb(); 217 mb();
242 218
243 for(i = 0; i < count; ++i) { 219 for (i = 0; i < count; ++i) {
244 union ibmveth_buf_desc desc; 220 union ibmveth_buf_desc desc;
245 221
246 skb = alloc_skb(pool->buff_size, GFP_ATOMIC); 222 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
247 223
248 if(!skb) { 224 if (!skb) {
249 ibmveth_debug_printk("replenish: unable to allocate skb\n"); 225 netdev_dbg(adapter->netdev,
226 "replenish: unable to allocate skb\n");
250 adapter->replenish_no_mem++; 227 adapter->replenish_no_mem++;
251 break; 228 break;
252 } 229 }
253 230
254 free_index = pool->consumer_index; 231 free_index = pool->consumer_index;
255 pool->consumer_index = (pool->consumer_index + 1) % pool->size; 232 pool->consumer_index++;
233 if (pool->consumer_index >= pool->size)
234 pool->consumer_index = 0;
256 index = pool->free_map[free_index]; 235 index = pool->free_map[free_index];
257 236
258 ibmveth_assert(index != IBM_VETH_INVALID_MAP); 237 BUG_ON(index == IBM_VETH_INVALID_MAP);
259 ibmveth_assert(pool->skbuff[index] == NULL); 238 BUG_ON(pool->skbuff[index] != NULL);
260 239
261 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 240 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
262 pool->buff_size, DMA_FROM_DEVICE); 241 pool->buff_size, DMA_FROM_DEVICE);
@@ -269,16 +248,23 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
269 pool->skbuff[index] = skb; 248 pool->skbuff[index] = skb;
270 249
271 correlator = ((u64)pool->index << 32) | index; 250 correlator = ((u64)pool->index << 32) | index;
272 *(u64*)skb->data = correlator; 251 *(u64 *)skb->data = correlator;
273 252
274 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; 253 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
275 desc.fields.address = dma_addr; 254 desc.fields.address = dma_addr;
276 255
277 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 256 if (rx_flush) {
257 unsigned int len = min(pool->buff_size,
258 adapter->netdev->mtu +
259 IBMVETH_BUFF_OH);
260 ibmveth_flush_buffer(skb->data, len);
261 }
262 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
263 desc.desc);
278 264
279 if (lpar_rc != H_SUCCESS) 265 if (lpar_rc != H_SUCCESS) {
280 goto failure; 266 goto failure;
281 else { 267 } else {
282 buffers_added++; 268 buffers_added++;
283 adapter->replenish_add_buff_success++; 269 adapter->replenish_add_buff_success++;
284 } 270 }
@@ -313,26 +299,31 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
313 299
314 adapter->replenish_task_cycles++; 300 adapter->replenish_task_cycles++;
315 301
316 for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) 302 for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
317 if(adapter->rx_buff_pool[i].active) 303 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
318 ibmveth_replenish_buffer_pool(adapter,
319 &adapter->rx_buff_pool[i]);
320 304
321 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); 305 if (pool->active &&
306 (atomic_read(&pool->available) < pool->threshold))
307 ibmveth_replenish_buffer_pool(adapter, pool);
308 }
309
310 adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
311 4096 - 8);
322} 312}
323 313
324/* empty and free a buffer pool - also used to do cleanup in error paths */ 314/* empty and free a buffer pool - also used to do cleanup in error paths */
325static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) 315static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
316 struct ibmveth_buff_pool *pool)
326{ 317{
327 int i; 318 int i;
328 319
329 kfree(pool->free_map); 320 kfree(pool->free_map);
330 pool->free_map = NULL; 321 pool->free_map = NULL;
331 322
332 if(pool->skbuff && pool->dma_addr) { 323 if (pool->skbuff && pool->dma_addr) {
333 for(i = 0; i < pool->size; ++i) { 324 for (i = 0; i < pool->size; ++i) {
334 struct sk_buff *skb = pool->skbuff[i]; 325 struct sk_buff *skb = pool->skbuff[i];
335 if(skb) { 326 if (skb) {
336 dma_unmap_single(&adapter->vdev->dev, 327 dma_unmap_single(&adapter->vdev->dev,
337 pool->dma_addr[i], 328 pool->dma_addr[i],
338 pool->buff_size, 329 pool->buff_size,
@@ -343,31 +334,32 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
343 } 334 }
344 } 335 }
345 336
346 if(pool->dma_addr) { 337 if (pool->dma_addr) {
347 kfree(pool->dma_addr); 338 kfree(pool->dma_addr);
348 pool->dma_addr = NULL; 339 pool->dma_addr = NULL;
349 } 340 }
350 341
351 if(pool->skbuff) { 342 if (pool->skbuff) {
352 kfree(pool->skbuff); 343 kfree(pool->skbuff);
353 pool->skbuff = NULL; 344 pool->skbuff = NULL;
354 } 345 }
355} 346}
356 347
357/* remove a buffer from a pool */ 348/* remove a buffer from a pool */
358static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator) 349static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
350 u64 correlator)
359{ 351{
360 unsigned int pool = correlator >> 32; 352 unsigned int pool = correlator >> 32;
361 unsigned int index = correlator & 0xffffffffUL; 353 unsigned int index = correlator & 0xffffffffUL;
362 unsigned int free_index; 354 unsigned int free_index;
363 struct sk_buff *skb; 355 struct sk_buff *skb;
364 356
365 ibmveth_assert(pool < IbmVethNumBufferPools); 357 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
366 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 358 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
367 359
368 skb = adapter->rx_buff_pool[pool].skbuff[index]; 360 skb = adapter->rx_buff_pool[pool].skbuff[index];
369 361
370 ibmveth_assert(skb != NULL); 362 BUG_ON(skb == NULL);
371 363
372 adapter->rx_buff_pool[pool].skbuff[index] = NULL; 364 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
373 365
@@ -377,9 +369,10 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
377 DMA_FROM_DEVICE); 369 DMA_FROM_DEVICE);
378 370
379 free_index = adapter->rx_buff_pool[pool].producer_index; 371 free_index = adapter->rx_buff_pool[pool].producer_index;
380 adapter->rx_buff_pool[pool].producer_index 372 adapter->rx_buff_pool[pool].producer_index++;
381 = (adapter->rx_buff_pool[pool].producer_index + 1) 373 if (adapter->rx_buff_pool[pool].producer_index >=
382 % adapter->rx_buff_pool[pool].size; 374 adapter->rx_buff_pool[pool].size)
375 adapter->rx_buff_pool[pool].producer_index = 0;
383 adapter->rx_buff_pool[pool].free_map[free_index] = index; 376 adapter->rx_buff_pool[pool].free_map[free_index] = index;
384 377
385 mb(); 378 mb();
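Both ring-index updates in this area replace the old `(index + 1) % size` form with an increment plus compare. The pool size is not a compile-time constant, so the modulo costs an integer divide per buffer; the branch form is cheaper and wraps identically:

	/* equivalent index advance without the divide (sketch) */
	index++;
	if (index >= size)
		index = 0;	/* same as index = (index + 1) % size */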
@@ -394,8 +387,8 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
394 unsigned int pool = correlator >> 32; 387 unsigned int pool = correlator >> 32;
395 unsigned int index = correlator & 0xffffffffUL; 388 unsigned int index = correlator & 0xffffffffUL;
396 389
397 ibmveth_assert(pool < IbmVethNumBufferPools); 390 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
398 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 391 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
399 392
400 return adapter->rx_buff_pool[pool].skbuff[index]; 393 return adapter->rx_buff_pool[pool].skbuff[index];
401} 394}
@@ -410,10 +403,10 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
410 union ibmveth_buf_desc desc; 403 union ibmveth_buf_desc desc;
411 unsigned long lpar_rc; 404 unsigned long lpar_rc;
412 405
413 ibmveth_assert(pool < IbmVethNumBufferPools); 406 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
414 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 407 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
415 408
416 if(!adapter->rx_buff_pool[pool].active) { 409 if (!adapter->rx_buff_pool[pool].active) {
417 ibmveth_rxq_harvest_buffer(adapter); 410 ibmveth_rxq_harvest_buffer(adapter);
418 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); 411 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
419 return; 412 return;
@@ -425,12 +418,13 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
425 418
426 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 419 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
427 420
428 if(lpar_rc != H_SUCCESS) { 421 if (lpar_rc != H_SUCCESS) {
429 ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc); 422 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
423 "during recycle rc=%ld", lpar_rc);
430 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 424 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
431 } 425 }
432 426
433 if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 427 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
434 adapter->rx_queue.index = 0; 428 adapter->rx_queue.index = 0;
435 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 429 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
436 } 430 }
@@ -440,7 +434,7 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
440{ 434{
441 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 435 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
442 436
443 if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 437 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
444 adapter->rx_queue.index = 0; 438 adapter->rx_queue.index = 0;
445 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 439 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
446 } 440 }
@@ -451,7 +445,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
451 int i; 445 int i;
452 struct device *dev = &adapter->vdev->dev; 446 struct device *dev = &adapter->vdev->dev;
453 447
454 if(adapter->buffer_list_addr != NULL) { 448 if (adapter->buffer_list_addr != NULL) {
455 if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { 449 if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
456 dma_unmap_single(dev, adapter->buffer_list_dma, 4096, 450 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
457 DMA_BIDIRECTIONAL); 451 DMA_BIDIRECTIONAL);
@@ -461,7 +455,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
461 adapter->buffer_list_addr = NULL; 455 adapter->buffer_list_addr = NULL;
462 } 456 }
463 457
464 if(adapter->filter_list_addr != NULL) { 458 if (adapter->filter_list_addr != NULL) {
465 if (!dma_mapping_error(dev, adapter->filter_list_dma)) { 459 if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
466 dma_unmap_single(dev, adapter->filter_list_dma, 4096, 460 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
467 DMA_BIDIRECTIONAL); 461 DMA_BIDIRECTIONAL);
@@ -471,7 +465,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
471 adapter->filter_list_addr = NULL; 465 adapter->filter_list_addr = NULL;
472 } 466 }
473 467
474 if(adapter->rx_queue.queue_addr != NULL) { 468 if (adapter->rx_queue.queue_addr != NULL) {
475 if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { 469 if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
476 dma_unmap_single(dev, 470 dma_unmap_single(dev,
477 adapter->rx_queue.queue_dma, 471 adapter->rx_queue.queue_dma,
@@ -483,7 +477,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
483 adapter->rx_queue.queue_addr = NULL; 477 adapter->rx_queue.queue_addr = NULL;
484 } 478 }
485 479
486 for(i = 0; i<IbmVethNumBufferPools; i++) 480 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
487 if (adapter->rx_buff_pool[i].active) 481 if (adapter->rx_buff_pool[i].active)
488 ibmveth_free_buffer_pool(adapter, 482 ibmveth_free_buffer_pool(adapter,
489 &adapter->rx_buff_pool[i]); 483 &adapter->rx_buff_pool[i]);
@@ -506,9 +500,11 @@ static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
506{ 500{
507 int rc, try_again = 1; 501 int rc, try_again = 1;
508 502
509 /* After a kexec the adapter will still be open, so our attempt to 503 /*
510 * open it will fail. So if we get a failure we free the adapter and 504 * After a kexec the adapter will still be open, so our attempt to
511 * try again, but only once. */ 505 * open it will fail. So if we get a failure we free the adapter and
506 * try again, but only once.
507 */
512retry: 508retry:
513 rc = h_register_logical_lan(adapter->vdev->unit_address, 509 rc = h_register_logical_lan(adapter->vdev->unit_address,
514 adapter->buffer_list_dma, rxq_desc.desc, 510 adapter->buffer_list_dma, rxq_desc.desc,
@@ -537,28 +533,31 @@ static int ibmveth_open(struct net_device *netdev)
537 int i; 533 int i;
538 struct device *dev; 534 struct device *dev;
539 535
540 ibmveth_debug_printk("open starting\n"); 536 netdev_dbg(netdev, "open starting\n");
541 537
542 napi_enable(&adapter->napi); 538 napi_enable(&adapter->napi);
543 539
544 for(i = 0; i<IbmVethNumBufferPools; i++) 540 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
545 rxq_entries += adapter->rx_buff_pool[i].size; 541 rxq_entries += adapter->rx_buff_pool[i].size;
546 542
547 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 543 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
548 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 544 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
549 545
550 if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { 546 if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
551 ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); 547 netdev_err(netdev, "unable to allocate filter or buffer list "
548 "pages\n");
552 ibmveth_cleanup(adapter); 549 ibmveth_cleanup(adapter);
553 napi_disable(&adapter->napi); 550 napi_disable(&adapter->napi);
554 return -ENOMEM; 551 return -ENOMEM;
555 } 552 }
556 553
557 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries; 554 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
558 adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL); 555 rxq_entries;
556 adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
557 GFP_KERNEL);
559 558
560 if(!adapter->rx_queue.queue_addr) { 559 if (!adapter->rx_queue.queue_addr) {
561 ibmveth_error_printk("unable to allocate rx queue pages\n"); 560 netdev_err(netdev, "unable to allocate rx queue pages\n");
562 ibmveth_cleanup(adapter); 561 ibmveth_cleanup(adapter);
563 napi_disable(&adapter->napi); 562 napi_disable(&adapter->napi);
564 return -ENOMEM; 563 return -ENOMEM;
@@ -577,7 +576,8 @@ static int ibmveth_open(struct net_device *netdev)
577 if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || 576 if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
578 (dma_mapping_error(dev, adapter->filter_list_dma)) || 577 (dma_mapping_error(dev, adapter->filter_list_dma)) ||
579 (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { 578 (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
580 ibmveth_error_printk("unable to map filter or buffer list pages\n"); 579 netdev_err(netdev, "unable to map filter or buffer list "
580 "pages\n");
581 ibmveth_cleanup(adapter); 581 ibmveth_cleanup(adapter);
582 napi_disable(&adapter->napi); 582 napi_disable(&adapter->napi);
583 return -ENOMEM; 583 return -ENOMEM;
@@ -590,20 +590,23 @@ static int ibmveth_open(struct net_device *netdev)
590 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); 590 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
591 mac_address = mac_address >> 16; 591 mac_address = mac_address >> 16;
592 592
593 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; 593 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
594 adapter->rx_queue.queue_len;
594 rxq_desc.fields.address = adapter->rx_queue.queue_dma; 595 rxq_desc.fields.address = adapter->rx_queue.queue_dma;
595 596
596 ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr); 597 netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
597 ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr); 598 netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
598 ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr); 599 netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
599 600
600 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); 601 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
601 602
602 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address); 603 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
603 604
604 if(lpar_rc != H_SUCCESS) { 605 if (lpar_rc != H_SUCCESS) {
605 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); 606 netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
606 ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n", 607 lpar_rc);
608 netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
609 "desc:0x%llx MAC:0x%llx\n",
607 adapter->buffer_list_dma, 610 adapter->buffer_list_dma,
608 adapter->filter_list_dma, 611 adapter->filter_list_dma,
609 rxq_desc.desc, 612 rxq_desc.desc,
@@ -613,11 +616,11 @@ static int ibmveth_open(struct net_device *netdev)
613 return -ENONET; 616 return -ENONET;
614 } 617 }
615 618
616 for(i = 0; i<IbmVethNumBufferPools; i++) { 619 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
617 if(!adapter->rx_buff_pool[i].active) 620 if (!adapter->rx_buff_pool[i].active)
618 continue; 621 continue;
619 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { 622 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
620 ibmveth_error_printk("unable to alloc pool\n"); 623 netdev_err(netdev, "unable to alloc pool\n");
621 adapter->rx_buff_pool[i].active = 0; 624 adapter->rx_buff_pool[i].active = 0;
622 ibmveth_cleanup(adapter); 625 ibmveth_cleanup(adapter);
623 napi_disable(&adapter->napi); 626 napi_disable(&adapter->napi);
@@ -625,9 +628,12 @@ static int ibmveth_open(struct net_device *netdev)
625 } 628 }
626 } 629 }
627 630
628 ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq); 631 netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
629 if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) { 632 rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
630 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc); 633 netdev);
634 if (rc != 0) {
635 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
636 netdev->irq, rc);
631 do { 637 do {
632 rc = h_free_logical_lan(adapter->vdev->unit_address); 638 rc = h_free_logical_lan(adapter->vdev->unit_address);
633 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); 639 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
@@ -640,7 +646,7 @@ static int ibmveth_open(struct net_device *netdev)
640 adapter->bounce_buffer = 646 adapter->bounce_buffer =
641 kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); 647 kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
642 if (!adapter->bounce_buffer) { 648 if (!adapter->bounce_buffer) {
643 ibmveth_error_printk("unable to allocate bounce buffer\n"); 649 netdev_err(netdev, "unable to allocate bounce buffer\n");
644 ibmveth_cleanup(adapter); 650 ibmveth_cleanup(adapter);
645 napi_disable(&adapter->napi); 651 napi_disable(&adapter->napi);
646 return -ENOMEM; 652 return -ENOMEM;
@@ -649,18 +655,18 @@ static int ibmveth_open(struct net_device *netdev)
649 dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, 655 dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
650 netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); 656 netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
651 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { 657 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
652 ibmveth_error_printk("unable to map bounce buffer\n"); 658 netdev_err(netdev, "unable to map bounce buffer\n");
653 ibmveth_cleanup(adapter); 659 ibmveth_cleanup(adapter);
654 napi_disable(&adapter->napi); 660 napi_disable(&adapter->napi);
655 return -ENOMEM; 661 return -ENOMEM;
656 } 662 }
657 663
658 ibmveth_debug_printk("initial replenish cycle\n"); 664 netdev_dbg(netdev, "initial replenish cycle\n");
659 ibmveth_interrupt(netdev->irq, netdev); 665 ibmveth_interrupt(netdev->irq, netdev);
660 666
661 netif_start_queue(netdev); 667 netif_start_queue(netdev);
662 668
663 ibmveth_debug_printk("open complete\n"); 669 netdev_dbg(netdev, "open complete\n");
664 670
665 return 0; 671 return 0;
666} 672}
@@ -670,7 +676,7 @@ static int ibmveth_close(struct net_device *netdev)
670 struct ibmveth_adapter *adapter = netdev_priv(netdev); 676 struct ibmveth_adapter *adapter = netdev_priv(netdev);
671 long lpar_rc; 677 long lpar_rc;
672 678
673 ibmveth_debug_printk("close starting\n"); 679 netdev_dbg(netdev, "close starting\n");
674 680
675 napi_disable(&adapter->napi); 681 napi_disable(&adapter->napi);
676 682
@@ -683,26 +689,29 @@ static int ibmveth_close(struct net_device *netdev)
683 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 689 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
684 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); 690 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
685 691
686 if(lpar_rc != H_SUCCESS) 692 if (lpar_rc != H_SUCCESS) {
687 { 693 netdev_err(netdev, "h_free_logical_lan failed with %lx, "
688 ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n", 694 "continuing with close\n", lpar_rc);
689 lpar_rc);
690 } 695 }
691 696
692 free_irq(netdev->irq, netdev); 697 free_irq(netdev->irq, netdev);
693 698
694 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); 699 adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
700 4096 - 8);
695 701
696 ibmveth_cleanup(adapter); 702 ibmveth_cleanup(adapter);
697 703
698 ibmveth_debug_printk("close complete\n"); 704 netdev_dbg(netdev, "close complete\n");
699 705
700 return 0; 706 return 0;
701} 707}
702 708
703static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { 709static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
704 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); 710{
705 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); 711 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
712 SUPPORTED_FIBRE);
713 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
714 ADVERTISED_FIBRE);
706 cmd->speed = SPEED_1000; 715 cmd->speed = SPEED_1000;
707 cmd->duplex = DUPLEX_FULL; 716 cmd->duplex = DUPLEX_FULL;
708 cmd->port = PORT_FIBRE; 717 cmd->port = PORT_FIBRE;
@@ -714,12 +723,16 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
714 return 0; 723 return 0;
715} 724}
716 725
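netdev_get_settings reports a fixed 1Gb/s, full-duplex, fibre link: the adapter is a hypervisor-backed virtual device with no PHY to query, so ethtool is given plausible constants rather than negotiated values.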
717static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) { 726static void netdev_get_drvinfo(struct net_device *dev,
727 struct ethtool_drvinfo *info)
728{
718 strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1); 729 strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
719 strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1); 730 strncpy(info->version, ibmveth_driver_version,
731 sizeof(info->version) - 1);
720} 732}
721 733
722static u32 netdev_get_link(struct net_device *dev) { 734static u32 netdev_get_link(struct net_device *dev)
735{
723 return 1; 736 return 1;
724} 737}
725 738
@@ -727,18 +740,20 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
727{ 740{
728 struct ibmveth_adapter *adapter = netdev_priv(dev); 741 struct ibmveth_adapter *adapter = netdev_priv(dev);
729 742
730 if (data) 743 if (data) {
731 adapter->rx_csum = 1; 744 adapter->rx_csum = 1;
732 else { 745 } else {
733 /* 746 /*
734 * Since the ibmveth firmware interface does not have the concept of 747 * Since the ibmveth firmware interface does not have the
735 * separate tx/rx checksum offload enable, if rx checksum is disabled 748 * concept of separate tx/rx checksum offload enable, if rx
736 * we also have to disable tx checksum offload. Once we disable rx 749 * checksum is disabled we also have to disable tx checksum
737 * checksum offload, we are no longer allowed to send tx buffers that 750 * offload. Once we disable rx checksum offload, we are no
738 * are not properly checksummed. 751 * longer allowed to send tx buffers that are not properly
752 * checksummed.
739 */ 753 */
740 adapter->rx_csum = 0; 754 adapter->rx_csum = 0;
741 dev->features &= ~NETIF_F_IP_CSUM; 755 dev->features &= ~NETIF_F_IP_CSUM;
756 dev->features &= ~NETIF_F_IPV6_CSUM;
742 } 757 }
743} 758}
744 759
@@ -747,10 +762,15 @@ static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
747 struct ibmveth_adapter *adapter = netdev_priv(dev); 762 struct ibmveth_adapter *adapter = netdev_priv(dev);
748 763
749 if (data) { 764 if (data) {
750 dev->features |= NETIF_F_IP_CSUM; 765 if (adapter->fw_ipv4_csum_support)
766 dev->features |= NETIF_F_IP_CSUM;
767 if (adapter->fw_ipv6_csum_support)
768 dev->features |= NETIF_F_IPV6_CSUM;
751 adapter->rx_csum = 1; 769 adapter->rx_csum = 1;
752 } else 770 } else {
753 dev->features &= ~NETIF_F_IP_CSUM; 771 dev->features &= ~NETIF_F_IP_CSUM;
772 dev->features &= ~NETIF_F_IPV6_CSUM;
773 }
754} 774}
755 775
756static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, 776static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
@@ -758,7 +778,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
758{ 778{
759 struct ibmveth_adapter *adapter = netdev_priv(dev); 779 struct ibmveth_adapter *adapter = netdev_priv(dev);
760 unsigned long set_attr, clr_attr, ret_attr; 780 unsigned long set_attr, clr_attr, ret_attr;
761 long ret; 781 unsigned long set_attr6, clr_attr6;
782 long ret, ret6;
762 int rc1 = 0, rc2 = 0; 783 int rc1 = 0, rc2 = 0;
763 int restart = 0; 784 int restart = 0;
764 785
@@ -772,10 +793,13 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
772 set_attr = 0; 793 set_attr = clr_attr = 0;
773 clr_attr = 0; 794 set_attr6 = clr_attr6 = 0;
774 795
775 if (data) 796 if (data) {
776 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; 797 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
777 else 798 set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
799 } else {
778 clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; 800 clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
801 clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
802 }
779 803
780 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); 804 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
781 805
@@ -786,18 +810,39 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
786 set_attr, &ret_attr); 810 set_attr, &ret_attr);
787 811
788 if (ret != H_SUCCESS) { 812 if (ret != H_SUCCESS) {
789 rc1 = -EIO; 813 netdev_err(dev, "unable to change IPv4 checksum "
790 ibmveth_error_printk("unable to change checksum offload settings." 814 "offload settings. %d rc=%ld\n",
791 " %d rc=%ld\n", data, ret); 815 data, ret);
792 816
793 ret = h_illan_attributes(adapter->vdev->unit_address, 817 ret = h_illan_attributes(adapter->vdev->unit_address,
794 set_attr, clr_attr, &ret_attr); 818 set_attr, clr_attr, &ret_attr);
819 } else {
820 adapter->fw_ipv4_csum_support = data;
821 }
822
823 ret6 = h_illan_attributes(adapter->vdev->unit_address,
824 clr_attr6, set_attr6, &ret_attr);
825
826 if (ret6 != H_SUCCESS) {
827 netdev_err(dev, "unable to change IPv6 checksum "
828 "offload settings. %d rc=%ld\n",
829 data, ret6);
830
831 ret = h_illan_attributes(adapter->vdev->unit_address,
832 set_attr6, clr_attr6,
833 &ret_attr);
795 } else 834 } else
835 adapter->fw_ipv6_csum_support = data;
836
837 if (ret == H_SUCCESS || ret6 == H_SUCCESS)
796 done(dev, data); 838 done(dev, data);
839 else
840 rc1 = -EIO;
797 } else { 841 } else {
798 rc1 = -EIO; 842 rc1 = -EIO;
799 ibmveth_error_printk("unable to change checksum offload settings." 843 netdev_err(dev, "unable to change checksum offload settings."
800 " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr); 844 " %d rc=%ld ret_attr=%lx\n", data, ret,
845 ret_attr);
801 } 846 }
802 847
803 if (restart) 848 if (restart)
@@ -821,13 +866,14 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
821 struct ibmveth_adapter *adapter = netdev_priv(dev); 866 struct ibmveth_adapter *adapter = netdev_priv(dev);
822 int rc = 0; 867 int rc = 0;
823 868
824 if (data && (dev->features & NETIF_F_IP_CSUM)) 869 if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
825 return 0; 870 return 0;
826 if (!data && !(dev->features & NETIF_F_IP_CSUM)) 871 if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
827 return 0; 872 return 0;
828 873
829 if (data && !adapter->rx_csum) 874 if (data && !adapter->rx_csum)
830 rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags); 875 rc = ibmveth_set_csum_offload(dev, data,
876 ibmveth_set_tx_csum_flags);
831 else 877 else
832 ibmveth_set_tx_csum_flags(dev, data); 878 ibmveth_set_tx_csum_flags(dev, data);
833 879
@@ -881,6 +927,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
881 .get_strings = ibmveth_get_strings, 927 .get_strings = ibmveth_get_strings,
882 .get_sset_count = ibmveth_get_sset_count, 928 .get_sset_count = ibmveth_get_sset_count,
883 .get_ethtool_stats = ibmveth_get_ethtool_stats, 929 .get_ethtool_stats = ibmveth_get_ethtool_stats,
930 .set_sg = ethtool_op_set_sg,
884}; 931};
885 932
886static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 933static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -890,129 +937,216 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
890 937
891#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) 938#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
892 939
893static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, 940static int ibmveth_send(struct ibmveth_adapter *adapter,
894 struct net_device *netdev) 941 union ibmveth_buf_desc *descs)
895{ 942{
896 struct ibmveth_adapter *adapter = netdev_priv(netdev);
897 union ibmveth_buf_desc desc;
898 unsigned long lpar_rc;
899 unsigned long correlator; 943 unsigned long correlator;
900 unsigned long flags;
901 unsigned int retry_count; 944 unsigned int retry_count;
902 unsigned int tx_dropped = 0; 945 unsigned long ret;
903 unsigned int tx_bytes = 0; 946
904 unsigned int tx_packets = 0; 947 /*
905 unsigned int tx_send_failed = 0; 948 * The retry count sets a maximum for the number of broadcast and
906 unsigned int tx_map_failed = 0; 949 * multicast destinations within the system.
907 int used_bounce = 0; 950 */
908 unsigned long data_dma_addr; 951 retry_count = 1024;
952 correlator = 0;
953 do {
954 ret = h_send_logical_lan(adapter->vdev->unit_address,
955 descs[0].desc, descs[1].desc,
956 descs[2].desc, descs[3].desc,
957 descs[4].desc, descs[5].desc,
958 correlator, &correlator);
959 } while ((ret == H_BUSY) && (retry_count--));
960
961 if (ret != H_SUCCESS && ret != H_DROPPED) {
962 netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
963 "with rc=%ld\n", ret);
964 return 1;
965 }
966
967 return 0;
968}
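ibmveth_send always passes exactly six buffer descriptors because that is what the h_send_logical_lan hypervisor call accepts; the caller zeroes any unused slots. This fixed limit is also why the transmit path below linearizes any skb with more than five fragments: one descriptor is reserved for the linear header, leaving five for fragments.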
909 969
910 desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; 970static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
971 struct net_device *netdev)
972{
973 struct ibmveth_adapter *adapter = netdev_priv(netdev);
974 unsigned int desc_flags;
975 union ibmveth_buf_desc descs[6];
976 int last, i;
977 int force_bounce = 0;
978
979 /*
980 * veth handles a maximum of 6 segments including the header, so
981 * we have to linearize the skb if there are more than this.
982 */
983 if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
984 netdev->stats.tx_dropped++;
985 goto out;
986 }
911 987
988 /* veth can't checksum offload UDP */
912 if (skb->ip_summed == CHECKSUM_PARTIAL && 989 if (skb->ip_summed == CHECKSUM_PARTIAL &&
913 ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { 990 ((skb->protocol == htons(ETH_P_IP) &&
914 ibmveth_error_printk("tx: failed to checksum packet\n"); 991 ip_hdr(skb)->protocol != IPPROTO_TCP) ||
915 tx_dropped++; 992 (skb->protocol == htons(ETH_P_IPV6) &&
993 ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
994 skb_checksum_help(skb)) {
995
996 netdev_err(netdev, "tx: failed to checksum packet\n");
997 netdev->stats.tx_dropped++;
916 goto out; 998 goto out;
917 } 999 }
918 1000
1001 desc_flags = IBMVETH_BUF_VALID;
1002
919 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1003 if (skb->ip_summed == CHECKSUM_PARTIAL) {
920 unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; 1004 unsigned char *buf = skb_transport_header(skb) +
1005 skb->csum_offset;
921 1006
922 desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); 1007 desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
923 1008
924 /* Need to zero out the checksum */ 1009 /* Need to zero out the checksum */
925 buf[0] = 0; 1010 buf[0] = 0;
926 buf[1] = 0; 1011 buf[1] = 0;
927 } 1012 }
928 1013
929 data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 1014retry_bounce:
930 skb->len, DMA_TO_DEVICE); 1015 memset(descs, 0, sizeof(descs));
931 if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { 1016
932 if (!firmware_has_feature(FW_FEATURE_CMO)) 1017 /*
933 ibmveth_error_printk("tx: unable to map xmit buffer\n"); 1018 * If a linear packet is below the rx threshold then
1019 * copy it into the static bounce buffer. This avoids the
1020 * cost of a TCE insert and remove.
1021 */
1022 if (force_bounce || (!skb_is_nonlinear(skb) &&
1023 (skb->len < tx_copybreak))) {
934 skb_copy_from_linear_data(skb, adapter->bounce_buffer, 1024 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
935 skb->len); 1025 skb->len);
936 desc.fields.address = adapter->bounce_buffer_dma; 1026
937 tx_map_failed++; 1027 descs[0].fields.flags_len = desc_flags | skb->len;
938 used_bounce = 1; 1028 descs[0].fields.address = adapter->bounce_buffer_dma;
939 wmb(); 1029
940 } else 1030 if (ibmveth_send(adapter, descs)) {
941 desc.fields.address = data_dma_addr; 1031 adapter->tx_send_failed++;
942 1032 netdev->stats.tx_dropped++;
943 /* send the frame. Arbitrarily set retrycount to 1024 */ 1033 } else {
944 correlator = 0; 1034 netdev->stats.tx_packets++;
945 retry_count = 1024; 1035 netdev->stats.tx_bytes += skb->len;
946 do { 1036 }
947 lpar_rc = h_send_logical_lan(adapter->vdev->unit_address, 1037
948 desc.desc, 0, 0, 0, 0, 0, 1038 goto out;
949 correlator, &correlator); 1039 }
950 } while ((lpar_rc == H_BUSY) && (retry_count--)); 1040
951 1041 /* Map the header */
952 if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { 1042 descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
953 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); 1043 skb_headlen(skb),
954 ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n", 1044 DMA_TO_DEVICE);
955 (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0, 1045 if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
956 skb->len, desc.fields.address); 1046 goto map_failed;
957 tx_send_failed++; 1047
958 tx_dropped++; 1048 descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
959 } else { 1049
960 tx_packets++; 1050 /* Map the frags */
961 tx_bytes += skb->len; 1051 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
962 netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ 1052 unsigned long dma_addr;
1053 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1054
1055 dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
1056 frag->page_offset, frag->size,
1057 DMA_TO_DEVICE);
1058
1059 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1060 goto map_failed_frags;
1061
1062 descs[i+1].fields.flags_len = desc_flags | frag->size;
1063 descs[i+1].fields.address = dma_addr;
963 } 1064 }
964 1065
965 if (!used_bounce) 1066 if (ibmveth_send(adapter, descs)) {
966 dma_unmap_single(&adapter->vdev->dev, data_dma_addr, 1067 adapter->tx_send_failed++;
967 skb->len, DMA_TO_DEVICE); 1068 netdev->stats.tx_dropped++;
1069 } else {
1070 netdev->stats.tx_packets++;
1071 netdev->stats.tx_bytes += skb->len;
1072 }
968 1073
969out: spin_lock_irqsave(&adapter->stats_lock, flags); 1074 for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
970 netdev->stats.tx_dropped += tx_dropped; 1075 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
971 netdev->stats.tx_bytes += tx_bytes; 1076 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
972 netdev->stats.tx_packets += tx_packets; 1077 DMA_TO_DEVICE);
973 adapter->tx_send_failed += tx_send_failed;
974 adapter->tx_map_failed += tx_map_failed;
975 spin_unlock_irqrestore(&adapter->stats_lock, flags);
976 1078
1079out:
977 dev_kfree_skb(skb); 1080 dev_kfree_skb(skb);
978 return NETDEV_TX_OK; 1081 return NETDEV_TX_OK;
1082
1083map_failed_frags:
1084 last = i+1;
1085 for (i = 0; i < last; i++)
1086 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1087 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1088 DMA_TO_DEVICE);
1089
1090map_failed:
1091 if (!firmware_has_feature(FW_FEATURE_CMO))
1092 netdev_err(netdev, "tx: unable to map xmit buffer\n");
1093 adapter->tx_map_failed++;
1094 skb_linearize(skb);
1095 force_bounce = 1;
1096 goto retry_bounce;
979} 1097}
980 1098
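The transmit path only hands TCP packets to the firmware checksum engine; anything else arriving with CHECKSUM_PARTIAL is checksummed in software first. The inline condition above can be read as the following restatement (ibmveth_needs_sw_csum is a hypothetical helper, not part of the driver):

	/* sketch: does this skb need a software checksum before xmit? */
	static bool ibmveth_needs_sw_csum(struct sk_buff *skb)
	{
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return false;
		if (skb->protocol == htons(ETH_P_IP))
			return ip_hdr(skb)->protocol != IPPROTO_TCP;
		if (skb->protocol == htons(ETH_P_IPV6))
			return ipv6_hdr(skb)->nexthdr != IPPROTO_TCP;
		return true;	/* veth can't checksum offload this */
	}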
981static int ibmveth_poll(struct napi_struct *napi, int budget) 1099static int ibmveth_poll(struct napi_struct *napi, int budget)
982{ 1100{
983 struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi); 1101 struct ibmveth_adapter *adapter =
1102 container_of(napi, struct ibmveth_adapter, napi);
984 struct net_device *netdev = adapter->netdev; 1103 struct net_device *netdev = adapter->netdev;
985 int frames_processed = 0; 1104 int frames_processed = 0;
986 unsigned long lpar_rc; 1105 unsigned long lpar_rc;
987 1106
988 restart_poll: 1107restart_poll:
989 do { 1108 do {
990 struct sk_buff *skb;
991
992 if (!ibmveth_rxq_pending_buffer(adapter)) 1109 if (!ibmveth_rxq_pending_buffer(adapter))
993 break; 1110 break;
994 1111
995 rmb(); 1112 smp_rmb();
996 if (!ibmveth_rxq_buffer_valid(adapter)) { 1113 if (!ibmveth_rxq_buffer_valid(adapter)) {
997 wmb(); /* suggested by larson1 */ 1114 wmb(); /* suggested by larson1 */
998 adapter->rx_invalid_buffer++; 1115 adapter->rx_invalid_buffer++;
999 ibmveth_debug_printk("recycling invalid buffer\n"); 1116 netdev_dbg(netdev, "recycling invalid buffer\n");
1000 ibmveth_rxq_recycle_buffer(adapter); 1117 ibmveth_rxq_recycle_buffer(adapter);
1001 } else { 1118 } else {
1119 struct sk_buff *skb, *new_skb;
1002 int length = ibmveth_rxq_frame_length(adapter); 1120 int length = ibmveth_rxq_frame_length(adapter);
1003 int offset = ibmveth_rxq_frame_offset(adapter); 1121 int offset = ibmveth_rxq_frame_offset(adapter);
1004 int csum_good = ibmveth_rxq_csum_good(adapter); 1122 int csum_good = ibmveth_rxq_csum_good(adapter);
1005 1123
1006 skb = ibmveth_rxq_get_buffer(adapter); 1124 skb = ibmveth_rxq_get_buffer(adapter);
1007 if (csum_good)
1008 skb->ip_summed = CHECKSUM_UNNECESSARY;
1009 1125
1010 ibmveth_rxq_harvest_buffer(adapter); 1126 new_skb = NULL;
1127 if (length < rx_copybreak)
1128 new_skb = netdev_alloc_skb(netdev, length);
1129
1130 if (new_skb) {
1131 skb_copy_to_linear_data(new_skb,
1132 skb->data + offset,
1133 length);
1134 if (rx_flush)
1135 ibmveth_flush_buffer(skb->data,
1136 length + offset);
1137 skb = new_skb;
1138 ibmveth_rxq_recycle_buffer(adapter);
1139 } else {
1140 ibmveth_rxq_harvest_buffer(adapter);
1141 skb_reserve(skb, offset);
1142 }
1011 1143
1012 skb_reserve(skb, offset);
1013 skb_put(skb, length); 1144 skb_put(skb, length);
1014 skb->protocol = eth_type_trans(skb, netdev); 1145 skb->protocol = eth_type_trans(skb, netdev);
1015 1146
1147 if (csum_good)
1148 skb->ip_summed = CHECKSUM_UNNECESSARY;
1149
1016 netif_receive_skb(skb); /* send it up */ 1150 netif_receive_skb(skb); /* send it up */
1017 1151
1018 netdev->stats.rx_packets++; 1152 netdev->stats.rx_packets++;
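The receive-side copybreak mirrors the transmit one: a frame shorter than rx_copybreak is copied into a fresh small skb so the original large pool buffer can be recycled to the hypervisor immediately, with no unmap and no replenish; larger frames are harvested and passed up in place, and the pool is refilled later by the replenish task.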
@@ -1030,7 +1164,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1030 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1164 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1031 VIO_IRQ_ENABLE); 1165 VIO_IRQ_ENABLE);
1032 1166
1033 ibmveth_assert(lpar_rc == H_SUCCESS); 1167 BUG_ON(lpar_rc != H_SUCCESS);
1034 1168
1035 napi_complete(napi); 1169 napi_complete(napi);
1036 1170
@@ -1054,7 +1188,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1054 if (napi_schedule_prep(&adapter->napi)) { 1188 if (napi_schedule_prep(&adapter->napi)) {
1055 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1189 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1056 VIO_IRQ_DISABLE); 1190 VIO_IRQ_DISABLE);
1057 ibmveth_assert(lpar_rc == H_SUCCESS); 1191 BUG_ON(lpar_rc != H_SUCCESS);
1058 __napi_schedule(&adapter->napi); 1192 __napi_schedule(&adapter->napi);
1059 } 1193 }
1060 return IRQ_HANDLED; 1194 return IRQ_HANDLED;
@@ -1071,8 +1205,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1071 IbmVethMcastEnableRecv | 1205 IbmVethMcastEnableRecv |
1072 IbmVethMcastDisableFiltering, 1206 IbmVethMcastDisableFiltering,
1073 0); 1207 0);
1074 if(lpar_rc != H_SUCCESS) { 1208 if (lpar_rc != H_SUCCESS) {
1075 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); 1209 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1210 "entering promisc mode\n", lpar_rc);
1076 } 1211 }
1077 } else { 1212 } else {
1078 struct netdev_hw_addr *ha; 1213 struct netdev_hw_addr *ha;
@@ -1082,19 +1217,23 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1082 IbmVethMcastDisableFiltering | 1217 IbmVethMcastDisableFiltering |
1083 IbmVethMcastClearFilterTable, 1218 IbmVethMcastClearFilterTable,
1084 0); 1219 0);
1085 if(lpar_rc != H_SUCCESS) { 1220 if (lpar_rc != H_SUCCESS) {
1086 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); 1221 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1222 "attempting to clear filter table\n",
1223 lpar_rc);
1087 } 1224 }
1088 /* add the addresses to the filter table */ 1225 /* add the addresses to the filter table */
1089 netdev_for_each_mc_addr(ha, netdev) { 1226 netdev_for_each_mc_addr(ha, netdev) {
1090 // add the multicast address to the filter table 1227 /* add the multicast address to the filter table */
1091 unsigned long mcast_addr = 0; 1228 unsigned long mcast_addr = 0;
1092 memcpy(((char *)&mcast_addr)+2, ha->addr, 6); 1229 memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
1093 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1230 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1094 IbmVethMcastAddFilter, 1231 IbmVethMcastAddFilter,
1095 mcast_addr); 1232 mcast_addr);
1096 if(lpar_rc != H_SUCCESS) { 1233 if (lpar_rc != H_SUCCESS) {
1097 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc); 1234 netdev_err(netdev, "h_multicast_ctrl rc=%ld "
1235 "when adding an entry to the filter "
1236 "table\n", lpar_rc);
1098 } 1237 }
1099 } 1238 }
1100 1239
@@ -1102,8 +1241,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1102 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1241 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1103 IbmVethMcastEnableFiltering, 1242 IbmVethMcastEnableFiltering,
1104 0); 1243 0);
1105 if(lpar_rc != H_SUCCESS) { 1244 if (lpar_rc != H_SUCCESS) {
1106 ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc); 1245 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1246 "enabling filtering\n", lpar_rc);
1107 } 1247 }
1108 } 1248 }
1109} 1249}
@@ -1116,14 +1256,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1116 int i, rc; 1256 int i, rc;
1117 int need_restart = 0; 1257 int need_restart = 0;
1118 1258
1119 if (new_mtu < IBMVETH_MAX_MTU) 1259 if (new_mtu < IBMVETH_MIN_MTU)
1120 return -EINVAL; 1260 return -EINVAL;
1121 1261
1122 for (i = 0; i < IbmVethNumBufferPools; i++) 1262 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1123 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) 1263 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
1124 break; 1264 break;
1125 1265
1126 if (i == IbmVethNumBufferPools) 1266 if (i == IBMVETH_NUM_BUFF_POOLS)
1127 return -EINVAL; 1267 return -EINVAL;
1128 1268
1129 /* Deactivate all the buffer pools so that the next loop can activate 1269 /* Deactivate all the buffer pools so that the next loop can activate
@@ -1136,7 +1276,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1136 } 1276 }
1137 1277
1138 /* Look for an active buffer pool that can hold the new MTU */ 1278 /* Look for an active buffer pool that can hold the new MTU */
1139 for(i = 0; i<IbmVethNumBufferPools; i++) { 1279 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1140 adapter->rx_buff_pool[i].active = 1; 1280 adapter->rx_buff_pool[i].active = 1;
1141 1281
1142 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { 1282 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
@@ -1190,7 +1330,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1190 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1330 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1191 ret += IOMMU_PAGE_ALIGN(netdev->mtu); 1331 ret += IOMMU_PAGE_ALIGN(netdev->mtu);
1192 1332
1193 for (i = 0; i < IbmVethNumBufferPools; i++) { 1333 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1194 /* add the size of the active receive buffers */ 1334 /* add the size of the active receive buffers */
1195 if (adapter->rx_buff_pool[i].active) 1335 if (adapter->rx_buff_pool[i].active)
1196 ret += 1336 ret +=
@@ -1219,41 +1359,36 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1219#endif 1359#endif
1220}; 1360};
1221 1361
1222static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 1362static int __devinit ibmveth_probe(struct vio_dev *dev,
1363 const struct vio_device_id *id)
1223{ 1364{
1224 int rc, i; 1365 int rc, i;
1225 long ret;
1226 struct net_device *netdev; 1366 struct net_device *netdev;
1227 struct ibmveth_adapter *adapter; 1367 struct ibmveth_adapter *adapter;
1228 unsigned long set_attr, ret_attr;
1229
1230 unsigned char *mac_addr_p; 1368 unsigned char *mac_addr_p;
1231 unsigned int *mcastFilterSize_p; 1369 unsigned int *mcastFilterSize_p;
1232 1370
1371 dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1372 dev->unit_address);
1233 1373
1234 ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n", 1374 mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1235 dev->unit_address); 1375 NULL);
1236 1376 if (!mac_addr_p) {
1237 mac_addr_p = (unsigned char *) vio_get_attribute(dev, 1377 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1238 VETH_MAC_ADDR, NULL); 1378 return -EINVAL;
1239 if(!mac_addr_p) {
1240 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
1241 "attribute\n", __FILE__, __LINE__);
1242 return 0;
1243 } 1379 }
1244 1380
1245 mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, 1381 mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
1246 VETH_MCAST_FILTER_SIZE, NULL); 1382 VETH_MCAST_FILTER_SIZE, NULL);
1247 if(!mcastFilterSize_p) { 1383 if (!mcastFilterSize_p) {
1248 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find " 1384 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1249 "VETH_MCAST_FILTER_SIZE attribute\n", 1385 "attribute\n");
1250 __FILE__, __LINE__); 1386 return -EINVAL;
1251 return 0;
1252 } 1387 }
1253 1388
1254 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); 1389 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1255 1390
1256 if(!netdev) 1391 if (!netdev)
1257 return -ENOMEM; 1392 return -ENOMEM;
1258 1393
1259 adapter = netdev_priv(netdev); 1394 adapter = netdev_priv(netdev);
@@ -1261,19 +1396,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1261 1396
1262 adapter->vdev = dev; 1397 adapter->vdev = dev;
1263 adapter->netdev = netdev; 1398 adapter->netdev = netdev;
1264 adapter->mcastFilterSize= *mcastFilterSize_p; 1399 adapter->mcastFilterSize = *mcastFilterSize_p;
1265 adapter->pool_config = 0; 1400 adapter->pool_config = 0;
1266 1401
1267 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 1402 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1268 1403
1269 /* Some older boxes running PHYP non-natively have an OF that 1404 /*
1270 returns a 8-byte local-mac-address field (and the first 1405 * Some older boxes running PHYP non-natively have an OF that returns
1271 2 bytes have to be ignored) while newer boxes' OF return 1406 * an 8-byte local-mac-address field (and the first 2 bytes have to be
1272 a 6-byte field. Note that IEEE 1275 specifies that 1407 * ignored) while newer boxes' OF return a 6-byte field. Note that
1273 local-mac-address must be a 6-byte field. 1408 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
1274 The RPA doc specifies that the first byte must be 10b, so 1409 * The RPA doc specifies that the first byte must be 10b, so we'll
1275 we'll just look for it to solve this 8 vs. 6 byte field issue */ 1410 * just look for it to solve this 8 vs. 6 byte field issue
1276 1411 */
1277 if ((*mac_addr_p & 0x3) != 0x02) 1412 if ((*mac_addr_p & 0x3) != 0x02)
1278 mac_addr_p += 2; 1413 mac_addr_p += 2;
1279 1414
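The `(*mac_addr_p & 0x3) != 0x02` test checks the two low-order bits of the first byte for the binary 10 pattern the RPA mandates (locally administered, unicast). If the property's first byte does not look like the start of a MAC address, the field is assumed to be the 8-byte form and the two pad bytes are skipped.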
@@ -1284,12 +1419,11 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1284 netdev->netdev_ops = &ibmveth_netdev_ops; 1419 netdev->netdev_ops = &ibmveth_netdev_ops;
1285 netdev->ethtool_ops = &netdev_ethtool_ops; 1420 netdev->ethtool_ops = &netdev_ethtool_ops;
1286 SET_NETDEV_DEV(netdev, &dev->dev); 1421 SET_NETDEV_DEV(netdev, &dev->dev);
1287 netdev->features |= NETIF_F_LLTX; 1422 netdev->features |= NETIF_F_SG;
1288 spin_lock_init(&adapter->stats_lock);
1289 1423
1290 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 1424 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
1291 1425
1292 for(i = 0; i<IbmVethNumBufferPools; i++) { 1426 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1293 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; 1427 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1294 int error; 1428 int error;
1295 1429
@@ -1302,41 +1436,25 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1302 kobject_uevent(kobj, KOBJ_ADD); 1436 kobject_uevent(kobj, KOBJ_ADD);
1303 } 1437 }
1304 1438
1305 ibmveth_debug_printk("adapter @ 0x%p\n", adapter); 1439 netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1306 1440
1307 adapter->buffer_list_dma = DMA_ERROR_CODE; 1441 adapter->buffer_list_dma = DMA_ERROR_CODE;
1308 adapter->filter_list_dma = DMA_ERROR_CODE; 1442 adapter->filter_list_dma = DMA_ERROR_CODE;
1309 adapter->rx_queue.queue_dma = DMA_ERROR_CODE; 1443 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1310 1444
1311 ibmveth_debug_printk("registering netdev...\n"); 1445 netdev_dbg(netdev, "registering netdev...\n");
1312
1313 ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
1314
1315 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
1316 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
1317 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
1318 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
1319
1320 ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);
1321 1446
1322 if (ret == H_SUCCESS) { 1447 ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags);
1323 adapter->rx_csum = 1;
1324 netdev->features |= NETIF_F_IP_CSUM;
1325 } else
1326 ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
1327 }
1328 1448
1329 rc = register_netdev(netdev); 1449 rc = register_netdev(netdev);
1330 1450
1331 if(rc) { 1451 if (rc) {
1332 ibmveth_debug_printk("failed to register netdev rc=%d\n", rc); 1452 netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1333 free_netdev(netdev); 1453 free_netdev(netdev);
1334 return rc; 1454 return rc;
1335 } 1455 }
1336 1456
1337 ibmveth_debug_printk("registered\n"); 1457 netdev_dbg(netdev, "registered\n");
1338
1339 ibmveth_proc_register_adapter(adapter);
1340 1458
1341 return 0; 1459 return 0;
1342} 1460}
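The removed open-coded negotiation becomes a single call to ibmveth_set_csum_offload(), a helper this patch presumably introduces earlier in the file (its body is not visible in these hunks). Condensed, the sequence it replaces looked like this sketch (the trunk-attribute checks are elided here):

	/* probe the firmware attributes, try to enable IPv4 TCP checksum
	 * offload, and roll the bit back if the hypervisor refuses */
	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		ret = h_illan_attributes(dev->unit_address, 0, set_attr,
					 &ret_attr);
		if (ret != H_SUCCESS)	/* back out on failure */
			h_illan_attributes(dev->unit_address, set_attr, 0,
					   &ret_attr);
	}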
@@ -1347,114 +1465,23 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
1347 struct ibmveth_adapter *adapter = netdev_priv(netdev); 1465 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1348 int i; 1466 int i;
1349 1467
1350 for(i = 0; i<IbmVethNumBufferPools; i++) 1468 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1351 kobject_put(&adapter->rx_buff_pool[i].kobj); 1469 kobject_put(&adapter->rx_buff_pool[i].kobj);
1352 1470
1353 unregister_netdev(netdev); 1471 unregister_netdev(netdev);
1354 1472
1355 ibmveth_proc_unregister_adapter(adapter);
1356
1357 free_netdev(netdev); 1473 free_netdev(netdev);
1358 dev_set_drvdata(&dev->dev, NULL); 1474 dev_set_drvdata(&dev->dev, NULL);
1359 1475
1360 return 0; 1476 return 0;
1361} 1477}
1362 1478
1363#ifdef CONFIG_PROC_FS
1364static void ibmveth_proc_register_driver(void)
1365{
1366 ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
1367 if (ibmveth_proc_dir) {
1368 }
1369}
1370
1371static void ibmveth_proc_unregister_driver(void)
1372{
1373 remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
1374}
1375
1376static int ibmveth_show(struct seq_file *seq, void *v)
1377{
1378 struct ibmveth_adapter *adapter = seq->private;
1379 char *current_mac = (char *) adapter->netdev->dev_addr;
1380 char *firmware_mac = (char *) &adapter->mac_addr;
1381
1382 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
1383
1384 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
1385 seq_printf(seq, "Current MAC: %pM\n", current_mac);
1386 seq_printf(seq, "Firmware MAC: %pM\n", firmware_mac);
1387
1388 seq_printf(seq, "\nAdapter Statistics:\n");
1389 seq_printf(seq, " TX: vio_map_single failres: %lld\n", adapter->tx_map_failed);
1390 seq_printf(seq, " send failures: %lld\n", adapter->tx_send_failed);
1391 seq_printf(seq, " RX: replenish task cycles: %lld\n", adapter->replenish_task_cycles);
1392 seq_printf(seq, " alloc_skb_failures: %lld\n", adapter->replenish_no_mem);
1393 seq_printf(seq, " add buffer failures: %lld\n", adapter->replenish_add_buff_failure);
1394 seq_printf(seq, " invalid buffers: %lld\n", adapter->rx_invalid_buffer);
1395 seq_printf(seq, " no buffers: %lld\n", adapter->rx_no_buffer);
1396
1397 return 0;
1398}
1399
1400static int ibmveth_proc_open(struct inode *inode, struct file *file)
1401{
1402 return single_open(file, ibmveth_show, PDE(inode)->data);
1403}
1404
1405static const struct file_operations ibmveth_proc_fops = {
1406 .owner = THIS_MODULE,
1407 .open = ibmveth_proc_open,
1408 .read = seq_read,
1409 .llseek = seq_lseek,
1410 .release = single_release,
1411};
1412
1413static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1414{
1415 struct proc_dir_entry *entry;
1416 if (ibmveth_proc_dir) {
1417 char u_addr[10];
1418 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1419 entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
1420 &ibmveth_proc_fops, adapter);
1421 if (!entry)
1422 ibmveth_error_printk("Cannot create adapter proc entry");
1423 }
1424}
1425
1426static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1427{
1428 if (ibmveth_proc_dir) {
1429 char u_addr[10];
1430 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1431 remove_proc_entry(u_addr, ibmveth_proc_dir);
1432 }
1433}
1434
1435#else /* CONFIG_PROC_FS */
1436static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1437{
1438}
1439
1440static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1441{
1442}
1443static void ibmveth_proc_register_driver(void)
1444{
1445}
1446
1447static void ibmveth_proc_unregister_driver(void)
1448{
1449}
1450#endif /* CONFIG_PROC_FS */
1451
1452static struct attribute veth_active_attr; 1479static struct attribute veth_active_attr;
1453static struct attribute veth_num_attr; 1480static struct attribute veth_num_attr;
1454static struct attribute veth_size_attr; 1481static struct attribute veth_size_attr;
1455 1482
1456static ssize_t veth_pool_show(struct kobject * kobj, 1483static ssize_t veth_pool_show(struct kobject *kobj,
1457 struct attribute * attr, char * buf) 1484 struct attribute *attr, char *buf)
1458{ 1485{
1459 struct ibmveth_buff_pool *pool = container_of(kobj, 1486 struct ibmveth_buff_pool *pool = container_of(kobj,
1460 struct ibmveth_buff_pool, 1487 struct ibmveth_buff_pool,
@@ -1469,8 +1496,8 @@ static ssize_t veth_pool_show(struct kobject * kobj,
1469 return 0; 1496 return 0;
1470} 1497}
1471 1498
1472static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr, 1499static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1473const char * buf, size_t count) 1500 const char *buf, size_t count)
1474{ 1501{
1475 struct ibmveth_buff_pool *pool = container_of(kobj, 1502 struct ibmveth_buff_pool *pool = container_of(kobj,
1476 struct ibmveth_buff_pool, 1503 struct ibmveth_buff_pool,
@@ -1484,8 +1511,9 @@ const char * buf, size_t count)
1484 if (attr == &veth_active_attr) { 1511 if (attr == &veth_active_attr) {
1485 if (value && !pool->active) { 1512 if (value && !pool->active) {
1486 if (netif_running(netdev)) { 1513 if (netif_running(netdev)) {
1487 if(ibmveth_alloc_buffer_pool(pool)) { 1514 if (ibmveth_alloc_buffer_pool(pool)) {
1488 ibmveth_error_printk("unable to alloc pool\n"); 1515 netdev_err(netdev,
1516 "unable to alloc pool\n");
1489 return -ENOMEM; 1517 return -ENOMEM;
1490 } 1518 }
1491 pool->active = 1; 1519 pool->active = 1;
@@ -1494,14 +1522,15 @@ const char * buf, size_t count)
1494 adapter->pool_config = 0; 1522 adapter->pool_config = 0;
1495 if ((rc = ibmveth_open(netdev))) 1523 if ((rc = ibmveth_open(netdev)))
1496 return rc; 1524 return rc;
1497 } else 1525 } else {
1498 pool->active = 1; 1526 pool->active = 1;
1527 }
1499 } else if (!value && pool->active) { 1528 } else if (!value && pool->active) {
1500 int mtu = netdev->mtu + IBMVETH_BUFF_OH; 1529 int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1501 int i; 1530 int i;
1502 /* Make sure there is a buffer pool with buffers that 1531 /* Make sure there is a buffer pool with buffers that
1503 can hold a packet of the size of the MTU */ 1532 can hold a packet of the size of the MTU */
1504 for (i = 0; i < IbmVethNumBufferPools; i++) { 1533 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1505 if (pool == &adapter->rx_buff_pool[i]) 1534 if (pool == &adapter->rx_buff_pool[i])
1506 continue; 1535 continue;
1507 if (!adapter->rx_buff_pool[i].active) 1536 if (!adapter->rx_buff_pool[i].active)
@@ -1510,8 +1539,8 @@ const char * buf, size_t count)
1510 break; 1539 break;
1511 } 1540 }
1512 1541
1513 if (i == IbmVethNumBufferPools) { 1542 if (i == IBMVETH_NUM_BUFF_POOLS) {
1514 ibmveth_error_printk("no active pool >= MTU\n"); 1543 netdev_err(netdev, "no active pool >= MTU\n");
1515 return -EPERM; 1544 return -EPERM;
1516 } 1545 }
1517 1546
@@ -1526,9 +1555,9 @@ const char * buf, size_t count)
1526 pool->active = 0; 1555 pool->active = 0;
1527 } 1556 }
1528 } else if (attr == &veth_num_attr) { 1557 } else if (attr == &veth_num_attr) {
1529 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) 1558 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
1530 return -EINVAL; 1559 return -EINVAL;
1531 else { 1560 } else {
1532 if (netif_running(netdev)) { 1561 if (netif_running(netdev)) {
1533 adapter->pool_config = 1; 1562 adapter->pool_config = 1;
1534 ibmveth_close(netdev); 1563 ibmveth_close(netdev);
@@ -1536,13 +1565,14 @@ const char * buf, size_t count)
1536 pool->size = value; 1565 pool->size = value;
1537 if ((rc = ibmveth_open(netdev))) 1566 if ((rc = ibmveth_open(netdev)))
1538 return rc; 1567 return rc;
1539 } else 1568 } else {
1540 pool->size = value; 1569 pool->size = value;
1570 }
1541 } 1571 }
1542 } else if (attr == &veth_size_attr) { 1572 } else if (attr == &veth_size_attr) {
1543 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) 1573 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
1544 return -EINVAL; 1574 return -EINVAL;
1545 else { 1575 } else {
1546 if (netif_running(netdev)) { 1576 if (netif_running(netdev)) {
1547 adapter->pool_config = 1; 1577 adapter->pool_config = 1;
1548 ibmveth_close(netdev); 1578 ibmveth_close(netdev);
@@ -1550,8 +1580,9 @@ const char * buf, size_t count)
1550 pool->buff_size = value; 1580 pool->buff_size = value;
1551 if ((rc = ibmveth_open(netdev))) 1581 if ((rc = ibmveth_open(netdev)))
1552 return rc; 1582 return rc;
1553 } else 1583 } else {
1554 pool->buff_size = value; 1584 pool->buff_size = value;
1585 }
1555 } 1586 }
1556 } 1587 }
1557 1588
@@ -1561,16 +1592,16 @@ const char * buf, size_t count)
1561} 1592}
1562 1593
1563 1594
1564#define ATTR(_name, _mode) \ 1595#define ATTR(_name, _mode) \
1565 struct attribute veth_##_name##_attr = { \ 1596 struct attribute veth_##_name##_attr = { \
1566 .name = __stringify(_name), .mode = _mode, \ 1597 .name = __stringify(_name), .mode = _mode, \
1567 }; 1598 };
1568 1599
1569static ATTR(active, 0644); 1600static ATTR(active, 0644);
1570static ATTR(num, 0644); 1601static ATTR(num, 0644);
1571static ATTR(size, 0644); 1602static ATTR(size, 0644);
1572 1603
1573static struct attribute * veth_pool_attrs[] = { 1604static struct attribute *veth_pool_attrs[] = {
1574 &veth_active_attr, 1605 &veth_active_attr,
1575 &veth_num_attr, 1606 &veth_num_attr,
1576 &veth_size_attr, 1607 &veth_size_attr,
@@ -1595,7 +1626,7 @@ static int ibmveth_resume(struct device *dev)
1595 return 0; 1626 return 0;
1596} 1627}
1597 1628
1598static struct vio_device_id ibmveth_device_table[] __devinitdata= { 1629static struct vio_device_id ibmveth_device_table[] __devinitdata = {
1599 { "network", "IBM,l-lan"}, 1630 { "network", "IBM,l-lan"},
1600 { "", "" } 1631 { "", "" }
1601}; 1632};
@@ -1619,9 +1650,8 @@ static struct vio_driver ibmveth_driver = {
1619 1650
1620static int __init ibmveth_module_init(void) 1651static int __init ibmveth_module_init(void)
1621{ 1652{
1622 ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version); 1653 printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1623 1654 ibmveth_driver_string, ibmveth_driver_version);
1624 ibmveth_proc_register_driver();
1625 1655
1626 return vio_register_driver(&ibmveth_driver); 1656 return vio_register_driver(&ibmveth_driver);
1627} 1657}
@@ -1629,7 +1659,6 @@ static int __init ibmveth_module_init(void)
1629static void __exit ibmveth_module_exit(void) 1659static void __exit ibmveth_module_exit(void)
1630{ 1660{
1631 vio_unregister_driver(&ibmveth_driver); 1661 vio_unregister_driver(&ibmveth_driver);
1632 ibmveth_proc_unregister_driver();
1633} 1662}
1634 1663
1635module_init(ibmveth_module_init); 1664module_init(ibmveth_module_init);
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index ec76ace66c6b..43a794fab9ff 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -1,26 +1,28 @@
1/**************************************************************************/ 1/*
2/* */ 2 * IBM Power Virtual Ethernet Device Driver
3/* IBM eServer i/[Series Virtual Ethernet Device Driver */ 3 *
4/* Copyright (C) 2003 IBM Corp. */ 4 * This program is free software; you can redistribute it and/or modify
5/* Dave Larson (larson1@us.ibm.com) */ 5 * it under the terms of the GNU General Public License as published by
6/* Santiago Leon (santil@us.ibm.com) */ 6 * the Free Software Foundation; either version 2 of the License, or
7/* */ 7 * (at your option) any later version.
8/* This program is free software; you can redistribute it and/or modify */ 8 *
9/* it under the terms of the GNU General Public License as published by */ 9 * This program is distributed in the hope that it will be useful,
10/* the Free Software Foundation; either version 2 of the License, or */ 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11/* (at your option) any later version. */ 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12/* */ 12 * GNU General Public License for more details.
13/* This program is distributed in the hope that it will be useful, */ 13 *
14/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ 14 * You should have received a copy of the GNU General Public License
15/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ 15 * along with this program; if not, write to the Free Software
16/* GNU General Public License for more details. */ 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17/* */ 17 *
18/* You should have received a copy of the GNU General Public License */ 18 * Copyright (C) IBM Corporation, 2003, 2010
19/* along with this program; if not, write to the Free Software */ 19 *
20/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */ 20 * Authors: Dave Larson <larson1@us.ibm.com>
21/* USA */ 21 * Santiago Leon <santil@linux.vnet.ibm.com>
22/* */ 22 * Brian King <brking@linux.vnet.ibm.com>
23/**************************************************************************/ 23 * Robert Jennings <rcj@linux.vnet.ibm.com>
24 * Anton Blanchard <anton@au.ibm.com>
25 */
24 26
25#ifndef _IBMVETH_H 27#ifndef _IBMVETH_H
26#define _IBMVETH_H 28#define _IBMVETH_H
@@ -92,17 +94,17 @@ static inline long h_illan_attributes(unsigned long unit_address,
92#define h_change_logical_lan_mac(ua, mac) \ 94#define h_change_logical_lan_mac(ua, mac) \
93 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) 95 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
94 96
95#define IbmVethNumBufferPools 5 97#define IBMVETH_NUM_BUFF_POOLS 5
96#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */ 98#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
97#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ 99#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
98#define IBMVETH_MAX_MTU 68 100#define IBMVETH_MIN_MTU 68
99#define IBMVETH_MAX_POOL_COUNT 4096 101#define IBMVETH_MAX_POOL_COUNT 4096
100#define IBMVETH_BUFF_LIST_SIZE 4096 102#define IBMVETH_BUFF_LIST_SIZE 4096
101#define IBMVETH_FILT_LIST_SIZE 4096 103#define IBMVETH_FILT_LIST_SIZE 4096
102#define IBMVETH_MAX_BUF_SIZE (1024 * 128) 104#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
103 105
104static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; 106static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
105static int pool_count[] = { 256, 768, 256, 256, 256 }; 107static int pool_count[] = { 256, 512, 256, 256, 256 };
106static int pool_active[] = { 1, 1, 0, 0, 0}; 108static int pool_active[] = { 1, 1, 0, 0, 0};
107 109
108#define IBM_VETH_INVALID_MAP ((u16)0xffff) 110#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -142,13 +144,15 @@ struct ibmveth_adapter {
142 void * filter_list_addr; 144 void * filter_list_addr;
143 dma_addr_t buffer_list_dma; 145 dma_addr_t buffer_list_dma;
144 dma_addr_t filter_list_dma; 146 dma_addr_t filter_list_dma;
145 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; 147 struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
146 struct ibmveth_rx_q rx_queue; 148 struct ibmveth_rx_q rx_queue;
147 int pool_config; 149 int pool_config;
148 int rx_csum; 150 int rx_csum;
149 void *bounce_buffer; 151 void *bounce_buffer;
150 dma_addr_t bounce_buffer_dma; 152 dma_addr_t bounce_buffer_dma;
151 153
154 u64 fw_ipv6_csum_support;
155 u64 fw_ipv4_csum_support;
152 /* adapter specific stats */ 156 /* adapter specific stats */
153 u64 replenish_task_cycles; 157 u64 replenish_task_cycles;
154 u64 replenish_no_mem; 158 u64 replenish_no_mem;
@@ -158,7 +162,6 @@ struct ibmveth_adapter {
158 u64 rx_no_buffer; 162 u64 rx_no_buffer;
159 u64 tx_map_failed; 163 u64 tx_map_failed;
160 u64 tx_send_failed; 164 u64 tx_send_failed;
161 spinlock_t stats_lock;
162}; 165};
163 166
164struct ibmveth_buf_desc_fields { 167struct ibmveth_buf_desc_fields {
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 6e63d9a7fc75..44e0ff1494e0 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -143,7 +143,7 @@ struct igb_buffer {
143 u16 next_to_watch; 143 u16 next_to_watch;
144 unsigned int bytecount; 144 unsigned int bytecount;
145 u16 gso_segs; 145 u16 gso_segs;
146 union skb_shared_tx shtx; 146 u8 tx_flags;
147 u8 mapped_as_page; 147 u8 mapped_as_page;
148 }; 148 };
149 /* RX */ 149 /* RX */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9b4e5895f5f9..c4d861b557ca 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1888,9 +1888,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1888 goto err_eeprom; 1888 goto err_eeprom;
1889 } 1889 }
1890 1890
1891 setup_timer(&adapter->watchdog_timer, &igb_watchdog, 1891 setup_timer(&adapter->watchdog_timer, igb_watchdog,
1892 (unsigned long) adapter); 1892 (unsigned long) adapter);
1893 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, 1893 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
1894 (unsigned long) adapter); 1894 (unsigned long) adapter);
1895 1895
1896 INIT_WORK(&adapter->reset_task, igb_reset_task); 1896 INIT_WORK(&adapter->reset_task, igb_reset_task);
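The only change in this hunk is dropping the redundant '&' before the callback: a function designator in an expression already decays to a pointer, so both spellings pass the same value. A tiny self-contained sketch for a kernel of this era (the demo_* names are hypothetical):

	#include <linux/timer.h>

	static struct timer_list demo_timer;

	static void demo_cb(unsigned long data)
	{
		/* timer body elided */
	}

	static void demo_init(void)
	{
		/* demo_cb and &demo_cb evaluate to the same pointer */
		setup_timer(&demo_timer, demo_cb, 0UL);
	}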
@@ -3954,7 +3954,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3954 } 3954 }
3955 3955
3956 tx_ring->buffer_info[i].skb = skb; 3956 tx_ring->buffer_info[i].skb = skb;
3957 tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags; 3957 tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
3958 /* multiply data chunks by size of headers */ 3958 /* multiply data chunks by size of headers */
3959 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len; 3959 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
3960 tx_ring->buffer_info[i].gso_segs = gso_segs; 3960 tx_ring->buffer_info[i].gso_segs = gso_segs;
@@ -4088,7 +4088,6 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4088 u32 tx_flags = 0; 4088 u32 tx_flags = 0;
4089 u16 first; 4089 u16 first;
4090 u8 hdr_len = 0; 4090 u8 hdr_len = 0;
4091 union skb_shared_tx *shtx = skb_tx(skb);
4092 4091
4093 /* need: 1 descriptor per page, 4092 /* need: 1 descriptor per page,
4094 * + 2 desc gap to keep tail from touching head, 4093 * + 2 desc gap to keep tail from touching head,
@@ -4100,8 +4099,8 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4100 return NETDEV_TX_BUSY; 4099 return NETDEV_TX_BUSY;
4101 } 4100 }
4102 4101
4103 if (unlikely(shtx->hardware)) { 4102 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4104 shtx->in_progress = 1; 4103 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4105 tx_flags |= IGB_TX_FLAGS_TSTAMP; 4104 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4106 } 4105 }
4107 4106
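These hunks track the core change that folded the old skb_shared_tx union into a tx_flags byte inside skb_shared_info, so the request/acknowledge handshake now reads as plain bit tests. A minimal sketch against the new API (demo_claim_hw_tstamp is a hypothetical name):

	#include <linux/skbuff.h>

	/* SKBTX_HW_TSTAMP: the stack asked for a hardware timestamp;
	 * SKBTX_IN_PROGRESS: the driver acknowledges it will deliver one */
	static void demo_claim_hw_tstamp(struct sk_buff *skb)
	{
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	}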
@@ -5319,7 +5318,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
5319 u64 regval; 5318 u64 regval;
5320 5319
5321 /* if skb does not support hw timestamp or TX stamp not valid exit */ 5320 /* if skb does not support hw timestamp or TX stamp not valid exit */
5322 if (likely(!buffer_info->shtx.hardware) || 5321 if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
5323 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) 5322 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5324 return; 5323 return;
5325 5324
@@ -5456,7 +5455,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
5456static inline void igb_rx_checksum_adv(struct igb_ring *ring, 5455static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5457 u32 status_err, struct sk_buff *skb) 5456 u32 status_err, struct sk_buff *skb)
5458{ 5457{
5459 skb->ip_summed = CHECKSUM_NONE; 5458 skb_checksum_none_assert(skb);
5460 5459
5461 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 5460 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
5462 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || 5461 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
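skb_checksum_none_assert() replaces stores of CHECKSUM_NONE on receive paths where that is already the skb's state. To the best of our reading it is roughly this, paraphrased from <linux/skbuff.h> of the same era:

	static inline void skb_checksum_none_assert(const struct sk_buff *skb)
	{
	#ifdef DEBUG
		/* a freshly allocated skb is CHECKSUM_NONE already; on
		 * debug builds, trap anyone who changed that under us */
		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
	#endif
	}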
@@ -5500,7 +5499,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5500 * values must belong to this one here and therefore we don't need to 5499 * values must belong to this one here and therefore we don't need to
5501 * compare any of the additional attributes stored for it. 5500 * compare any of the additional attributes stored for it.
5502 * 5501 *
5503 * If nothing went wrong, then it should have a skb_shared_tx that we 5502 * If nothing went wrong, then it should have a shared tx_flags that we
5504 * can turn into a skb_shared_hwtstamps. 5503 * can turn into a skb_shared_hwtstamps.
5505 */ 5504 */
5506 if (staterr & E1000_RXDADV_STAT_TSIP) { 5505 if (staterr & E1000_RXDADV_STAT_TSIP) {
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index c539f7c9c3e0..c7fab80d2490 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -103,7 +103,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
103static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, 103static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
104 u32 status_err, struct sk_buff *skb) 104 u32 status_err, struct sk_buff *skb)
105{ 105{
106 skb->ip_summed = CHECKSUM_NONE; 106 skb_checksum_none_assert(skb);
107 107
108 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 108 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
109 if ((status_err & E1000_RXD_STAT_IXSM) || 109 if ((status_err & E1000_RXD_STAT_IXSM) ||
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 0b3f6df5cff7..c8ee8d28767b 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -827,7 +827,7 @@ static void ioc3_mii_start(struct ioc3_private *ip)
827{ 827{
828 ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */ 828 ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
829 ip->ioc3_timer.data = (unsigned long) ip; 829 ip->ioc3_timer.data = (unsigned long) ip;
830 ip->ioc3_timer.function = &ioc3_timer; 830 ip->ioc3_timer.function = ioc3_timer;
831 add_timer(&ip->ioc3_timer); 831 add_timer(&ip->ioc3_timer);
832} 832}
833 833
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 72e3d2da9e9f..dc0198092343 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1213,7 +1213,7 @@ static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1213 1213
1214 skb_put(skb, framelen); 1214 skb_put(skb, framelen);
1215 skb->protocol = eth_type_trans(skb, dev); 1215 skb->protocol = eth_type_trans(skb, dev);
1216 skb->ip_summed = CHECKSUM_NONE; 1216 skb_checksum_none_assert(skb);
1217 netif_rx(skb); 1217 netif_rx(skb);
1218 sp->rx_buff[entry] = NULL; 1218 sp->rx_buff[entry] = NULL;
1219} 1219}
@@ -1278,7 +1278,7 @@ static void ipg_nic_rx_with_end(struct net_device *dev,
1278 jumbo->skb->protocol = 1278 jumbo->skb->protocol =
1279 eth_type_trans(jumbo->skb, dev); 1279 eth_type_trans(jumbo->skb, dev);
1280 1280
1281 jumbo->skb->ip_summed = CHECKSUM_NONE; 1281 skb_checksum_none_assert(jumbo->skb);
1282 netif_rx(jumbo->skb); 1282 netif_rx(jumbo->skb);
1283 } 1283 }
1284 } 1284 }
@@ -1476,7 +1476,7 @@ static int ipg_nic_rx(struct net_device *dev)
1476 * IP/TCP/UDP frame was received. Let the 1476 * IP/TCP/UDP frame was received. Let the
1477 * upper layer decide. 1477 * upper layer decide.
1478 */ 1478 */
1479 skb->ip_summed = CHECKSUM_NONE; 1479 skb_checksum_none_assert(skb);
1480 1480
1481 /* Hand off frame for higher layer processing. 1481 /* Hand off frame for higher layer processing.
1482 * The function netif_rx() releases the sk_buff 1482 * The function netif_rx() releases the sk_buff
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 5b1036ac38d7..74b20f179cea 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -734,7 +734,7 @@ static int mcs_net_open(struct net_device *netdev)
734 } 734 }
735 735
736 if (!mcs_setup_urbs(mcs)) 736 if (!mcs_setup_urbs(mcs))
737 goto error3; 737 goto error3;
738 738
739 ret = mcs_receive_start(mcs); 739 ret = mcs_receive_start(mcs);
740 if (ret) 740 if (ret)
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index b0a6cd815be1..67c0ad42d818 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -1182,12 +1182,13 @@ F01_E */
1182 1182
1183 skb = dev_alloc_skb(len + 1 - 4); 1183 skb = dev_alloc_skb(len + 1 - 4);
1184 /* 1184 /*
1185 * if frame size,data ptr,or skb ptr are wrong ,the get next 1185 * if frame size, data ptr, or skb ptr are wrong, then get next
1186 * entry. 1186 * entry.
1187 */ 1187 */
1188 if ((skb == NULL) || (skb->data == NULL) || 1188 if ((skb == NULL) || (skb->data == NULL) ||
1189 (self->rx_buff.data == NULL) || (len < 6)) { 1189 (self->rx_buff.data == NULL) || (len < 6)) {
1190 self->netdev->stats.rx_dropped++; 1190 self->netdev->stats.rx_dropped++;
1191 kfree_skb(skb);
1191 return TRUE; 1192 return TRUE;
1192 } 1193 }
1193 skb_reserve(skb, 1); 1194 skb_reserve(skb, 1);
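The added kfree_skb() plugs a leak: dev_alloc_skb() can succeed while one of the other sanity checks still fails, and the old early return dropped that buffer on the floor. Since kfree_skb(NULL) is a no-op, one unconditional call covers every arm of the test:

	skb = dev_alloc_skb(len + 1 - 4);
	if ((skb == NULL) || (skb->data == NULL) ||
	    (self->rx_buff.data == NULL) || (len < 6)) {
		self->netdev->stats.rx_dropped++;
		kfree_skb(skb);		/* no-op when skb is NULL */
		return TRUE;
	}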
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index ba1de5973fb2..8df645e78f2e 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1524,7 +1524,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1524 1524
1525 skb_put(skb, length); 1525 skb_put(skb, length);
1526 skb->protocol = eth_type_trans(skb, dev); 1526 skb->protocol = eth_type_trans(skb, dev);
1527 skb->ip_summed = CHECKSUM_NONE; 1527 skb_checksum_none_assert(skb);
1528 netif_rx(skb); /* send it up */ 1528 netif_rx(skb); /* send it up */
1529 dev->stats.rx_packets++; 1529 dev->stats.rx_packets++;
1530 dev->stats.rx_bytes += length; 1530 dev->stats.rx_bytes += length;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 45fc89b9ba64..c2f6e71e1181 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -470,7 +470,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
470 adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); 470 adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
471 471
472 init_timer(&adapter->watchdog_timer); 472 init_timer(&adapter->watchdog_timer);
473 adapter->watchdog_timer.function = &ixgb_watchdog; 473 adapter->watchdog_timer.function = ixgb_watchdog;
474 adapter->watchdog_timer.data = (unsigned long)adapter; 474 adapter->watchdog_timer.data = (unsigned long)adapter;
475 475
476 INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); 476 INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
@@ -1905,7 +1905,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1905 */ 1905 */
1906 if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || 1906 if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1907 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { 1907 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1908 skb->ip_summed = CHECKSUM_NONE; 1908 skb_checksum_none_assert(skb);
1909 return; 1909 return;
1910 } 1910 }
1911 1911
@@ -1913,7 +1913,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1913 /* now look at the TCP checksum error bit */ 1913 /* now look at the TCP checksum error bit */
1914 if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { 1914 if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1915 /* let the stack verify checksum errors */ 1915 /* let the stack verify checksum errors */
1916 skb->ip_summed = CHECKSUM_NONE; 1916 skb_checksum_none_assert(skb);
1917 adapter->hw_csum_rx_error++; 1917 adapter->hw_csum_rx_error++;
1918 } else { 1918 } else {
1919 /* TCP checksum is good */ 1919 /* TCP checksum is good */
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 9e15eb93860e..5cebc3755b64 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -69,15 +69,20 @@
69#define IXGBE_MAX_FCPAUSE 0xFFFF 69#define IXGBE_MAX_FCPAUSE 0xFFFF
70 70
71/* Supported Rx Buffer Sizes */ 71/* Supported Rx Buffer Sizes */
72#define IXGBE_RXBUFFER_64 64 /* Used for packet split */ 72#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
73#define IXGBE_RXBUFFER_128 128 /* Used for packet split */
74#define IXGBE_RXBUFFER_256 256 /* Used for packet split */
75#define IXGBE_RXBUFFER_2048 2048 73#define IXGBE_RXBUFFER_2048 2048
76#define IXGBE_RXBUFFER_4096 4096 74#define IXGBE_RXBUFFER_4096 4096
77#define IXGBE_RXBUFFER_8192 8192 75#define IXGBE_RXBUFFER_8192 8192
78#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ 76#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
79 77
80#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 78/*
 79 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
80 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
81 * this adds up to 512 bytes of extra data meaning the smallest allocation
82 * we could have is 1K.
83 * i.e. RXBUFFER_512 --> size-1024 slab
84 */
85#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
81 86
82#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) 87#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
83 88
@@ -251,11 +256,11 @@ struct ixgbe_q_vector {
251 (R)->next_to_clean - (R)->next_to_use - 1) 256 (R)->next_to_clean - (R)->next_to_use - 1)
252 257
253#define IXGBE_RX_DESC_ADV(R, i) \ 258#define IXGBE_RX_DESC_ADV(R, i) \
254 (&(((union ixgbe_adv_rx_desc *)((R).desc))[i])) 259 (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
255#define IXGBE_TX_DESC_ADV(R, i) \ 260#define IXGBE_TX_DESC_ADV(R, i) \
256 (&(((union ixgbe_adv_tx_desc *)((R).desc))[i])) 261 (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
257#define IXGBE_TX_CTXTDESC_ADV(R, i) \ 262#define IXGBE_TX_CTXTDESC_ADV(R, i) \
258 (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) 263 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
259 264
260#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 265#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
261#ifdef IXGBE_FCOE 266#ifdef IXGBE_FCOE
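The descriptor macros now dereference a ring pointer instead of expecting a struct, matching how rings are reached everywhere else (adapter->tx_ring[n] is itself a pointer). Call sites simply drop the dereference, as the fcoe hunk further below shows:

	/* old: context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); */
	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);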
@@ -448,9 +453,20 @@ extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
448extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 453extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
449extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 454extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
450extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 455extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
456extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
457extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
451extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 458extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
452extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 459extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
453extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 460extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
461extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
462 struct net_device *,
463 struct ixgbe_adapter *,
464 struct ixgbe_ring *);
465extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
466 struct ixgbe_tx_buffer *);
467extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
468 struct ixgbe_ring *rx_ring,
469 int cleaned_count);
454extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 470extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
455extern int ethtool_ioctl(struct ifreq *ifr); 471extern int ethtool_ioctl(struct ifreq *ifr);
456extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 472extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dcebc82c6f4d..25ef8b197373 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -820,16 +820,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
820 struct ixgbe_adapter *adapter = netdev_priv(netdev); 820 struct ixgbe_adapter *adapter = netdev_priv(netdev);
821 char firmware_version[32]; 821 char firmware_version[32];
822 822
823 strncpy(drvinfo->driver, ixgbe_driver_name, 32); 823 strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
824 strncpy(drvinfo->version, ixgbe_driver_version, 32); 824 strncpy(drvinfo->version, ixgbe_driver_version,
825 825 sizeof(drvinfo->version));
826 sprintf(firmware_version, "%d.%d-%d", 826
827 (adapter->eeprom_version & 0xF000) >> 12, 827 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
828 (adapter->eeprom_version & 0x0FF0) >> 4, 828 (adapter->eeprom_version & 0xF000) >> 12,
829 adapter->eeprom_version & 0x000F); 829 (adapter->eeprom_version & 0x0FF0) >> 4,
830 830 adapter->eeprom_version & 0x000F);
831 strncpy(drvinfo->fw_version, firmware_version, 32); 831
832 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 832 strncpy(drvinfo->fw_version, firmware_version,
833 sizeof(drvinfo->fw_version));
834 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
835 sizeof(drvinfo->bus_info));
833 drvinfo->n_stats = IXGBE_STATS_LEN; 836 drvinfo->n_stats = IXGBE_STATS_LEN;
834 drvinfo->testinfo_len = IXGBE_TEST_LEN; 837 drvinfo->testinfo_len = IXGBE_TEST_LEN;
835 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 838 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
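The get_drvinfo cleanup swaps the magic 32s for bounds derived from the destination itself, so the copies track any future resize of the ethtool fields. The shape of the pattern, with major/minor/build standing in for the eeprom_version shifts:

	char fw[32];

	snprintf(fw, sizeof(fw), "%d.%d-%d", major, minor, build);
	strncpy(drvinfo->fw_version, fw, sizeof(drvinfo->fw_version));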
@@ -1435,9 +1438,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1435 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1438 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1436 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1439 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1437 struct ixgbe_hw *hw = &adapter->hw; 1440 struct ixgbe_hw *hw = &adapter->hw;
1438 struct pci_dev *pdev = adapter->pdev;
1439 u32 reg_ctl; 1441 u32 reg_ctl;
1440 int i;
1441 1442
1442 /* shut down the DMA engines now so they can be reinitialized later */ 1443 /* shut down the DMA engines now so they can be reinitialized later */
1443 1444
@@ -1445,14 +1446,15 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1445 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1446 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1446 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1447 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1447 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1448 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1448 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)); 1449 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
1449 reg_ctl &= ~IXGBE_RXDCTL_ENABLE; 1450 reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
1450 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl); 1451 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
1451 1452
1452 /* now Tx */ 1453 /* now Tx */
1453 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)); 1454 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1454 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1455 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1455 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl); 1456 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1457
1456 if (hw->mac.type == ixgbe_mac_82599EB) { 1458 if (hw->mac.type == ixgbe_mac_82599EB) {
1457 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1459 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1458 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1460 reg_ctl &= ~IXGBE_DMATXCTL_TE;
@@ -1461,221 +1463,57 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1461 1463
1462 ixgbe_reset(adapter); 1464 ixgbe_reset(adapter);
1463 1465
1464 if (tx_ring->desc && tx_ring->tx_buffer_info) { 1466 ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
1465 for (i = 0; i < tx_ring->count; i++) { 1467 ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
1466 struct ixgbe_tx_buffer *buf =
1467 &(tx_ring->tx_buffer_info[i]);
1468 if (buf->dma)
1469 dma_unmap_single(&pdev->dev, buf->dma,
1470 buf->length, DMA_TO_DEVICE);
1471 if (buf->skb)
1472 dev_kfree_skb(buf->skb);
1473 }
1474 }
1475
1476 if (rx_ring->desc && rx_ring->rx_buffer_info) {
1477 for (i = 0; i < rx_ring->count; i++) {
1478 struct ixgbe_rx_buffer *buf =
1479 &(rx_ring->rx_buffer_info[i]);
1480 if (buf->dma)
1481 dma_unmap_single(&pdev->dev, buf->dma,
1482 IXGBE_RXBUFFER_2048,
1483 DMA_FROM_DEVICE);
1484 if (buf->skb)
1485 dev_kfree_skb(buf->skb);
1486 }
1487 }
1488
1489 if (tx_ring->desc) {
1490 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1491 tx_ring->dma);
1492 tx_ring->desc = NULL;
1493 }
1494 if (rx_ring->desc) {
1495 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1496 rx_ring->dma);
1497 rx_ring->desc = NULL;
1498 }
1499
1500 kfree(tx_ring->tx_buffer_info);
1501 tx_ring->tx_buffer_info = NULL;
1502 kfree(rx_ring->rx_buffer_info);
1503 rx_ring->rx_buffer_info = NULL;
1504} 1468}
1505 1469
1506static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1470static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1507{ 1471{
1508 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1472 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1509 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1473 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1510 struct pci_dev *pdev = adapter->pdev;
1511 u32 rctl, reg_data; 1474 u32 rctl, reg_data;
1512 int i, ret_val; 1475 int ret_val;
1476 int err;
1513 1477
1514 /* Setup Tx descriptor ring and Tx buffers */ 1478 /* Setup Tx descriptor ring and Tx buffers */
1479 tx_ring->count = IXGBE_DEFAULT_TXD;
1480 tx_ring->queue_index = 0;
1481 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1482 tx_ring->numa_node = adapter->node;
1515 1483
1516 if (!tx_ring->count) 1484 err = ixgbe_setup_tx_resources(adapter, tx_ring);
1517 tx_ring->count = IXGBE_DEFAULT_TXD; 1485 if (err)
1518 1486 return 1;
1519 tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
1520 sizeof(struct ixgbe_tx_buffer),
1521 GFP_KERNEL);
1522 if (!(tx_ring->tx_buffer_info)) {
1523 ret_val = 1;
1524 goto err_nomem;
1525 }
1526
1527 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
1528 tx_ring->size = ALIGN(tx_ring->size, 4096);
1529 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1530 &tx_ring->dma, GFP_KERNEL);
1531 if (!(tx_ring->desc)) {
1532 ret_val = 2;
1533 goto err_nomem;
1534 }
1535 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1536
1537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
1538 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1539 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
1540 ((u64) tx_ring->dma >> 32));
1541 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
1542 tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
1543 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
1544 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
1545
1546 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1547 reg_data |= IXGBE_HLREG0_TXPADEN;
1548 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1549 1487
1550 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1488 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1551 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1489 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1552 reg_data |= IXGBE_DMATXCTL_TE; 1490 reg_data |= IXGBE_DMATXCTL_TE;
1553 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1491 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1554 } 1492 }
1555 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
1556 reg_data |= IXGBE_TXDCTL_ENABLE;
1557 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
1558
1559 for (i = 0; i < tx_ring->count; i++) {
1560 union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
1561 struct sk_buff *skb;
1562 unsigned int size = 1024;
1563
1564 skb = alloc_skb(size, GFP_KERNEL);
1565 if (!skb) {
1566 ret_val = 3;
1567 goto err_nomem;
1568 }
1569 skb_put(skb, size);
1570 tx_ring->tx_buffer_info[i].skb = skb;
1571 tx_ring->tx_buffer_info[i].length = skb->len;
1572 tx_ring->tx_buffer_info[i].dma =
1573 dma_map_single(&pdev->dev, skb->data, skb->len,
1574 DMA_TO_DEVICE);
1575 desc->read.buffer_addr =
1576 cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
1577 desc->read.cmd_type_len = cpu_to_le32(skb->len);
1578 desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
1579 IXGBE_TXD_CMD_IFCS |
1580 IXGBE_TXD_CMD_RS);
1581 desc->read.olinfo_status = 0;
1582 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1583 desc->read.olinfo_status |=
1584 (skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
1585 1493
1586 } 1494 ixgbe_configure_tx_ring(adapter, tx_ring);
1587 1495
1588 /* Setup Rx Descriptor ring and Rx buffers */ 1496 /* Setup Rx Descriptor ring and Rx buffers */
1589 1497 rx_ring->count = IXGBE_DEFAULT_RXD;
1590 if (!rx_ring->count) 1498 rx_ring->queue_index = 0;
1591 rx_ring->count = IXGBE_DEFAULT_RXD; 1499 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1592 1500 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
1593 rx_ring->rx_buffer_info = kcalloc(rx_ring->count, 1501 rx_ring->numa_node = adapter->node;
1594 sizeof(struct ixgbe_rx_buffer), 1502
1595 GFP_KERNEL); 1503 err = ixgbe_setup_rx_resources(adapter, rx_ring);
1596 if (!(rx_ring->rx_buffer_info)) { 1504 if (err) {
1597 ret_val = 4; 1505 ret_val = 4;
1598 goto err_nomem; 1506 goto err_nomem;
1599 } 1507 }
1600 1508
1601 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
1602 rx_ring->size = ALIGN(rx_ring->size, 4096);
1603 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1604 &rx_ring->dma, GFP_KERNEL);
1605 if (!(rx_ring->desc)) {
1606 ret_val = 5;
1607 goto err_nomem;
1608 }
1609 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1610
1611 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1509 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1612 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1510 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1613 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
1614 ((u64)rx_ring->dma & 0xFFFFFFFF));
1615 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
1616 ((u64) rx_ring->dma >> 32));
1617 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
1618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
1619 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
1620
1621 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1622 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1624
1625 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1626 reg_data &= ~IXGBE_HLREG0_LPBK;
1627 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1628 1511
1629 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL); 1512 ixgbe_configure_rx_ring(adapter, rx_ring);
1630#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum
1631 Threshold Size mask */
1632 reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
1633 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
1634
1635 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
1636#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */
1637 reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
1638 reg_data |= adapter->hw.mac.mc_filter_type;
1639 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
1640
1641 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
1642 reg_data |= IXGBE_RXDCTL_ENABLE;
1643 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
1644 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1645 int j = adapter->rx_ring[0]->reg_idx;
1646 u32 k;
1647 for (k = 0; k < 10; k++) {
1648 if (IXGBE_READ_REG(&adapter->hw,
1649 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1650 break;
1651 else
1652 msleep(1);
1653 }
1654 }
1655 1513
1656 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; 1514 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1657 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1515 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1658 1516
1659 for (i = 0; i < rx_ring->count; i++) {
1660 union ixgbe_adv_rx_desc *rx_desc =
1661 IXGBE_RX_DESC_ADV(*rx_ring, i);
1662 struct sk_buff *skb;
1663
1664 skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
1665 if (!skb) {
1666 ret_val = 6;
1667 goto err_nomem;
1668 }
1669 skb_reserve(skb, NET_IP_ALIGN);
1670 rx_ring->rx_buffer_info[i].skb = skb;
1671 rx_ring->rx_buffer_info[i].dma =
1672 dma_map_single(&pdev->dev, skb->data,
1673 IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
1674 rx_desc->read.pkt_addr =
1675 cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
1676 memset(skb->data, 0x00, skb->len);
1677 }
1678
1679 return 0; 1517 return 0;
1680 1518
1681err_nomem: 1519err_nomem:
@@ -1689,16 +1527,21 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1689 u32 reg_data; 1527 u32 reg_data;
1690 1528
1691 /* right now we only support MAC loopback in the driver */ 1529 /* right now we only support MAC loopback in the driver */
1692
1693 /* Setup MAC loopback */
1694 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1530 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1531 /* Setup MAC loopback */
1695 reg_data |= IXGBE_HLREG0_LPBK; 1532 reg_data |= IXGBE_HLREG0_LPBK;
1696 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1697 1534
1535 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1536 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1538
1698 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC); 1539 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
1699 reg_data &= ~IXGBE_AUTOC_LMS_MASK; 1540 reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1700 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1541 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1701 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); 1542 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
1543 IXGBE_WRITE_FLUSH(&adapter->hw);
1544 msleep(10);
1702 1545
1703 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1546 /* Disable Atlas Tx lanes; re-enabled in reset path */
1704 if (hw->mac.type == ixgbe_mac_82598EB) { 1547 if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1756,15 +1599,81 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1756 return 13; 1599 return 13;
1757} 1600}
1758 1601
1602static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1603 struct ixgbe_ring *rx_ring,
1604 struct ixgbe_ring *tx_ring,
1605 unsigned int size)
1606{
1607 union ixgbe_adv_rx_desc *rx_desc;
1608 struct ixgbe_rx_buffer *rx_buffer_info;
1609 struct ixgbe_tx_buffer *tx_buffer_info;
1610 const int bufsz = rx_ring->rx_buf_len;
1611 u32 staterr;
1612 u16 rx_ntc, tx_ntc, count = 0;
1613
1614 /* initialize next to clean and descriptor values */
1615 rx_ntc = rx_ring->next_to_clean;
1616 tx_ntc = tx_ring->next_to_clean;
1617 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
1618 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1619
1620 while (staterr & IXGBE_RXD_STAT_DD) {
1621 /* check Rx buffer */
1622 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1623
1624 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
1625 dma_unmap_single(&adapter->pdev->dev,
1626 rx_buffer_info->dma,
1627 bufsz,
1628 DMA_FROM_DEVICE);
1629 rx_buffer_info->dma = 0;
1630
1631 /* verify contents of skb */
1632 if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
1633 count++;
1634
1635 /* unmap buffer on Tx side */
1636 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1637 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1638
1639 /* increment Rx/Tx next to clean counters */
1640 rx_ntc++;
1641 if (rx_ntc == rx_ring->count)
1642 rx_ntc = 0;
1643 tx_ntc++;
1644 if (tx_ntc == tx_ring->count)
1645 tx_ntc = 0;
1646
1647 /* fetch next descriptor */
1648 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
1649 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1650 }
1651
1652 /* re-map buffers to ring, store next to clean values */
1653 ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
1654 rx_ring->next_to_clean = rx_ntc;
1655 tx_ring->next_to_clean = tx_ntc;
1656
1657 return count;
1658}
1659
1759static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1660static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1760{ 1661{
1761 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1662 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1762 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1663 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1763 struct pci_dev *pdev = adapter->pdev; 1664 int i, j, lc, good_cnt, ret_val = 0;
1764 int i, j, k, l, lc, good_cnt, ret_val = 0; 1665 unsigned int size = 1024;
1765 unsigned long time; 1666 netdev_tx_t tx_ret_val;
1667 struct sk_buff *skb;
1766 1668
1767 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1); 1669 /* allocate test skb */
1670 skb = alloc_skb(size, GFP_KERNEL);
1671 if (!skb)
1672 return 11;
1673
1674 /* place data into test skb */
1675 ixgbe_create_lbtest_frame(skb, size);
1676 skb_put(skb, size);
1768 1677
1769 /* 1678 /*
1770 * Calculate the loop count based on the largest descriptor ring 1679 * Calculate the loop count based on the largest descriptor ring
@@ -1777,54 +1686,40 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1777 else 1686 else
1778 lc = ((rx_ring->count / 64) * 2) + 1; 1687 lc = ((rx_ring->count / 64) * 2) + 1;
1779 1688
1780 k = l = 0;
1781 for (j = 0; j <= lc; j++) { 1689 for (j = 0; j <= lc; j++) {
1782 for (i = 0; i < 64; i++) { 1690 /* reset count of good packets */
1783 ixgbe_create_lbtest_frame(
1784 tx_ring->tx_buffer_info[k].skb,
1785 1024);
1786 dma_sync_single_for_device(&pdev->dev,
1787 tx_ring->tx_buffer_info[k].dma,
1788 tx_ring->tx_buffer_info[k].length,
1789 DMA_TO_DEVICE);
1790 if (unlikely(++k == tx_ring->count))
1791 k = 0;
1792 }
1793 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
1794 msleep(200);
1795 /* set the start time for the receive */
1796 time = jiffies;
1797 good_cnt = 0; 1691 good_cnt = 0;
1798 do { 1692
1799 /* receive the sent packets */ 1693 /* place 64 packets on the transmit queue*/
1800 dma_sync_single_for_cpu(&pdev->dev, 1694 for (i = 0; i < 64; i++) {
1801 rx_ring->rx_buffer_info[l].dma, 1695 skb_get(skb);
1802 IXGBE_RXBUFFER_2048, 1696 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1803 DMA_FROM_DEVICE); 1697 adapter->netdev,
1804 ret_val = ixgbe_check_lbtest_frame( 1698 adapter,
1805 rx_ring->rx_buffer_info[l].skb, 1024); 1699 tx_ring);
1806 if (!ret_val) 1700 if (tx_ret_val == NETDEV_TX_OK)
1807 good_cnt++; 1701 good_cnt++;
1808 if (++l == rx_ring->count) 1702 }
1809 l = 0; 1703
1810 /*
1811 * time + 20 msecs (200 msecs on 2.4) is more than
1812 * enough time to complete the receives, if it's
1813 * exceeded, break and error off
1814 */
1815 } while (good_cnt < 64 && jiffies < (time + 20));
1816 if (good_cnt != 64) { 1704 if (good_cnt != 64) {
1817 /* ret_val is the same as mis-compare */ 1705 ret_val = 12;
1818 ret_val = 13;
1819 break; 1706 break;
1820 } 1707 }
1821 if (jiffies >= (time + 20)) { 1708
1822 /* Error code for time out error */ 1709 /* allow 200 milliseconds for packets to go from Tx to Rx */
1823 ret_val = 14; 1710 msleep(200);
1711
1712 good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
1713 tx_ring, size);
1714 if (good_cnt != 64) {
1715 ret_val = 13;
1824 break; 1716 break;
1825 } 1717 }
1826 } 1718 }
1827 1719
1720 /* free the original skb */
1721 kfree_skb(skb);
1722
1828 return ret_val; 1723 return ret_val;
1829} 1724}
1830 1725
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 072327c5e41a..2f1de8b90f9e 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -304,12 +304,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
304 if (!ixgbe_rx_is_fcoe(rx_desc)) 304 if (!ixgbe_rx_is_fcoe(rx_desc))
305 goto ddp_out; 305 goto ddp_out;
306 306
307 skb->ip_summed = CHECKSUM_UNNECESSARY;
308 sterr = le32_to_cpu(rx_desc->wb.upper.status_error); 307 sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
309 fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR); 308 fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
310 fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE); 309 fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
311 if (fcerr == IXGBE_FCERR_BADCRC) 310 if (fcerr == IXGBE_FCERR_BADCRC)
312 skb->ip_summed = CHECKSUM_NONE; 311 skb_checksum_none_assert(skb);
312 else
313 skb->ip_summed = CHECKSUM_UNNECESSARY;
313 314
314 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) 315 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
315 fh = (struct fc_frame_header *)(skb->data + 316 fh = (struct fc_frame_header *)(skb->data +
@@ -471,7 +472,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
471 472
472 /* write context desc */ 473 /* write context desc */
473 i = tx_ring->next_to_use; 474 i = tx_ring->next_to_use;
474 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 475 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
475 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 476 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
476 context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); 477 context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
477 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 478 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e32af434cc9d..d03eef96c0ba 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -50,7 +50,7 @@
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
-              "Intel(R) 10 Gigabit PCI Express Network Driver";
+			"Intel(R) 10 Gigabit PCI Express Network Driver";
 
 #define DRV_VERSION "2.0.84-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
@@ -120,7 +120,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
 #ifdef CONFIG_IXGBE_DCA
 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
-                            void *p);
+			    void *p);
 static struct notifier_block dca_notifier = {
 	.notifier_call = ixgbe_notify_dca,
 	.next          = NULL,
@@ -131,8 +131,8 @@ static struct notifier_block dca_notifier = {
 #ifdef CONFIG_PCI_IOV
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
-                 "per physical function");
+MODULE_PARM_DESC(max_vfs,
+		 "Maximum number of virtual functions to allocate per physical function");
 #endif /* CONFIG_PCI_IOV */
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,8 +169,8 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 
 	/* take a breather then clean up driver data */
 	msleep(100);
-	if (adapter->vfinfo)
-		kfree(adapter->vfinfo);
+
+	kfree(adapter->vfinfo);
 	adapter->vfinfo = NULL;
 
 	adapter->num_vfs = 0;
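Note: the vfinfo hunk drops a redundant NULL check. kfree(NULL) is defined to be a no-op, so guarding the call only adds a branch. A minimal illustration of the idiom, with a hypothetical container type:

	#include <linux/slab.h>

	/* kfree() ignores NULL, so teardown paths can free
	 * unconditionally and then poison the pointer. */
	static void example_teardown(struct example_priv *priv)	/* hypothetical */
	{
		kfree(priv->buf);	/* safe even if buf was never allocated */
		priv->buf = NULL;	/* guards against a later double free */
	}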
@@ -282,17 +282,17 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
 		regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
 		break;
 	default:
-		printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+		pr_info("%-15s %08x\n", reginfo->name,
 			IXGBE_READ_REG(hw, reginfo->ofs));
 		return;
 	}
 
 	for (i = 0; i < 8; i++) {
 		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
-		printk(KERN_ERR "%-15s ", rname);
+		pr_err("%-15s", rname);
 		for (j = 0; j < 8; j++)
-			printk(KERN_CONT "%08x ", regs[i*8+j]);
-		printk(KERN_CONT "\n");
+			pr_cont(" %08x", regs[i*8+j]);
+		pr_cont("\n");
 	}
 
 }
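Note: the printk(KERN_<level> ...) to pr_<level>() conversion running through these hunks is more than sugar: the pr_<level> macros pick up a per-file pr_fmt() prefix, and pr_cont() continues the previous line without one. A sketch of the usual pattern (the prefix and register name here are illustrative, not lifted from ixgbe):

	/* must be defined before includes that pull in printk.h */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>

	static void dump_row(const u32 *regs, int i)
	{
		int j;

		pr_err("%-15s", "RDBAL[0-7]");	/* prefixed, starts the line */
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i * 8 + j]);	/* same line, no prefix */
		pr_cont("\n");
	}

Moving the separating space from the row label into pr_cont() keeps the continuation fragments self-delimiting.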
@@ -322,18 +322,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 	/* Print netdevice Info */
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
-		printk(KERN_INFO "Device Name     state            "
+		pr_info("Device Name     state            "
 			"trans_start      last_rx\n");
-		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+		pr_info("%-15s %016lX %016lX %016lX\n",
 			netdev->name,
 			netdev->state,
 			netdev->trans_start,
 			netdev->last_rx);
 	}
 
 	/* Print Registers */
 	dev_info(&adapter->pdev->dev, "Register Dump\n");
-	printk(KERN_INFO " Register Name   Value\n");
+	pr_info(" Register Name   Value\n");
 	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
 	     reginfo->name; reginfo++) {
 		ixgbe_regdump(hw, reginfo);
@@ -344,13 +344,12 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 		goto exit;
 
 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
-	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ] "
-		"leng ntw timestamp\n");
+	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
 	for (n = 0; n < adapter->num_tx_queues; n++) {
 		tx_ring = adapter->tx_ring[n];
 		tx_buffer_info =
 			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
 			(u64)tx_buffer_info->dma,
 			tx_buffer_info->length,
@@ -377,18 +376,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 
 	for (n = 0; n < adapter->num_tx_queues; n++) {
 		tx_ring = adapter->tx_ring[n];
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "T [desc]     [address 63:0  ] "
+		pr_info("------------------------------------\n");
+		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("T [desc]     [address 63:0  ] "
 			"[PlPOIdStDDt Ln] [bi->dma       ] "
 			"leng ntw timestamp bi->skb\n");
 
 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
-			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			u0 = (struct my_u0 *)tx_desc;
-			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
+			pr_info("T [0x%03X]    %016llX %016llX %016llX"
 				" %04X %3X %016llX %p", i,
 				le64_to_cpu(u0->a),
 				le64_to_cpu(u0->b),
@@ -399,13 +398,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 				tx_buffer_info->skb);
 			if (i == tx_ring->next_to_use &&
 			    i == tx_ring->next_to_clean)
-				printk(KERN_CONT " NTC/U\n");
+				pr_cont(" NTC/U\n");
 			else if (i == tx_ring->next_to_use)
-				printk(KERN_CONT " NTU\n");
+				pr_cont(" NTU\n");
 			else if (i == tx_ring->next_to_clean)
-				printk(KERN_CONT " NTC\n");
+				pr_cont(" NTC\n");
 			else
-				printk(KERN_CONT "\n");
+				pr_cont("\n");
 
 			if (netif_msg_pktdata(adapter) &&
 			    tx_buffer_info->dma != 0)
@@ -419,11 +418,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 	/* Print RX Rings Summary */
 rx_ring_summary:
 	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
-	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	pr_info("Queue [NTU] [NTC]\n");
 	for (n = 0; n < adapter->num_rx_queues; n++) {
 		rx_ring = adapter->rx_ring[n];
-		printk(KERN_INFO "%5d %5X %5X\n", n,
-			rx_ring->next_to_use, rx_ring->next_to_clean);
+		pr_info("%5d %5X %5X\n",
+			n, rx_ring->next_to_use, rx_ring->next_to_clean);
 	}
 
 	/* Print RX Rings */
@@ -454,30 +453,30 @@ rx_ring_summary:
 	 */
 	for (n = 0; n < adapter->num_rx_queues; n++) {
 		rx_ring = adapter->rx_ring[n];
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
+		pr_info("------------------------------------\n");
+		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("R  [desc]      [ PktBuf     A0] "
 			"[ HeadBuf   DD] [bi->dma       ] [bi->skb] "
 			"<-- Adv Rx Read format\n");
-		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
+		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
 			"[vl er S cks ln] ---------------- [bi->skb] "
 			"<-- Adv Rx Write-Back format\n");
 
 		for (i = 0; i < rx_ring->count; i++) {
 			rx_buffer_info = &rx_ring->rx_buffer_info[i];
-			rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 			u0 = (struct my_u0 *)rx_desc;
 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 			if (staterr & IXGBE_RXD_STAT_DD) {
 				/* Descriptor Done */
-				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+				pr_info("RWB[0x%03X]     %016llX "
 					"%016llX ---------------- %p", i,
 					le64_to_cpu(u0->a),
 					le64_to_cpu(u0->b),
 					rx_buffer_info->skb);
 			} else {
-				printk(KERN_INFO "R  [0x%03X]     %016llX "
+				pr_info("R  [0x%03X]     %016llX "
 					"%016llX %016llX %p", i,
 					le64_to_cpu(u0->a),
 					le64_to_cpu(u0->b),
@@ -503,11 +502,11 @@ rx_ring_summary:
 			}
 
 			if (i == rx_ring->next_to_use)
-				printk(KERN_CONT " NTU\n");
+				pr_cont(" NTU\n");
 			else if (i == rx_ring->next_to_clean)
-				printk(KERN_CONT " NTC\n");
+				pr_cont(" NTC\n");
 			else
-				printk(KERN_CONT "\n");
+				pr_cont("\n");
 
 		}
 	}
@@ -523,7 +522,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 	/* Let firmware take over control of h/w */
 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -533,7 +532,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
 	/* Let firmware know the driver has taken over */
 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 /*
@@ -545,7 +544,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
  *
  */
 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
-                           u8 queue, u8 msix_vector)
+			   u8 queue, u8 msix_vector)
 {
 	u32 ivar, index;
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -586,7 +585,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
 }
 
 static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
-                                          u64 qmask)
+					  u64 qmask)
 {
 	u32 mask;
 
@@ -601,9 +600,9 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 	}
 }
 
-static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-                                             struct ixgbe_tx_buffer
-                                             *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
+				      struct ixgbe_tx_buffer
+				      *tx_buffer_info)
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
@@ -637,7 +636,7 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
  * Returns : true if in xon state (currently not paused)
  */
 static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
-                                      struct ixgbe_ring *tx_ring)
+				      struct ixgbe_ring *tx_ring)
 {
 	u32 txoff = IXGBE_TFCS_TXOFF;
 
@@ -682,8 +681,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
 }
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-                                       struct ixgbe_ring *tx_ring,
-                                       unsigned int eop)
+				       struct ixgbe_ring *tx_ring,
+				       unsigned int eop)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
@@ -695,7 +694,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 	    ixgbe_tx_xon_state(adapter, tx_ring)) {
 		/* detected Tx unit hang */
 		union ixgbe_adv_tx_desc *tx_desc;
-		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 		e_err(drv, "Detected Tx Unit Hang\n"
 			"  Tx Queue             <%d>\n"
 			"  TDH, TDT             <%x>, <%x>\n"
@@ -732,7 +731,7 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
  * @tx_ring: tx ring to clean
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
-                               struct ixgbe_ring *tx_ring)
+			       struct ixgbe_ring *tx_ring)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct net_device *netdev = adapter->netdev;
@@ -743,7 +742,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->tx_buffer_info[i].next_to_watch;
-	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 
 	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
 	       (count < tx_ring->work_limit)) {
@@ -751,7 +750,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		rmb(); /* read buffer_info after eop_desc */
 		for ( ; !cleaned; count++) {
 			struct sk_buff *skb;
-			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			cleaned = (i == eop);
 			skb = tx_buffer_info->skb;
@@ -781,7 +780,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			}
 
 			ixgbe_unmap_and_free_tx_resource(adapter,
-			                                 tx_buffer_info);
+							 tx_buffer_info);
 
 			tx_desc->wb.status = 0;
 
@@ -791,14 +790,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 	}
 
 	tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(netdev) &&
-	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -832,7 +831,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 
 #ifdef CONFIG_IXGBE_DCA
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-                                struct ixgbe_ring *rx_ring)
+				struct ixgbe_ring *rx_ring)
 {
 	u32 rxctrl;
 	int cpu = get_cpu();
@@ -846,13 +845,13 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
 		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-		           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
 	}
 	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
 	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
 	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
 	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
-	            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
 	rx_ring->cpu = cpu;
 	}
@@ -860,7 +859,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-                                struct ixgbe_ring *tx_ring)
+				struct ixgbe_ring *tx_ring)
 {
 	u32 txctrl;
 	int cpu = get_cpu();
@@ -878,7 +877,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
 		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
 		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-		           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
 		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
 	}
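Note: both DCA helpers open with get_cpu(), which disables preemption so the CPU number stays valid while the per-CPU tag is programmed into the device; the matching put_cpu() falls past the end of these hunk windows. The pairing in isolation, with the tag programming reduced to a placeholder:

	#include <linux/smp.h>

	static void example_update_dca(void)	/* illustrative only */
	{
		int cpu = get_cpu();	/* disables preemption; cpu can't change */

		/* ... program the DCA tag for 'cpu' into the device ... */

		put_cpu();		/* re-enables preemption */
	}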
@@ -946,16 +945,15 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
  * @rx_desc: rx descriptor
  **/
 static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
-                              struct sk_buff *skb, u8 status,
-                              struct ixgbe_ring *ring,
-                              union ixgbe_adv_rx_desc *rx_desc)
+			      struct sk_buff *skb, u8 status,
+			      struct ixgbe_ring *ring,
+			      union ixgbe_adv_rx_desc *rx_desc)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct napi_struct *napi = &q_vector->napi;
 	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
 	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
-	skb_record_rx_queue(skb, ring->queue_index);
 	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
 		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
 			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
@@ -981,7 +979,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 {
 	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
 	/* Rx csum disabled */
 	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -1017,7 +1015,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 }
 
 static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-                                         struct ixgbe_ring *rx_ring, u32 val)
+					 struct ixgbe_ring *rx_ring, u32 val)
 {
 	/*
 	 * Force memory writes to complete before letting h/w
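Note: the comment that closes this window describes the classic descriptor-producer rule: the CPU's descriptor writes must be globally visible before the tail-register write tells the NIC to fetch them. A hedged sketch of the usual body of such a release helper (the real ixgbe code differs in how it reaches the tail register):

	#include <linux/io.h>

	/* Sketch: publish new descriptors, then move the hardware tail. */
	static inline void example_release_rx_desc(void __iomem *tail_reg, u32 val)
	{
		/* order the descriptor stores ahead of the MMIO tail update */
		wmb();
		writel(val, tail_reg);
	}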
@@ -1033,25 +1031,27 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
  * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-                                   struct ixgbe_ring *rx_ring,
-                                   int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
+			    struct ixgbe_ring *rx_ring,
+			    int cleaned_count)
 {
+	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
 	unsigned int i;
+	unsigned int bufsz = rx_ring->rx_buf_len;
 
 	i = rx_ring->next_to_use;
 	bi = &rx_ring->rx_buffer_info[i];
 
 	while (cleaned_count--) {
-		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 
 		if (!bi->page_dma &&
 		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
 			if (!bi->page) {
-				bi->page = alloc_page(GFP_ATOMIC);
+				bi->page = netdev_alloc_page(netdev);
 				if (!bi->page) {
 					adapter->alloc_rx_page_failed++;
 					goto no_buffers;
@@ -1063,29 +1063,28 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 		}
 
 			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
					    bi->page_offset,
					    (PAGE_SIZE / 2),
					    DMA_FROM_DEVICE);
 		}
 
 		if (!bi->skb) {
-			struct sk_buff *skb;
-			/* netdev_alloc_skb reserves 32 bytes up front!! */
-			uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
-			skb = netdev_alloc_skb(adapter->netdev, bufsz);
+			struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
+									bufsz);
+			bi->skb = skb;
 
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
+			/* initialize queue mapping */
+			skb_record_rx_queue(skb, rx_ring->queue_index);
+		}
 
-			/* advance the data pointer to the next cache line */
-			skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
-			                  - skb->data));
-
-			bi->skb = skb;
-			bi->dma = dma_map_single(&pdev->dev, skb->data,
-			                         rx_ring->rx_buf_len,
+		if (!bi->dma) {
+			bi->dma = dma_map_single(&pdev->dev,
+						 bi->skb->data,
+						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
@@ -1095,6 +1094,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 		} else {
 			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+			rx_desc->read.hdr_addr = 0;
 		}
 
 		i++;
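Note: the rewrite above replaces the hand-rolled cache-line alignment dance (over-allocate by SMP_CACHE_BYTES, then skb_reserve() up to the next cache line) with netdev_alloc_skb_ip_align(), which reserves NET_IP_ALIGN bytes so the IP header lands naturally aligned. It also moves skb_record_rx_queue() here, to allocation time, which is why the earlier ixgbe_receive_skb() hunk could drop its copy. A minimal sketch of the allocation call:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Sketch: 'netdev' and 'bufsz' mirror the variables in the hunk.
	 * Equivalent to netdev_alloc_skb() + skb_reserve(skb, NET_IP_ALIGN):
	 * the 2-byte pad cancels the 14-byte Ethernet header so the IP
	 * header that follows is 4-byte aligned. */
	static struct sk_buff *example_alloc_rx_skb(struct net_device *netdev,
						    unsigned int bufsz)
	{
		return netdev_alloc_skb_ip_align(netdev, bufsz);
	}

Zeroing hdr_addr in the non-split path is also functional, not cosmetic: the header descriptor field must not carry a stale address when packet split is disabled.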
@@ -1126,8 +1126,8 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
 {
 	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-	        IXGBE_RXDADV_RSCCNT_MASK) >>
-	        IXGBE_RXDADV_RSCCNT_SHIFT;
+		IXGBE_RXDADV_RSCCNT_MASK) >>
+		IXGBE_RXDADV_RSCCNT_SHIFT;
 }
 
 /**
@@ -1140,7 +1140,7 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
  * turns it into the frag list owner.
  **/
 static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
-                                                        u64 *count)
+							u64 *count)
 {
 	unsigned int frag_list_size = 0;
 
@@ -1168,8 +1168,8 @@ struct ixgbe_rsc_cb {
 #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
 
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
-                               struct ixgbe_ring *rx_ring,
-                               int *work_done, int work_to_do)
+			       struct ixgbe_ring *rx_ring,
+			       int *work_done, int work_to_do)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct net_device *netdev = adapter->netdev;
@@ -1188,7 +1188,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
 
 	i = rx_ring->next_to_clean;
-	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
@@ -1231,9 +1231,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
 			} else {
 				dma_unmap_single(&pdev->dev,
-				                 rx_buffer_info->dma,
-				                 rx_ring->rx_buf_len,
-				                 DMA_FROM_DEVICE);
+						 rx_buffer_info->dma,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
 			}
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
@@ -1244,9 +1244,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-			                   rx_buffer_info->page,
-			                   rx_buffer_info->page_offset,
-			                   upper_len);
+					   rx_buffer_info->page,
+					   rx_buffer_info->page_offset,
+					   upper_len);
 
 			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
 			    (page_count(rx_buffer_info->page) != 1))
@@ -1263,7 +1263,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (i == rx_ring->count)
 			i = 0;
 
-		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
 		prefetch(next_rxd);
 		cleaned_count++;
 
@@ -1280,18 +1280,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 		if (staterr & IXGBE_RXD_STAT_EOP) {
 			if (skb->prev)
-				skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+				skb = ixgbe_transform_rsc_queue(skb,
+								&(rx_ring->rsc_count));
 			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 				if (IXGBE_RSC_CB(skb)->delay_unmap) {
 					dma_unmap_single(&pdev->dev,
-					                 IXGBE_RSC_CB(skb)->dma,
-					                 rx_ring->rx_buf_len,
-					                 DMA_FROM_DEVICE);
+							 IXGBE_RSC_CB(skb)->dma,
+							 rx_ring->rx_buf_len,
+							 DMA_FROM_DEVICE);
 					IXGBE_RSC_CB(skb)->dma = 0;
 					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
 				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-					rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+					rx_ring->rsc_count +=
+						skb_shinfo(skb)->nr_frags;
 				else
 					rx_ring->rsc_count++;
 				rx_ring->rsc_flush++;
@@ -1403,24 +1405,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 		q_vector = adapter->q_vector[v_idx];
 		/* XXX for_each_set_bit(...) */
 		r_idx = find_first_bit(q_vector->rxr_idx,
-		                       adapter->num_rx_queues);
+				       adapter->num_rx_queues);
 
 		for (i = 0; i < q_vector->rxr_count; i++) {
 			j = adapter->rx_ring[r_idx]->reg_idx;
 			ixgbe_set_ivar(adapter, 0, j, v_idx);
 			r_idx = find_next_bit(q_vector->rxr_idx,
-			                      adapter->num_rx_queues,
-			                      r_idx + 1);
+					      adapter->num_rx_queues,
+					      r_idx + 1);
 		}
 		r_idx = find_first_bit(q_vector->txr_idx,
-		                       adapter->num_tx_queues);
+				       adapter->num_tx_queues);
 
 		for (i = 0; i < q_vector->txr_count; i++) {
 			j = adapter->tx_ring[r_idx]->reg_idx;
 			ixgbe_set_ivar(adapter, 1, j, v_idx);
 			r_idx = find_next_bit(q_vector->txr_idx,
-			                      adapter->num_tx_queues,
-			                      r_idx + 1);
+					      adapter->num_tx_queues,
+					      r_idx + 1);
 		}
 
 		if (q_vector->txr_count && !q_vector->rxr_count)
@@ -1435,7 +1437,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-		               v_idx);
+			       v_idx);
 	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 		ixgbe_set_ivar(adapter, -1, 1, v_idx);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
@@ -1477,8 +1479,8 @@ enum latency_range {
  * parameter (see ixgbe_param.c)
  **/
 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
-                           u32 eitr, u8 itr_setting,
-                           int packets, int bytes)
+			   u32 eitr, u8 itr_setting,
+			   int packets, int bytes)
 {
 	unsigned int retval = itr_setting;
 	u32 timepassed_us;
@@ -1567,30 +1569,30 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 	for (i = 0; i < q_vector->txr_count; i++) {
 		tx_ring = adapter->tx_ring[r_idx];
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-		                           q_vector->tx_itr,
-		                           tx_ring->total_packets,
-		                           tx_ring->total_bytes);
+					   q_vector->tx_itr,
+					   tx_ring->total_packets,
+					   tx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
 		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
-		                    q_vector->tx_itr - 1 : ret_itr);
+				    q_vector->tx_itr - 1 : ret_itr);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
 		rx_ring = adapter->rx_ring[r_idx];
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-		                           q_vector->rx_itr,
-		                           rx_ring->total_packets,
-		                           rx_ring->total_bytes);
+					   q_vector->rx_itr,
+					   rx_ring->total_packets,
+					   rx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
 		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
-		                    q_vector->rx_itr - 1 : ret_itr);
+				    q_vector->rx_itr - 1 : ret_itr);
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
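Note: the /* XXX for_each_set_bit(...) */ comment in ixgbe_configure_msix() points at the bitmap iterator that replaces this whole find_first_bit()/find_next_bit() dance. A sketch of the equivalent loop, with 'max_rings' standing in for num_rx_queues:

	#include <linux/bitops.h>

	/* Sketch: visit every ring index set in the vector's bitmap. */
	static void example_walk_rings(unsigned long *rxr_idx, int max_rings)
	{
		int r_idx;

		for_each_set_bit(r_idx, rxr_idx, max_rings) {
			/* ... program the IVAR entry for ring r_idx ... */
		}
	}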
@@ -1627,39 +1629,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 static void ixgbe_check_overtemp_task(struct work_struct *work)
 {
 	struct ixgbe_adapter *adapter = container_of(work,
-	                                             struct ixgbe_adapter,
-	                                             check_overtemp_task);
+						     struct ixgbe_adapter,
+						     check_overtemp_task);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr = adapter->interrupt_event;
 
-	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
-		switch (hw->device_id) {
-		case IXGBE_DEV_ID_82599_T3_LOM: {
-			u32 autoneg;
-			bool link_up = false;
+	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+		return;
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82599_T3_LOM: {
+		u32 autoneg;
+		bool link_up = false;
 
-			if (hw->mac.ops.check_link)
-				hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+		if (hw->mac.ops.check_link)
+			hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
 
-			if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
-			    (eicr & IXGBE_EICR_LSC))
-				/* Check if this is due to overtemp */
-				if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
-					break;
-			}
-			return;
-		default:
-			if (!(eicr & IXGBE_EICR_GPI_SDP0))
-				return;
-			break;
-		}
-		e_crit(drv, "Network adapter has been stopped because it has "
-		       "over heated. Restart the computer. If the problem "
-		       "persists, power off the system and replace the "
-		       "adapter\n");
-		/* write to clear the interrupt */
-		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+		if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
+		    (eicr & IXGBE_EICR_LSC))
+			/* Check if this is due to overtemp */
+			if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
+				break;
+		return;
+	}
+	default:
+		if (!(eicr & IXGBE_EICR_GPI_SDP0))
+			return;
+		break;
 	}
+	e_crit(drv,
+	       "Network adapter has been stopped because it has over heated. "
+	       "Restart the computer. If the problem persists, "
+	       "power off the system and replace the adapter\n");
+	/* write to clear the interrupt */
+	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
 }
 
 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
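Note: the overtemp rework is a textbook guard-clause refactor: inverting the capability test and returning early removes one nesting level from everything that follows, with no behavioral change. The shape in miniature, using a hypothetical type:

	/* Before: the whole body hides inside the capability check. */
	static void before(struct example_dev *d)
	{
		if (d->capable) {
			/* ... long body, all one extra indent stop ... */
		}
	}

	/* After: bail out first, then write the interesting code flat. */
	static void after(struct example_dev *d)
	{
		if (!d->capable)
			return;

		/* ... the same body, one indent level shallower ... */
	}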
@@ -1746,9 +1749,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 		netif_tx_stop_all_queues(netdev);
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			struct ixgbe_ring *tx_ring =
-			                            adapter->tx_ring[i];
+						adapter->tx_ring[i];
 			if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-			                       &tx_ring->reinit_state))
+					       &tx_ring->reinit_state))
 				schedule_work(&adapter->fdir_reinit_task);
 		}
 	}
@@ -1777,7 +1780,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
 }
 
 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
-                                            u64 qmask)
+					    u64 qmask)
 {
 	u32 mask;
 
@@ -1809,7 +1812,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 		tx_ring->total_bytes = 0;
 		tx_ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	/* EIAM disabled interrupts (on this vector) for us */
@@ -1837,7 +1840,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 		rx_ring->total_bytes = 0;
 		rx_ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	if (!q_vector->rxr_count)
@@ -1867,7 +1870,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 		ring->total_bytes = 0;
 		ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1876,7 +1879,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 		ring->total_bytes = 0;
 		ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	/* EIAM disabled interrupts (on this vector) for us */
@@ -1896,7 +1899,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
-	                       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_ring *rx_ring = NULL;
 	int work_done = 0;
@@ -1918,7 +1921,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
-			                        ((u64)1 << q_vector->v_idx));
+						((u64)1 << q_vector->v_idx));
 	}
 
 	return work_done;
@@ -1935,7 +1938,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
-	                       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_ring *ring = NULL;
 	int work_done = 0, i;
@@ -1951,7 +1954,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 #endif
 		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
@@ -1967,7 +1970,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 #endif
 		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1979,7 +1982,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
-			                        ((u64)1 << q_vector->v_idx));
+						((u64)1 << q_vector->v_idx));
 		return 0;
 	}
 
@@ -1997,7 +2000,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
-	                       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_ring *tx_ring = NULL;
 	int work_done = 0;
@@ -2019,14 +2022,15 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 	if (adapter->tx_itr_setting & 1)
 		ixgbe_set_itr_msix(q_vector);
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+		ixgbe_irq_enable_queues(adapter,
+					((u64)1 << q_vector->v_idx));
 	}
 
 	return work_done;
 }
 
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
-                                     int r_idx)
+				     int r_idx)
 {
 	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 
@@ -2035,7 +2039,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-                                     int t_idx)
+				     int t_idx)
 {
 	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 
@@ -2055,7 +2059,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  * mapping configurations in here.
  **/
 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
-                                      int vectors)
+				      int vectors)
 {
 	int v_start = 0;
 	int rxr_idx = 0, txr_idx = 0;
@@ -2122,7 +2126,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	irqreturn_t (*handler)(int, void *);
 	int i, vector, q_vectors, err;
-	int ri=0, ti=0;
+	int ri = 0, ti = 0;
 
 	/* Decrement for Other and TCP Timer vectors */
 	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -2133,26 +2137,24 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	goto out;
 
 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
-                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
-                         &ixgbe_msix_clean_many)
+			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+			 &ixgbe_msix_clean_many)
 	for (vector = 0; vector < q_vectors; vector++) {
 		handler = SET_HANDLER(adapter->q_vector[vector]);
 
-		if(handler == &ixgbe_msix_clean_rx) {
+		if (handler == &ixgbe_msix_clean_rx) {
 			sprintf(adapter->name[vector], "%s-%s-%d",
 				netdev->name, "rx", ri++);
-		}
-		else if(handler == &ixgbe_msix_clean_tx) {
+		} else if (handler == &ixgbe_msix_clean_tx) {
 			sprintf(adapter->name[vector], "%s-%s-%d",
 				netdev->name, "tx", ti++);
-		}
-		else
+		} else
 			sprintf(adapter->name[vector], "%s-%s-%d",
 				netdev->name, "TxRx", vector);
 
 		err = request_irq(adapter->msix_entries[vector].vector,
-		                  handler, 0, adapter->name[vector],
-		                  adapter->q_vector[vector]);
+				  handler, 0, adapter->name[vector],
+				  adapter->q_vector[vector]);
 		if (err) {
 			e_err(probe, "request_irq failed for MSIX interrupt "
 			      "Error: %d\n", err);
@@ -2162,7 +2164,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
 	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-	                  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+			  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 	if (err) {
 		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
@@ -2173,7 +2175,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
 	for (i = vector - 1; i >= 0; i--)
 		free_irq(adapter->msix_entries[--vector].vector,
-		         adapter->q_vector[i]);
+			 adapter->q_vector[i]);
 	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
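Note: the free_queue_irqs label shows the standard partial-failure unwind: release the vectors acquired so far in reverse order, then tear down MSI-X itself. The original mixes two index variables (--vector and i); a cleaner equivalent of the same path, sketched against the surrounding ixgbe types:

	/* Sketch: undo request_irq() for vectors [0, vector) after a
	 * failure at index 'vector'; mirrors free_queue_irqs above. */
	static void example_unwind_msix(struct ixgbe_adapter *adapter, int vector)
	{
		int i;

		for (i = vector - 1; i >= 0; i--)
			free_irq(adapter->msix_entries[i].vector,
				 adapter->q_vector[i]);

		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}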
@@ -2191,13 +2193,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
 
 	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
-	                                    q_vector->tx_itr,
-	                                    tx_ring->total_packets,
-	                                    tx_ring->total_bytes);
+					    q_vector->tx_itr,
+					    tx_ring->total_packets,
+					    tx_ring->total_bytes);
 	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
-	                                    q_vector->rx_itr,
-	                                    rx_ring->total_packets,
-	                                    rx_ring->total_bytes);
+					    q_vector->rx_itr,
+					    rx_ring->total_packets,
+					    rx_ring->total_bytes);
 
 	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
 
@@ -2343,10 +2345,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 		err = ixgbe_request_msix_irqs(adapter);
 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
-		                  netdev->name, netdev);
+				  netdev->name, netdev);
 	} else {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
-		                  netdev->name, netdev);
+				  netdev->name, netdev);
 	}
 
 	if (err)
@@ -2370,7 +2372,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 		i--;
 		for (; i >= 0; i--) {
 			free_irq(adapter->msix_entries[i].vector,
-			         adapter->q_vector[i]);
+				 adapter->q_vector[i]);
 		}
 
 		ixgbe_reset_q_vectors(adapter);
@@ -2413,7 +2415,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-	                EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
+			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
 
 	ixgbe_set_ivar(adapter, 0, 0, 0);
 	ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -2425,95 +2427,140 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2425} 2427}
2426 2428
2427/** 2429/**
2428 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset 2430 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2429 * @adapter: board private structure 2431 * @adapter: board private structure
2432 * @ring: structure containing ring specific data
2430 * 2433 *
2431 * Configure the Tx unit of the MAC after a reset. 2434 * Configure the Tx descriptor ring after a reset.
2432 **/ 2435 **/
2433static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 2436void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2437 struct ixgbe_ring *ring)
2434{ 2438{
2435 u64 tdba;
2436 struct ixgbe_hw *hw = &adapter->hw; 2439 struct ixgbe_hw *hw = &adapter->hw;
2437 u32 i, j, tdlen, txctrl; 2440 u64 tdba = ring->dma;
2441 int wait_loop = 10;
2442 u32 txdctl;
2443 u16 reg_idx = ring->reg_idx;
2438 2444
2439 /* Setup the HW Tx Head and Tail descriptor pointers */ 2445 /* disable queue to avoid issues while updating state */
2440 for (i = 0; i < adapter->num_tx_queues; i++) { 2446 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2441 struct ixgbe_ring *ring = adapter->tx_ring[i]; 2447 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2442 j = ring->reg_idx; 2448 txdctl & ~IXGBE_TXDCTL_ENABLE);
2443 tdba = ring->dma; 2449 IXGBE_WRITE_FLUSH(hw);
2444 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 2450
2445 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 2451 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2446 (tdba & DMA_BIT_MASK(32))); 2452 (tdba & DMA_BIT_MASK(32)));
2447 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 2453 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2448 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 2454 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2449 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 2455 ring->count * sizeof(union ixgbe_adv_tx_desc));
2450 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 2456 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2451 adapter->tx_ring[i]->head = IXGBE_TDH(j); 2457 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2452 adapter->tx_ring[i]->tail = IXGBE_TDT(j); 2458 ring->head = IXGBE_TDH(reg_idx);
2453 /* 2459 ring->tail = IXGBE_TDT(reg_idx);
2454 * Disable Tx Head Writeback RO bit, since this hoses 2460
2455 * bookkeeping if things aren't delivered in order. 2461 /* configure fetching thresholds */
2456 */ 2462 if (adapter->rx_itr_setting == 0) {
2457 switch (hw->mac.type) { 2463 /* cannot set wthresh when itr==0 */
2458 case ixgbe_mac_82598EB: 2464 txdctl &= ~0x007F0000;
2459 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 2465 } else {
2460 break; 2466 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2461 case ixgbe_mac_82599EB: 2467 txdctl |= (8 << 16);
2462 default: 2468 }
2463 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 2469 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2464 break; 2470 /* PThresh workaround for Tx hang with DFP enabled. */
2465 } 2471 txdctl |= 32;
2466 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2467 switch (hw->mac.type) {
2468 case ixgbe_mac_82598EB:
2469 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
2470 break;
2471 case ixgbe_mac_82599EB:
2472 default:
2473 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
2474 break;
2475 }
2476 } 2472 }
2477 2473
2478 if (hw->mac.type == ixgbe_mac_82599EB) { 2474 /* reinitialize flowdirector state */
2479 u32 rttdcs; 2475 set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
2480 u32 mask;
2481 2476
2482 /* disable the arbiter while setting MTQC */ 2477 /* enable queue */
2483 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2478 txdctl |= IXGBE_TXDCTL_ENABLE;
2484 rttdcs |= IXGBE_RTTDCS_ARBDIS; 2479 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2485 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2486 2480
2487 /* set transmit pool layout */ 2481 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2488 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); 2482 if (hw->mac.type == ixgbe_mac_82598EB &&
2489 switch (adapter->flags & mask) { 2483 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2484 return;
2490 2485
2491 case (IXGBE_FLAG_SRIOV_ENABLED): 2486 /* poll to verify queue is enabled */
2492 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 2487 do {
2493 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); 2488 msleep(1);
2494 break; 2489 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2490 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2491 if (!wait_loop)
2492 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2493}
2495 2494
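The per-ring Tx path above follows a disable-program-enable-poll shape: TXDCTL.ENABLE is cleared and flushed before the base/length registers change, and after setting ENABLE again the driver polls with a bounded retry budget instead of trusting the write. A minimal user-space sketch of that bounded poll; mock_read/mock_write are stand-ins for the MMIO accessors, and the bit value is illustrative, not the hardware's:

#include <stdio.h>
#include <stdint.h>

#define TXDCTL_ENABLE (1u << 25)	/* illustrative bit position */

static uint32_t txdctl_reg;		/* stands in for the MMIO register */

static uint32_t mock_read(void) { return txdctl_reg; }
static void mock_write(uint32_t v) { txdctl_reg = v; }

/* Returns 0 once ENABLE reads back set, -1 if the poll budget runs out. */
static int enable_and_poll(void)
{
	int wait_loop = 10;
	uint32_t txdctl = mock_read();

	mock_write(txdctl | TXDCTL_ENABLE);
	do {
		/* the driver sleeps ~1 ms per iteration; omitted here */
		txdctl = mock_read();
	} while (--wait_loop && !(txdctl & TXDCTL_ENABLE));

	return wait_loop ? 0 : -1;
}

int main(void)
{
	printf("queue %s\n", enable_and_poll() ? "failed" : "enabled");
	return 0;
}

On real hardware the register can lag the write, which is what the retry budget is for; in this mock the first read already sees the bit set.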
2496 case (IXGBE_FLAG_DCB_ENABLED): 2495static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2497 /* We enable 8 traffic classes, DCB only */ 2496{
2498 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 2497 struct ixgbe_hw *hw = &adapter->hw;
2499 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); 2498 u32 rttdcs;
2500 break; 2499 u32 mask;
2501 2500
2502 default: 2501 if (hw->mac.type == ixgbe_mac_82598EB)
2503 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2502 return;
2504 break; 2503
2505 } 2504 /* disable the arbiter while setting MTQC */
2505 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2506 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2507 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2508
2509 /* set transmit pool layout */
2510 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2511 switch (adapter->flags & mask) {
2512
2513 case (IXGBE_FLAG_SRIOV_ENABLED):
2514 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2515 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2516 break;
2517
2518 case (IXGBE_FLAG_DCB_ENABLED):
2519 /* We enable 8 traffic classes, DCB only */
2520 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2521 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2522 break;
2523
2524 default:
2525 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2526 break;
2527 }
2528
2529 /* re-enable the arbiter */
2530 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2531 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2532}
2533
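ixgbe_setup_mtqc above switches on the flags word masked down to just the SR-IOV and DCB bits, so only those combinations need case labels and unrelated flags cannot perturb the dispatch. A standalone sketch of the mask-then-switch idiom, with made-up flag values:

#include <stdio.h>

#define FLAG_SRIOV (1u << 0)	/* illustrative bit positions */
#define FLAG_DCB   (1u << 1)

static const char *tx_pool_layout(unsigned int flags)
{
	switch (flags & (FLAG_SRIOV | FLAG_DCB)) {
	case FLAG_SRIOV:
		return "VT mode, 64 VF pools";
	case FLAG_DCB:
		return "RT mode, 8 TCs / 8 TQs";
	default:
		return "64 queues, 1 packet buffer";
	}
}

int main(void)
{
	/* the unrelated (1u << 7) bit is masked away before dispatch */
	printf("%s\n", tx_pool_layout(FLAG_DCB | (1u << 7)));
	return 0;
}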
2534/**
2535 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
2536 * @adapter: board private structure
2537 *
2538 * Configure the Tx unit of the MAC after a reset.
2539 **/
2540static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2541{
2542 struct ixgbe_hw *hw = &adapter->hw;
2543 u32 dmatxctl;
2544 u32 i;
2545
2546 ixgbe_setup_mtqc(adapter);
2506 2547
2507 /* re-enable the arbiter */ 2548 if (hw->mac.type != ixgbe_mac_82598EB) {
2508 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 2549 /* DMATXCTL.EN must be before Tx queues are enabled */
2509 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2550 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2551 dmatxctl |= IXGBE_DMATXCTL_TE;
2552 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2510 } 2553 }
2554
2555 /* Setup the HW Tx Head and Tail descriptor pointers */
2556 for (i = 0; i < adapter->num_tx_queues; i++)
2557 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2511} 2558}
2512 2559
2513#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 2560#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2514 2561
2515static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, 2562static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2516 struct ixgbe_ring *rx_ring) 2563 struct ixgbe_ring *rx_ring)
2517{ 2564{
2518 u32 srrctl; 2565 u32 srrctl;
2519 int index; 2566 int index;
@@ -2529,6 +2576,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2529 2576
2530 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2577 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2531 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 2578 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2579 if (adapter->num_vfs)
2580 srrctl |= IXGBE_SRRCTL_DROP_EN;
2532 2581
2533 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 2582 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2534 IXGBE_SRRCTL_BSIZEHDR_MASK; 2583 IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -2549,20 +2598,46 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2549 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 2598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
2550} 2599}
2551 2600
2552static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 2601static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2553{ 2602{
2554 u32 mrqc = 0; 2603 struct ixgbe_hw *hw = &adapter->hw;
2604 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2605 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2606 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2607 u32 mrqc = 0, reta = 0;
2608 u32 rxcsum;
2609 int i, j;
2555 int mask; 2610 int mask;
2556 2611
2557 if (!(adapter->hw.mac.type == ixgbe_mac_82599EB)) 2612 /* Fill out hash function seeds */
2558 return mrqc; 2613 for (i = 0; i < 10; i++)
2614 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2615
2616 /* Fill out redirection table */
2617 for (i = 0, j = 0; i < 128; i++, j++) {
2618 if (j == adapter->ring_feature[RING_F_RSS].indices)
2619 j = 0;
2620 /* reta = 4-byte sliding window of
2621 * 0x00..(indices-1)(indices-1)00..etc. */
2622 reta = (reta << 8) | (j * 0x11);
2623 if ((i & 3) == 3)
2624 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2625 }
2626
2627 /* Disable indicating checksum in descriptor; enables RSS hash */
2628 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2629 rxcsum |= IXGBE_RXCSUM_PCSD;
2630 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2559 2631
2560 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED 2632 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2633 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2634 else
2635 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
2561#ifdef CONFIG_IXGBE_DCB 2636#ifdef CONFIG_IXGBE_DCB
2562 | IXGBE_FLAG_DCB_ENABLED 2637 | IXGBE_FLAG_DCB_ENABLED
2563#endif 2638#endif
2564 | IXGBE_FLAG_SRIOV_ENABLED 2639 | IXGBE_FLAG_SRIOV_ENABLED
2565 ); 2640 );
2566 2641
2567 switch (mask) { 2642 switch (mask) {
2568 case (IXGBE_FLAG_RSS_ENABLED): 2643 case (IXGBE_FLAG_RSS_ENABLED):
@@ -2580,7 +2655,13 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2580 break; 2655 break;
2581 } 2656 }
2582 2657
2583 return mrqc; 2658 /* Perform hash on these packet types */
2659 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2660 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2661 | IXGBE_MRQC_RSS_FIELD_IPV6
2662 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2663
2664 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2584} 2665}
2585 2666
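The redirection-table fill in ixgbe_setup_mrqc packs four one-byte entries into each 32-bit RETA register and writes once every fourth iteration; j * 0x11 duplicates the queue index into both nibbles of its byte. A runnable sketch that reproduces the computation for four RSS queues (since 128 is a multiple of 4, every word comes out 0x00112233):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int indices = 4;	/* active RSS queues (assumption) */
	uint32_t reta = 0;
	int i, j;

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == indices)
			j = 0;
		/* j * 0x11 replicates the queue index into both nibbles;
		 * four bytes accumulate before each register write */
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3 && i < 16)
			printf("RETA[%d] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}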
2586/** 2667/**
@@ -2588,25 +2669,26 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2588 * @adapter: address of board private structure 2669 * @adapter: address of board private structure
2589 * @index: index of ring to set 2670 * @index: index of ring to set
2590 **/ 2671 **/
2591static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) 2672static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2673 struct ixgbe_ring *ring)
2592{ 2674{
2593 struct ixgbe_ring *rx_ring;
2594 struct ixgbe_hw *hw = &adapter->hw; 2675 struct ixgbe_hw *hw = &adapter->hw;
2595 int j;
2596 u32 rscctrl; 2676 u32 rscctrl;
2597 int rx_buf_len; 2677 int rx_buf_len;
2678 u16 reg_idx = ring->reg_idx;
2598 2679
2599 rx_ring = adapter->rx_ring[index]; 2680 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
2600 j = rx_ring->reg_idx; 2681 return;
2601 rx_buf_len = rx_ring->rx_buf_len; 2682
2602 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); 2683 rx_buf_len = ring->rx_buf_len;
2684 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2603 rscctrl |= IXGBE_RSCCTL_RSCEN; 2685 rscctrl |= IXGBE_RSCCTL_RSCEN;
2604 /* 2686 /*
2605 * we must limit the number of descriptors so that the 2687 * we must limit the number of descriptors so that the
2606 * total size of max desc * buf_len is not greater 2688 * total size of max desc * buf_len is not greater
2607 * than 65535 2689 * than 65535
2608 */ 2690 */
2609 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2691 if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2610#if (MAX_SKB_FRAGS > 16) 2692#if (MAX_SKB_FRAGS > 16)
2611 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2693 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2612#elif (MAX_SKB_FRAGS > 8) 2694#elif (MAX_SKB_FRAGS > 8)
@@ -2624,31 +2706,181 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
2624 else 2706 else
2625 rscctrl |= IXGBE_RSCCTL_MAXDESC_4; 2707 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2626 } 2708 }
2627 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); 2709 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2628} 2710}
2629 2711
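ixgbe_configure_rscctl caps the coalesced descriptor count so that max descriptors times buffer length never exceeds 65535, the comment's stated limit. A sketch of the non-packet-split selection; the 4096/8192 thresholds are an assumption derived from that constraint, since the branch itself is elided from the hunk above:

#include <stdio.h>

/* pick the largest RSC descriptor count whose total stays under 64KB */
static int rsc_max_desc(int rx_buf_len)
{
	if (rx_buf_len < 4096)
		return 16;	/* 4095 * 16 = 65520 <= 65535 */
	else if (rx_buf_len < 8192)
		return 8;	/* 8191 * 8 = 65528 */
	return 4;
}

int main(void)
{
	int lens[] = { 1536, 2048, 4096, 9728 };
	for (int i = 0; i < 4; i++)
		printf("buf %5d -> max %2d descs (%d bytes total)\n",
		       lens[i], rsc_max_desc(lens[i]),
		       lens[i] * rsc_max_desc(lens[i]));
	return 0;
}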
2630/** 2712/**
2631 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset 2713 * ixgbe_set_uta - Set unicast filter table address
2632 * @adapter: board private structure 2714 * @adapter: board private structure
2633 * 2715 *
2634 * Configure the Rx unit of the MAC after a reset. 2716 * The unicast table address is a register array of 32-bit registers.
2717 * The table is meant to be used in a way similar to how the MTA is used
2718 * however due to certain limitations in the hardware it is necessary to
2719 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2720 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2635 **/ 2721 **/
2636static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) 2722static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2723{
2724 struct ixgbe_hw *hw = &adapter->hw;
2725 int i;
2726
2727 /* The UTA table only exists on 82599 hardware and newer */
2728 if (hw->mac.type < ixgbe_mac_82599EB)
2729 return;
2730
2731 /* we only need to do this if VMDq is enabled */
2732 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2733 return;
2734
2735 for (i = 0; i < 128; i++)
2736 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2737}
2738
2739#define IXGBE_MAX_RX_DESC_POLL 10
2740static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2741 struct ixgbe_ring *ring)
2742{
2743 struct ixgbe_hw *hw = &adapter->hw;
2744 int reg_idx = ring->reg_idx;
2745 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2746 u32 rxdctl;
2747
2748 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2749 if (hw->mac.type == ixgbe_mac_82598EB &&
2750 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2751 return;
2752
2753 do {
2754 msleep(1);
2755 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2756 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
2757
2758 if (!wait_loop) {
2759 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
2760 "the polling period\n", reg_idx);
2761 }
2762}
2763
2764void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2765 struct ixgbe_ring *ring)
2766{
2767 struct ixgbe_hw *hw = &adapter->hw;
2768 u64 rdba = ring->dma;
2769 u32 rxdctl;
2770 u16 reg_idx = ring->reg_idx;
2771
2772 /* disable queue to avoid issues while updating state */
2773 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2774 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
2775 rxdctl & ~IXGBE_RXDCTL_ENABLE);
2776 IXGBE_WRITE_FLUSH(hw);
2777
2778 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
2779 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
2780 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
2781 ring->count * sizeof(union ixgbe_adv_rx_desc));
2782 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2783 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
2784 ring->head = IXGBE_RDH(reg_idx);
2785 ring->tail = IXGBE_RDT(reg_idx);
2786
2787 ixgbe_configure_srrctl(adapter, ring);
2788 ixgbe_configure_rscctl(adapter, ring);
2789
2790 if (hw->mac.type == ixgbe_mac_82598EB) {
2791 /*
2792 * enable cache line friendly hardware writes:
2793 * PTHRESH=32 descriptors (half the internal cache),
2794 * this also removes ugly rx_no_buffer_count increment
2795 * HTHRESH=4 descriptors (to minimize latency on fetch)
2796 * WTHRESH=8 burst writeback up to two cache lines
2797 */
2798 rxdctl &= ~0x3FFFFF;
2799 rxdctl |= 0x080420;
2800 }
2801
2802 /* enable receive descriptor ring */
2803 rxdctl |= IXGBE_RXDCTL_ENABLE;
2804 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2805
2806 ixgbe_rx_desc_queue_enable(adapter, ring);
2807 ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
2808}
2809
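ixgbe_configure_rx_ring (like its Tx counterpart) programs the 64-bit ring base as a low/high register pair: DMA_BIT_MASK(32) keeps the low word and a plain shift supplies the high word. A standalone check of the split and reassembly; the macro here is a simplified form of the kernel's, which special-cases n == 64:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define DMA_BIT_MASK(n) ((1ULL << (n)) - 1)	/* simplified */

int main(void)
{
	uint64_t rdba = 0x0000001fe4a53000ULL;	/* example bus address */
	uint32_t lo = (uint32_t)(rdba & DMA_BIT_MASK(32));
	uint32_t hi = (uint32_t)(rdba >> 32);

	/* the two register writes carry exactly the original address */
	assert((((uint64_t)hi << 32) | lo) == rdba);
	printf("RDBAL=0x%08x RDBAH=0x%08x\n", lo, hi);
	return 0;
}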
2810static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
2811{
2812 struct ixgbe_hw *hw = &adapter->hw;
2813 int p;
2814
2815 /* PSRTYPE must be initialized in non-82598 adapters */
2816 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2817 IXGBE_PSRTYPE_UDPHDR |
2818 IXGBE_PSRTYPE_IPV4HDR |
2819 IXGBE_PSRTYPE_L2HDR |
2820 IXGBE_PSRTYPE_IPV6HDR;
2821
2822 if (hw->mac.type == ixgbe_mac_82598EB)
2823 return;
2824
2825 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
2826 psrtype |= (adapter->num_rx_queues_per_pool << 29);
2827
2828 for (p = 0; p < adapter->num_rx_pools; p++)
2829 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
2830 psrtype);
2831}
2832
2833static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2834{
2835 struct ixgbe_hw *hw = &adapter->hw;
2836 u32 gcr_ext;
2837 u32 vt_reg_bits;
2838 u32 reg_offset, vf_shift;
2839 u32 vmdctl;
2840
2841 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2842 return;
2843
2844 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2845 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
2846 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
2847 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2848
2849 vf_shift = adapter->num_vfs % 32;
2850 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
2851
2852 /* Enable only the PF's pool for Tx/Rx */
2853 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2854 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
2855 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2856 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
2857 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2858
2859 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
2860 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2861
2862 /*
2863 * Set up VF register offsets for selected VT Mode,
2864 * i.e. 32 or 64 VFs for SR-IOV
2865 */
2866 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2867 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
2868 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
2869 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
2870
2871 /* enable Tx loopback for VF/PF communication */
2872 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2873}
2874
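VFRE/VFTE in ixgbe_configure_virtualization are pairs of 32-bit enable bitmaps covering up to 64 pools, so the PF's pool (the first one after the VFs) is addressed by a register index plus a bit position, and the sibling register is cleared via reg_offset ^ 1. The arithmetic in isolation, mirroring the lines above:

#include <stdio.h>

int main(void)
{
	for (int num_vfs = 0; num_vfs <= 63; num_vfs += 21) {
		int vf_shift = num_vfs % 32;
		int reg_offset = (num_vfs > 32) ? 1 : 0;
		printf("num_vfs=%2d -> VFRE(%d) bit %2d set, VFRE(%d) cleared\n",
		       num_vfs, reg_offset, vf_shift, reg_offset ^ 1);
	}
	return 0;
}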
2875static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2637{ 2876{
2638 u64 rdba;
2639 struct ixgbe_hw *hw = &adapter->hw; 2877 struct ixgbe_hw *hw = &adapter->hw;
2640 struct ixgbe_ring *rx_ring;
2641 struct net_device *netdev = adapter->netdev; 2878 struct net_device *netdev = adapter->netdev;
2642 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2879 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2643 int i, j;
2644 u32 rdlen, rxctrl, rxcsum;
2645 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2646 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2647 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2648 u32 fctrl, hlreg0;
2649 u32 reta = 0, mrqc = 0;
2650 u32 rdrxctl;
2651 int rx_buf_len; 2880 int rx_buf_len;
2881 struct ixgbe_ring *rx_ring;
2882 int i;
2883 u32 mhadd, hlreg0;
2652 2884
2653 /* Decide whether to use packet split mode or not */ 2885 /* Decide whether to use packet split mode or not */
2654 /* Do not use packet split if we're in SR-IOV Mode */ 2886 /* Do not use packet split if we're in SR-IOV Mode */
@@ -2658,62 +2890,40 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2658 /* Set the RX buffer length according to the mode */ 2890 /* Set the RX buffer length according to the mode */
2659 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 2891 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
2660 rx_buf_len = IXGBE_RX_HDR_SIZE; 2892 rx_buf_len = IXGBE_RX_HDR_SIZE;
2661 if (hw->mac.type == ixgbe_mac_82599EB) {
2662 /* PSRTYPE must be initialized in 82599 */
2663 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2664 IXGBE_PSRTYPE_UDPHDR |
2665 IXGBE_PSRTYPE_IPV4HDR |
2666 IXGBE_PSRTYPE_IPV6HDR |
2667 IXGBE_PSRTYPE_L2HDR;
2668 IXGBE_WRITE_REG(hw,
2669 IXGBE_PSRTYPE(adapter->num_vfs),
2670 psrtype);
2671 }
2672 } else { 2893 } else {
2673 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 2894 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2674 (netdev->mtu <= ETH_DATA_LEN)) 2895 (netdev->mtu <= ETH_DATA_LEN))
2675 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 2896 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2676 else 2897 else
2677 rx_buf_len = ALIGN(max_frame, 1024); 2898 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
2678 } 2899 }
2679 2900
2680 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2901#ifdef IXGBE_FCOE
2681 fctrl |= IXGBE_FCTRL_BAM; 2902 /* adjust max frame to be able to do baby jumbo for FCoE */
2682 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 2903 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
2683 fctrl |= IXGBE_FCTRL_PMCF; 2904 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2684 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 2905 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2906
2907#endif /* IXGBE_FCOE */
2908 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2909 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2910 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2911 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2912
2913 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2914 }
2685 2915
2686 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2916 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2687 if (adapter->netdev->mtu <= ETH_DATA_LEN) 2917 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
2688 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 2918 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2689 else
2690 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2691#ifdef IXGBE_FCOE
2692 if (netdev->features & NETIF_F_FCOE_MTU)
2693 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2694#endif
2695 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 2919 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2696 2920
2697 rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
2698 /* disable receives while setting up the descriptors */
2699 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2700 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2701
2702 /* 2921 /*
2703 * Setup the HW Rx Head and Tail Descriptor Pointers and 2922 * Setup the HW Rx Head and Tail Descriptor Pointers and
2704 * the Base and Length of the Rx Descriptor Ring 2923 * the Base and Length of the Rx Descriptor Ring
2705 */ 2924 */
2706 for (i = 0; i < adapter->num_rx_queues; i++) { 2925 for (i = 0; i < adapter->num_rx_queues; i++) {
2707 rx_ring = adapter->rx_ring[i]; 2926 rx_ring = adapter->rx_ring[i];
2708 rdba = rx_ring->dma;
2709 j = rx_ring->reg_idx;
2710 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
2711 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
2712 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
2713 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
2714 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
2715 rx_ring->head = IXGBE_RDH(j);
2716 rx_ring->tail = IXGBE_RDT(j);
2717 rx_ring->rx_buf_len = rx_buf_len; 2927 rx_ring->rx_buf_len = rx_buf_len;
2718 2928
2719 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) 2929 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
@@ -2729,15 +2939,21 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2729 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 2939 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2730 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) 2940 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2731 rx_ring->rx_buf_len = 2941 rx_ring->rx_buf_len =
2732 IXGBE_FCOE_JUMBO_FRAME_SIZE; 2942 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2733 } 2943 }
2734 } 2944 }
2735
2736#endif /* IXGBE_FCOE */ 2945#endif /* IXGBE_FCOE */
2737 ixgbe_configure_srrctl(adapter, rx_ring);
2738 } 2946 }
2739 2947
2740 if (hw->mac.type == ixgbe_mac_82598EB) { 2948}
2949
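The MHADD update in ixgbe_set_rx_buffer_len is a classic read-modify-write of a single field: clear MFS with its mask, then OR in the new frame size at the field's shift. A sketch assuming MFS occupies the upper 16 bits, which is what the mask/shift pairing implies:

#include <stdio.h>
#include <stdint.h>

#define MHADD_MFS_MASK  0xFFFF0000u	/* assumed field placement */
#define MHADD_MFS_SHIFT 16

int main(void)
{
	uint32_t mhadd = 0x000005EE | (1518u << MHADD_MFS_SHIFT);
	uint32_t max_frame = 9018;	/* a jumbo frame */

	if (max_frame != (mhadd >> MHADD_MFS_SHIFT)) {
		mhadd &= ~MHADD_MFS_MASK;		/* clear old size */
		mhadd |= max_frame << MHADD_MFS_SHIFT;	/* insert new one */
	}
	printf("MHADD=0x%08x (MFS=%u)\n", mhadd, mhadd >> MHADD_MFS_SHIFT);
	return 0;
}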
2950static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
2951{
2952 struct ixgbe_hw *hw = &adapter->hw;
2953 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2954
2955 switch (hw->mac.type) {
2956 case ixgbe_mac_82598EB:
2741 /* 2957 /*
2742 * For VMDq support of different descriptor types or 2958 * For VMDq support of different descriptor types or
2743 * buffer sizes through the use of multiple SRRCTL 2959 * buffer sizes through the use of multiple SRRCTL
@@ -2748,110 +2964,66 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2748 * effects of setting this bit are only that SRRCTL must be 2964 * effects of setting this bit are only that SRRCTL must be
2749 * fully programmed [0..15] 2965 * fully programmed [0..15]
2750 */ 2966 */
2751 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2752 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 2967 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2753 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2968 break;
2969 case ixgbe_mac_82599EB:
2970 /* Disable RSC for ACK packets */
2971 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2972 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
2973 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2974 /* hardware requires some bits to be set by default */
2975 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
2976 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
2977 break;
2978 default:
2979 /* We should do nothing since we don't know this hardware */
2980 return;
2754 } 2981 }
2755 2982
2756 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 2983 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2757 u32 vt_reg_bits; 2984}
2758 u32 reg_offset, vf_shift;
2759 u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2760 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
2761 | IXGBE_VT_CTL_REPLEN;
2762 vt_reg_bits |= (adapter->num_vfs <<
2763 IXGBE_VT_CTL_POOL_SHIFT);
2764 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2765 IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
2766
2767 vf_shift = adapter->num_vfs % 32;
2768 reg_offset = adapter->num_vfs / 32;
2769 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
2770 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
2771 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
2772 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
2773 /* Enable only the PF's pool for Tx/Rx */
2774 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2775 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2776 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2777 ixgbe_set_vmolr(hw, adapter->num_vfs, true);
2778 }
2779
2780 /* Program MRQC for the distribution of queues */
2781 mrqc = ixgbe_setup_mrqc(adapter);
2782
2783 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2784 /* Fill out redirection table */
2785 for (i = 0, j = 0; i < 128; i++, j++) {
2786 if (j == adapter->ring_feature[RING_F_RSS].indices)
2787 j = 0;
2788 /* reta = 4-byte sliding window of
2789 * 0x00..(indices-1)(indices-1)00..etc. */
2790 reta = (reta << 8) | (j * 0x11);
2791 if ((i & 3) == 3)
2792 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2793 }
2794
2795 /* Fill out hash function seeds */
2796 for (i = 0; i < 10; i++)
2797 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2798
2799 if (hw->mac.type == ixgbe_mac_82598EB)
2800 mrqc |= IXGBE_MRQC_RSSEN;
2801 /* Perform hash on these packet types */
2802 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2803 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2804 | IXGBE_MRQC_RSS_FIELD_IPV6
2805 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2806 }
2807 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2808 2985
2809 if (adapter->num_vfs) { 2986/**
2810 u32 reg; 2987 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
2988 * @adapter: board private structure
2989 *
2990 * Configure the Rx unit of the MAC after a reset.
2991 **/
2992static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2993{
2994 struct ixgbe_hw *hw = &adapter->hw;
2995 int i;
2996 u32 rxctrl;
2811 2997
2812 /* Map PF MAC address in RAR Entry 0 to first pool 2998 /* disable receives while setting up the descriptors */
2813 * following VFs */ 2999 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2814 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); 3000 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2815 3001
2816 /* Set up VF register offsets for selected VT Mode, i.e. 3002 ixgbe_setup_psrtype(adapter);
2817 * 64 VFs for SR-IOV */ 3003 ixgbe_setup_rdrxctl(adapter);
2818 reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2819 reg |= IXGBE_GCR_EXT_SRIOV;
2820 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
2821 }
2822 3004
2823 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3005 /* Program registers for the distribution of queues */
3006 ixgbe_setup_mrqc(adapter);
2824 3007
2825 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || 3008 ixgbe_set_uta(adapter);
2826 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
2827 /* Disable indicating checksum in descriptor, enables
2828 * RSS hash */
2829 rxcsum |= IXGBE_RXCSUM_PCSD;
2830 }
2831 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
2832 /* Enable IPv4 payload checksum for UDP fragments
2833 * if PCSD is not set */
2834 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2835 }
2836 3009
2837 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3010 /* set_rx_buffer_len must be called before ring initialization */
3011 ixgbe_set_rx_buffer_len(adapter);
2838 3012
2839 if (hw->mac.type == ixgbe_mac_82599EB) { 3013 /*
2840 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 3014 * Setup the HW Rx Head and Tail Descriptor Pointers and
2841 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 3015 * the Base and Length of the Rx Descriptor Ring
2842 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 3016 */
2843 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 3017 for (i = 0; i < adapter->num_rx_queues; i++)
2844 } 3018 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
2845 3019
2846 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 3020 /* disable drop enable for 82598 parts */
2847 /* Enable 82599 HW-RSC */ 3021 if (hw->mac.type == ixgbe_mac_82598EB)
2848 for (i = 0; i < adapter->num_rx_queues; i++) 3022 rxctrl |= IXGBE_RXCTRL_DMBYPS;
2849 ixgbe_configure_rscctl(adapter, i);
2850 3023
2851 /* Disable RSC for ACK packets */ 3024 /* enable all receives */
2852 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 3025 rxctrl |= IXGBE_RXCTRL_RXEN;
2853 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 3026 hw->mac.ops.enable_rx_dma(hw, rxctrl);
2854 }
2855} 3027}
2856 3028
2857static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 3029static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2955,7 +3127,7 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
2955} 3127}
2956 3128
2957static void ixgbe_vlan_rx_register(struct net_device *netdev, 3129static void ixgbe_vlan_rx_register(struct net_device *netdev,
2958 struct vlan_group *grp) 3130 struct vlan_group *grp)
2959{ 3131{
2960 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3132 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2961 3133
@@ -3052,6 +3224,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3052 3224
3053 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3225 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3054 3226
3227 /* set all bits that we expect to always be set */
3228 fctrl |= IXGBE_FCTRL_BAM;
3229 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3230 fctrl |= IXGBE_FCTRL_PMCF;
3231
3055 /* clear the bits we are changing the status of */ 3232 /* clear the bits we are changing the status of */
3056 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3233 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3057 3234
@@ -3157,6 +3334,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3157 u32 txdctl; 3334 u32 txdctl;
3158 int i, j; 3335 int i, j;
3159 3336
3337 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3338 if (hw->mac.type == ixgbe_mac_82598EB)
3339 netif_set_gso_max_size(adapter->netdev, 65536);
3340 return;
3341 }
3342
3343 if (hw->mac.type == ixgbe_mac_82598EB)
3344 netif_set_gso_max_size(adapter->netdev, 32768);
3345
3160 ixgbe_dcb_check_config(&adapter->dcb_cfg); 3346 ixgbe_dcb_check_config(&adapter->dcb_cfg);
3161 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); 3347 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
3162 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); 3348 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
@@ -3188,17 +3374,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3188 3374
3189 ixgbe_restore_vlan(adapter); 3375 ixgbe_restore_vlan(adapter);
3190#ifdef CONFIG_IXGBE_DCB 3376#ifdef CONFIG_IXGBE_DCB
3191 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3377 ixgbe_configure_dcb(adapter);
3192 if (hw->mac.type == ixgbe_mac_82598EB)
3193 netif_set_gso_max_size(netdev, 32768);
3194 else
3195 netif_set_gso_max_size(netdev, 65536);
3196 ixgbe_configure_dcb(adapter);
3197 } else {
3198 netif_set_gso_max_size(netdev, 65536);
3199 }
3200#else
3201 netif_set_gso_max_size(netdev, 65536);
3202#endif 3378#endif
3203 3379
3204#ifdef IXGBE_FCOE 3380#ifdef IXGBE_FCOE
@@ -3209,17 +3385,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3209 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 3385 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3210 for (i = 0; i < adapter->num_tx_queues; i++) 3386 for (i = 0; i < adapter->num_tx_queues; i++)
3211 adapter->tx_ring[i]->atr_sample_rate = 3387 adapter->tx_ring[i]->atr_sample_rate =
3212 adapter->atr_sample_rate; 3388 adapter->atr_sample_rate;
3213 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); 3389 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3214 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { 3390 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3215 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); 3391 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3216 } 3392 }
3393 ixgbe_configure_virtualization(adapter);
3217 3394
3218 ixgbe_configure_tx(adapter); 3395 ixgbe_configure_tx(adapter);
3219 ixgbe_configure_rx(adapter); 3396 ixgbe_configure_rx(adapter);
3220 for (i = 0; i < adapter->num_rx_queues; i++)
3221 ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
3222 (adapter->rx_ring[i]->count - 1));
3223} 3397}
3224 3398
3225static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 3399static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -3290,7 +3464,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3290 goto link_cfg_out; 3464 goto link_cfg_out;
3291 3465
3292 if (hw->mac.ops.get_link_capabilities) 3466 if (hw->mac.ops.get_link_capabilities)
3293 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 3467 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3468 &negotiation);
3294 if (ret) 3469 if (ret)
3295 goto link_cfg_out; 3470 goto link_cfg_out;
3296 3471
@@ -3300,62 +3475,15 @@ link_cfg_out:
3300 return ret; 3475 return ret;
3301} 3476}
3302 3477
3303#define IXGBE_MAX_RX_DESC_POLL 10 3478static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3304static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3305 int rxr)
3306{
3307 int j = adapter->rx_ring[rxr]->reg_idx;
3308 int k;
3309
3310 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
3311 if (IXGBE_READ_REG(&adapter->hw,
3312 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
3313 break;
3314 else
3315 msleep(1);
3316 }
3317 if (k >= IXGBE_MAX_RX_DESC_POLL) {
3318 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3319 "the polling period\n", rxr);
3320 }
3321 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
3322 (adapter->rx_ring[rxr]->count - 1));
3323}
3324
3325static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3326{ 3479{
3327 struct net_device *netdev = adapter->netdev;
3328 struct ixgbe_hw *hw = &adapter->hw; 3480 struct ixgbe_hw *hw = &adapter->hw;
3329 int i, j = 0; 3481 u32 gpie = 0;
3330 int num_rx_rings = adapter->num_rx_queues;
3331 int err;
3332 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3333 u32 txdctl, rxdctl, mhadd;
3334 u32 dmatxctl;
3335 u32 gpie;
3336 u32 ctrl_ext;
3337
3338 ixgbe_get_hw_control(adapter);
3339
3340 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
3341 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
3342 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3343 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
3344 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
3345 } else {
3346 /* MSI only */
3347 gpie = 0;
3348 }
3349 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3350 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3351 gpie |= IXGBE_GPIE_VTMODE_64;
3352 }
3353 /* XXX: to interrupt immediately for EICS writes, enable this */
3354 /* gpie |= IXGBE_GPIE_EIMEN; */
3355 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3356 }
3357 3482
3358 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3483 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3484 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3485 IXGBE_GPIE_OCD;
3486 gpie |= IXGBE_GPIE_EIAME;
3359 /* 3487 /*
3360 * use EIAM to auto-mask when MSI-X interrupt is asserted 3488 * use EIAM to auto-mask when MSI-X interrupt is asserted
3361 * this saves a register write for every interrupt 3489 * this saves a register write for every interrupt
@@ -3376,98 +3504,33 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3376 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3504 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3377 } 3505 }
3378 3506
3379 /* Enable Thermal over heat sensor interrupt */ 3507 /* XXX: to interrupt immediately for EICS writes, enable this */
3380 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 3508 /* gpie |= IXGBE_GPIE_EIMEN; */
3381 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3509
3382 gpie |= IXGBE_SDP0_GPIEN; 3510 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3383 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3511 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3512 gpie |= IXGBE_GPIE_VTMODE_64;
3384 } 3513 }
3385 3514
3386 /* Enable fan failure interrupt if media type is copper */ 3515 /* Enable fan failure interrupt */
3387 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3516 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3388 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3389 gpie |= IXGBE_SDP1_GPIEN; 3517 gpie |= IXGBE_SDP1_GPIEN;
3390 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3391 }
3392 3518
3393 if (hw->mac.type == ixgbe_mac_82599EB) { 3519 if (hw->mac.type == ixgbe_mac_82599EB) {
3394 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3395 gpie |= IXGBE_SDP1_GPIEN; 3520 gpie |= IXGBE_SDP1_GPIEN;
3396 gpie |= IXGBE_SDP2_GPIEN; 3521 gpie |= IXGBE_SDP2_GPIEN;
3397 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3398 }
3399
3400#ifdef IXGBE_FCOE
3401 /* adjust max frame to be able to do baby jumbo for FCoE */
3402 if ((netdev->features & NETIF_F_FCOE_MTU) &&
3403 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3404 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3405 3522 }
3406#endif /* IXGBE_FCOE */ 3523 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3407 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3524}
3408 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3409 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3410 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3411
3412 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3413 }
3414
3415 for (i = 0; i < adapter->num_tx_queues; i++) {
3416 j = adapter->tx_ring[i]->reg_idx;
3417 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3418 if (adapter->rx_itr_setting == 0) {
3419 /* cannot set wthresh when itr==0 */
3420 txdctl &= ~0x007F0000;
3421 } else {
3422 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
3423 txdctl |= (8 << 16);
3424 }
3425 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3426 }
3427 3525
3428 if (hw->mac.type == ixgbe_mac_82599EB) { 3526static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3429 /* DMATXCTL.EN must be set after all Tx queue config is done */ 3527{
3430 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 3528 struct ixgbe_hw *hw = &adapter->hw;
3431 dmatxctl |= IXGBE_DMATXCTL_TE; 3529 int err;
3432 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 3530 u32 ctrl_ext;
3433 }
3434 for (i = 0; i < adapter->num_tx_queues; i++) {
3435 j = adapter->tx_ring[i]->reg_idx;
3436 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3437 txdctl |= IXGBE_TXDCTL_ENABLE;
3438 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3439 if (hw->mac.type == ixgbe_mac_82599EB) {
3440 int wait_loop = 10;
3441 /* poll for Tx Enable ready */
3442 do {
3443 msleep(1);
3444 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3445 } while (--wait_loop &&
3446 !(txdctl & IXGBE_TXDCTL_ENABLE));
3447 if (!wait_loop)
3448 e_err(drv, "Could not enable Tx Queue %d\n", j);
3449 }
3450 }
3451 3531
3452 for (i = 0; i < num_rx_rings; i++) { 3532 ixgbe_get_hw_control(adapter);
3453 j = adapter->rx_ring[i]->reg_idx; 3533 ixgbe_setup_gpie(adapter);
3454 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3455 /* enable PTHRESH=32 descriptors (half the internal cache)
3456 * and HTHRESH=0 descriptors (to minimize latency on fetch),
3457 * this also removes a pesky rx_no_buffer_count increment */
3458 rxdctl |= 0x0020;
3459 rxdctl |= IXGBE_RXDCTL_ENABLE;
3460 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
3461 if (hw->mac.type == ixgbe_mac_82599EB)
3462 ixgbe_rx_desc_queue_enable(adapter, i);
3463 }
3464 /* enable all receives */
3465 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3466 if (hw->mac.type == ixgbe_mac_82598EB)
3467 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
3468 else
3469 rxdctl |= IXGBE_RXCTRL_RXEN;
3470 hw->mac.ops.enable_rx_dma(hw, rxdctl);
3471 3534
3472 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 3535 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3473 ixgbe_configure_msix(adapter); 3536 ixgbe_configure_msix(adapter);
@@ -3483,7 +3546,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3483 3546
3484 /* clear any pending interrupts, may auto mask */ 3547 /* clear any pending interrupts, may auto mask */
3485 IXGBE_READ_REG(hw, IXGBE_EICR); 3548 IXGBE_READ_REG(hw, IXGBE_EICR);
3486
3487 ixgbe_irq_enable(adapter); 3549 ixgbe_irq_enable(adapter);
3488 3550
3489 /* 3551 /*
@@ -3525,12 +3587,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3525 e_err(probe, "link_config FAILED %d\n", err); 3587 e_err(probe, "link_config FAILED %d\n", err);
3526 } 3588 }
3527 3589
3528 for (i = 0; i < adapter->num_tx_queues; i++)
3529 set_bit(__IXGBE_FDIR_INIT_DONE,
3530 &(adapter->tx_ring[i]->reinit_state));
3531
3532 /* enable transmits */ 3590 /* enable transmits */
3533 netif_tx_start_all_queues(netdev); 3591 netif_tx_start_all_queues(adapter->netdev);
3534 3592
3535 /* bring the link up in the watchdog, this could race with our first 3593 /* bring the link up in the watchdog, this could race with our first
3536 * link up interrupt but shouldn't be a problem */ 3594 * link up interrupt but shouldn't be a problem */
@@ -3609,21 +3667,24 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3609 * @rx_ring: ring to free buffers from 3667 * @rx_ring: ring to free buffers from
3610 **/ 3668 **/
3611static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 3669static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3612 struct ixgbe_ring *rx_ring) 3670 struct ixgbe_ring *rx_ring)
3613{ 3671{
3614 struct pci_dev *pdev = adapter->pdev; 3672 struct pci_dev *pdev = adapter->pdev;
3615 unsigned long size; 3673 unsigned long size;
3616 unsigned int i; 3674 unsigned int i;
3617 3675
3618 /* Free all the Rx ring sk_buffs */ 3676 /* ring already cleared, nothing to do */
3677 if (!rx_ring->rx_buffer_info)
3678 return;
3619 3679
3680 /* Free all the Rx ring sk_buffs */
3620 for (i = 0; i < rx_ring->count; i++) { 3681 for (i = 0; i < rx_ring->count; i++) {
3621 struct ixgbe_rx_buffer *rx_buffer_info; 3682 struct ixgbe_rx_buffer *rx_buffer_info;
3622 3683
3623 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3684 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3624 if (rx_buffer_info->dma) { 3685 if (rx_buffer_info->dma) {
3625 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 3686 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
3626 rx_ring->rx_buf_len, 3687 rx_ring->rx_buf_len,
3627 DMA_FROM_DEVICE); 3688 DMA_FROM_DEVICE);
3628 rx_buffer_info->dma = 0; 3689 rx_buffer_info->dma = 0;
3629 } 3690 }
@@ -3635,7 +3696,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3635 if (IXGBE_RSC_CB(this)->delay_unmap) { 3696 if (IXGBE_RSC_CB(this)->delay_unmap) {
3636 dma_unmap_single(&pdev->dev, 3697 dma_unmap_single(&pdev->dev,
3637 IXGBE_RSC_CB(this)->dma, 3698 IXGBE_RSC_CB(this)->dma,
3638 rx_ring->rx_buf_len, 3699 rx_ring->rx_buf_len,
3639 DMA_FROM_DEVICE); 3700 DMA_FROM_DEVICE);
3640 IXGBE_RSC_CB(this)->dma = 0; 3701 IXGBE_RSC_CB(this)->dma = 0;
3641 IXGBE_RSC_CB(skb)->delay_unmap = false; 3702 IXGBE_RSC_CB(skb)->delay_unmap = false;
@@ -3677,14 +3738,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3677 * @tx_ring: ring to be cleaned 3738 * @tx_ring: ring to be cleaned
3678 **/ 3739 **/
3679static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 3740static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3680 struct ixgbe_ring *tx_ring) 3741 struct ixgbe_ring *tx_ring)
3681{ 3742{
3682 struct ixgbe_tx_buffer *tx_buffer_info; 3743 struct ixgbe_tx_buffer *tx_buffer_info;
3683 unsigned long size; 3744 unsigned long size;
3684 unsigned int i; 3745 unsigned int i;
3685 3746
3686 /* Free all the Tx ring sk_buffs */ 3747 /* ring already cleared, nothing to do */
3748 if (!tx_ring->tx_buffer_info)
3749 return;
3687 3750
3751 /* Free all the Tx ring sk_buffs */
3688 for (i = 0; i < tx_ring->count; i++) { 3752 for (i = 0; i < tx_ring->count; i++) {
3689 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3753 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3690 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 3754 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
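Both clean-ring paths above now bail out early when the buffer-info array is already gone, which makes the cleanup idempotent when the down and free paths overlap. A sketch of the guard; unlike the driver, which leaves freeing the array to the resource-free path, this demo frees and NULLs in one place to show why the second call becomes a no-op:

#include <stdio.h>
#include <stdlib.h>

struct ring {
	int *buffer_info;
	int count;
};

static void clean_ring(struct ring *r)
{
	/* ring already cleared, nothing to do */
	if (!r->buffer_info)
		return;

	for (int i = 0; i < r->count; i++)
		r->buffer_info[i] = 0;	/* stand-in for per-buffer unmap */
	free(r->buffer_info);
	r->buffer_info = NULL;		/* arms the guard for next time */
}

int main(void)
{
	struct ring r = { calloc(16, sizeof(int)), 16 };
	clean_ring(&r);
	clean_ring(&r);			/* safe: guard short-circuits */
	puts("cleaned twice without double free");
	return 0;
}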
@@ -3786,13 +3850,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3786 j = adapter->tx_ring[i]->reg_idx; 3850 j = adapter->tx_ring[i]->reg_idx;
3787 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 3851 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3788 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), 3852 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3789 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 3853 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3790 } 3854 }
3791 /* Disable the Tx DMA engine on 82599 */ 3855 /* Disable the Tx DMA engine on 82599 */
3792 if (hw->mac.type == ixgbe_mac_82599EB) 3856 if (hw->mac.type == ixgbe_mac_82599EB)
3793 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 3857 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3794 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 3858 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3795 ~IXGBE_DMATXCTL_TE)); 3859 ~IXGBE_DMATXCTL_TE));
3796 3860
3797 /* power down the optics */ 3861 /* power down the optics */
3798 if (hw->phy.multispeed_fiber) 3862 if (hw->phy.multispeed_fiber)
@@ -3822,7 +3886,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3822static int ixgbe_poll(struct napi_struct *napi, int budget) 3886static int ixgbe_poll(struct napi_struct *napi, int budget)
3823{ 3887{
3824 struct ixgbe_q_vector *q_vector = 3888 struct ixgbe_q_vector *q_vector =
3825 container_of(napi, struct ixgbe_q_vector, napi); 3889 container_of(napi, struct ixgbe_q_vector, napi);
3826 struct ixgbe_adapter *adapter = q_vector->adapter; 3890 struct ixgbe_adapter *adapter = q_vector->adapter;
3827 int tx_clean_complete, work_done = 0; 3891 int tx_clean_complete, work_done = 0;
3828 3892
@@ -3932,7 +3996,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3932 * Rx load across CPUs using RSS. 3996 * Rx load across CPUs using RSS.
3933 * 3997 *
3934 **/ 3998 **/
3935static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) 3999static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3936{ 4000{
3937 bool ret = false; 4001 bool ret = false;
3938 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; 4002 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
@@ -4061,7 +4125,7 @@ done:
4061} 4125}
4062 4126
4063static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 4127static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4064 int vectors) 4128 int vectors)
4065{ 4129{
4066 int err, vector_threshold; 4130 int err, vector_threshold;
4067 4131
@@ -4080,7 +4144,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4080 */ 4144 */
4081 while (vectors >= vector_threshold) { 4145 while (vectors >= vector_threshold) {
4082 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 4146 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
4083 vectors); 4147 vectors);
4084 if (!err) /* Success in acquiring all requested vectors. */ 4148 if (!err) /* Success in acquiring all requested vectors. */
4085 break; 4149 break;
4086 else if (err < 0) 4150 else if (err < 0)
@@ -4107,7 +4171,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4107 * vectors we were allocated. 4171 * vectors we were allocated.
4108 */ 4172 */
4109 adapter->num_msix_vectors = min(vectors, 4173 adapter->num_msix_vectors = min(vectors,
4110 adapter->max_msix_q_vectors + NON_Q_VECTORS); 4174 adapter->max_msix_q_vectors + NON_Q_VECTORS);
4111 } 4175 }
4112} 4176}
4113 4177
@@ -4178,12 +4242,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4178 } 4242 }
4179 for ( ; i < 5; i++) { 4243 for ( ; i < 5; i++) {
4180 adapter->tx_ring[i]->reg_idx = 4244 adapter->tx_ring[i]->reg_idx =
4181 ((i + 2) << 4); 4245 ((i + 2) << 4);
4182 adapter->rx_ring[i]->reg_idx = i << 4; 4246 adapter->rx_ring[i]->reg_idx = i << 4;
4183 } 4247 }
4184 for ( ; i < dcb_i; i++) { 4248 for ( ; i < dcb_i; i++) {
4185 adapter->tx_ring[i]->reg_idx = 4249 adapter->tx_ring[i]->reg_idx =
4186 ((i + 8) << 3); 4250 ((i + 8) << 3);
4187 adapter->rx_ring[i]->reg_idx = i << 4; 4251 adapter->rx_ring[i]->reg_idx = i << 4;
4188 } 4252 }
4189 4253
@@ -4226,7 +4290,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4226 * Cache the descriptor ring offsets for Flow Director to the assigned rings. 4290 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4227 * 4291 *
4228 **/ 4292 **/
4229static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) 4293static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4230{ 4294{
4231 int i; 4295 int i;
4232 bool ret = false; 4296 bool ret = false;
@@ -4383,7 +4447,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
4383 adapter->node = cur_node; 4447 adapter->node = cur_node;
4384 } 4448 }
4385 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, 4449 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4386 adapter->node); 4450 adapter->node);
4387 if (!ring) 4451 if (!ring)
4388 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4452 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4389 if (!ring) 4453 if (!ring)
@@ -4407,7 +4471,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
4407 adapter->node = cur_node; 4471 adapter->node = cur_node;
4408 } 4472 }
4409 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, 4473 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4410 adapter->node); 4474 adapter->node);
4411 if (!ring) 4475 if (!ring)
4412 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4476 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4413 if (!ring) 4477 if (!ring)
@@ -4453,7 +4517,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4453 * (roughly) the same number of vectors as there are CPU's. 4517 * (roughly) the same number of vectors as there are CPU's.
4454 */ 4518 */
4455 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 4519 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
4456 (int)num_online_cpus()) + NON_Q_VECTORS; 4520 (int)num_online_cpus()) + NON_Q_VECTORS;
4457 4521
4458 /* 4522 /*
4459 * At the same time, hardware can only support a maximum of 4523 * At the same time, hardware can only support a maximum of
@@ -4467,7 +4531,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4467 /* A failure in MSI-X entry allocation isn't fatal, but it does 4531 /* A failure in MSI-X entry allocation isn't fatal, but it does
4468 * mean we disable MSI-X capabilities of the adapter. */ 4532 * mean we disable MSI-X capabilities of the adapter. */
4469 adapter->msix_entries = kcalloc(v_budget, 4533 adapter->msix_entries = kcalloc(v_budget,
4470 sizeof(struct msix_entry), GFP_KERNEL); 4534 sizeof(struct msix_entry), GFP_KERNEL);
4471 if (adapter->msix_entries) { 4535 if (adapter->msix_entries) {
4472 for (vector = 0; vector < v_budget; vector++) 4536 for (vector = 0; vector < v_budget; vector++)
4473 adapter->msix_entries[vector].entry = vector; 4537 adapter->msix_entries[vector].entry = vector;
@@ -4529,10 +4593,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4529 4593
4530 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 4594 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4531 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), 4595 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
4532 GFP_KERNEL, adapter->node); 4596 GFP_KERNEL, adapter->node);
4533 if (!q_vector) 4597 if (!q_vector)
4534 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), 4598 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
4535 GFP_KERNEL); 4599 GFP_KERNEL);
4536 if (!q_vector) 4600 if (!q_vector)
4537 goto err_out; 4601 goto err_out;
4538 q_vector->adapter = adapter; 4602 q_vector->adapter = adapter;
@@ -4693,8 +4757,8 @@ static void ixgbe_sfp_timer(unsigned long data)
4693static void ixgbe_sfp_task(struct work_struct *work) 4757static void ixgbe_sfp_task(struct work_struct *work)
4694{ 4758{
4695 struct ixgbe_adapter *adapter = container_of(work, 4759 struct ixgbe_adapter *adapter = container_of(work,
4696 struct ixgbe_adapter, 4760 struct ixgbe_adapter,
4697 sfp_task); 4761 sfp_task);
4698 struct ixgbe_hw *hw = &adapter->hw; 4762 struct ixgbe_hw *hw = &adapter->hw;
4699 4763
4700 if ((hw->phy.type == ixgbe_phy_nl) && 4764 if ((hw->phy.type == ixgbe_phy_nl) &&
@@ -4719,7 +4783,7 @@ static void ixgbe_sfp_task(struct work_struct *work)
4719reschedule: 4783reschedule:
4720 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) 4784 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4721 mod_timer(&adapter->sfp_timer, 4785 mod_timer(&adapter->sfp_timer,
4722 round_jiffies(jiffies + (2 * HZ))); 4786 round_jiffies(jiffies + (2 * HZ)));
4723} 4787}
4724 4788
4725/** 4789/**
@@ -4775,7 +4839,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4775 adapter->atr_sample_rate = 20; 4839 adapter->atr_sample_rate = 20;
4776 } 4840 }
4777 adapter->ring_feature[RING_F_FDIR].indices = 4841 adapter->ring_feature[RING_F_FDIR].indices =
4778 IXGBE_MAX_FDIR_INDICES; 4842 IXGBE_MAX_FDIR_INDICES;
4779 adapter->fdir_pballoc = 0; 4843 adapter->fdir_pballoc = 0;
4780#ifdef IXGBE_FCOE 4844#ifdef IXGBE_FCOE
4781 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 4845 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4806,7 +4870,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4806 adapter->dcb_cfg.round_robin_enable = false; 4870 adapter->dcb_cfg.round_robin_enable = false;
4807 adapter->dcb_set_bitmap = 0x00; 4871 adapter->dcb_set_bitmap = 0x00;
4808 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 4872 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
4809 adapter->ring_feature[RING_F_DCB].indices); 4873 adapter->ring_feature[RING_F_DCB].indices);
4810 4874
4811#endif 4875#endif
4812 4876
@@ -4861,7 +4925,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4861 * Return 0 on success, negative on failure 4925 * Return 0 on success, negative on failure
4862 **/ 4926 **/
4863int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 4927int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4864 struct ixgbe_ring *tx_ring) 4928 struct ixgbe_ring *tx_ring)
4865{ 4929{
4866 struct pci_dev *pdev = adapter->pdev; 4930 struct pci_dev *pdev = adapter->pdev;
4867 int size; 4931 int size;
@@ -4928,7 +4992,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4928 * Returns 0 on success, negative on failure 4992 * Returns 0 on success, negative on failure
4929 **/ 4993 **/
4930int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 4994int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4931 struct ixgbe_ring *rx_ring) 4995 struct ixgbe_ring *rx_ring)
4932{ 4996{
4933 struct pci_dev *pdev = adapter->pdev; 4997 struct pci_dev *pdev = adapter->pdev;
4934 int size; 4998 int size;
@@ -5001,7 +5065,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5001 * Free all transmit software resources 5065 * Free all transmit software resources
5002 **/ 5066 **/
5003void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 5067void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
5004 struct ixgbe_ring *tx_ring) 5068 struct ixgbe_ring *tx_ring)
5005{ 5069{
5006 struct pci_dev *pdev = adapter->pdev; 5070 struct pci_dev *pdev = adapter->pdev;
5007 5071
@@ -5039,7 +5103,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5039 * Free all receive software resources 5103 * Free all receive software resources
5040 **/ 5104 **/
5041void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 5105void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
5042 struct ixgbe_ring *rx_ring) 5106 struct ixgbe_ring *rx_ring)
5043{ 5107{
5044 struct pci_dev *pdev = adapter->pdev; 5108 struct pci_dev *pdev = adapter->pdev;
5045 5109
@@ -5333,6 +5397,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5333 u64 total_mpc = 0; 5397 u64 total_mpc = 0;
5334 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 5398 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5335 u64 non_eop_descs = 0, restart_queue = 0; 5399 u64 non_eop_descs = 0, restart_queue = 0;
5400 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5336 5401
5337 if (test_bit(__IXGBE_DOWN, &adapter->state) || 5402 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5338 test_bit(__IXGBE_RESETTING, &adapter->state)) 5403 test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5343,7 +5408,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5343 u64 rsc_flush = 0; 5408 u64 rsc_flush = 0;
5344 for (i = 0; i < 16; i++) 5409 for (i = 0; i < 16; i++)
5345 adapter->hw_rx_no_dma_resources += 5410 adapter->hw_rx_no_dma_resources +=
5346 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5411 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5347 for (i = 0; i < adapter->num_rx_queues; i++) { 5412 for (i = 0; i < adapter->num_rx_queues; i++) {
5348 rsc_count += adapter->rx_ring[i]->rsc_count; 5413 rsc_count += adapter->rx_ring[i]->rsc_count;
5349 rsc_flush += adapter->rx_ring[i]->rsc_flush; 5414 rsc_flush += adapter->rx_ring[i]->rsc_flush;
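The statistics hunk that follows is a mechanical shortening: adapter->stats.X becomes hwstats->X through one local pointer taken at the top of ixgbe_update_stats, with no behavioral change. In miniature:

#include <stdio.h>

struct hw_stats { unsigned long crcerrs, gprc; };
struct adapter { struct hw_stats stats; };

int main(void)
{
	struct adapter a = { { 0, 0 } };
	struct hw_stats *hwstats = &a.stats;	/* one dereference, reused */

	hwstats->crcerrs += 3;	/* was: a.stats.crcerrs += ... */
	hwstats->gprc += 7;
	printf("crcerrs=%lu gprc=%lu\n", a.stats.crcerrs, a.stats.gprc);
	return 0;
}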
@@ -5361,119 +5426,118 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5361 non_eop_descs += adapter->rx_ring[i]->non_eop_descs; 5426 non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
5362 adapter->non_eop_descs = non_eop_descs; 5427 adapter->non_eop_descs = non_eop_descs;
5363 5428
5364 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 5429 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5365 for (i = 0; i < 8; i++) { 5430 for (i = 0; i < 8; i++) {
5366 /* for packet buffers not used, the register should read 0 */ 5431 /* for packet buffers not used, the register should read 0 */
5367 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 5432 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5368 missed_rx += mpc; 5433 missed_rx += mpc;
5369 adapter->stats.mpc[i] += mpc; 5434 hwstats->mpc[i] += mpc;
5370 total_mpc += adapter->stats.mpc[i]; 5435 total_mpc += hwstats->mpc[i];
5371 if (hw->mac.type == ixgbe_mac_82598EB) 5436 if (hw->mac.type == ixgbe_mac_82598EB)
5372 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 5437 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5373 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 5438 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5374 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 5439 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5375 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 5440 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5376 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 5441 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5377 if (hw->mac.type == ixgbe_mac_82599EB) { 5442 if (hw->mac.type == ixgbe_mac_82599EB) {
5378 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 5443 hwstats->pxonrxc[i] +=
5379 IXGBE_PXONRXCNT(i)); 5444 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5380 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 5445 hwstats->pxoffrxc[i] +=
5381 IXGBE_PXOFFRXCNT(i)); 5446 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5382 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5447 hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5383 } else { 5448 } else {
5384 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 5449 hwstats->pxonrxc[i] +=
5385 IXGBE_PXONRXC(i)); 5450 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5386 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 5451 hwstats->pxoffrxc[i] +=
5387 IXGBE_PXOFFRXC(i)); 5452 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
5388 } 5453 }
5389 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, 5454 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5390 IXGBE_PXONTXC(i)); 5455 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
5391 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
5392 IXGBE_PXOFFTXC(i));
5393 } 5456 }
5394 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 5457 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
5395 /* work around hardware counting issue */ 5458 /* work around hardware counting issue */
5396 adapter->stats.gprc -= missed_rx; 5459 hwstats->gprc -= missed_rx;
5397 5460
5398 /* 82598 hardware only has a 32 bit counter in the high register */ 5461 /* 82598 hardware only has a 32 bit counter in the high register */
5399 if (hw->mac.type == ixgbe_mac_82599EB) { 5462 if (hw->mac.type == ixgbe_mac_82599EB) {
5400 u64 tmp; 5463 u64 tmp;
5401 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5464 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5402 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */ 5465 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
5403 adapter->stats.gorc += (tmp << 32); 5466 /* 4 high bits of GORC */
5404 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5467 hwstats->gorc += (tmp << 32);
5405 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */ 5468 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5406 adapter->stats.gotc += (tmp << 32); 5469 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
5407 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 5470 /* 4 high bits of GOTC */
5408 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 5471 hwstats->gotc += (tmp << 32);
5409 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 5472 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5410 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 5473 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5411 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 5474 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5412 adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 5475 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5476 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5477 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5413#ifdef IXGBE_FCOE 5478#ifdef IXGBE_FCOE
5414 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 5479 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5415 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 5480 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5416 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 5481 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5417 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 5482 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5418 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 5483 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5419 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 5484 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5420#endif /* IXGBE_FCOE */ 5485#endif /* IXGBE_FCOE */
5421 } else { 5486 } else {
5422 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 5487 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5423 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 5488 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
5424 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 5489 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5425 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 5490 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5426 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); 5491 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5427 } 5492 }
5428 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 5493 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5429 adapter->stats.bprc += bprc; 5494 hwstats->bprc += bprc;
5430 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 5495 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
5431 if (hw->mac.type == ixgbe_mac_82598EB) 5496 if (hw->mac.type == ixgbe_mac_82598EB)
5432 adapter->stats.mprc -= bprc; 5497 hwstats->mprc -= bprc;
5433 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 5498 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5434 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 5499 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5435 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 5500 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5436 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 5501 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5437 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 5502 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5438 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 5503 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5439 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 5504 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5440 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 5505 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
5441 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 5506 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5442 adapter->stats.lxontxc += lxon; 5507 hwstats->lxontxc += lxon;
5443 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 5508 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5444 adapter->stats.lxofftxc += lxoff; 5509 hwstats->lxofftxc += lxoff;
5445 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 5510 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5446 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 5511 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5447 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 5512 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
5448 /* 5513 /*
5449 * 82598 errata - tx of flow control packets is included in tx counters 5514 * 82598 errata - tx of flow control packets is included in tx counters
5450 */ 5515 */
5451 xon_off_tot = lxon + lxoff; 5516 xon_off_tot = lxon + lxoff;
5452 adapter->stats.gptc -= xon_off_tot; 5517 hwstats->gptc -= xon_off_tot;
5453 adapter->stats.mptc -= xon_off_tot; 5518 hwstats->mptc -= xon_off_tot;
5454 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); 5519 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5455 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 5520 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5456 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 5521 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5457 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 5522 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5458 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 5523 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5459 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 5524 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5460 adapter->stats.ptc64 -= xon_off_tot; 5525 hwstats->ptc64 -= xon_off_tot;
5461 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 5526 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5462 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 5527 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5463 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 5528 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5464 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 5529 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5465 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 5530 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5466 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 5531 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
5467 5532
5468 /* Fill out the OS statistics structure */ 5533 /* Fill out the OS statistics structure */
5469 netdev->stats.multicast = adapter->stats.mprc; 5534 netdev->stats.multicast = hwstats->mprc;
5470 5535
5471 /* Rx Errors */ 5536 /* Rx Errors */
5472 netdev->stats.rx_errors = adapter->stats.crcerrs + 5537 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
5473 adapter->stats.rlec;
5474 netdev->stats.rx_dropped = 0; 5538 netdev->stats.rx_dropped = 0;
5475 netdev->stats.rx_length_errors = adapter->stats.rlec; 5539 netdev->stats.rx_length_errors = hwstats->rlec;
5476 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 5540 netdev->stats.rx_crc_errors = hwstats->crcerrs;
5477 netdev->stats.rx_missed_errors = total_mpc; 5541 netdev->stats.rx_missed_errors = total_mpc;
5478} 5542}
5479 5543
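The ixgbe_update_stats() hunk above replaces every adapter->stats.<field> access with a cached local pointer. Taking the address once keeps each register-accumulation line within the kernel's 80-column convention without changing behavior. A minimal sketch of the pattern, using field names taken from the hunk:

	struct ixgbe_hw_stats *hwstats = &adapter->stats;

	/* one pointer load, then short field accesses */
	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	hwstats->gprc    += IXGBE_READ_REG(hw, IXGBE_GPRC);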
@@ -5532,8 +5596,8 @@ watchdog_short_circuit:
5532static void ixgbe_multispeed_fiber_task(struct work_struct *work) 5596static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5533{ 5597{
5534 struct ixgbe_adapter *adapter = container_of(work, 5598 struct ixgbe_adapter *adapter = container_of(work,
5535 struct ixgbe_adapter, 5599 struct ixgbe_adapter,
5536 multispeed_fiber_task); 5600 multispeed_fiber_task);
5537 struct ixgbe_hw *hw = &adapter->hw; 5601 struct ixgbe_hw *hw = &adapter->hw;
5538 u32 autoneg; 5602 u32 autoneg;
5539 bool negotiation; 5603 bool negotiation;
@@ -5556,8 +5620,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5556static void ixgbe_sfp_config_module_task(struct work_struct *work) 5620static void ixgbe_sfp_config_module_task(struct work_struct *work)
5557{ 5621{
5558 struct ixgbe_adapter *adapter = container_of(work, 5622 struct ixgbe_adapter *adapter = container_of(work,
5559 struct ixgbe_adapter, 5623 struct ixgbe_adapter,
5560 sfp_config_module_task); 5624 sfp_config_module_task);
5561 struct ixgbe_hw *hw = &adapter->hw; 5625 struct ixgbe_hw *hw = &adapter->hw;
5562 u32 err; 5626 u32 err;
5563 5627
@@ -5590,15 +5654,15 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
5590static void ixgbe_fdir_reinit_task(struct work_struct *work) 5654static void ixgbe_fdir_reinit_task(struct work_struct *work)
5591{ 5655{
5592 struct ixgbe_adapter *adapter = container_of(work, 5656 struct ixgbe_adapter *adapter = container_of(work,
5593 struct ixgbe_adapter, 5657 struct ixgbe_adapter,
5594 fdir_reinit_task); 5658 fdir_reinit_task);
5595 struct ixgbe_hw *hw = &adapter->hw; 5659 struct ixgbe_hw *hw = &adapter->hw;
5596 int i; 5660 int i;
5597 5661
5598 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 5662 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5599 for (i = 0; i < adapter->num_tx_queues; i++) 5663 for (i = 0; i < adapter->num_tx_queues; i++)
5600 set_bit(__IXGBE_FDIR_INIT_DONE, 5664 set_bit(__IXGBE_FDIR_INIT_DONE,
5601 &(adapter->tx_ring[i]->reinit_state)); 5665 &(adapter->tx_ring[i]->reinit_state));
5602 } else { 5666 } else {
5603 e_err(probe, "failed to finish FDIR re-initialization, " 5667 e_err(probe, "failed to finish FDIR re-initialization, "
5604 "ignored adding FDIR ATR filters\n"); 5668 "ignored adding FDIR ATR filters\n");
@@ -5616,8 +5680,8 @@ static DEFINE_MUTEX(ixgbe_watchdog_lock);
5616static void ixgbe_watchdog_task(struct work_struct *work) 5680static void ixgbe_watchdog_task(struct work_struct *work)
5617{ 5681{
5618 struct ixgbe_adapter *adapter = container_of(work, 5682 struct ixgbe_adapter *adapter = container_of(work,
5619 struct ixgbe_adapter, 5683 struct ixgbe_adapter,
5620 watchdog_task); 5684 watchdog_task);
5621 struct net_device *netdev = adapter->netdev; 5685 struct net_device *netdev = adapter->netdev;
5622 struct ixgbe_hw *hw = &adapter->hw; 5686 struct ixgbe_hw *hw = &adapter->hw;
5623 u32 link_speed; 5687 u32 link_speed;
@@ -5648,7 +5712,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5648 5712
5649 if (link_up || 5713 if (link_up ||
5650 time_after(jiffies, (adapter->link_check_timeout + 5714 time_after(jiffies, (adapter->link_check_timeout +
5651 IXGBE_TRY_LINK_TIMEOUT))) { 5715 IXGBE_TRY_LINK_TIMEOUT))) {
5652 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 5716 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5653 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); 5717 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5654 } 5718 }
@@ -5719,8 +5783,8 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5719} 5783}
5720 5784
5721static int ixgbe_tso(struct ixgbe_adapter *adapter, 5785static int ixgbe_tso(struct ixgbe_adapter *adapter,
5722 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 5786 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5723 u32 tx_flags, u8 *hdr_len) 5787 u32 tx_flags, u8 *hdr_len)
5724{ 5788{
5725 struct ixgbe_adv_tx_context_desc *context_desc; 5789 struct ixgbe_adv_tx_context_desc *context_desc;
5726 unsigned int i; 5790 unsigned int i;
@@ -5743,28 +5807,28 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5743 iph->tot_len = 0; 5807 iph->tot_len = 0;
5744 iph->check = 0; 5808 iph->check = 0;
5745 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 5809 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5746 iph->daddr, 0, 5810 iph->daddr, 0,
5747 IPPROTO_TCP, 5811 IPPROTO_TCP,
5748 0); 5812 0);
5749 } else if (skb_is_gso_v6(skb)) { 5813 } else if (skb_is_gso_v6(skb)) {
5750 ipv6_hdr(skb)->payload_len = 0; 5814 ipv6_hdr(skb)->payload_len = 0;
5751 tcp_hdr(skb)->check = 5815 tcp_hdr(skb)->check =
5752 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5816 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5753 &ipv6_hdr(skb)->daddr, 5817 &ipv6_hdr(skb)->daddr,
5754 0, IPPROTO_TCP, 0); 5818 0, IPPROTO_TCP, 0);
5755 } 5819 }
5756 5820
5757 i = tx_ring->next_to_use; 5821 i = tx_ring->next_to_use;
5758 5822
5759 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5823 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5760 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 5824 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
5761 5825
5762 /* VLAN MACLEN IPLEN */ 5826 /* VLAN MACLEN IPLEN */
5763 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 5827 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5764 vlan_macip_lens |= 5828 vlan_macip_lens |=
5765 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 5829 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5766 vlan_macip_lens |= ((skb_network_offset(skb)) << 5830 vlan_macip_lens |= ((skb_network_offset(skb)) <<
5767 IXGBE_ADVTXD_MACLEN_SHIFT); 5831 IXGBE_ADVTXD_MACLEN_SHIFT);
5768 *hdr_len += skb_network_offset(skb); 5832 *hdr_len += skb_network_offset(skb);
5769 vlan_macip_lens |= 5833 vlan_macip_lens |=
5770 (skb_transport_header(skb) - skb_network_header(skb)); 5834 (skb_transport_header(skb) - skb_network_header(skb));
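Note the call-site change from IXGBE_TX_CTXTDESC_ADV(*tx_ring, i) to IXGBE_TX_CTXTDESC_ADV(tx_ring, i): the macro now takes the ring pointer itself rather than a dereferenced ring. A hedged sketch of the macro shape implied by the new call sites (the actual definition lives in ixgbe.h and may differ):

	/* assumed shape: index the ring's descriptor array via the pointer */
	#define IXGBE_TX_CTXTDESC_ADV(R, i)	\
		(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))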
@@ -5775,7 +5839,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5775 5839
5776 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 5840 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5777 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 5841 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
5778 IXGBE_ADVTXD_DTYP_CTXT); 5842 IXGBE_ADVTXD_DTYP_CTXT);
5779 5843
5780 if (skb->protocol == htons(ETH_P_IP)) 5844 if (skb->protocol == htons(ETH_P_IP))
5781 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 5845 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -5803,9 +5867,53 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5803 return false; 5867 return false;
5804} 5868}
5805 5869
5870static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
5871{
5872 u32 rtn = 0;
5873 __be16 protocol;
5874
5875 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5876 protocol = ((const struct vlan_ethhdr *)skb->data)->
5877 h_vlan_encapsulated_proto;
5878 else
5879 protocol = skb->protocol;
5880
5881 switch (protocol) {
5882 case cpu_to_be16(ETH_P_IP):
5883 rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
5884 switch (ip_hdr(skb)->protocol) {
5885 case IPPROTO_TCP:
5886 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5887 break;
5888 case IPPROTO_SCTP:
5889 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5890 break;
5891 }
5892 break;
5893 case cpu_to_be16(ETH_P_IPV6):
5894 /* XXX what about other V6 headers?? */
5895 switch (ipv6_hdr(skb)->nexthdr) {
5896 case IPPROTO_TCP:
5897 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5898 break;
5899 case IPPROTO_SCTP:
5900 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5901 break;
5902 }
5903 break;
5904 default:
5905 if (unlikely(net_ratelimit()))
5906 e_warn(probe, "partial checksum but proto=%x!\n",
5907 skb->protocol);
5908 break;
5909 }
5910
5911 return rtn;
5912}
5913
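The new ixgbe_psum() helper pulls the CHECKSUM_PARTIAL protocol dispatch out of ixgbe_tx_csum(): it resolves the L3 protocol (looking through an 802.1Q header if present), checks the L4 protocol, and returns the IXGBE_ADVTXD_TUCMD_* bits to OR into the context descriptor. The call site in ixgbe_tx_csum() below reduces to a single line:

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		type_tucmd_mlhl |= ixgbe_psum(adapter, skb);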
5806static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 5914static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5807 struct ixgbe_ring *tx_ring, 5915 struct ixgbe_ring *tx_ring,
5808 struct sk_buff *skb, u32 tx_flags) 5916 struct sk_buff *skb, u32 tx_flags)
5809{ 5917{
5810 struct ixgbe_adv_tx_context_desc *context_desc; 5918 struct ixgbe_adv_tx_context_desc *context_desc;
5811 unsigned int i; 5919 unsigned int i;
@@ -5816,63 +5924,25 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5816 (tx_flags & IXGBE_TX_FLAGS_VLAN)) { 5924 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
5817 i = tx_ring->next_to_use; 5925 i = tx_ring->next_to_use;
5818 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5926 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5819 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 5927 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
5820 5928
5821 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 5929 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5822 vlan_macip_lens |= 5930 vlan_macip_lens |=
5823 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 5931 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5824 vlan_macip_lens |= (skb_network_offset(skb) << 5932 vlan_macip_lens |= (skb_network_offset(skb) <<
5825 IXGBE_ADVTXD_MACLEN_SHIFT); 5933 IXGBE_ADVTXD_MACLEN_SHIFT);
5826 if (skb->ip_summed == CHECKSUM_PARTIAL) 5934 if (skb->ip_summed == CHECKSUM_PARTIAL)
5827 vlan_macip_lens |= (skb_transport_header(skb) - 5935 vlan_macip_lens |= (skb_transport_header(skb) -
5828 skb_network_header(skb)); 5936 skb_network_header(skb));
5829 5937
5830 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 5938 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5831 context_desc->seqnum_seed = 0; 5939 context_desc->seqnum_seed = 0;
5832 5940
5833 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 5941 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
5834 IXGBE_ADVTXD_DTYP_CTXT); 5942 IXGBE_ADVTXD_DTYP_CTXT);
5835
5836 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5837 __be16 protocol;
5838
5839 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
5840 const struct vlan_ethhdr *vhdr =
5841 (const struct vlan_ethhdr *)skb->data;
5842
5843 protocol = vhdr->h_vlan_encapsulated_proto;
5844 } else {
5845 protocol = skb->protocol;
5846 }
5847 5943
5848 switch (protocol) { 5944 if (skb->ip_summed == CHECKSUM_PARTIAL)
5849 case cpu_to_be16(ETH_P_IP): 5945 type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
5850 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5851 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5852 type_tucmd_mlhl |=
5853 IXGBE_ADVTXD_TUCMD_L4T_TCP;
5854 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
5855 type_tucmd_mlhl |=
5856 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5857 break;
5858 case cpu_to_be16(ETH_P_IPV6):
5859 /* XXX what about other V6 headers?? */
5860 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5861 type_tucmd_mlhl |=
5862 IXGBE_ADVTXD_TUCMD_L4T_TCP;
5863 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
5864 type_tucmd_mlhl |=
5865 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5866 break;
5867 default:
5868 if (unlikely(net_ratelimit())) {
5869 e_warn(probe, "partial checksum "
5870 "but proto=%x!\n",
5871 skb->protocol);
5872 }
5873 break;
5874 }
5875 }
5876 5946
5877 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 5947 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5878 /* use index zero for tx checksum offload */ 5948 /* use index zero for tx checksum offload */
@@ -5893,9 +5963,9 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5893} 5963}
5894 5964
5895static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 5965static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5896 struct ixgbe_ring *tx_ring, 5966 struct ixgbe_ring *tx_ring,
5897 struct sk_buff *skb, u32 tx_flags, 5967 struct sk_buff *skb, u32 tx_flags,
5898 unsigned int first) 5968 unsigned int first)
5899{ 5969{
5900 struct pci_dev *pdev = adapter->pdev; 5970 struct pci_dev *pdev = adapter->pdev;
5901 struct ixgbe_tx_buffer *tx_buffer_info; 5971 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -5990,7 +6060,7 @@ dma_error:
5990 6060
5991 /* clear timestamp and dma mappings for remaining portion of packet */ 6061 /* clear timestamp and dma mappings for remaining portion of packet */
5992 while (count--) { 6062 while (count--) {
5993 if (i==0) 6063 if (i == 0)
5994 i += tx_ring->count; 6064 i += tx_ring->count;
5995 i--; 6065 i--;
5996 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6066 tx_buffer_info = &tx_ring->tx_buffer_info[i];
@@ -6001,8 +6071,8 @@ dma_error:
6001} 6071}
6002 6072
6003static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 6073static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6004 struct ixgbe_ring *tx_ring, 6074 struct ixgbe_ring *tx_ring,
6005 int tx_flags, int count, u32 paylen, u8 hdr_len) 6075 int tx_flags, int count, u32 paylen, u8 hdr_len)
6006{ 6076{
6007 union ixgbe_adv_tx_desc *tx_desc = NULL; 6077 union ixgbe_adv_tx_desc *tx_desc = NULL;
6008 struct ixgbe_tx_buffer *tx_buffer_info; 6078 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6021,17 +6091,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6021 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 6091 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6022 6092
6023 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 6093 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6024 IXGBE_ADVTXD_POPTS_SHIFT; 6094 IXGBE_ADVTXD_POPTS_SHIFT;
6025 6095
6026 /* use index 1 context for tso */ 6096 /* use index 1 context for tso */
6027 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 6097 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6028 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 6098 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
6029 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 6099 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
6030 IXGBE_ADVTXD_POPTS_SHIFT; 6100 IXGBE_ADVTXD_POPTS_SHIFT;
6031 6101
6032 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 6102 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
6033 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 6103 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6034 IXGBE_ADVTXD_POPTS_SHIFT; 6104 IXGBE_ADVTXD_POPTS_SHIFT;
6035 6105
6036 if (tx_flags & IXGBE_TX_FLAGS_FCOE) { 6106 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6037 olinfo_status |= IXGBE_ADVTXD_CC; 6107 olinfo_status |= IXGBE_ADVTXD_CC;
@@ -6045,10 +6115,10 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6045 i = tx_ring->next_to_use; 6115 i = tx_ring->next_to_use;
6046 while (count--) { 6116 while (count--) {
6047 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6117 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6048 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 6118 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
6049 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 6119 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
6050 tx_desc->read.cmd_type_len = 6120 tx_desc->read.cmd_type_len =
6051 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 6121 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
6052 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 6122 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6053 i++; 6123 i++;
6054 if (i == tx_ring->count) 6124 if (i == tx_ring->count)
@@ -6070,7 +6140,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6070} 6140}
6071 6141
6072static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6142static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6073 int queue, u32 tx_flags) 6143 int queue, u32 tx_flags)
6074{ 6144{
6075 struct ixgbe_atr_input atr_input; 6145 struct ixgbe_atr_input atr_input;
6076 struct tcphdr *th; 6146 struct tcphdr *th;
@@ -6098,7 +6168,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6098 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6168 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
6099 6169
6100 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6170 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
6101 IXGBE_TX_FLAGS_VLAN_SHIFT; 6171 IXGBE_TX_FLAGS_VLAN_SHIFT;
6102 src_ipv4_addr = iph->saddr; 6172 src_ipv4_addr = iph->saddr;
6103 dst_ipv4_addr = iph->daddr; 6173 dst_ipv4_addr = iph->daddr;
6104 flex_bytes = eth->h_proto; 6174 flex_bytes = eth->h_proto;
@@ -6117,7 +6187,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6117} 6187}
6118 6188
6119static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 6189static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6120 struct ixgbe_ring *tx_ring, int size) 6190 struct ixgbe_ring *tx_ring, int size)
6121{ 6191{
6122 netif_stop_subqueue(netdev, tx_ring->queue_index); 6192 netif_stop_subqueue(netdev, tx_ring->queue_index);
6123 /* Herbert's original patch had: 6193 /* Herbert's original patch had:
@@ -6137,7 +6207,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6137} 6207}
6138 6208
6139static int ixgbe_maybe_stop_tx(struct net_device *netdev, 6209static int ixgbe_maybe_stop_tx(struct net_device *netdev,
6140 struct ixgbe_ring *tx_ring, int size) 6210 struct ixgbe_ring *tx_ring, int size)
6141{ 6211{
6142 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 6212 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6143 return 0; 6213 return 0;
@@ -6183,11 +6253,10 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6183 return skb_tx_hash(dev, skb); 6253 return skb_tx_hash(dev, skb);
6184} 6254}
6185 6255
6186static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 6256netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
6187 struct net_device *netdev) 6257 struct ixgbe_adapter *adapter,
6258 struct ixgbe_ring *tx_ring)
6188{ 6259{
6189 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6190 struct ixgbe_ring *tx_ring;
6191 struct netdev_queue *txq; 6260 struct netdev_queue *txq;
6192 unsigned int first; 6261 unsigned int first;
6193 unsigned int tx_flags = 0; 6262 unsigned int tx_flags = 0;
@@ -6211,8 +6280,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6211 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6280 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6212 } 6281 }
6213 6282
6214 tx_ring = adapter->tx_ring[skb->queue_mapping];
6215
6216#ifdef IXGBE_FCOE 6283#ifdef IXGBE_FCOE
6217 /* for FCoE with DCB, we force the priority to what 6284 /* for FCoE with DCB, we force the priority to what
6218 * was specified by the switch */ 6285 * was specified by the switch */
@@ -6283,10 +6350,10 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6283 if (tx_ring->atr_sample_rate) { 6350 if (tx_ring->atr_sample_rate) {
6284 ++tx_ring->atr_count; 6351 ++tx_ring->atr_count;
6285 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && 6352 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6286 test_bit(__IXGBE_FDIR_INIT_DONE, 6353 test_bit(__IXGBE_FDIR_INIT_DONE,
6287 &tx_ring->reinit_state)) { 6354 &tx_ring->reinit_state)) {
6288 ixgbe_atr(adapter, skb, tx_ring->queue_index, 6355 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6289 tx_flags); 6356 tx_flags);
6290 tx_ring->atr_count = 0; 6357 tx_ring->atr_count = 0;
6291 } 6358 }
6292 } 6359 }
@@ -6294,7 +6361,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6294 txq->tx_bytes += skb->len; 6361 txq->tx_bytes += skb->len;
6295 txq->tx_packets++; 6362 txq->tx_packets++;
6296 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, 6363 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
6297 hdr_len); 6364 hdr_len);
6298 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 6365 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
6299 6366
6300 } else { 6367 } else {
@@ -6306,6 +6373,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6306 return NETDEV_TX_OK; 6373 return NETDEV_TX_OK;
6307} 6374}
6308 6375
6376static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
6377{
6378 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6379 struct ixgbe_ring *tx_ring;
6380
6381 tx_ring = adapter->tx_ring[skb->queue_mapping];
6382 return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
6383}
6384
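Splitting ixgbe_xmit_frame() into a thin ndo_start_xmit wrapper plus ixgbe_xmit_frame_ring() lets a caller that already knows its transmit ring skip the queue_mapping lookup. A hedged sketch of such a caller — the function name and ring choice here are illustrative, not from this patch:

	static netdev_tx_t example_xmit_on_ring(struct sk_buff *skb,
						struct net_device *netdev)
	{
		struct ixgbe_adapter *adapter = netdev_priv(netdev);
		/* hypothetical: transmit on a specific, pre-selected ring */
		struct ixgbe_ring *tx_ring = adapter->tx_ring[0];

		return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
	}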
6309/** 6385/**
6310 * ixgbe_set_mac - Change the Ethernet Address of the NIC 6386 * ixgbe_set_mac - Change the Ethernet Address of the NIC
6311 * @netdev: network interface device structure 6387 * @netdev: network interface device structure
@@ -6437,7 +6513,7 @@ static void ixgbe_netpoll(struct net_device *netdev)
6437#endif 6513#endif
6438 6514
6439static const struct net_device_ops ixgbe_netdev_ops = { 6515static const struct net_device_ops ixgbe_netdev_ops = {
6440 .ndo_open = ixgbe_open, 6516 .ndo_open = ixgbe_open,
6441 .ndo_stop = ixgbe_close, 6517 .ndo_stop = ixgbe_close,
6442 .ndo_start_xmit = ixgbe_xmit_frame, 6518 .ndo_start_xmit = ixgbe_xmit_frame,
6443 .ndo_select_queue = ixgbe_select_queue, 6519 .ndo_select_queue = ixgbe_select_queue,
@@ -6532,7 +6608,7 @@ err_novfs:
6532 * and a hardware reset occur. 6608 * and a hardware reset occur.
6533 **/ 6609 **/
6534static int __devinit ixgbe_probe(struct pci_dev *pdev, 6610static int __devinit ixgbe_probe(struct pci_dev *pdev,
6535 const struct pci_device_id *ent) 6611 const struct pci_device_id *ent)
6536{ 6612{
6537 struct net_device *netdev; 6613 struct net_device *netdev;
6538 struct ixgbe_adapter *adapter = NULL; 6614 struct ixgbe_adapter *adapter = NULL;
@@ -6577,7 +6653,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6577 } 6653 }
6578 6654
6579 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 6655 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6580 IORESOURCE_MEM), ixgbe_driver_name); 6656 IORESOURCE_MEM), ixgbe_driver_name);
6581 if (err) { 6657 if (err) {
6582 dev_err(&pdev->dev, 6658 dev_err(&pdev->dev,
6583 "pci_request_selected_regions failed 0x%x\n", err); 6659 "pci_request_selected_regions failed 0x%x\n", err);
@@ -6617,7 +6693,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6617 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 6693 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
6618 6694
6619 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 6695 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6620 pci_resource_len(pdev, 0)); 6696 pci_resource_len(pdev, 0));
6621 if (!hw->hw_addr) { 6697 if (!hw->hw_addr) {
6622 err = -EIO; 6698 err = -EIO;
6623 goto err_ioremap; 6699 goto err_ioremap;
@@ -6661,7 +6737,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6661 * which might start the timer 6737 * which might start the timer
6662 */ 6738 */
6663 init_timer(&adapter->sfp_timer); 6739 init_timer(&adapter->sfp_timer);
6664 adapter->sfp_timer.function = &ixgbe_sfp_timer; 6740 adapter->sfp_timer.function = ixgbe_sfp_timer;
6665 adapter->sfp_timer.data = (unsigned long) adapter; 6741 adapter->sfp_timer.data = (unsigned long) adapter;
6666 6742
6667 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); 6743 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
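Dropping the & in the sfp_timer assignment (and in the watchdog_timer assignments below) is purely cosmetic: in C a function designator decays to a function pointer, so both spellings yield the same value. Kernel style prefers the bare name:

	timer->function = ixgbe_sfp_timer;	/* preferred */
	timer->function = &ixgbe_sfp_timer;	/* identical pointer value */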
@@ -6671,7 +6747,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6671 6747
6672 /* a new SFP+ module arrival, called from GPI SDP2 context */ 6748 /* a new SFP+ module arrival, called from GPI SDP2 context */
6673 INIT_WORK(&adapter->sfp_config_module_task, 6749 INIT_WORK(&adapter->sfp_config_module_task,
6674 ixgbe_sfp_config_module_task); 6750 ixgbe_sfp_config_module_task);
6675 6751
6676 ii->get_invariants(hw); 6752 ii->get_invariants(hw);
6677 6753
@@ -6723,10 +6799,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6723 ixgbe_probe_vf(adapter, ii); 6799 ixgbe_probe_vf(adapter, ii);
6724 6800
6725 netdev->features = NETIF_F_SG | 6801 netdev->features = NETIF_F_SG |
6726 NETIF_F_IP_CSUM | 6802 NETIF_F_IP_CSUM |
6727 NETIF_F_HW_VLAN_TX | 6803 NETIF_F_HW_VLAN_TX |
6728 NETIF_F_HW_VLAN_RX | 6804 NETIF_F_HW_VLAN_RX |
6729 NETIF_F_HW_VLAN_FILTER; 6805 NETIF_F_HW_VLAN_FILTER;
6730 6806
6731 netdev->features |= NETIF_F_IPV6_CSUM; 6807 netdev->features |= NETIF_F_IPV6_CSUM;
6732 netdev->features |= NETIF_F_TSO; 6808 netdev->features |= NETIF_F_TSO;
@@ -6793,7 +6869,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6793 hw->mac.ops.disable_tx_laser(hw); 6869 hw->mac.ops.disable_tx_laser(hw);
6794 6870
6795 init_timer(&adapter->watchdog_timer); 6871 init_timer(&adapter->watchdog_timer);
6796 adapter->watchdog_timer.function = &ixgbe_watchdog; 6872 adapter->watchdog_timer.function = ixgbe_watchdog;
6797 adapter->watchdog_timer.data = (unsigned long)adapter; 6873 adapter->watchdog_timer.data = (unsigned long)adapter;
6798 6874
6799 INIT_WORK(&adapter->reset_task, ixgbe_reset_task); 6875 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
@@ -6806,7 +6882,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6806 switch (pdev->device) { 6882 switch (pdev->device) {
6807 case IXGBE_DEV_ID_82599_KX4: 6883 case IXGBE_DEV_ID_82599_KX4:
6808 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 6884 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6809 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 6885 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
6810 break; 6886 break;
6811 default: 6887 default:
6812 adapter->wol = 0; 6888 adapter->wol = 0;
@@ -6819,13 +6895,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6819 6895
6820 /* print bus type/speed/width info */ 6896 /* print bus type/speed/width info */
6821 e_dev_info("(PCI Express:%s:%s) %pM\n", 6897 e_dev_info("(PCI Express:%s:%s) %pM\n",
6822 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": 6898 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
6823 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), 6899 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
6824 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 6900 "Unknown"),
6825 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 6901 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
6826 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 6902 hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
6827 "Unknown"), 6903 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
6828 netdev->dev_addr); 6904 "Unknown"),
6905 netdev->dev_addr);
6829 ixgbe_read_pba_num_generic(hw, &part_num); 6906 ixgbe_read_pba_num_generic(hw, &part_num);
6830 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 6907 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6831 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " 6908 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
@@ -6872,7 +6949,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6872 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); 6949 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
6873 6950
6874 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 6951 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
6875 INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task); 6952 INIT_WORK(&adapter->check_overtemp_task,
6953 ixgbe_check_overtemp_task);
6876#ifdef CONFIG_IXGBE_DCA 6954#ifdef CONFIG_IXGBE_DCA
6877 if (dca_add_requester(&pdev->dev) == 0) { 6955 if (dca_add_requester(&pdev->dev) == 0) {
6878 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 6956 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -6908,8 +6986,8 @@ err_eeprom:
6908err_ioremap: 6986err_ioremap:
6909 free_netdev(netdev); 6987 free_netdev(netdev);
6910err_alloc_etherdev: 6988err_alloc_etherdev:
6911 pci_release_selected_regions(pdev, pci_select_bars(pdev, 6989 pci_release_selected_regions(pdev,
6912 IORESOURCE_MEM)); 6990 pci_select_bars(pdev, IORESOURCE_MEM));
6913err_pci_reg: 6991err_pci_reg:
6914err_dma: 6992err_dma:
6915 pci_disable_device(pdev); 6993 pci_disable_device(pdev);
@@ -6976,7 +7054,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6976 7054
6977 iounmap(adapter->hw.hw_addr); 7055 iounmap(adapter->hw.hw_addr);
6978 pci_release_selected_regions(pdev, pci_select_bars(pdev, 7056 pci_release_selected_regions(pdev, pci_select_bars(pdev,
6979 IORESOURCE_MEM)); 7057 IORESOURCE_MEM));
6980 7058
6981 e_dev_info("complete\n"); 7059 e_dev_info("complete\n");
6982 7060
@@ -6996,7 +7074,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6996 * this device has been detected. 7074 * this device has been detected.
6997 */ 7075 */
6998static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 7076static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
6999 pci_channel_state_t state) 7077 pci_channel_state_t state)
7000{ 7078{
7001 struct net_device *netdev = pci_get_drvdata(pdev); 7079 struct net_device *netdev = pci_get_drvdata(pdev);
7002 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7080 struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7102,8 +7180,7 @@ static struct pci_driver ixgbe_driver = {
7102static int __init ixgbe_init_module(void) 7180static int __init ixgbe_init_module(void)
7103{ 7181{
7104 int ret; 7182 int ret;
7105 pr_info("%s - version %s\n", ixgbe_driver_string, 7183 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
7106 ixgbe_driver_version);
7107 pr_info("%s\n", ixgbe_copyright); 7184 pr_info("%s\n", ixgbe_copyright);
7108 7185
7109#ifdef CONFIG_IXGBE_DCA 7186#ifdef CONFIG_IXGBE_DCA
@@ -7132,12 +7209,12 @@ static void __exit ixgbe_exit_module(void)
7132 7209
7133#ifdef CONFIG_IXGBE_DCA 7210#ifdef CONFIG_IXGBE_DCA
7134static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, 7211static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7135 void *p) 7212 void *p)
7136{ 7213{
7137 int ret_val; 7214 int ret_val;
7138 7215
7139 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, 7216 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
7140 __ixgbe_notify_dca); 7217 __ixgbe_notify_dca);
7141 7218
7142 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 7219 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7143} 7220}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9587d975d66c..d3cc6ce7c973 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -871,6 +871,8 @@
871#define IXGBE_RDRXCTL_MVMEN 0x00000020 871#define IXGBE_RDRXCTL_MVMEN 0x00000020
872#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ 872#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
873#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ 873#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
874#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
875#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
874 876
875/* RQTC Bit Masks and Shifts */ 877/* RQTC Bit Masks and Shifts */
876#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) 878#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
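Per the comments on the two new RDRXCTL bits, both must be set whenever RSC (receive-side coalescing) is enabled. A hedged sketch of how a driver would apply them via a read-modify-write of RDRXCTL; whether this exact sequence appears elsewhere in the series is an assumption:

	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

	/* assumption: required workaround bits when enabling RSC */
	rdrxctl |= IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);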
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index f7015efbff05..da4033c6efa2 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -243,7 +243,6 @@ struct ixgbevf_adapter {
243 /* OS defined structs */ 243 /* OS defined structs */
244 struct net_device *netdev; 244 struct net_device *netdev;
245 struct pci_dev *pdev; 245 struct pci_dev *pdev;
246 struct net_device_stats net_stats;
247 246
248 /* structs defined in ixgbe_vf.h */ 247 /* structs defined in ixgbe_vf.h */
249 struct ixgbe_hw hw; 248 struct ixgbe_hw hw;
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 918c00359b0a..3eda1bdbbb7a 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -308,8 +308,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
308 tx_ring->total_bytes += total_bytes; 308 tx_ring->total_bytes += total_bytes;
309 tx_ring->total_packets += total_packets; 309 tx_ring->total_packets += total_packets;
310 310
311 adapter->net_stats.tx_bytes += total_bytes; 311 netdev->stats.tx_bytes += total_bytes;
312 adapter->net_stats.tx_packets += total_packets; 312 netdev->stats.tx_packets += total_packets;
313 313
314 return (count < tx_ring->work_limit); 314 return (count < tx_ring->work_limit);
315} 315}
@@ -356,7 +356,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
356static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, 356static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
357 u32 status_err, struct sk_buff *skb) 357 u32 status_err, struct sk_buff *skb)
358{ 358{
359 skb->ip_summed = CHECKSUM_NONE; 359 skb_checksum_none_assert(skb);
360 360
361 /* Rx csum disabled */ 361 /* Rx csum disabled */
362 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) 362 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
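skb_checksum_none_assert() replaces the unconditional skb->ip_summed = CHECKSUM_NONE store: a freshly received skb is already CHECKSUM_NONE, so the helper merely asserts that invariant on debug builds and costs nothing otherwise. A hedged sketch of its shape — see include/linux/skbuff.h for the authoritative definition:

	static inline void skb_checksum_none_assert(const struct sk_buff *skb)
	{
	#ifdef DEBUG
		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
	#endif
	}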
@@ -639,8 +639,8 @@ next_desc:
639 639
640 rx_ring->total_packets += total_rx_packets; 640 rx_ring->total_packets += total_rx_packets;
641 rx_ring->total_bytes += total_rx_bytes; 641 rx_ring->total_bytes += total_rx_bytes;
642 adapter->net_stats.rx_bytes += total_rx_bytes; 642 adapter->netdev->stats.rx_bytes += total_rx_bytes;
643 adapter->net_stats.rx_packets += total_rx_packets; 643 adapter->netdev->stats.rx_packets += total_rx_packets;
644 644
645 return cleaned; 645 return cleaned;
646} 646}
@@ -2297,7 +2297,7 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2297 adapter->stats.vfmprc); 2297 adapter->stats.vfmprc);
2298 2298
2299 /* Fill out the OS statistics structure */ 2299 /* Fill out the OS statistics structure */
2300 adapter->net_stats.multicast = adapter->stats.vfmprc - 2300 adapter->netdev->stats.multicast = adapter->stats.vfmprc -
2301 adapter->stats.base_vfmprc; 2301 adapter->stats.base_vfmprc;
2302} 2302}
2303 2303
@@ -3181,21 +3181,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3181} 3181}
3182 3182
3183/** 3183/**
3184 * ixgbevf_get_stats - Get System Network Statistics
3185 * @netdev: network interface device structure
3186 *
3187 * Returns the address of the device statistics structure.
3188 * The statistics are actually updated from the timer callback.
3189 **/
3190static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
3191{
3192 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3193
3194 /* only return the current stats */
3195 return &adapter->net_stats;
3196}
3197
3198/**
3199 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3184 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3200 * @netdev: network interface device structure 3185 * @netdev: network interface device structure
3201 * @p: pointer to an address structure 3186 * @p: pointer to an address structure
@@ -3272,7 +3257,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
3272 .ndo_open = &ixgbevf_open, 3257 .ndo_open = &ixgbevf_open,
3273 .ndo_stop = &ixgbevf_close, 3258 .ndo_stop = &ixgbevf_close,
3274 .ndo_start_xmit = &ixgbevf_xmit_frame, 3259 .ndo_start_xmit = &ixgbevf_xmit_frame,
3275 .ndo_get_stats = &ixgbevf_get_stats,
3276 .ndo_set_rx_mode = &ixgbevf_set_rx_mode, 3260 .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
3277 .ndo_set_multicast_list = &ixgbevf_set_rx_mode, 3261 .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
3278 .ndo_validate_addr = eth_validate_addr, 3262 .ndo_validate_addr = eth_validate_addr,
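With ixgbevf's counters now accumulated directly in netdev->stats (tx/rx bytes, packets, and multicast in the hunks above), the private net_stats copy and the .ndo_get_stats hook become redundant: when a driver provides no stats hook, the core falls back to the device's own stats block. The pattern after the change:

	/* counters go straight into the struct the core already reads */
	netdev->stats.tx_bytes   += total_bytes;
	netdev->stats.tx_packets += total_packets;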
@@ -3426,7 +3410,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3426 } 3410 }
3427 3411
3428 init_timer(&adapter->watchdog_timer); 3412 init_timer(&adapter->watchdog_timer);
3429 adapter->watchdog_timer.function = &ixgbevf_watchdog; 3413 adapter->watchdog_timer.function = ixgbevf_watchdog;
3430 adapter->watchdog_timer.data = (unsigned long)adapter; 3414 adapter->watchdog_timer.data = (unsigned long)adapter;
3431 3415
3432 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3416 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 94b750b8874f..61f9dc831424 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -124,8 +124,6 @@ struct ixgbe_hw {
124 void *back; 124 void *back;
125 125
126 u8 __iomem *hw_addr; 126 u8 __iomem *hw_addr;
127 u8 *flash_address;
128 unsigned long io_base;
129 127
130 struct ixgbe_mac_info mac; 128 struct ixgbe_mac_info mac;
131 struct ixgbe_mbx_info mbx; 129 struct ixgbe_mbx_info mbx;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 99f24f5cac53..c04c096bc6a9 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -21,6 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/kernel.h> 27#include <linux/kernel.h>
26#include <linux/pci.h> 28#include <linux/pci.h>
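Defining pr_fmt() before the first include makes every subsequent pr_err()/pr_info() in jme.c expand with a KBUILD_MODNAME ("jme: ") prefix automatically, which is what allows the driver-private jeprintk() wrapper calls below to become plain pr_err() calls without losing message attribution:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	pr_err("phy(%d) read timeout : %d\n", phy, reg);
	/* prints: "jme: phy(0) read timeout : 1" (modulo the actual values) */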
@@ -73,7 +75,7 @@ read_again:
73 } 75 }
74 76
75 if (i == 0) { 77 if (i == 0) {
76 jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg); 78 pr_err("phy(%d) read timeout : %d\n", phy, reg);
77 return 0; 79 return 0;
78 } 80 }
79 81
@@ -102,7 +104,7 @@ jme_mdio_write(struct net_device *netdev,
102 } 104 }
103 105
104 if (i == 0) 106 if (i == 0)
105 jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg); 107 pr_err("phy(%d) write timeout : %d\n", phy, reg);
106} 108}
107 109
108static inline void 110static inline void
@@ -227,7 +229,7 @@ jme_reload_eeprom(struct jme_adapter *jme)
227 } 229 }
228 230
229 if (i == 0) { 231 if (i == 0) {
230 jeprintk(jme->pdev, "eeprom reload timeout\n"); 232 pr_err("eeprom reload timeout\n");
231 return -EIO; 233 return -EIO;
232 } 234 }
233 } 235 }
@@ -397,8 +399,7 @@ jme_check_link(struct net_device *netdev, int testonly)
397 phylink = jread32(jme, JME_PHY_LINK); 399 phylink = jread32(jme, JME_PHY_LINK);
398 } 400 }
399 if (!cnt) 401 if (!cnt)
400 jeprintk(jme->pdev, 402 pr_err("Waiting speed resolve timeout\n");
401 "Waiting speed resolve timeout.\n");
402 403
403 strcat(linkmsg, "ANed: "); 404 strcat(linkmsg, "ANed: ");
404 } 405 }
@@ -480,13 +481,13 @@ jme_check_link(struct net_device *netdev, int testonly)
480 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 481 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
481 "MDI-X" : 482 "MDI-X" :
482 "MDI"); 483 "MDI");
483 netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg); 484 netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
484 netif_carrier_on(netdev); 485 netif_carrier_on(netdev);
485 } else { 486 } else {
486 if (testonly) 487 if (testonly)
487 goto out; 488 goto out;
488 489
489 netif_info(jme, link, jme->dev, "Link is down.\n"); 490 netif_info(jme, link, jme->dev, "Link is down\n");
490 jme->phylink = 0; 491 jme->phylink = 0;
491 netif_carrier_off(netdev); 492 netif_carrier_off(netdev);
492 } 493 }
@@ -648,7 +649,7 @@ jme_disable_tx_engine(struct jme_adapter *jme)
648 } 649 }
649 650
650 if (!i) 651 if (!i)
651 jeprintk(jme->pdev, "Disable TX engine timeout.\n"); 652 pr_err("Disable TX engine timeout\n");
652} 653}
653 654
654static void 655static void
@@ -867,7 +868,7 @@ jme_disable_rx_engine(struct jme_adapter *jme)
867 } 868 }
868 869
869 if (!i) 870 if (!i)
870 jeprintk(jme->pdev, "Disable RX engine timeout.\n"); 871 pr_err("Disable RX engine timeout\n");
871 872
872} 873}
873 874
@@ -887,13 +888,13 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
887 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 888 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
888 == RXWBFLAG_UDPON)) { 889 == RXWBFLAG_UDPON)) {
889 if (flags & RXWBFLAG_IPV4) 890 if (flags & RXWBFLAG_IPV4)
890 netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n"); 891 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
891 return false; 892 return false;
892 } 893 }
893 894
894 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) 895 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
895 == RXWBFLAG_IPV4)) { 896 == RXWBFLAG_IPV4)) {
896 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n"); 897 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
897 return false; 898 return false;
898 } 899 }
899 900
@@ -936,7 +937,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
936 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 937 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
937 skb->ip_summed = CHECKSUM_UNNECESSARY; 938 skb->ip_summed = CHECKSUM_UNNECESSARY;
938 else 939 else
939 skb->ip_summed = CHECKSUM_NONE; 940 skb_checksum_none_assert(skb);
940 941
941 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { 942 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
942 if (jme->vlgrp) { 943 if (jme->vlgrp) {
@@ -1185,9 +1186,9 @@ jme_link_change_tasklet(unsigned long arg)
1185 1186
1186 while (!atomic_dec_and_test(&jme->link_changing)) { 1187 while (!atomic_dec_and_test(&jme->link_changing)) {
1187 atomic_inc(&jme->link_changing); 1188 atomic_inc(&jme->link_changing);
1188 netif_info(jme, intr, jme->dev, "Get link change lock failed.\n"); 1189 netif_info(jme, intr, jme->dev, "Get link change lock failed\n");
1189 while (atomic_read(&jme->link_changing) != 1) 1190 while (atomic_read(&jme->link_changing) != 1)
1190 netif_info(jme, intr, jme->dev, "Waiting link change lock.\n"); 1191 netif_info(jme, intr, jme->dev, "Waiting link change lock\n");
1191 } 1192 }
1192 1193
1193 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) 1194 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1221,15 +1222,13 @@ jme_link_change_tasklet(unsigned long arg)
1221 if (netif_carrier_ok(netdev)) { 1222 if (netif_carrier_ok(netdev)) {
1222 rc = jme_setup_rx_resources(jme); 1223 rc = jme_setup_rx_resources(jme);
1223 if (rc) { 1224 if (rc) {
1224 jeprintk(jme->pdev, "Allocating resources for RX error" 1225 pr_err("Allocating resources for RX error, Device STOPPED!\n");
1225 ", Device STOPPED!\n");
1226 goto out_enable_tasklet; 1226 goto out_enable_tasklet;
1227 } 1227 }
1228 1228
1229 rc = jme_setup_tx_resources(jme); 1229 rc = jme_setup_tx_resources(jme);
1230 if (rc) { 1230 if (rc) {
1231 jeprintk(jme->pdev, "Allocating resources for TX error" 1231 pr_err("Allocating resources for TX error, Device STOPPED!\n");
1232 ", Device STOPPED!\n");
1233 goto err_out_free_rx_resources; 1232 goto err_out_free_rx_resources;
1234 } 1233 }
1235 1234
@@ -1324,7 +1323,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
1324 smp_wmb(); 1323 smp_wmb();
1325 if (unlikely(netif_queue_stopped(jme->dev) && 1324 if (unlikely(netif_queue_stopped(jme->dev) &&
1326 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { 1325 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1327 netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n"); 1326 netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n");
1328 netif_wake_queue(jme->dev); 1327 netif_wake_queue(jme->dev);
1329 } 1328 }
1330 1329
@@ -1339,7 +1338,7 @@ jme_tx_clean_tasklet(unsigned long arg)
1339 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; 1338 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
1340 int i, j, cnt = 0, max, err, mask; 1339 int i, j, cnt = 0, max, err, mask;
1341 1340
1342 tx_dbg(jme, "Into txclean.\n"); 1341 tx_dbg(jme, "Into txclean\n");
1343 1342
1344 if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) 1343 if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
1345 goto out; 1344 goto out;
@@ -1361,7 +1360,7 @@ jme_tx_clean_tasklet(unsigned long arg)
1361 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { 1360 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
1362 1361
1363 tx_dbg(jme, "txclean: %d+%d@%lu\n", 1362 tx_dbg(jme, "txclean: %d+%d@%lu\n",
1364 i, ctxbi->nr_desc, jiffies); 1363 i, ctxbi->nr_desc, jiffies);
1365 1364
1366 err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; 1365 err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
1367 1366
@@ -1402,7 +1401,7 @@ jme_tx_clean_tasklet(unsigned long arg)
1402 ctxbi->nr_desc = 0; 1401 ctxbi->nr_desc = 0;
1403 } 1402 }
1404 1403
1405 tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies); 1404 tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
1406 atomic_set(&txring->next_to_clean, i); 1405 atomic_set(&txring->next_to_clean, i);
1407 atomic_add(cnt, &txring->nr_free); 1406 atomic_add(cnt, &txring->nr_free);
1408 1407
@@ -1548,10 +1547,10 @@ jme_request_irq(struct jme_adapter *jme)
1548 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, 1547 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1549 netdev); 1548 netdev);
1550 if (rc) { 1549 if (rc) {
1551 jeprintk(jme->pdev, 1550 netdev_err(netdev,
1552 "Unable to request %s interrupt (return: %d)\n", 1551 "Unable to request %s interrupt (return: %d)\n",
1553 test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx", 1552 test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1554 rc); 1553 rc);
1555 1554
1556 if (test_bit(JME_FLAG_MSI, &jme->flags)) { 1555 if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1557 pci_disable_msi(jme->pdev); 1556 pci_disable_msi(jme->pdev);
@@ -1834,7 +1833,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
1834 *flags |= TXFLAG_UDPCS; 1833 *flags |= TXFLAG_UDPCS;
1835 break; 1834 break;
1836 default: 1835 default:
1837 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n"); 1836 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n");
1838 break; 1837 break;
1839 } 1838 }
1840 } 1839 }
@@ -1909,12 +1908,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
1909 smp_wmb(); 1908 smp_wmb();
1910 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { 1909 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
1911 netif_stop_queue(jme->dev); 1910 netif_stop_queue(jme->dev);
1912 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n"); 1911 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
1913 smp_wmb(); 1912 smp_wmb();
1914 if (atomic_read(&txring->nr_free) 1913 if (atomic_read(&txring->nr_free)
1915 >= (jme->tx_wake_threshold)) { 1914 >= (jme->tx_wake_threshold)) {
1916 netif_wake_queue(jme->dev); 1915 netif_wake_queue(jme->dev);
1917 netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n"); 1916 netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
1918 } 1917 }
1919 } 1918 }
1920 1919
@@ -1922,7 +1921,8 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
1922 (jiffies - txbi->start_xmit) >= TX_TIMEOUT && 1921 (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
1923 txbi->skb)) { 1922 txbi->skb)) {
1924 netif_stop_queue(jme->dev); 1923 netif_stop_queue(jme->dev);
1925 netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies); 1924 netif_info(jme, tx_queued, jme->dev,
1925 "TX Queue Stopped %d@%lu\n", idx, jiffies);
1926 } 1926 }
1927} 1927}
1928 1928
@@ -1945,7 +1945,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1945 1945
1946 if (unlikely(idx < 0)) { 1946 if (unlikely(idx < 0)) {
1947 netif_stop_queue(netdev); 1947 netif_stop_queue(netdev);
1948 netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n"); 1948 netif_err(jme, tx_err, jme->dev,
1949 "BUG! Tx ring full when queue awake!\n");
1949 1950
1950 return NETDEV_TX_BUSY; 1951 return NETDEV_TX_BUSY;
1951 } 1952 }
@@ -1957,9 +1958,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1957 TXCS_QUEUE0S | 1958 TXCS_QUEUE0S |
1958 TXCS_ENABLE); 1959 TXCS_ENABLE);
1959 1960
1960 tx_dbg(jme, "xmit: %d+%d@%lu\n", idx, 1961 tx_dbg(jme, "xmit: %d+%d@%lu\n",
1961 skb_shinfo(skb)->nr_frags + 2, 1962 idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
1962 jiffies);
1963 jme_stop_queue_if_full(jme); 1963 jme_stop_queue_if_full(jme);
1964 1964
1965 return NETDEV_TX_OK; 1965 return NETDEV_TX_OK;
@@ -2501,7 +2501,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2501 val = jread32(jme, JME_SMBCSR); 2501 val = jread32(jme, JME_SMBCSR);
2502 } 2502 }
2503 if (!to) { 2503 if (!to) {
2504 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2504 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2505 return 0xFF; 2505 return 0xFF;
2506 } 2506 }
2507 2507
@@ -2517,7 +2517,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2517 val = jread32(jme, JME_SMBINTF); 2517 val = jread32(jme, JME_SMBINTF);
2518 } 2518 }
2519 if (!to) { 2519 if (!to) {
2520 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2520 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2521 return 0xFF; 2521 return 0xFF;
2522 } 2522 }
2523 2523
@@ -2537,7 +2537,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2537 val = jread32(jme, JME_SMBCSR); 2537 val = jread32(jme, JME_SMBCSR);
2538 } 2538 }
2539 if (!to) { 2539 if (!to) {
2540 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2540 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2541 return; 2541 return;
2542 } 2542 }
2543 2543
@@ -2554,7 +2554,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2554 val = jread32(jme, JME_SMBINTF); 2554 val = jread32(jme, JME_SMBINTF);
2555 } 2555 }
2556 if (!to) { 2556 if (!to) {
2557 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2557 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2558 return; 2558 return;
2559 } 2559 }
2560 2560
@@ -2699,26 +2699,26 @@ jme_init_one(struct pci_dev *pdev,
2699 */ 2699 */
2700 rc = pci_enable_device(pdev); 2700 rc = pci_enable_device(pdev);
2701 if (rc) { 2701 if (rc) {
2702 jeprintk(pdev, "Cannot enable PCI device.\n"); 2702 pr_err("Cannot enable PCI device\n");
2703 goto err_out; 2703 goto err_out;
2704 } 2704 }
2705 2705
2706 using_dac = jme_pci_dma64(pdev); 2706 using_dac = jme_pci_dma64(pdev);
2707 if (using_dac < 0) { 2707 if (using_dac < 0) {
2708 jeprintk(pdev, "Cannot set PCI DMA Mask.\n"); 2708 pr_err("Cannot set PCI DMA Mask\n");
2709 rc = -EIO; 2709 rc = -EIO;
2710 goto err_out_disable_pdev; 2710 goto err_out_disable_pdev;
2711 } 2711 }
2712 2712
2713 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2713 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2714 jeprintk(pdev, "No PCI resource region found.\n"); 2714 pr_err("No PCI resource region found\n");
2715 rc = -ENOMEM; 2715 rc = -ENOMEM;
2716 goto err_out_disable_pdev; 2716 goto err_out_disable_pdev;
2717 } 2717 }
2718 2718
2719 rc = pci_request_regions(pdev, DRV_NAME); 2719 rc = pci_request_regions(pdev, DRV_NAME);
2720 if (rc) { 2720 if (rc) {
2721 jeprintk(pdev, "Cannot obtain PCI resource region.\n"); 2721 pr_err("Cannot obtain PCI resource region\n");
2722 goto err_out_disable_pdev; 2722 goto err_out_disable_pdev;
2723 } 2723 }
2724 2724
@@ -2729,7 +2729,7 @@ jme_init_one(struct pci_dev *pdev,
2729 */ 2729 */
2730 netdev = alloc_etherdev(sizeof(*jme)); 2730 netdev = alloc_etherdev(sizeof(*jme));
2731 if (!netdev) { 2731 if (!netdev) {
2732 jeprintk(pdev, "Cannot allocate netdev structure.\n"); 2732 pr_err("Cannot allocate netdev structure\n");
2733 rc = -ENOMEM; 2733 rc = -ENOMEM;
2734 goto err_out_release_regions; 2734 goto err_out_release_regions;
2735 } 2735 }
@@ -2767,7 +2767,7 @@ jme_init_one(struct pci_dev *pdev,
2767 jme->regs = ioremap(pci_resource_start(pdev, 0), 2767 jme->regs = ioremap(pci_resource_start(pdev, 0),
2768 pci_resource_len(pdev, 0)); 2768 pci_resource_len(pdev, 0));
2769 if (!(jme->regs)) { 2769 if (!(jme->regs)) {
2770 jeprintk(pdev, "Mapping PCI resource region error.\n"); 2770 pr_err("Mapping PCI resource region error\n");
2771 rc = -ENOMEM; 2771 rc = -ENOMEM;
2772 goto err_out_free_netdev; 2772 goto err_out_free_netdev;
2773 } 2773 }
@@ -2855,8 +2855,8 @@ jme_init_one(struct pci_dev *pdev,
2855 2855
2856 if (!jme->mii_if.phy_id) { 2856 if (!jme->mii_if.phy_id) {
2857 rc = -EIO; 2857 rc = -EIO;
2858 jeprintk(pdev, "Can not find phy_id.\n"); 2858 pr_err("Can not find phy_id\n");
2859 goto err_out_unmap; 2859 goto err_out_unmap;
2860 } 2860 }
2861 2861
2862 jme->reg_ghc |= GHC_LINK_POLL; 2862 jme->reg_ghc |= GHC_LINK_POLL;
@@ -2883,8 +2883,7 @@ jme_init_one(struct pci_dev *pdev,
2883 jme_reset_mac_processor(jme); 2883 jme_reset_mac_processor(jme);
2884 rc = jme_reload_eeprom(jme); 2884 rc = jme_reload_eeprom(jme);
2885 if (rc) { 2885 if (rc) {
2886 jeprintk(pdev, 2886 pr_err("Reload eeprom for reading MAC Address error\n");
2887 "Reload eeprom for reading MAC Address error.\n");
2888 goto err_out_unmap; 2887 goto err_out_unmap;
2889 } 2888 }
2890 jme_load_macaddr(netdev); 2889 jme_load_macaddr(netdev);
@@ -2900,7 +2899,7 @@ jme_init_one(struct pci_dev *pdev,
2900 */ 2899 */
2901 rc = register_netdev(netdev); 2900 rc = register_netdev(netdev);
2902 if (rc) { 2901 if (rc) {
2903 jeprintk(pdev, "Cannot register net device.\n"); 2902 pr_err("Cannot register net device\n");
2904 goto err_out_unmap; 2903 goto err_out_unmap;
2905 } 2904 }
2906 2905
@@ -3042,8 +3041,7 @@ static struct pci_driver jme_driver = {
3042static int __init 3041static int __init
3043jme_init_module(void) 3042jme_init_module(void)
3044{ 3043{
3045 printk(KERN_INFO PFX "JMicron JMC2XX ethernet " 3044 pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
3046 "driver version %s\n", DRV_VERSION);
3047 return pci_register_driver(&jme_driver); 3045 return pci_register_driver(&jme_driver);
3048} 3046}
3049 3047
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 07ad3a457185..1360f68861b8 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -41,9 +41,6 @@
41 NETIF_MSG_TX_ERR | \ 41 NETIF_MSG_TX_ERR | \
42 NETIF_MSG_HW) 42 NETIF_MSG_HW)
43 43
44#define jeprintk(pdev, fmt, args...) \
45 printk(KERN_ERR PFX fmt, ## args)
46
47#ifdef TX_DEBUG 44#ifdef TX_DEBUG
48#define tx_dbg(priv, fmt, args...) \ 45#define tx_dbg(priv, fmt, args...) \
49 printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args) 46 printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
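Editor's note: the jme hunks above replace the driver-private jeprintk() macro with the generic pr_err()/netdev_err() helpers, which is also why the one-line messages lose their trailing periods. A minimal sketch of the convention, assuming jme.h gains a pr_fmt define before pulling in the printk header (the exact arrangement is not shown in this excerpt):

    /* Must appear before any include that brings in <linux/printk.h>. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/printk.h>

    pr_err("Cannot enable PCI device\n");
    /* expands to printk(KERN_ERR "jme: Cannot enable PCI device\n") */

netdev_err(netdev, ...) goes one step further and prefixes the message with the interface name, so per-device errors stay attributable without a hand-rolled wrapper.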
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index bdf2149e5296..874ee01e8d9d 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -760,7 +760,7 @@ static void ll_temac_recv(struct net_device *ndev)
760 skb_put(skb, length); 760 skb_put(skb, length);
761 skb->dev = ndev; 761 skb->dev = ndev;
762 skb->protocol = eth_type_trans(skb, ndev); 762 skb->protocol = eth_type_trans(skb, ndev);
763 skb->ip_summed = CHECKSUM_NONE; 763 skb_checksum_none_assert(skb);
764 764
765 /* if we're doing rx csum offload, set it up */ 765 /* if we're doing rx csum offload, set it up */
766 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && 766 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
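Editor's note: here (and in the macb hunk below) the plain assignment skb->ip_summed = CHECKSUM_NONE becomes skb_checksum_none_assert(). Freshly allocated skbs already start out as CHECKSUM_NONE, so the store was redundant; the helper merely asserts the invariant and compiles away in normal builds. A sketch of its assumed shape:

    static inline void skb_checksum_none_assert(struct sk_buff *skb)
    {
    #ifdef DEBUG
        BUG_ON(skb->ip_summed != CHECKSUM_NONE);
    #endif
    }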
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 3832fa4961dd..f84f5e6ededb 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -562,19 +562,19 @@ static int __init mac8390_initdev(struct net_device *dev,
562 562
563 case ACCESS_16: 563 case ACCESS_16:
564 /* 16 bit card, register map is reversed */ 564 /* 16 bit card, register map is reversed */
565 ei_status.reset_8390 = &mac8390_no_reset; 565 ei_status.reset_8390 = mac8390_no_reset;
566 ei_status.block_input = &slow_sane_block_input; 566 ei_status.block_input = slow_sane_block_input;
567 ei_status.block_output = &slow_sane_block_output; 567 ei_status.block_output = slow_sane_block_output;
568 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 568 ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
569 ei_status.reg_offset = back4_offsets; 569 ei_status.reg_offset = back4_offsets;
570 break; 570 break;
571 571
572 case ACCESS_32: 572 case ACCESS_32:
573 /* 32 bit card, register map is reversed */ 573 /* 32 bit card, register map is reversed */
574 ei_status.reset_8390 = &mac8390_no_reset; 574 ei_status.reset_8390 = mac8390_no_reset;
575 ei_status.block_input = &sane_block_input; 575 ei_status.block_input = sane_block_input;
576 ei_status.block_output = &sane_block_output; 576 ei_status.block_output = sane_block_output;
577 ei_status.get_8390_hdr = &sane_get_8390_hdr; 577 ei_status.get_8390_hdr = sane_get_8390_hdr;
578 ei_status.reg_offset = back4_offsets; 578 ei_status.reg_offset = back4_offsets;
579 access_bitmode = 1; 579 access_bitmode = 1;
580 break; 580 break;
@@ -586,19 +586,19 @@ static int __init mac8390_initdev(struct net_device *dev,
586 * but overwrite system memory when run at 32 bit. 586 * but overwrite system memory when run at 32 bit.
587 * so we run them all at 16 bit. 587 * so we run them all at 16 bit.
588 */ 588 */
589 ei_status.reset_8390 = &mac8390_no_reset; 589 ei_status.reset_8390 = mac8390_no_reset;
590 ei_status.block_input = &slow_sane_block_input; 590 ei_status.block_input = slow_sane_block_input;
591 ei_status.block_output = &slow_sane_block_output; 591 ei_status.block_output = slow_sane_block_output;
592 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 592 ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
593 ei_status.reg_offset = back4_offsets; 593 ei_status.reg_offset = back4_offsets;
594 break; 594 break;
595 595
596 case MAC8390_CABLETRON: 596 case MAC8390_CABLETRON:
597 /* 16 bit card, register map is short forward */ 597 /* 16 bit card, register map is short forward */
598 ei_status.reset_8390 = &mac8390_no_reset; 598 ei_status.reset_8390 = mac8390_no_reset;
599 ei_status.block_input = &slow_sane_block_input; 599 ei_status.block_input = slow_sane_block_input;
600 ei_status.block_output = &slow_sane_block_output; 600 ei_status.block_output = slow_sane_block_output;
601 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 601 ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
602 ei_status.reg_offset = fwrd2_offsets; 602 ei_status.reg_offset = fwrd2_offsets;
603 break; 603 break;
604 604
@@ -606,19 +606,19 @@ static int __init mac8390_initdev(struct net_device *dev,
606 case MAC8390_KINETICS: 606 case MAC8390_KINETICS:
607 /* 16 bit memory, register map is forward */ 607 /* 16 bit memory, register map is forward */
608 /* dayna and similar */ 608 /* dayna and similar */
609 ei_status.reset_8390 = &mac8390_no_reset; 609 ei_status.reset_8390 = mac8390_no_reset;
610 ei_status.block_input = &dayna_block_input; 610 ei_status.block_input = dayna_block_input;
611 ei_status.block_output = &dayna_block_output; 611 ei_status.block_output = dayna_block_output;
612 ei_status.get_8390_hdr = &dayna_get_8390_hdr; 612 ei_status.get_8390_hdr = dayna_get_8390_hdr;
613 ei_status.reg_offset = fwrd4_offsets; 613 ei_status.reg_offset = fwrd4_offsets;
614 break; 614 break;
615 615
616 case MAC8390_INTERLAN: 616 case MAC8390_INTERLAN:
617 /* 16 bit memory, register map is forward */ 617 /* 16 bit memory, register map is forward */
618 ei_status.reset_8390 = &interlan_reset; 618 ei_status.reset_8390 = interlan_reset;
619 ei_status.block_input = &slow_sane_block_input; 619 ei_status.block_input = slow_sane_block_input;
620 ei_status.block_output = &slow_sane_block_output; 620 ei_status.block_output = slow_sane_block_output;
621 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 621 ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
622 ei_status.reg_offset = fwrd4_offsets; 622 ei_status.reg_offset = fwrd4_offsets;
623 break; 623 break;
624 624
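Editor's note: the mac8390 changes are purely cosmetic. In C, a function designator used in an expression already decays to a pointer to the function, so the explicit '&' is redundant. A self-contained illustration:

    static int demo(void) { return 0; }

    int (*fp1)(void) = demo;    /* function name decays to a pointer */
    int (*fp2)(void) = &demo;   /* identical value; the '&' adds nothing */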
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index ff2f158ab0b9..4297f6e8c4bc 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -407,7 +407,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
407 } 407 }
408 408
409 skb_reserve(skb, RX_OFFSET); 409 skb_reserve(skb, RX_OFFSET);
410 skb->ip_summed = CHECKSUM_NONE; 410 skb_checksum_none_assert(skb);
411 skb_put(skb, len); 411 skb_put(skb, len);
412 412
413 for (frag = first_frag; ; frag = NEXT_RX(frag)) { 413 for (frag = first_frag; ; frag = NEXT_RX(frag)) {
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3b1c54a9c6ef..42567279843e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -84,26 +84,45 @@ static const struct proto_ops macvtap_socket_ops;
84static DEFINE_SPINLOCK(macvtap_lock); 84static DEFINE_SPINLOCK(macvtap_lock);
85 85
86/* 86/*
87 * Choose the next free queue, for now there is only one 87 * get_slot: return a [unused/occupied] slot in vlan->taps[]:
88 * - if 'q' is NULL, return the first empty slot;
89 * - otherwise, return the slot this pointer occupies.
88 */ 90 */
91static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
92{
93 int i;
94
95 for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
96 if (rcu_dereference(vlan->taps[i]) == q)
97 return i;
98 }
99
100 /* Should never happen */
101 BUG_ON(1);
102}
103
89static int macvtap_set_queue(struct net_device *dev, struct file *file, 104static int macvtap_set_queue(struct net_device *dev, struct file *file,
90 struct macvtap_queue *q) 105 struct macvtap_queue *q)
91{ 106{
92 struct macvlan_dev *vlan = netdev_priv(dev); 107 struct macvlan_dev *vlan = netdev_priv(dev);
108 int index;
93 int err = -EBUSY; 109 int err = -EBUSY;
94 110
95 spin_lock(&macvtap_lock); 111 spin_lock(&macvtap_lock);
96 if (rcu_dereference(vlan->tap)) 112 if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
97 goto out; 113 goto out;
98 114
99 err = 0; 115 err = 0;
116 index = get_slot(vlan, NULL);
100 rcu_assign_pointer(q->vlan, vlan); 117 rcu_assign_pointer(q->vlan, vlan);
101 rcu_assign_pointer(vlan->tap, q); 118 rcu_assign_pointer(vlan->taps[index], q);
102 sock_hold(&q->sk); 119 sock_hold(&q->sk);
103 120
104 q->file = file; 121 q->file = file;
105 file->private_data = q; 122 file->private_data = q;
106 123
124 vlan->numvtaps++;
125
107out: 126out:
108 spin_unlock(&macvtap_lock); 127 spin_unlock(&macvtap_lock);
109 return err; 128 return err;
@@ -124,9 +143,12 @@ static void macvtap_put_queue(struct macvtap_queue *q)
124 spin_lock(&macvtap_lock); 143 spin_lock(&macvtap_lock);
125 vlan = rcu_dereference(q->vlan); 144 vlan = rcu_dereference(q->vlan);
126 if (vlan) { 145 if (vlan) {
127 rcu_assign_pointer(vlan->tap, NULL); 146 int index = get_slot(vlan, q);
147
148 rcu_assign_pointer(vlan->taps[index], NULL);
128 rcu_assign_pointer(q->vlan, NULL); 149 rcu_assign_pointer(q->vlan, NULL);
129 sock_put(&q->sk); 150 sock_put(&q->sk);
151 --vlan->numvtaps;
130 } 152 }
131 153
132 spin_unlock(&macvtap_lock); 154 spin_unlock(&macvtap_lock);
@@ -136,39 +158,82 @@ static void macvtap_put_queue(struct macvtap_queue *q)
136} 158}
137 159
138/* 160/*
139 * Since we only support one queue, just dereference the pointer. 161 * Select a queue based on the rxq of the device on which this packet
162 * arrived. If the incoming device is not mq, calculate a flow hash
163 * to select a queue. If all fails, find the first available queue.
164 * Cache vlan->numvtaps since it can become zero during the execution
165 * of this function.
140 */ 166 */
141static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, 167static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
142 struct sk_buff *skb) 168 struct sk_buff *skb)
143{ 169{
144 struct macvlan_dev *vlan = netdev_priv(dev); 170 struct macvlan_dev *vlan = netdev_priv(dev);
171 struct macvtap_queue *tap = NULL;
172 int numvtaps = vlan->numvtaps;
173 __u32 rxq;
174
175 if (!numvtaps)
176 goto out;
177
178 if (likely(skb_rx_queue_recorded(skb))) {
179 rxq = skb_get_rx_queue(skb);
180
181 while (unlikely(rxq >= numvtaps))
182 rxq -= numvtaps;
183
184 tap = rcu_dereference(vlan->taps[rxq]);
185 if (tap)
186 goto out;
187 }
188
189 /* Check if we can use flow to select a queue */
190 rxq = skb_get_rxhash(skb);
191 if (rxq) {
192 tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
193 if (tap)
194 goto out;
195 }
145 196
146 return rcu_dereference(vlan->tap); 197 /* Everything failed - find first available queue */
198 for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
199 tap = rcu_dereference(vlan->taps[rxq]);
200 if (tap)
201 break;
202 }
203
204out:
205 return tap;
147} 206}
148 207
149/* 208/*
150 * The net_device is going away, give up the reference 209 * The net_device is going away, give up the reference
151 * that it holds on the queue (all the queues one day) 210 * that it holds on all queues and safely set the pointer
152 * and safely set the pointer from the queues to NULL. 211 * from the queues to NULL.
153 */ 212 */
154static void macvtap_del_queues(struct net_device *dev) 213static void macvtap_del_queues(struct net_device *dev)
155{ 214{
156 struct macvlan_dev *vlan = netdev_priv(dev); 215 struct macvlan_dev *vlan = netdev_priv(dev);
157 struct macvtap_queue *q; 216 struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
217 int i, j = 0;
158 218
219 /* macvtap_put_queue can free some slots, so go through all slots */
159 spin_lock(&macvtap_lock); 220 spin_lock(&macvtap_lock);
160 q = rcu_dereference(vlan->tap); 221 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
161 if (!q) { 222 q = rcu_dereference(vlan->taps[i]);
162 spin_unlock(&macvtap_lock); 223 if (q) {
163 return; 224 qlist[j++] = q;
225 rcu_assign_pointer(vlan->taps[i], NULL);
226 rcu_assign_pointer(q->vlan, NULL);
227 vlan->numvtaps--;
228 }
164 } 229 }
165 230 BUG_ON(vlan->numvtaps != 0);
166 rcu_assign_pointer(vlan->tap, NULL);
167 rcu_assign_pointer(q->vlan, NULL);
168 spin_unlock(&macvtap_lock); 231 spin_unlock(&macvtap_lock);
169 232
170 synchronize_rcu(); 233 synchronize_rcu();
171 sock_put(&q->sk); 234
235 for (--j; j >= 0; j--)
236 sock_put(&qlist[j]->sk);
172} 237}
173 238
174/* 239/*
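Editor's note: the macvtap conversion from a single vlan->tap pointer to a taps[] array selects a queue in three steps: the device-recorded rx queue, then a flow hash, then the first occupied slot. A userspace model of that fallback chain (struct q and the fixed-size array stand in for the kernel types; illustration only):

    #define MAX_Q 16

    struct q;                       /* stand-in for struct macvtap_queue */

    static struct q *select_queue(struct q *taps[MAX_Q], unsigned numq,
                                  int rxq_valid, unsigned rxq, unsigned hash)
    {
        struct q *tap;
        unsigned i;

        if (!numq)
            return NULL;                    /* no taps attached */

        if (rxq_valid) {                    /* 1. device-recorded rx queue */
            while (rxq >= numq)
                rxq -= numq;
            if ((tap = taps[rxq]))
                return tap;
        }

        if (hash && (tap = taps[hash % numq]))  /* 2. flow hash */
            return tap;

        for (i = 0; i < MAX_Q; i++)         /* 3. first occupied slot */
            if ((tap = taps[i]))
                return tap;
        return NULL;
    }

Note that numvtaps is read once at entry, matching the hunk's comment that it can drop to zero while the function runs.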
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 1fd068e1d930..d1aa45a15854 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -6,4 +6,4 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o 6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ 8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o 9 en_resources.o en_netdev.o en_selftest.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8c8515619b8e..8f4bf1f07c11 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -74,7 +74,7 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
74 74
75u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) 75u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
76{ 76{
77 u32 obj, i; 77 u32 obj;
78 78
79 if (likely(cnt == 1 && align == 1)) 79 if (likely(cnt == 1 && align == 1))
80 return mlx4_bitmap_alloc(bitmap); 80 return mlx4_bitmap_alloc(bitmap);
@@ -91,8 +91,7 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
91 } 91 }
92 92
93 if (obj < bitmap->max) { 93 if (obj < bitmap->max) {
94 for (i = 0; i < cnt; i++) 94 bitmap_set(bitmap->table, obj, cnt);
95 set_bit(obj + i, bitmap->table);
96 if (obj == bitmap->last) { 95 if (obj == bitmap->last) {
97 bitmap->last = (obj + cnt); 96 bitmap->last = (obj + cnt);
98 if (bitmap->last >= bitmap->max) 97 if (bitmap->last >= bitmap->max)
@@ -109,13 +108,10 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
109 108
110void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) 109void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
111{ 110{
112 u32 i;
113
114 obj &= bitmap->max + bitmap->reserved_top - 1; 111 obj &= bitmap->max + bitmap->reserved_top - 1;
115 112
116 spin_lock(&bitmap->lock); 113 spin_lock(&bitmap->lock);
117 for (i = 0; i < cnt; i++) 114 bitmap_clear(bitmap->table, obj, cnt);
118 clear_bit(obj + i, bitmap->table);
119 bitmap->last = min(bitmap->last, obj); 115 bitmap->last = min(bitmap->last, obj);
120 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
121 & bitmap->mask; 117 & bitmap->mask;
@@ -125,8 +121,6 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
125int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 121int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
126 u32 reserved_bot, u32 reserved_top) 122 u32 reserved_bot, u32 reserved_top)
127{ 123{
128 int i;
129
130 /* num must be a power of 2 */ 124 /* num must be a power of 2 */
131 if (num != roundup_pow_of_two(num)) 125 if (num != roundup_pow_of_two(num))
132 return -EINVAL; 126 return -EINVAL;
@@ -142,8 +136,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
142 if (!bitmap->table) 136 if (!bitmap->table)
143 return -ENOMEM; 137 return -ENOMEM;
144 138
145 for (i = 0; i < reserved_bot; ++i) 139 bitmap_set(bitmap->table, 0, reserved_bot);
146 set_bit(i, bitmap->table);
147 140
148 return 0; 141 return 0;
149} 142}
@@ -188,7 +181,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
188 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; 181 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
189 buf->npages = buf->nbufs; 182 buf->npages = buf->nbufs;
190 buf->page_shift = PAGE_SHIFT; 183 buf->page_shift = PAGE_SHIFT;
191 buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list, 184 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
192 GFP_KERNEL); 185 GFP_KERNEL);
193 if (!buf->page_list) 186 if (!buf->page_list)
194 return -ENOMEM; 187 return -ENOMEM;
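Editor's note: two idiom upgrades in mlx4/alloc.c. Bit-at-a-time loops become single bitmap_set()/bitmap_clear() calls, and kzalloc(n * size, ...) becomes kcalloc(n, size, ...), which additionally checks the multiplication for overflow. A condensed before/after fragment:

    /* before: one (atomic) op per bit */
    for (i = 0; i < cnt; i++)
        set_bit(obj + i, bitmap->table);

    /* after: one call, free to use word-wide stores internally */
    bitmap_set(bitmap->table, obj, cnt);    /* set cnt bits starting at obj */
    bitmap_clear(bitmap->table, obj, cnt);  /* clear the same range again */

One behavioral nuance: set_bit()/clear_bit() are atomic, bitmap_set()/bitmap_clear() are not; the callers here are safe because these sites run under bitmap->lock, as visible in the free_range hunk.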
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index b275238fe70d..056152b3ff58 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -39,21 +39,6 @@
39#include "en_port.h" 39#include "en_port.h"
40 40
41 41
42static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
43{
44 int i;
45
46 priv->port_stats.lro_aggregated = 0;
47 priv->port_stats.lro_flushed = 0;
48 priv->port_stats.lro_no_desc = 0;
49
50 for (i = 0; i < priv->rx_ring_num; i++) {
51 priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
52 priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
53 priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
54 }
55}
56
57static void 42static void
58mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) 43mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
59{ 44{
@@ -112,7 +97,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
112 "tx_heartbeat_errors", "tx_window_errors", 97 "tx_heartbeat_errors", "tx_window_errors",
113 98
114 /* port statistics */ 99 /* port statistics */
115 "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets", 100 "tso_packets",
116 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", 101 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
117 "rx_csum_good", "rx_csum_none", "tx_chksum_offload", 102 "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
118 103
@@ -125,6 +110,14 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
125#define NUM_MAIN_STATS 21 110#define NUM_MAIN_STATS 21
126#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) 111#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
127 112
113static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
114 "Interrupt Test",
115 "Link Test",
116 "Speed Test",
117 "Register Test",
118 "Loopback Test",
119};
120
128static u32 mlx4_en_get_msglevel(struct net_device *dev) 121static u32 mlx4_en_get_msglevel(struct net_device *dev)
129{ 122{
130 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable; 123 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
@@ -146,10 +139,15 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
146{ 139{
147 struct mlx4_en_priv *priv = netdev_priv(dev); 140 struct mlx4_en_priv *priv = netdev_priv(dev);
148 141
149 if (sset != ETH_SS_STATS) 142 switch (sset) {
143 case ETH_SS_STATS:
144 return NUM_ALL_STATS +
145 (priv->tx_ring_num + priv->rx_ring_num) * 2;
146 case ETH_SS_TEST:
147 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
148 default:
150 return -EOPNOTSUPP; 149 return -EOPNOTSUPP;
151 150 }
152 return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
153} 151}
154 152
155static void mlx4_en_get_ethtool_stats(struct net_device *dev, 153static void mlx4_en_get_ethtool_stats(struct net_device *dev,
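Editor's note: get_sset_count() now answers for two string sets. ETH_SS_STATS sizes the statistics arrays, and ETH_SS_TEST tells the ethtool core how many self-test results (and names) to expect; the ETH_SS_TEST expression quietly drops the last two entries when the device cannot loop back. A worked illustration, assuming MLX4_EN_NUM_SELF_TEST is 5 to match the five names listed above:

    /* !x evaluates to 0 or 1, and '!' binds tighter than '*': */
    int num_tests(int loopback_support)
    {
        /* 5 tests normally; 3 when the register and loopback
         * tests (the last two entries) cannot run. */
        return 5 - !loopback_support * 2;
    }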
@@ -161,8 +159,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
161 159
162 spin_lock_bh(&priv->stats_lock); 160 spin_lock_bh(&priv->stats_lock);
163 161
164 mlx4_en_update_lro_stats(priv);
165
166 for (i = 0; i < NUM_MAIN_STATS; i++) 162 for (i = 0; i < NUM_MAIN_STATS; i++)
167 data[index++] = ((unsigned long *) &priv->stats)[i]; 163 data[index++] = ((unsigned long *) &priv->stats)[i];
168 for (i = 0; i < NUM_PORT_STATS; i++) 164 for (i = 0; i < NUM_PORT_STATS; i++)
@@ -181,6 +177,12 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
181 177
182} 178}
183 179
180static void mlx4_en_self_test(struct net_device *dev,
181 struct ethtool_test *etest, u64 *buf)
182{
183 mlx4_en_ex_selftest(dev, &etest->flags, buf);
184}
185
184static void mlx4_en_get_strings(struct net_device *dev, 186static void mlx4_en_get_strings(struct net_device *dev,
185 uint32_t stringset, uint8_t *data) 187 uint32_t stringset, uint8_t *data)
186{ 188{
@@ -188,44 +190,76 @@ static void mlx4_en_get_strings(struct net_device *dev,
188 int index = 0; 190 int index = 0;
189 int i; 191 int i;
190 192
191 if (stringset != ETH_SS_STATS) 193 switch (stringset) {
192 return; 194 case ETH_SS_TEST:
193 195 for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
194 /* Add main counters */ 196 strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
195 for (i = 0; i < NUM_MAIN_STATS; i++) 197 if (priv->mdev->dev->caps.loopback_support)
196 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]); 198 for (; i < MLX4_EN_NUM_SELF_TEST; i++)
197 for (i = 0; i < NUM_PORT_STATS; i++) 199 strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
198 strcpy(data + (index++) * ETH_GSTRING_LEN, 200 break;
201
202 case ETH_SS_STATS:
203 /* Add main counters */
204 for (i = 0; i < NUM_MAIN_STATS; i++)
205 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
 206 for (i = 0; i < NUM_PORT_STATS; i++)
207 strcpy(data + (index++) * ETH_GSTRING_LEN,
199 main_strings[i + NUM_MAIN_STATS]); 208 main_strings[i + NUM_MAIN_STATS]);
200 for (i = 0; i < priv->tx_ring_num; i++) { 209 for (i = 0; i < priv->tx_ring_num; i++) {
201 sprintf(data + (index++) * ETH_GSTRING_LEN, 210 sprintf(data + (index++) * ETH_GSTRING_LEN,
202 "tx%d_packets", i); 211 "tx%d_packets", i);
203 sprintf(data + (index++) * ETH_GSTRING_LEN, 212 sprintf(data + (index++) * ETH_GSTRING_LEN,
204 "tx%d_bytes", i); 213 "tx%d_bytes", i);
205 } 214 }
206 for (i = 0; i < priv->rx_ring_num; i++) { 215 for (i = 0; i < priv->rx_ring_num; i++) {
207 sprintf(data + (index++) * ETH_GSTRING_LEN, 216 sprintf(data + (index++) * ETH_GSTRING_LEN,
208 "rx%d_packets", i); 217 "rx%d_packets", i);
209 sprintf(data + (index++) * ETH_GSTRING_LEN, 218 sprintf(data + (index++) * ETH_GSTRING_LEN,
210 "rx%d_bytes", i); 219 "rx%d_bytes", i);
211 } 220 }
 212 for (i = 0; i < NUM_PKT_STATS; i++) 221 for (i = 0; i < NUM_PKT_STATS; i++)
213 strcpy(data + (index++) * ETH_GSTRING_LEN, 222 strcpy(data + (index++) * ETH_GSTRING_LEN,
214 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]); 223 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
224 break;
225 }
215} 226}
216 227
217static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 228static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
218{ 229{
230 struct mlx4_en_priv *priv = netdev_priv(dev);
231 int trans_type;
232
219 cmd->autoneg = AUTONEG_DISABLE; 233 cmd->autoneg = AUTONEG_DISABLE;
220 cmd->supported = SUPPORTED_10000baseT_Full; 234 cmd->supported = SUPPORTED_10000baseT_Full;
221 cmd->advertising = ADVERTISED_1000baseT_Full; 235 cmd->advertising = ADVERTISED_10000baseT_Full;
236
237 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
238 return -ENOMEM;
239
240 trans_type = priv->port_state.transciver;
222 if (netif_carrier_ok(dev)) { 241 if (netif_carrier_ok(dev)) {
223 cmd->speed = SPEED_10000; 242 cmd->speed = priv->port_state.link_speed;
224 cmd->duplex = DUPLEX_FULL; 243 cmd->duplex = DUPLEX_FULL;
225 } else { 244 } else {
226 cmd->speed = -1; 245 cmd->speed = -1;
227 cmd->duplex = -1; 246 cmd->duplex = -1;
228 } 247 }
248
249 if (trans_type > 0 && trans_type <= 0xC) {
250 cmd->port = PORT_FIBRE;
251 cmd->transceiver = XCVR_EXTERNAL;
252 cmd->supported |= SUPPORTED_FIBRE;
253 cmd->advertising |= ADVERTISED_FIBRE;
254 } else if (trans_type == 0x80 || trans_type == 0) {
255 cmd->port = PORT_TP;
256 cmd->transceiver = XCVR_INTERNAL;
257 cmd->supported |= SUPPORTED_TP;
258 cmd->advertising |= ADVERTISED_TP;
259 } else {
260 cmd->port = -1;
261 cmd->transceiver = -1;
262 }
229 return 0; 263 return 0;
230} 264}
231 265
@@ -343,8 +377,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
343 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); 377 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
344 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); 378 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
345 379
346 if (rx_size == priv->prof->rx_ring_size && 380 if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
347 tx_size == priv->prof->tx_ring_size) 381 priv->rx_ring[0].size) &&
382 tx_size == priv->tx_ring[0].size)
348 return 0; 383 return 0;
349 384
350 mutex_lock(&mdev->state_lock); 385 mutex_lock(&mdev->state_lock);
@@ -378,49 +413,13 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
378 struct ethtool_ringparam *param) 413 struct ethtool_ringparam *param)
379{ 414{
380 struct mlx4_en_priv *priv = netdev_priv(dev); 415 struct mlx4_en_priv *priv = netdev_priv(dev);
381 struct mlx4_en_dev *mdev = priv->mdev;
382 416
383 memset(param, 0, sizeof(*param)); 417 memset(param, 0, sizeof(*param));
384 param->rx_max_pending = MLX4_EN_MAX_RX_SIZE; 418 param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
385 param->tx_max_pending = MLX4_EN_MAX_TX_SIZE; 419 param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
386 param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size; 420 param->rx_pending = priv->port_up ?
387 param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size; 421 priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
388} 422 param->tx_pending = priv->tx_ring[0].size;
389
390static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data)
391{
392 struct mlx4_en_priv *priv = netdev_priv(dev);
393 struct mlx4_en_dev *mdev = priv->mdev;
394 int rc = 0;
395 int changed = 0;
396
397 if (data & ~ETH_FLAG_LRO)
398 return -EOPNOTSUPP;
399
400 if (data & ETH_FLAG_LRO) {
401 if (mdev->profile.num_lro == 0)
402 return -EOPNOTSUPP;
403 if (!(dev->features & NETIF_F_LRO))
404 changed = 1;
405 } else if (dev->features & NETIF_F_LRO) {
406 changed = 1;
407 }
408
409 if (changed) {
410 if (netif_running(dev)) {
411 mutex_lock(&mdev->state_lock);
412 mlx4_en_stop_port(dev);
413 }
414 dev->features ^= NETIF_F_LRO;
415 if (netif_running(dev)) {
416 rc = mlx4_en_start_port(dev);
417 if (rc)
418 en_err(priv, "Failed to restart port\n");
419 mutex_unlock(&mdev->state_lock);
420 }
421 }
422
423 return rc;
424} 423}
425 424
426const struct ethtool_ops mlx4_en_ethtool_ops = { 425const struct ethtool_ops mlx4_en_ethtool_ops = {
@@ -441,6 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
441 .get_strings = mlx4_en_get_strings, 440 .get_strings = mlx4_en_get_strings,
442 .get_sset_count = mlx4_en_get_sset_count, 441 .get_sset_count = mlx4_en_get_sset_count,
443 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 442 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
443 .self_test = mlx4_en_self_test,
444 .get_wol = mlx4_en_get_wol, 444 .get_wol = mlx4_en_get_wol,
445 .get_msglevel = mlx4_en_get_msglevel, 445 .get_msglevel = mlx4_en_get_msglevel,
446 .set_msglevel = mlx4_en_set_msglevel, 446 .set_msglevel = mlx4_en_set_msglevel,
@@ -451,7 +451,6 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
451 .get_ringparam = mlx4_en_get_ringparam, 451 .get_ringparam = mlx4_en_get_ringparam,
452 .set_ringparam = mlx4_en_set_ringparam, 452 .set_ringparam = mlx4_en_set_ringparam,
453 .get_flags = ethtool_op_get_flags, 453 .get_flags = ethtool_op_get_flags,
454 .set_flags = mlx4_ethtool_op_set_flags,
455}; 454};
456 455
457 456
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 97934f1ec53a..143906417048 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -63,15 +63,12 @@ static const char mlx4_en_version[] =
63 */ 63 */
64 64
65 65
 66/* Use an XOR rather than Toeplitz hash function for RSS */ 66/* Enable RSS TCP traffic */
67MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS"); 67MLX4_EN_PARM_INT(tcp_rss, 1,
68 68 "Enable RSS for incomming TCP traffic or disabled (0)");
69/* RSS hash type mask - default to <saddr, daddr, sport, dport> */ 69/* Enable RSS UDP traffic */
70MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask"); 70MLX4_EN_PARM_INT(udp_rss, 1,
71 71 "Enable RSS for incomming UDP traffic or disabled (0)");
72/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
73MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
74 "Number of LRO sessions per ring or disabled (0)");
75 72
76/* Priority pausing */ 73/* Priority pausing */
77MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." 74MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@ -107,9 +104,12 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
107 struct mlx4_en_profile *params = &mdev->profile; 104 struct mlx4_en_profile *params = &mdev->profile;
108 int i; 105 int i;
109 106
110 params->rss_xor = (rss_xor != 0); 107 params->tcp_rss = tcp_rss;
111 params->rss_mask = rss_mask & 0x1f; 108 params->udp_rss = udp_rss;
112 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS); 109 if (params->udp_rss && !mdev->dev->caps.udp_rss) {
110 mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
111 params->udp_rss = 0;
112 }
113 for (i = 1; i <= MLX4_MAX_PORTS; i++) { 113 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
114 params->prof[i].rx_pause = 1; 114 params->prof[i].rx_pause = 1;
115 params->prof[i].rx_ppp = pfcrx; 115 params->prof[i].rx_ppp = pfcrx;
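Editor's note: tcp_rss/udp_rss replace the old rss_xor/rss_mask/num_lro knobs. MLX4_EN_PARM_INT is the driver's shorthand for declaring an integer module parameter; its assumed shape (not shown in this diff) is roughly:

    #define MLX4_EN_PARM_INT(X, def_val, desc)      \
        static unsigned int X = def_val;            \
        module_param(X, uint, 0444);                \
        MODULE_PARM_DESC(X, desc)

so both options appear read-only under /sys/module/mlx4_en/parameters/, and get_profile() copies them into the per-device profile, downgrading udp_rss to 0 when the HCA lacks the capability.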
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index a0d8a26f5a02..411bda581c04 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -109,7 +109,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
109 mutex_unlock(&mdev->state_lock); 109 mutex_unlock(&mdev->state_lock);
110} 110}
111 111
112static u64 mlx4_en_mac_to_u64(u8 *addr) 112u64 mlx4_en_mac_to_u64(u8 *addr)
113{ 113{
114 u64 mac = 0; 114 u64 mac = 0;
115 int i; 115 int i;
@@ -513,6 +513,10 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
513 513
514 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 514 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
515 } 515 }
516 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
517 queue_work(mdev->workqueue, &priv->mac_task);
518 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
519 }
516 mutex_unlock(&mdev->state_lock); 520 mutex_unlock(&mdev->state_lock);
517} 521}
518 522
@@ -528,10 +532,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
528 * report to system log */ 532 * report to system log */
529 if (priv->last_link_state != linkstate) { 533 if (priv->last_link_state != linkstate) {
530 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 534 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
531 en_dbg(LINK, priv, "Link Down\n"); 535 en_info(priv, "Link Down\n");
532 netif_carrier_off(priv->dev); 536 netif_carrier_off(priv->dev);
533 } else { 537 } else {
534 en_dbg(LINK, priv, "Link Up\n"); 538 en_info(priv, "Link Up\n");
535 netif_carrier_on(priv->dev); 539 netif_carrier_on(priv->dev);
536 } 540 }
537 } 541 }
@@ -653,6 +657,7 @@ int mlx4_en_start_port(struct net_device *dev)
653 en_err(priv, "Failed setting port mac\n"); 657 en_err(priv, "Failed setting port mac\n");
654 goto tx_err; 658 goto tx_err;
655 } 659 }
660 mdev->mac_removed[priv->port] = 0;
656 661
657 /* Init port */ 662 /* Init port */
658 en_dbg(HW, priv, "Initializing port\n"); 663 en_dbg(HW, priv, "Initializing port\n");
@@ -704,12 +709,12 @@ void mlx4_en_stop_port(struct net_device *dev)
704 netif_tx_stop_all_queues(dev); 709 netif_tx_stop_all_queues(dev);
705 netif_tx_unlock_bh(dev); 710 netif_tx_unlock_bh(dev);
706 711
707 /* close port*/ 712 /* Set port as not active */
708 priv->port_up = false; 713 priv->port_up = false;
709 mlx4_CLOSE_PORT(mdev->dev, priv->port);
710 714
711 /* Unregister Mac address for the port */ 715 /* Unregister Mac address for the port */
712 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 716 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
717 mdev->mac_removed[priv->port] = 1;
713 718
714 /* Free TX Rings */ 719 /* Free TX Rings */
715 for (i = 0; i < priv->tx_ring_num; i++) { 720 for (i = 0; i < priv->tx_ring_num; i++) {
@@ -731,6 +736,9 @@ void mlx4_en_stop_port(struct net_device *dev)
731 msleep(1); 736 msleep(1);
732 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); 737 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
733 } 738 }
739
 740 /* close port */
741 mlx4_CLOSE_PORT(mdev->dev, priv->port);
734} 742}
735 743
736static void mlx4_en_restart(struct work_struct *work) 744static void mlx4_en_restart(struct work_struct *work)
@@ -1023,9 +1031,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1023 1031
 1024 /* Set default MAC */ 1032 /* Set default MAC */
1025 dev->addr_len = ETH_ALEN; 1033 dev->addr_len = ETH_ALEN;
1026 for (i = 0; i < ETH_ALEN; i++) 1034 for (i = 0; i < ETH_ALEN; i++) {
1027 dev->dev_addr[ETH_ALEN - 1 - i] = 1035 dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
1028 (u8) (priv->mac >> (8 * i)); 1036 dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
1037 }
1029 1038
1030 /* 1039 /*
1031 * Set driver features 1040 * Set driver features
@@ -1038,8 +1047,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1038 dev->features |= NETIF_F_HW_VLAN_TX | 1047 dev->features |= NETIF_F_HW_VLAN_TX |
1039 NETIF_F_HW_VLAN_RX | 1048 NETIF_F_HW_VLAN_RX |
1040 NETIF_F_HW_VLAN_FILTER; 1049 NETIF_F_HW_VLAN_FILTER;
1041 if (mdev->profile.num_lro) 1050 dev->features |= NETIF_F_GRO;
1042 dev->features |= NETIF_F_LRO;
1043 if (mdev->LSO_support) { 1051 if (mdev->LSO_support) {
1044 dev->features |= NETIF_F_TSO; 1052 dev->features |= NETIF_F_TSO;
1045 dev->features |= NETIF_F_TSO6; 1053 dev->features |= NETIF_F_TSO6;
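Editor's note: the netdev init now fills dev->perm_addr alongside dev->dev_addr, so the factory MAC stays reportable even after the address is changed at runtime. The byte-reversal in the loop unpacks the 48-bit address held in the low bytes of a host u64 into wire order; a standalone rendering of the same loop:

    #include <stdint.h>

    static void mac_u64_to_bytes(uint64_t mac, uint8_t addr[6])
    {
        int i;

        /* lowest byte of 'mac' becomes the last octet on the wire */
        for (i = 0; i < 6; i++)
            addr[6 - 1 - i] = (uint8_t)(mac >> (8 * i));
    }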
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index a29abe845d2e..aa3ef2aee5bf 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -142,6 +142,38 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
142 return err; 142 return err;
143} 143}
144 144
145int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
146{
147 struct mlx4_en_query_port_context *qport_context;
148 struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
149 struct mlx4_en_port_state *state = &priv->port_state;
150 struct mlx4_cmd_mailbox *mailbox;
151 int err;
152
153 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
154 if (IS_ERR(mailbox))
155 return PTR_ERR(mailbox);
156 memset(mailbox->buf, 0, sizeof(*qport_context));
157 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
158 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
159 if (err)
160 goto out;
161 qport_context = mailbox->buf;
162
163 /* This command is always accessed from Ethtool context
 164 * already synchronized, no need for locking */
165 state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
166 if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
167 MLX4_EN_1G_SPEED)
168 state->link_speed = 1000;
169 else
170 state->link_speed = 10000;
171 state->transciver = qport_context->transceiver;
172
173out:
174 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
175 return err;
176}
145 177
146int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) 178int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
147{ 179{
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index e6477f12beb5..f6511aa2b7df 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -84,6 +84,20 @@ enum {
84 MLX4_MCAST_ENABLE = 2, 84 MLX4_MCAST_ENABLE = 2,
85}; 85};
86 86
87struct mlx4_en_query_port_context {
88 u8 link_up;
89#define MLX4_EN_LINK_UP_MASK 0x80
90 u8 reserved;
91 __be16 mtu;
92 u8 reserved2;
93 u8 link_speed;
94#define MLX4_EN_SPEED_MASK 0x3
95#define MLX4_EN_1G_SPEED 0x2
96 u16 reserved3[5];
97 __be64 mac;
98 u8 transceiver;
99};
100
87 101
88struct mlx4_en_stat_out_mbox { 102struct mlx4_en_stat_out_mbox {
89 /* Received frames with a length of 64 octets */ 103 /* Received frames with a length of 64 octets */
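Editor's note: mlx4_en_QUERY_PORT() decodes the mailbox reply using the masks defined just above. A userspace model of the decode, with the constants copied from the header (illustration only):

    #include <stdint.h>

    #define MLX4_EN_LINK_UP_MASK 0x80
    #define MLX4_EN_SPEED_MASK   0x3
    #define MLX4_EN_1G_SPEED     0x2

    struct port_state { int link_state; int link_speed; };

    static void decode_port(uint8_t link_up, uint8_t link_speed,
                            struct port_state *s)
    {
        s->link_state = !!(link_up & MLX4_EN_LINK_UP_MASK);
        s->link_speed = ((link_speed & MLX4_EN_SPEED_MASK) ==
                         MLX4_EN_1G_SPEED) ? 1000 : 10000;
    }

Anything not explicitly 1G is reported as 10000 Mb/s, which matches the "device currently only supports 10G" assumption in the speed self-test further down.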
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 8e2fcb7103c3..570f2508fb30 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -42,18 +42,6 @@
42#include "mlx4_en.h" 42#include "mlx4_en.h"
43 43
44 44
45static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
46 void **ip_hdr, void **tcpudp_hdr,
47 u64 *hdr_flags, void *priv)
48{
49 *mac_hdr = page_address(frags->page) + frags->page_offset;
50 *ip_hdr = *mac_hdr + ETH_HLEN;
51 *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
52 *hdr_flags = LRO_IPV4 | LRO_TCP;
53
54 return 0;
55}
56
57static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv, 45static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
58 struct mlx4_en_rx_desc *rx_desc, 46 struct mlx4_en_rx_desc *rx_desc,
59 struct skb_frag_struct *skb_frags, 47 struct skb_frag_struct *skb_frags,
@@ -251,7 +239,6 @@ reduce_rings:
251 ring->prod--; 239 ring->prod--;
252 mlx4_en_free_rx_desc(priv, ring, ring->actual_size); 240 mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
253 } 241 }
254 ring->size_mask = ring->actual_size - 1;
255 } 242 }
256 243
257 return 0; 244 return 0;
@@ -313,28 +300,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
313 } 300 }
314 ring->buf = ring->wqres.buf.direct.buf; 301 ring->buf = ring->wqres.buf.direct.buf;
315 302
316 /* Configure lro mngr */
317 memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
318 ring->lro.dev = priv->dev;
319 ring->lro.features = LRO_F_NAPI;
320 ring->lro.frag_align_pad = NET_IP_ALIGN;
321 ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
322 ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
323 ring->lro.max_desc = mdev->profile.num_lro;
324 ring->lro.max_aggr = MAX_SKB_FRAGS;
325 ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
326 sizeof(struct net_lro_desc),
327 GFP_KERNEL);
328 if (!ring->lro.lro_arr) {
329 en_err(priv, "Failed to allocate lro array\n");
330 goto err_map;
331 }
332 ring->lro.get_frag_header = mlx4_en_get_frag_header;
333
334 return 0; 303 return 0;
335 304
336err_map:
337 mlx4_en_unmap_buffer(&ring->wqres.buf);
338err_hwq: 305err_hwq:
339 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); 306 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
340err_ring: 307err_ring:
@@ -389,6 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
389 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { 356 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
390 ring = &priv->rx_ring[ring_ind]; 357 ring = &priv->rx_ring[ring_ind];
391 358
359 ring->size_mask = ring->actual_size - 1;
392 mlx4_en_update_rx_prod_db(ring); 360 mlx4_en_update_rx_prod_db(ring);
393 } 361 }
394 362
@@ -412,7 +380,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
412{ 380{
413 struct mlx4_en_dev *mdev = priv->mdev; 381 struct mlx4_en_dev *mdev = priv->mdev;
414 382
415 kfree(ring->lro.lro_arr);
416 mlx4_en_unmap_buffer(&ring->wqres.buf); 383 mlx4_en_unmap_buffer(&ring->wqres.buf);
417 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE); 384 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
418 vfree(ring->rx_info); 385 vfree(ring->rx_info);
@@ -459,7 +426,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
459 goto fail; 426 goto fail;
460 427
461 /* Unmap buffer */ 428 /* Unmap buffer */
462 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, 429 pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
463 PCI_DMA_FROMDEVICE); 430 PCI_DMA_FROMDEVICE);
464 } 431 }
465 /* Adjust size of last fragment to match actual length */ 432 /* Adjust size of last fragment to match actual length */
@@ -541,6 +508,21 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
541 return skb; 508 return skb;
542} 509}
543 510
511static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
512{
513 int i;
514 int offset = ETH_HLEN;
515
516 for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
517 if (*(skb->data + offset) != (unsigned char) (i & 0xff))
518 goto out_loopback;
519 }
520 /* Loopback found */
521 priv->loopback_ok = 1;
522
523out_loopback:
524 dev_kfree_skb_any(skb);
525}
544 526
545int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) 527int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
546{ 528{
@@ -548,7 +530,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
548 struct mlx4_cqe *cqe; 530 struct mlx4_cqe *cqe;
549 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; 531 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
550 struct skb_frag_struct *skb_frags; 532 struct skb_frag_struct *skb_frags;
551 struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
552 struct mlx4_en_rx_desc *rx_desc; 533 struct mlx4_en_rx_desc *rx_desc;
553 struct sk_buff *skb; 534 struct sk_buff *skb;
554 int index; 535 int index;
@@ -608,37 +589,35 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
608 * - TCP/IP (v4) 589 * - TCP/IP (v4)
609 * - without IP options 590 * - without IP options
610 * - not an IP fragment */ 591 * - not an IP fragment */
611 if (mlx4_en_can_lro(cqe->status) && 592 if (dev->features & NETIF_F_GRO) {
612 dev->features & NETIF_F_LRO) { 593 struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
594 if (!gro_skb)
595 goto next;
613 596
614 nr = mlx4_en_complete_rx_desc( 597 nr = mlx4_en_complete_rx_desc(
615 priv, rx_desc, 598 priv, rx_desc,
616 skb_frags, lro_frags, 599 skb_frags, skb_shinfo(gro_skb)->frags,
617 ring->page_alloc, length); 600 ring->page_alloc, length);
618 if (!nr) 601 if (!nr)
619 goto next; 602 goto next;
620 603
604 skb_shinfo(gro_skb)->nr_frags = nr;
605 gro_skb->len = length;
606 gro_skb->data_len = length;
607 gro_skb->truesize += length;
608 gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
609
621 if (priv->vlgrp && (cqe->vlan_my_qpn & 610 if (priv->vlgrp && (cqe->vlan_my_qpn &
622 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) { 611 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
623 lro_vlan_hwaccel_receive_frags( 612 vlan_gro_frags(&cq->napi, priv->vlgrp, be16_to_cpu(cqe->sl_vid));
624 &ring->lro, lro_frags, 613 else
625 length, length, 614 napi_gro_frags(&cq->napi);
626 priv->vlgrp,
627 be16_to_cpu(cqe->sl_vid),
628 NULL, 0);
629 } else
630 lro_receive_frags(&ring->lro,
631 lro_frags,
632 length,
633 length,
634 NULL, 0);
635 615
636 goto next; 616 goto next;
637 } 617 }
638 618
639 /* LRO not possible, complete processing here */ 619 /* LRO not possible, complete processing here */
640 ip_summed = CHECKSUM_UNNECESSARY; 620 ip_summed = CHECKSUM_UNNECESSARY;
641 INC_PERF_COUNTER(priv->pstats.lro_misses);
642 } else { 621 } else {
643 ip_summed = CHECKSUM_NONE; 622 ip_summed = CHECKSUM_NONE;
644 priv->port_stats.rx_chksum_none++; 623 priv->port_stats.rx_chksum_none++;
@@ -655,6 +634,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
655 goto next; 634 goto next;
656 } 635 }
657 636
637 if (unlikely(priv->validate_loopback)) {
638 validate_loopback(priv, skb);
639 goto next;
640 }
641
658 skb->ip_summed = ip_summed; 642 skb->ip_summed = ip_summed;
659 skb->protocol = eth_type_trans(skb, dev); 643 skb->protocol = eth_type_trans(skb, dev);
660 skb_record_rx_queue(skb, cq->ring); 644 skb_record_rx_queue(skb, cq->ring);
@@ -674,14 +658,10 @@ next:
674 if (++polled == budget) { 658 if (++polled == budget) {
675 /* We are here because we reached the NAPI budget - 659 /* We are here because we reached the NAPI budget -
676 * flush only pending LRO sessions */ 660 * flush only pending LRO sessions */
677 lro_flush_all(&ring->lro);
678 goto out; 661 goto out;
679 } 662 }
680 } 663 }
681 664
682 /* If CQ is empty flush all LRO sessions unconditionally */
683 lro_flush_all(&ring->lro);
684
685out: 665out:
686 AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); 666 AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
687 mlx4_cq_set_ci(&cq->mcq); 667 mlx4_cq_set_ci(&cq->mcq);
@@ -816,7 +796,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
816 qp->event = mlx4_en_sqp_event; 796 qp->event = mlx4_en_sqp_event;
817 797
818 memset(context, 0, sizeof *context); 798 memset(context, 0, sizeof *context);
819 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 0, 0, 799 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
820 qpn, ring->cqn, context); 800 qpn, ring->cqn, context);
821 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 801 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
822 802
@@ -839,8 +819,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
839 struct mlx4_qp_context context; 819 struct mlx4_qp_context context;
840 struct mlx4_en_rss_context *rss_context; 820 struct mlx4_en_rss_context *rss_context;
841 void *ptr; 821 void *ptr;
842 int rss_xor = mdev->profile.rss_xor; 822 u8 rss_mask = 0x3f;
843 u8 rss_mask = mdev->profile.rss_mask;
844 int i, qpn; 823 int i, qpn;
845 int err = 0; 824 int err = 0;
846 int good_qps = 0; 825 int good_qps = 0;
@@ -886,9 +865,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
886 rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | 865 rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
887 (rss_map->base_qpn)); 866 (rss_map->base_qpn));
888 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); 867 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
889 rss_context->hash_fn = rss_xor & 0x3; 868 rss_context->flags = rss_mask;
890 rss_context->flags = rss_mask << 2;
891 869
870 if (priv->mdev->profile.udp_rss)
871 rss_context->base_qpn_udp = rss_context->default_qpn;
892 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, 872 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
893 &rss_map->indir_qp, &rss_map->indir_state); 873 &rss_map->indir_qp, &rss_map->indir_state);
894 if (err) 874 if (err)
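Editor's note: the en_rx.c hunks replace the in-kernel LRO manager with NAPI GRO. Instead of feeding page fragments to lro_receive_frags(), the driver borrows the skb that napi_get_frags() keeps per NAPI context, attaches the received fragments, and hands it back with napi_gro_frags() (or vlan_gro_frags() when a VLAN tag is present). The core sequence, trimmed from the hunk above (fill_frags() is a hypothetical stand-in for mlx4_en_complete_rx_desc()):

    struct sk_buff *gro_skb = napi_get_frags(&cq->napi); /* borrowed skb */
    if (!gro_skb)
        goto next;

    nr = fill_frags(skb_shinfo(gro_skb)->frags);  /* attach rx pages */
    skb_shinfo(gro_skb)->nr_frags = nr;
    gro_skb->len = length;
    gro_skb->data_len = length;
    gro_skb->truesize += length;
    gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

    napi_gro_frags(&cq->napi);   /* skb returns to the GRO engine */

Because GRO flushes automatically at the end of each NAPI poll, the explicit lro_flush_all() calls and the per-ring lro_arr allocation simply disappear.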
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
new file mode 100644
index 000000000000..43357d35616a
--- /dev/null
+++ b/drivers/net/mlx4/en_selftest.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h>
37#include <linux/delay.h>
38#include <linux/mlx4/driver.h>
39
40#include "mlx4_en.h"
41
42
43static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
44{
45 return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
46 MLX4_CMD_TIME_CLASS_A);
47}
48
49static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
50{
51 struct sk_buff *skb;
52 struct ethhdr *ethh;
53 unsigned char *packet;
54 unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
55 unsigned int i;
56 int err;
57
58
59 /* build the pkt before xmit */
60 skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
61 if (!skb) {
62 en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
63 return -ENOMEM;
64 }
65 skb_reserve(skb, NET_IP_ALIGN);
66
67 ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
68 packet = (unsigned char *)skb_put(skb, packet_size);
69 memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
70 memset(ethh->h_source, 0, ETH_ALEN);
71 ethh->h_proto = htons(ETH_P_ARP);
72 skb_set_mac_header(skb, 0);
73 for (i = 0; i < packet_size; ++i) /* fill our packet */
74 packet[i] = (unsigned char)(i & 0xff);
75
76 /* xmit the pkt */
77 err = mlx4_en_xmit(skb, priv->dev);
78 return err;
79}
80
81static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
82{
83 u32 loopback_ok = 0;
84 int i;
85
86
87 priv->loopback_ok = 0;
88 priv->validate_loopback = 1;
89
90 /* xmit */
91 if (mlx4_en_test_loopback_xmit(priv)) {
92 en_err(priv, "Transmitting loopback packet failed\n");
93 goto mlx4_en_test_loopback_exit;
94 }
95
96 /* polling for result */
97 for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
98 msleep(MLX4_EN_LOOPBACK_TIMEOUT);
99 if (priv->loopback_ok) {
100 loopback_ok = 1;
101 break;
102 }
103 }
104 if (!loopback_ok)
105 en_err(priv, "Loopback packet didn't arrive\n");
106
107mlx4_en_test_loopback_exit:
108
109 priv->validate_loopback = 0;
110 return (!loopback_ok);
111}
112
113
114static int mlx4_en_test_link(struct mlx4_en_priv *priv)
115{
116 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
117 return -ENOMEM;
118 if (priv->port_state.link_state == 1)
119 return 0;
120 else
121 return 1;
122}
123
124static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
125{
126
127 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
128 return -ENOMEM;
129
130 /* The device currently only supports 10G speed */
131 if (priv->port_state.link_speed != SPEED_10000)
132 return priv->port_state.link_speed;
133 return 0;
134}
135
136
137void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
138{
139 struct mlx4_en_priv *priv = netdev_priv(dev);
140 struct mlx4_en_dev *mdev = priv->mdev;
141 struct mlx4_en_tx_ring *tx_ring;
142 int i, carrier_ok;
143
144 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
145
146 if (*flags & ETH_TEST_FL_OFFLINE) {
147 /* disable the interface */
148 carrier_ok = netif_carrier_ok(dev);
149
150 netif_carrier_off(dev);
151retry_tx:
 152 /* Wait until all tx queues are empty.
 153 * There should not be any additional incoming traffic
154 * since we turned the carrier off */
155 msleep(200);
156 for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
157 tx_ring = &priv->tx_ring[i];
158 if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
159 goto retry_tx;
160 }
161
 162 if (priv->mdev->dev->caps.loopback_support) {
163 buf[3] = mlx4_en_test_registers(priv);
164 buf[4] = mlx4_en_test_loopback(priv);
165 }
166
167 if (carrier_ok)
168 netif_carrier_on(dev);
169
170 }
171 buf[0] = mlx4_test_interrupts(mdev->dev);
172 buf[1] = mlx4_en_test_link(priv);
173 buf[2] = mlx4_en_test_speed(priv);
174
175 for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
176 if (buf[i])
177 *flags |= ETH_TEST_FL_FAILED;
178 }
179}
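Editor's note: the new self-test file is driven through the .self_test ethtool hook registered earlier, i.e. ethtool -t <iface> [offline]. The loopback test transmits one packet whose payload is the byte pattern i & 0xff, and the rx path's validate_loopback() re-derives the same pattern; both halves reduce to this check (standalone restatement, not driver code):

    #include <stdint.h>

    static int payload_ok(const uint8_t *data, unsigned len)
    {
        unsigned i;

        for (i = 0; i < len; i++)
            if (data[i] != (uint8_t)(i & 0xff))
                return 0;   /* corrupted or foreign packet */
        return 1;
    }

The polling loop then gives the packet MLX4_EN_LOOPBACK_RETRIES windows of MLX4_EN_LOOPBACK_TIMEOUT ms to come back before declaring failure.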
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 580968f304eb..98dd620042a8 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -38,6 +38,7 @@
38#include <linux/skbuff.h> 38#include <linux/skbuff.h>
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
41#include <linux/tcp.h>
41 42
42#include "mlx4_en.h" 43#include "mlx4_en.h"
43 44
@@ -600,6 +601,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
600 struct mlx4_wqe_data_seg *data; 601 struct mlx4_wqe_data_seg *data;
601 struct skb_frag_struct *frag; 602 struct skb_frag_struct *frag;
602 struct mlx4_en_tx_info *tx_info; 603 struct mlx4_en_tx_info *tx_info;
604 struct ethhdr *ethh;
605 u64 mac;
606 u32 mac_l, mac_h;
603 int tx_ind = 0; 607 int tx_ind = 0;
604 int nr_txbb; 608 int nr_txbb;
605 int desc_size; 609 int desc_size;
@@ -612,6 +616,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
612 int lso_header_size; 616 int lso_header_size;
613 void *fragptr; 617 void *fragptr;
614 618
619 if (!priv->port_up)
620 goto tx_drop;
621
615 real_size = get_real_size(skb, dev, &lso_header_size); 622 real_size = get_real_size(skb, dev, &lso_header_size);
616 if (unlikely(!real_size)) 623 if (unlikely(!real_size))
617 goto tx_drop; 624 goto tx_drop;
@@ -676,6 +683,19 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
676 priv->port_stats.tx_chksum_offload++; 683 priv->port_stats.tx_chksum_offload++;
677 } 684 }
678 685
686 if (unlikely(priv->validate_loopback)) {
687 /* Copy dst mac address to wqe */
688 skb_reset_mac_header(skb);
689 ethh = eth_hdr(skb);
690 if (ethh && ethh->h_dest) {
691 mac = mlx4_en_mac_to_u64(ethh->h_dest);
692 mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
693 mac_l = (u32) (mac & 0xffffffff);
694 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
695 tx_desc->ctrl.imm = cpu_to_be32(mac_l);
696 }
697 }
698
679 /* Handle LSO (TSO) packets */ 699 /* Handle LSO (TSO) packets */
680 if (lso_header_size) { 700 if (lso_header_size) {
681 /* Mark opcode as LSO */ 701 /* Mark opcode as LSO */
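
The validate_loopback hunk above relies on mlx4_en_mac_to_u64(), which is declared in the mlx4_en.h hunk later in this patch but whose body is not shown here. A sketch consistent with the mac_h/mac_l split above (assumed implementation):

	#include <linux/if_ether.h>	/* ETH_ALEN */

	/* Pack a 6-byte Ethernet address into the low 48 bits of a u64,
	 * most significant address byte first. */
	u64 mlx4_en_mac_to_u64(u8 *addr)
	{
		u64 mac = 0;
		int i;

		for (i = 0; i < ETH_ALEN; i++) {
			mac <<= 8;
			mac |= addr[i];
		}
		return mac;
	}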
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 6d7b2bf210ce..552d0fce6f67 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -699,3 +699,47 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
699 699
700 kfree(priv->eq_table.uar_map); 700 kfree(priv->eq_table.uar_map);
701} 701}
702
703/* A test that verifies that we can accept interrupts on all
704 * the irq vectors of the device.
705 * Interrupts are checked using the NOP command.
706 */
707int mlx4_test_interrupts(struct mlx4_dev *dev)
708{
709 struct mlx4_priv *priv = mlx4_priv(dev);
710 int i;
711 int err;
712
713 err = mlx4_NOP(dev);
714 /* When not in MSI_X, there is only one irq to check */
715 if (!(dev->flags & MLX4_FLAG_MSI_X))
716 return err;
717
718	/* Loop over all completion vectors; for each one, verify that it
719	 * works by mapping command completions to that vector and then
720	 * issuing a NOP command.
721	 */
722	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
723		/* Temporarily use polling for command completions */
724 mlx4_cmd_use_polling(dev);
725
726		/* Map the new eq to handle all asynchronous events */
727 err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
728 priv->eq_table.eq[i].eqn);
729 if (err) {
730 mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
731 mlx4_cmd_use_events(dev);
732 break;
733 }
734
735 /* Go back to using events */
736 mlx4_cmd_use_events(dev);
737 err = mlx4_NOP(dev);
738 }
739
740 /* Return to default */
741 mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
742 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
743 return err;
744}
745EXPORT_SYMBOL(mlx4_test_interrupts);
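
mlx4_NOP() works as a per-vector interrupt probe because its completion is delivered on whichever EQ currently services command completions. Its body is not shown in this patch; it is roughly a bare firmware round-trip (the in_modifier and timeout values here are assumptions):

	/* Sketch of the NOP command helper (assumed shape). */
	int mlx4_NOP(struct mlx4_dev *dev)
	{
		/* No mailboxes; just bounce an opcode off the firmware. */
		return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
	}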
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 04f42ae1eda0..b716e1a1b298 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -141,6 +141,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
141 struct mlx4_cmd_mailbox *mailbox; 141 struct mlx4_cmd_mailbox *mailbox;
142 u32 *outbox; 142 u32 *outbox;
143 u8 field; 143 u8 field;
144 u32 field32;
144 u16 size; 145 u16 size;
145 u16 stat_rate; 146 u16 stat_rate;
146 int err; 147 int err;
@@ -178,6 +179,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
178#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 179#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
179#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 180#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
180#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 181#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
182#define QUERY_DEV_CAP_UDP_RSS_OFFSET 0x42
183#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET 0x43
181#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 184#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
182#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 185#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
183#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 186#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -268,6 +271,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
268 dev_cap->max_msg_sz = 1 << (field & 0x1f); 271 dev_cap->max_msg_sz = 1 << (field & 0x1f);
269 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 272 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
270 dev_cap->stat_rate_support = stat_rate; 273 dev_cap->stat_rate_support = stat_rate;
274 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
275 dev_cap->udp_rss = field & 0x1;
276 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
277 dev_cap->loopback_support = field & 0x1;
271 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 278 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
272 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 279 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
273 dev_cap->reserved_uars = field >> 4; 280 dev_cap->reserved_uars = field >> 4;
@@ -365,6 +372,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
365#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a 372#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
366#define QUERY_PORT_MAX_VL_OFFSET 0x0b 373#define QUERY_PORT_MAX_VL_OFFSET 0x0b
367#define QUERY_PORT_MAC_OFFSET 0x10 374#define QUERY_PORT_MAC_OFFSET 0x10
375#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
376#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
377#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
368 378
369 for (i = 1; i <= dev_cap->num_ports; ++i) { 379 for (i = 1; i <= dev_cap->num_ports; ++i) {
370 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 380 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
@@ -388,6 +398,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
388 dev_cap->log_max_vlans[i] = field >> 4; 398 dev_cap->log_max_vlans[i] = field >> 4;
389 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); 399 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
390 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); 400 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
401 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
402 dev_cap->trans_type[i] = field32 >> 24;
403 dev_cap->vendor_oui[i] = field32 & 0xffffff;
404 MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
405 MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
391 } 406 }
392 } 407 }
393 408
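
MLX4_GET(), used for every field read above, copies a value out of the big-endian command mailbox at a byte offset and converts it to CPU order based on the destination's size. Its definition earlier in fw.c is roughly (error handling elided):

	#define MLX4_GET(dest, source, offset)				\
		do {							\
			void *__p = (char *)(source) + (offset);	\
			switch (sizeof(dest)) {				\
			case 1: (dest) = *(u8 *)__p;	    break;	\
			case 2: (dest) = be16_to_cpup(__p); break;	\
			case 4: (dest) = be32_to_cpup(__p); break;	\
			case 8: (dest) = be64_to_cpup(__p); break;	\
			default: break;	/* unsupported field size */	\
			}						\
		} while (0)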
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 526d7f30c041..65cc72eb899d 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -73,7 +73,13 @@ struct mlx4_dev_cap {
73 int max_pkeys[MLX4_MAX_PORTS + 1]; 73 int max_pkeys[MLX4_MAX_PORTS + 1];
74 u64 def_mac[MLX4_MAX_PORTS + 1]; 74 u64 def_mac[MLX4_MAX_PORTS + 1];
75 u16 eth_mtu[MLX4_MAX_PORTS + 1]; 75 u16 eth_mtu[MLX4_MAX_PORTS + 1];
76 int trans_type[MLX4_MAX_PORTS + 1];
77 int vendor_oui[MLX4_MAX_PORTS + 1];
78 u16 wavelength[MLX4_MAX_PORTS + 1];
79 u64 trans_code[MLX4_MAX_PORTS + 1];
76 u16 stat_rate_support; 80 u16 stat_rate_support;
81 int udp_rss;
82 int loopback_support;
77 u32 flags; 83 u32 flags;
78 int reserved_uars; 84 int reserved_uars;
79 int uar_size; 85 int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 5102ab1ac561..569fa3df381f 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -184,6 +184,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
184 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; 184 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
185 dev->caps.def_mac[i] = dev_cap->def_mac[i]; 185 dev->caps.def_mac[i] = dev_cap->def_mac[i];
186 dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; 186 dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
187 dev->caps.trans_type[i] = dev_cap->trans_type[i];
188 dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
189 dev->caps.wavelength[i] = dev_cap->wavelength[i];
190 dev->caps.trans_code[i] = dev_cap->trans_code[i];
187 } 191 }
188 192
189 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 193 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -221,6 +225,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
221 dev->caps.bmme_flags = dev_cap->bmme_flags; 225 dev->caps.bmme_flags = dev_cap->bmme_flags;
222 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 226 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
223 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 227 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
228 dev->caps.udp_rss = dev_cap->udp_rss;
229 dev->caps.loopback_support = dev_cap->loopback_support;
224 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 230 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
225 231
226 dev->caps.log_num_macs = log_num_mac; 232 dev->caps.log_num_macs = log_num_mac;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 449210994ee9..1fc16ab7ad2f 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -38,19 +38,19 @@
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/inet_lro.h>
42 41
43#include <linux/mlx4/device.h> 42#include <linux/mlx4/device.h>
44#include <linux/mlx4/qp.h> 43#include <linux/mlx4/qp.h>
45#include <linux/mlx4/cq.h> 44#include <linux/mlx4/cq.h>
46#include <linux/mlx4/srq.h> 45#include <linux/mlx4/srq.h>
47#include <linux/mlx4/doorbell.h> 46#include <linux/mlx4/doorbell.h>
47#include <linux/mlx4/cmd.h>
48 48
49#include "en_port.h" 49#include "en_port.h"
50 50
51#define DRV_NAME "mlx4_en" 51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.4.1.1" 52#define DRV_VERSION "1.5.1.6"
53#define DRV_RELDATE "June 2009" 53#define DRV_RELDATE "August 2010"
54 54
55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
56 56
@@ -61,7 +61,6 @@
61 61
62#define MLX4_EN_PAGE_SHIFT 12 62#define MLX4_EN_PAGE_SHIFT 12
63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
64#define MAX_TX_RINGS 16
65#define MAX_RX_RINGS 16 64#define MAX_RX_RINGS 16
66#define TXBB_SIZE 64 65#define TXBB_SIZE 64
67#define HEADROOM (2048 / TXBB_SIZE + 1) 66#define HEADROOM (2048 / TXBB_SIZE + 1)
@@ -107,6 +106,7 @@ enum {
107#define MLX4_EN_SMALL_PKT_SIZE 64 106#define MLX4_EN_SMALL_PKT_SIZE 64
108#define MLX4_EN_NUM_TX_RINGS 8 107#define MLX4_EN_NUM_TX_RINGS 8
109#define MLX4_EN_NUM_PPP_RINGS 8 108#define MLX4_EN_NUM_PPP_RINGS 8
109#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
110#define MLX4_EN_DEF_TX_RING_SIZE 512 110#define MLX4_EN_DEF_TX_RING_SIZE 512
111#define MLX4_EN_DEF_RX_RING_SIZE 1024 111#define MLX4_EN_DEF_RX_RING_SIZE 1024
112 112
@@ -139,10 +139,14 @@ enum {
139 139
140#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) 140#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
141#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) 141#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
142#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
142 143
143#define MLX4_EN_MIN_MTU 46 144#define MLX4_EN_MIN_MTU 46
144#define ETH_BCAST 0xffffffffffffULL 145#define ETH_BCAST 0xffffffffffffULL
145 146
147#define MLX4_EN_LOOPBACK_RETRIES 5
148#define MLX4_EN_LOOPBACK_TIMEOUT 100
149
146#ifdef MLX4_EN_PERF_STAT 150#ifdef MLX4_EN_PERF_STAT
147/* Number of samples to 'average' */ 151/* Number of samples to 'average' */
148#define AVG_SIZE 128 152#define AVG_SIZE 128
@@ -249,7 +253,6 @@ struct mlx4_en_rx_desc {
249struct mlx4_en_rx_ring { 253struct mlx4_en_rx_ring {
250 struct mlx4_hwq_resources wqres; 254 struct mlx4_hwq_resources wqres;
251 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; 255 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
252 struct net_lro_mgr lro;
253 u32 size ; /* number of Rx descs*/ 256 u32 size ; /* number of Rx descs*/
254 u32 actual_size; 257 u32 actual_size;
255 u32 size_mask; 258 u32 size_mask;
@@ -313,7 +316,8 @@ struct mlx4_en_port_profile {
313 316
314struct mlx4_en_profile { 317struct mlx4_en_profile {
315 int rss_xor; 318 int rss_xor;
316 int num_lro; 319 int tcp_rss;
320 int udp_rss;
317 u8 rss_mask; 321 u8 rss_mask;
318 u32 active_ports; 322 u32 active_ports;
319 u32 small_pkt_int; 323 u32 small_pkt_int;
@@ -337,6 +341,7 @@ struct mlx4_en_dev {
337 struct mlx4_mr mr; 341 struct mlx4_mr mr;
338 u32 priv_pdn; 342 u32 priv_pdn;
339 spinlock_t uar_lock; 343 spinlock_t uar_lock;
344 u8 mac_removed[MLX4_MAX_PORTS + 1];
340}; 345};
341 346
342 347
@@ -355,6 +360,13 @@ struct mlx4_en_rss_context {
355 u8 hash_fn; 360 u8 hash_fn;
356 u8 flags; 361 u8 flags;
357 __be32 rss_key[10]; 362 __be32 rss_key[10];
363 __be32 base_qpn_udp;
364};
365
366struct mlx4_en_port_state {
367 int link_state;
368 int link_speed;
369 int transciver;
358}; 370};
359 371
360struct mlx4_en_pkt_stats { 372struct mlx4_en_pkt_stats {
@@ -365,9 +377,6 @@ struct mlx4_en_pkt_stats {
365}; 377};
366 378
367struct mlx4_en_port_stats { 379struct mlx4_en_port_stats {
368 unsigned long lro_aggregated;
369 unsigned long lro_flushed;
370 unsigned long lro_no_desc;
371 unsigned long tso_packets; 380 unsigned long tso_packets;
372 unsigned long queue_stopped; 381 unsigned long queue_stopped;
373 unsigned long wake_queue; 382 unsigned long wake_queue;
@@ -376,7 +385,7 @@ struct mlx4_en_port_stats {
376 unsigned long rx_chksum_good; 385 unsigned long rx_chksum_good;
377 unsigned long rx_chksum_none; 386 unsigned long rx_chksum_none;
378 unsigned long tx_chksum_offload; 387 unsigned long tx_chksum_offload;
379#define NUM_PORT_STATS 11 388#define NUM_PORT_STATS 8
380}; 389};
381 390
382struct mlx4_en_perf_stats { 391struct mlx4_en_perf_stats {
@@ -405,6 +414,7 @@ struct mlx4_en_priv {
405 struct vlan_group *vlgrp; 414 struct vlan_group *vlgrp;
406 struct net_device_stats stats; 415 struct net_device_stats stats;
407 struct net_device_stats ret_stats; 416 struct net_device_stats ret_stats;
417 struct mlx4_en_port_state port_state;
408 spinlock_t stats_lock; 418 spinlock_t stats_lock;
409 419
410 unsigned long last_moder_packets; 420 unsigned long last_moder_packets;
@@ -423,6 +433,8 @@ struct mlx4_en_priv {
423 u16 sample_interval; 433 u16 sample_interval;
424 u16 adaptive_rx_coal; 434 u16 adaptive_rx_coal;
425 u32 msg_enable; 435 u32 msg_enable;
436 u32 loopback_ok;
437 u32 validate_loopback;
426 438
427 struct mlx4_hwq_resources res; 439 struct mlx4_hwq_resources res;
428 int link_state; 440 int link_state;
@@ -531,6 +543,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
531 u8 promisc); 543 u8 promisc);
532 544
533int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); 545int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
546int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
547
548#define MLX4_EN_NUM_SELF_TEST 5
549void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
550u64 mlx4_en_mac_to_u64(u8 *addr);
534 551
535/* 552/*
536 * Globals 553 * Globals
@@ -555,6 +572,8 @@ do { \
555 en_print(KERN_WARNING, priv, format, ##arg) 572 en_print(KERN_WARNING, priv, format, ##arg)
556#define en_err(priv, format, arg...) \ 573#define en_err(priv, format, arg...) \
557 en_print(KERN_ERR, priv, format, ##arg) 574 en_print(KERN_ERR, priv, format, ##arg)
575#define en_info(priv, format, arg...) \
576 en_print(KERN_INFO, priv, format, ## arg)
558 577
559#define mlx4_err(mdev, format, arg...) \ 578#define mlx4_err(mdev, format, arg...) \
560 pr_err("%s %s: " format, DRV_NAME, \ 579 pr_err("%s %s: " format, DRV_NAME, \
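
The loopback_ok/validate_loopback pair added to mlx4_en_priv implies a receive-side check that is not part of this excerpt: while validate_loopback is set, the RX completion path is expected to compare the payload against the (i & 0xff) pattern written by the xmit test, set loopback_ok on a match, and drop the packet. A sketch of such a check (assumed; field and macro names taken from the hunks above):

	static int validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
	{
		int i;
		int offset = ETH_HLEN;

		if (!priv->validate_loopback)
			return 0;

		for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
			if (skb->data[offset] != (unsigned char)(i & 0xff))
				goto out;	/* pattern mismatch */
		}
		priv->loopback_ok = 1;	/* polled by mlx4_en_test_loopback() */
	out:
		dev_kfree_skb_any(skb);	/* test packets never go up the stack */
		return 1;
	}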
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 5caf0115fa5b..e749f82865fe 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -85,7 +85,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
85 struct mlx4_resource tmp; 85 struct mlx4_resource tmp;
86 int i, j; 86 int i, j;
87 87
88 profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL); 88 profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
89 if (!profile) 89 if (!profile)
90 return -ENOMEM; 90 return -ENOMEM;
91 91
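
The kzalloc(n * size) to kcalloc(n, size) conversions here and in the drivers below are not purely cosmetic: kcalloc() refuses allocations whose element count times element size would overflow. Conceptually (simplified; the real helper lives in include/linux/slab.h):

	static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > ULONG_MAX / size)
			return NULL;	/* n * size would wrap around */
		return kzalloc(n * size, flags);
	}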
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index fb2c0927d3cc..24ab8a43c777 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3753,8 +3753,8 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3753 * slices. We give up on MSI-X if we can only get a single 3753 * slices. We give up on MSI-X if we can only get a single
3754 * vector. */ 3754 * vector. */
3755 3755
3756 mgp->msix_vectors = kzalloc(mgp->num_slices * 3756 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
3757 sizeof(*mgp->msix_vectors), GFP_KERNEL); 3757 GFP_KERNEL);
3758 if (mgp->msix_vectors == NULL) 3758 if (mgp->msix_vectors == NULL)
3759 goto disable_msix; 3759 goto disable_msix;
3760 for (i = 0; i < mgp->num_slices; i++) { 3760 for (i = 0; i < mgp->num_slices; i++) {
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index a6033d48b5cc..2fd39630b1e5 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1570,7 +1570,7 @@ static int netdev_open(struct net_device *dev)
1570 init_timer(&np->timer); 1570 init_timer(&np->timer);
1571 np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); 1571 np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
1572 np->timer.data = (unsigned long)dev; 1572 np->timer.data = (unsigned long)dev;
1573 np->timer.function = &netdev_timer; /* timer handler */ 1573 np->timer.function = netdev_timer; /* timer handler */
1574 add_timer(&np->timer); 1574 add_timer(&np->timer);
1575 1575
1576 return 0; 1576 return 0;
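
Dropping the '&' before the timer handler is purely stylistic: a function designator in expression context decays to a pointer to the function, so both spellings store the same value. For example (sketch, using the unsigned long callback type struct timer_list takes here):

	static void netdev_timer_sketch(unsigned long data) { }

	static void setup_sketch(struct timer_list *t)
	{
		t->function = netdev_timer_sketch;	/* preferred kernel style */
		t->function = &netdev_timer_sketch;	/* equivalent, redundant '&' */
	}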
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index fe6983af6918..8e1859c801a4 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3484,7 +3484,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3484 RCR_ENTRY_ERROR))) 3484 RCR_ENTRY_ERROR)))
3485 skb->ip_summed = CHECKSUM_UNNECESSARY; 3485 skb->ip_summed = CHECKSUM_UNNECESSARY;
3486 else 3486 else
3487 skb->ip_summed = CHECKSUM_NONE; 3487 skb_checksum_none_assert(skb);
3488 } else if (!(val & RCR_ENTRY_MULTI)) 3488 } else if (!(val & RCR_ENTRY_MULTI))
3489 append_size = len - skb->len; 3489 append_size = len - skb->len;
3490 3490
@@ -4504,7 +4504,7 @@ static int niu_alloc_channels(struct niu *np)
4504 4504
4505 np->dev->real_num_tx_queues = np->num_tx_rings; 4505 np->dev->real_num_tx_queues = np->num_tx_rings;
4506 4506
4507 np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), 4507 np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
4508 GFP_KERNEL); 4508 GFP_KERNEL);
4509 err = -ENOMEM; 4509 err = -ENOMEM;
4510 if (!np->rx_rings) 4510 if (!np->rx_rings)
@@ -4538,7 +4538,7 @@ static int niu_alloc_channels(struct niu *np)
4538 return err; 4538 return err;
4539 } 4539 }
4540 4540
4541 np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info), 4541 np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
4542 GFP_KERNEL); 4542 GFP_KERNEL);
4543 err = -ENOMEM; 4543 err = -ENOMEM;
4544 if (!np->tx_rings) 4544 if (!np->tx_rings)
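
The CHECKSUM_NONE stores replaced in this and the following drivers were redundant: CHECKSUM_NONE is zero and freshly allocated skbs are already zeroed, so the new helper merely documents that invariant and, in debug builds, verifies it. Its shape in include/linux/skbuff.h is roughly:

	static inline void skb_checksum_none_assert(struct sk_buff *skb)
	{
	#ifdef DEBUG
		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
	#endif
	}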
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 5a3488f76b38..3bbd0aab17e8 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -923,7 +923,7 @@ static void rx_irq(struct net_device *ndev)
923 if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) { 923 if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
924 skb->ip_summed = CHECKSUM_UNNECESSARY; 924 skb->ip_summed = CHECKSUM_UNNECESSARY;
925 } else { 925 } else {
926 skb->ip_summed = CHECKSUM_NONE; 926 skb_checksum_none_assert(skb);
927 } 927 }
928 skb->protocol = eth_type_trans(skb, ndev); 928 skb->protocol = eth_type_trans(skb, ndev);
929#ifdef NS83820_VLAN_ACCEL_SUPPORT 929#ifdef NS83820_VLAN_ACCEL_SUPPORT
@@ -1246,7 +1246,6 @@ static int ns83820_get_settings(struct net_device *ndev,
1246{ 1246{
1247 struct ns83820 *dev = PRIV(ndev); 1247 struct ns83820 *dev = PRIV(ndev);
1248 u32 cfg, tanar, tbicr; 1248 u32 cfg, tanar, tbicr;
1249 int have_optical = 0;
1250 int fullduplex = 0; 1249 int fullduplex = 0;
1251 1250
1252 /* 1251 /*
@@ -1267,25 +1266,25 @@ static int ns83820_get_settings(struct net_device *ndev,
1267 tanar = readl(dev->base + TANAR); 1266 tanar = readl(dev->base + TANAR);
1268 tbicr = readl(dev->base + TBICR); 1267 tbicr = readl(dev->base + TBICR);
1269 1268
1270 if (dev->CFG_cache & CFG_TBI_EN) { 1269 fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
1271 /* we have an optical interface */
1272 have_optical = 1;
1273 fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
1274
1275 } else {
1276 /* We have copper */
1277 fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
1278 }
1279 1270
1280 cmd->supported = SUPPORTED_Autoneg; 1271 cmd->supported = SUPPORTED_Autoneg;
1281 1272
1282 /* we have optical interface */
1283 if (dev->CFG_cache & CFG_TBI_EN) { 1273 if (dev->CFG_cache & CFG_TBI_EN) {
1274 /* we have optical interface */
1284 cmd->supported |= SUPPORTED_1000baseT_Half | 1275 cmd->supported |= SUPPORTED_1000baseT_Half |
1285 SUPPORTED_1000baseT_Full | 1276 SUPPORTED_1000baseT_Full |
1286 SUPPORTED_FIBRE; 1277 SUPPORTED_FIBRE;
1287 cmd->port = PORT_FIBRE; 1278 cmd->port = PORT_FIBRE;
1288 } /* TODO: else copper related support */ 1279 } else {
1280 /* we have copper */
1281 cmd->supported |= SUPPORTED_10baseT_Half |
1282 SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
1283 SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half |
1284 SUPPORTED_1000baseT_Full |
1285 SUPPORTED_MII;
1286 cmd->port = PORT_MII;
1287 }
1289 1288
1290 cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; 1289 cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
1291 switch (cfg / CFG_SPDSTS0 & 3) { 1290 switch (cfg / CFG_SPDSTS0 & 3) {
@@ -1299,7 +1298,8 @@ static int ns83820_get_settings(struct net_device *ndev,
1299 cmd->speed = SPEED_10; 1298 cmd->speed = SPEED_10;
1300 break; 1299 break;
1301 } 1300 }
1302 cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1: 0; 1301 cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
1302 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1303 return 0; 1303 return 0;
1304} 1304}
1305 1305
@@ -1405,6 +1405,13 @@ static const struct ethtool_ops ops = {
1405 .get_link = ns83820_get_link 1405 .get_link = ns83820_get_link
1406}; 1406};
1407 1407
1408static inline void ns83820_disable_interrupts(struct ns83820 *dev)
1409{
1410 writel(0, dev->base + IMR);
1411 writel(0, dev->base + IER);
1412 readl(dev->base + IER);
1413}
1414
1408/* this function is called in irq context from the ISR */ 1415/* this function is called in irq context from the ISR */
1409static void ns83820_mib_isr(struct ns83820 *dev) 1416static void ns83820_mib_isr(struct ns83820 *dev)
1410{ 1417{
@@ -1557,10 +1564,7 @@ static int ns83820_stop(struct net_device *ndev)
1557 /* FIXME: protect against interrupt handler? */ 1564 /* FIXME: protect against interrupt handler? */
1558 del_timer_sync(&dev->tx_watchdog); 1565 del_timer_sync(&dev->tx_watchdog);
1559 1566
1560 /* disable interrupts */ 1567 ns83820_disable_interrupts(dev);
1561 writel(0, dev->base + IMR);
1562 writel(0, dev->base + IER);
1563 readl(dev->base + IER);
1564 1568
1565 dev->rx_info.up = 0; 1569 dev->rx_info.up = 0;
1566 synchronize_irq(dev->pci_dev->irq); 1570 synchronize_irq(dev->pci_dev->irq);
@@ -2023,10 +2027,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
2023 dev->tx_descs, (long)dev->tx_phy_descs, 2027 dev->tx_descs, (long)dev->tx_phy_descs,
2024 dev->rx_info.descs, (long)dev->rx_info.phy_descs); 2028 dev->rx_info.descs, (long)dev->rx_info.phy_descs);
2025 2029
2026 /* disable interrupts */ 2030 ns83820_disable_interrupts(dev);
2027 writel(0, dev->base + IMR);
2028 writel(0, dev->base + IER);
2029 readl(dev->base + IER);
2030 2031
2031 dev->IMR_cache = 0; 2032 dev->IMR_cache = 0;
2032 2033
@@ -2250,9 +2251,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
2250 return 0; 2251 return 0;
2251 2252
2252out_cleanup: 2253out_cleanup:
2253 writel(0, dev->base + IMR); /* paranoia */ 2254 ns83820_disable_interrupts(dev); /* paranoia */
2254 writel(0, dev->base + IER);
2255 readl(dev->base + IER);
2256out_free_irq: 2255out_free_irq:
2257 rtnl_unlock(); 2256 rtnl_unlock();
2258 free_irq(pci_dev->irq, ndev); 2257 free_irq(pci_dev->irq, ndev);
@@ -2277,9 +2276,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
2277 if (!ndev) /* paranoia */ 2276 if (!ndev) /* paranoia */
2278 return; 2277 return;
2279 2278
2280 writel(0, dev->base + IMR); /* paranoia */ 2279 ns83820_disable_interrupts(dev); /* paranoia */
2281 writel(0, dev->base + IER);
2282 readl(dev->base + IER);
2283 2280
2284 unregister_netdev(ndev); 2281 unregister_netdev(ndev);
2285 free_irq(dev->pci_dev->irq, ndev); 2282 free_irq(dev->pci_dev->irq, ndev);
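
Folding the repeated three-line sequence into ns83820_disable_interrupts() also keeps the posted-write flush in one place: the trailing readl() forces the two preceding MMIO writes out of any bus write buffers before the caller continues. The general idiom, as a sketch:

	static inline void disable_and_flush(void __iomem *imr, void __iomem *ier)
	{
		writel(0, imr);		/* writes may be posted (buffered) */
		writel(0, ier);
		(void)readl(ier);	/* read-back forces them to complete */
	}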
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 8ab6ae0a6107..828e97cacdbf 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -808,7 +808,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
808 skb->csum = (macrx & XCT_MACRX_CSUM_M) >> 808 skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
809 XCT_MACRX_CSUM_S; 809 XCT_MACRX_CSUM_S;
810 } else 810 } else
811 skb->ip_summed = CHECKSUM_NONE; 811 skb_checksum_none_assert(skb);
812 812
813 packets++; 813 packets++;
814 tot_bytes += len; 814 tot_bytes += len;
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index fefa79e34b95..4825959a0efe 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -90,21 +90,6 @@ pasemi_mac_ethtool_set_settings(struct net_device *netdev,
90 return phy_ethtool_sset(phydev, cmd); 90 return phy_ethtool_sset(phydev, cmd);
91} 91}
92 92
93static void
94pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev,
95 struct ethtool_drvinfo *drvinfo)
96{
97 struct pasemi_mac *mac;
98 mac = netdev_priv(netdev);
99
100 /* clear and fill out info */
101 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
102 strncpy(drvinfo->driver, "pasemi_mac", 12);
103 strcpy(drvinfo->version, "N/A");
104 strcpy(drvinfo->fw_version, "N/A");
105 strncpy(drvinfo->bus_info, pci_name(mac->pdev), 32);
106}
107
108static u32 93static u32
109pasemi_mac_ethtool_get_msglevel(struct net_device *netdev) 94pasemi_mac_ethtool_get_msglevel(struct net_device *netdev)
110{ 95{
@@ -164,7 +149,6 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
164const struct ethtool_ops pasemi_mac_ethtool_ops = { 149const struct ethtool_ops pasemi_mac_ethtool_ops = {
165 .get_settings = pasemi_mac_ethtool_get_settings, 150 .get_settings = pasemi_mac_ethtool_get_settings,
166 .set_settings = pasemi_mac_ethtool_set_settings, 151 .set_settings = pasemi_mac_ethtool_set_settings,
167 .get_drvinfo = pasemi_mac_ethtool_get_drvinfo,
168 .get_msglevel = pasemi_mac_ethtool_get_msglevel, 152 .get_msglevel = pasemi_mac_ethtool_get_msglevel,
169 .set_msglevel = pasemi_mac_ethtool_set_msglevel, 153 .set_msglevel = pasemi_mac_ethtool_set_msglevel,
170 .get_link = ethtool_op_get_link, 154 .get_link = ethtool_op_get_link,
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 56f3fc45dbaa..8dd03439d994 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1125,7 +1125,7 @@ static int netdrv_open(struct net_device *dev)
1125 init_timer(&tp->timer); 1125 init_timer(&tp->timer);
1126 tp->timer.expires = jiffies + 3 * HZ; 1126 tp->timer.expires = jiffies + 3 * HZ;
1127 tp->timer.data = (unsigned long) dev; 1127 tp->timer.data = (unsigned long) dev;
1128 tp->timer.function = &netdrv_timer; 1128 tp->timer.function = netdrv_timer;
1129 add_timer(&tp->timer); 1129 add_timer(&tp->timer);
1130 1130
1131 DPRINTK("EXIT, returning 0\n"); 1131 DPRINTK("EXIT, returning 0\n");
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index c683f77c6f42..042f6777e6b9 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -69,6 +69,8 @@ earlier 3Com products.
69 69
70*/ 70*/
71 71
72#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
73
72#include <linux/module.h> 74#include <linux/module.h>
73#include <linux/kernel.h> 75#include <linux/kernel.h>
74#include <linux/init.h> 76#include <linux/init.h>
@@ -83,7 +85,6 @@ earlier 3Com products.
83#include <linux/skbuff.h> 85#include <linux/skbuff.h>
84#include <linux/if_arp.h> 86#include <linux/if_arp.h>
85#include <linux/ioport.h> 87#include <linux/ioport.h>
86#include <linux/ethtool.h>
87#include <linux/bitops.h> 88#include <linux/bitops.h>
88#include <linux/mii.h> 89#include <linux/mii.h>
89 90
@@ -238,7 +239,6 @@ static int el3_rx(struct net_device *dev, int worklimit);
238static int el3_close(struct net_device *dev); 239static int el3_close(struct net_device *dev);
239static void el3_tx_timeout(struct net_device *dev); 240static void el3_tx_timeout(struct net_device *dev);
240static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 241static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
241static const struct ethtool_ops netdev_ethtool_ops;
242static void set_rx_mode(struct net_device *dev); 242static void set_rx_mode(struct net_device *dev);
243static void set_multicast_list(struct net_device *dev); 243static void set_multicast_list(struct net_device *dev);
244 244
@@ -285,7 +285,6 @@ static int tc574_probe(struct pcmcia_device *link)
285 link->conf.ConfigIndex = 1; 285 link->conf.ConfigIndex = 1;
286 286
287 dev->netdev_ops = &el3_netdev_ops; 287 dev->netdev_ops = &el3_netdev_ops;
288 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
289 dev->watchdog_timeo = TX_TIMEOUT; 288 dev->watchdog_timeo = TX_TIMEOUT;
290 289
291 return tc574_config(link); 290 return tc574_config(link);
@@ -376,8 +375,8 @@ static int tc574_config(struct pcmcia_device *link)
376 for (i = 0; i < 3; i++) 375 for (i = 0; i < 3; i++)
377 phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); 376 phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
378 if (phys_addr[0] == htons(0x6060)) { 377 if (phys_addr[0] == htons(0x6060)) {
379 printk(KERN_NOTICE "3c574_cs: IO port conflict at 0x%03lx" 378 pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
380 "-0x%03lx\n", dev->base_addr, dev->base_addr+15); 379 dev->base_addr, dev->base_addr+15);
381 goto failed; 380 goto failed;
382 } 381 }
383 } 382 }
@@ -391,7 +390,7 @@ static int tc574_config(struct pcmcia_device *link)
391 outw(2<<11, ioaddr + RunnerRdCtrl); 390 outw(2<<11, ioaddr + RunnerRdCtrl);
392 mcr = inb(ioaddr + 2); 391 mcr = inb(ioaddr + 2);
393 outw(0<<11, ioaddr + RunnerRdCtrl); 392 outw(0<<11, ioaddr + RunnerRdCtrl);
394 printk(KERN_INFO " ASIC rev %d,", mcr>>3); 393 pr_info(" ASIC rev %d,", mcr>>3);
395 EL3WINDOW(3); 394 EL3WINDOW(3);
396 config = inl(ioaddr + Wn3_Config); 395 config = inl(ioaddr + Wn3_Config);
397 lp->default_media = (config & Xcvr) >> Xcvr_shift; 396 lp->default_media = (config & Xcvr) >> Xcvr_shift;
@@ -428,7 +427,7 @@ static int tc574_config(struct pcmcia_device *link)
428 } 427 }
429 } 428 }
430 if (phy > 32) { 429 if (phy > 32) {
431 printk(KERN_NOTICE " No MII transceivers found!\n"); 430 pr_notice(" No MII transceivers found!\n");
432 goto failed; 431 goto failed;
433 } 432 }
434 i = mdio_read(ioaddr, lp->phys, 16) | 0x40; 433 i = mdio_read(ioaddr, lp->phys, 16) | 0x40;
@@ -444,18 +443,16 @@ static int tc574_config(struct pcmcia_device *link)
444 SET_NETDEV_DEV(dev, &link->dev); 443 SET_NETDEV_DEV(dev, &link->dev);
445 444
446 if (register_netdev(dev) != 0) { 445 if (register_netdev(dev) != 0) {
447 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n"); 446 pr_notice("register_netdev() failed\n");
448 goto failed; 447 goto failed;
449 } 448 }
450 449
451 printk(KERN_INFO "%s: %s at io %#3lx, irq %d, " 450 netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
452 "hw_addr %pM.\n", 451 cardname, dev->base_addr, dev->irq, dev->dev_addr);
453 dev->name, cardname, dev->base_addr, dev->irq, 452 netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
454 dev->dev_addr); 453 8 << config & Ram_size,
455 printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n", 454 ram_split[(config & Ram_split) >> Ram_split_shift],
456 8 << config & Ram_size, 455 config & Autoselect ? "autoselect " : "");
457 ram_split[(config & Ram_split) >> Ram_split_shift],
458 config & Autoselect ? "autoselect " : "");
459 456
460 return 0; 457 return 0;
461 458
@@ -502,14 +499,14 @@ static void dump_status(struct net_device *dev)
502{ 499{
503 unsigned int ioaddr = dev->base_addr; 500 unsigned int ioaddr = dev->base_addr;
504 EL3WINDOW(1); 501 EL3WINDOW(1);
505 printk(KERN_INFO " irq status %04x, rx status %04x, tx status " 502 netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n",
506 "%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS), 503 inw(ioaddr+EL3_STATUS),
507 inw(ioaddr+RxStatus), inb(ioaddr+TxStatus), 504 inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
508 inw(ioaddr+TxFree)); 505 inw(ioaddr+TxFree));
509 EL3WINDOW(4); 506 EL3WINDOW(4);
510 printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x" 507 netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
511 " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06), 508 inw(ioaddr+0x04), inw(ioaddr+0x06),
512 inw(ioaddr+0x08), inw(ioaddr+0x0a)); 509 inw(ioaddr+0x08), inw(ioaddr+0x0a));
513 EL3WINDOW(1); 510 EL3WINDOW(1);
514} 511}
515 512
@@ -523,7 +520,7 @@ static void tc574_wait_for_completion(struct net_device *dev, int cmd)
523 while (--i > 0) 520 while (--i > 0)
524 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; 521 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
525 if (i == 0) 522 if (i == 0)
526 printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n", dev->name, cmd); 523 netdev_notice(dev, "command 0x%04x did not complete!\n", cmd);
527} 524}
528 525
529/* Read a word from the EEPROM using the regular EEPROM access register. 526/* Read a word from the EEPROM using the regular EEPROM access register.
@@ -710,7 +707,7 @@ static int el3_open(struct net_device *dev)
710 netif_start_queue(dev); 707 netif_start_queue(dev);
711 708
712 tc574_reset(dev); 709 tc574_reset(dev);
713 lp->media.function = &media_check; 710 lp->media.function = media_check;
714 lp->media.data = (unsigned long) dev; 711 lp->media.data = (unsigned long) dev;
715 lp->media.expires = jiffies + HZ; 712 lp->media.expires = jiffies + HZ;
716 add_timer(&lp->media); 713 add_timer(&lp->media);
@@ -725,7 +722,7 @@ static void el3_tx_timeout(struct net_device *dev)
725{ 722{
726 unsigned int ioaddr = dev->base_addr; 723 unsigned int ioaddr = dev->base_addr;
727 724
728 printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name); 725 netdev_notice(dev, "Transmit timed out!\n");
729 dump_status(dev); 726 dump_status(dev);
730 dev->stats.tx_errors++; 727 dev->stats.tx_errors++;
731 dev->trans_start = jiffies; /* prevent tx timeout */ 728 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -848,8 +845,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
848 EL3WINDOW(4); 845 EL3WINDOW(4);
849 fifo_diag = inw(ioaddr + Wn4_FIFODiag); 846 fifo_diag = inw(ioaddr + Wn4_FIFODiag);
850 EL3WINDOW(1); 847 EL3WINDOW(1);
851 printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic" 848 netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n",
852 " register %04x.\n", dev->name, fifo_diag); 849 fifo_diag);
853 if (fifo_diag & 0x0400) { 850 if (fifo_diag & 0x0400) {
854 /* Tx overrun */ 851 /* Tx overrun */
855 tc574_wait_for_completion(dev, TxReset); 852 tc574_wait_for_completion(dev, TxReset);
@@ -903,7 +900,7 @@ static void media_check(unsigned long arg)
903 this, we can limp along even if the interrupt is blocked */ 900 this, we can limp along even if the interrupt is blocked */
904 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) { 901 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
905 if (!lp->fast_poll) 902 if (!lp->fast_poll)
906 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 903 netdev_info(dev, "interrupt(s) dropped!\n");
907 904
908 local_irq_save(flags); 905 local_irq_save(flags);
909 el3_interrupt(dev->irq, dev); 906 el3_interrupt(dev->irq, dev);
@@ -926,23 +923,21 @@ static void media_check(unsigned long arg)
926 923
927 if (media != lp->media_status) { 924 if (media != lp->media_status) {
928 if ((media ^ lp->media_status) & 0x0004) 925 if ((media ^ lp->media_status) & 0x0004)
929 printk(KERN_INFO "%s: %s link beat\n", dev->name, 926 netdev_info(dev, "%s link beat\n",
930 (lp->media_status & 0x0004) ? "lost" : "found"); 927 (lp->media_status & 0x0004) ? "lost" : "found");
931 if ((media ^ lp->media_status) & 0x0020) { 928 if ((media ^ lp->media_status) & 0x0020) {
932 lp->partner = 0; 929 lp->partner = 0;
933 if (lp->media_status & 0x0020) { 930 if (lp->media_status & 0x0020) {
934 printk(KERN_INFO "%s: autonegotiation restarted\n", 931 netdev_info(dev, "autonegotiation restarted\n");
935 dev->name);
936 } else if (partner) { 932 } else if (partner) {
937 partner &= lp->advertising; 933 partner &= lp->advertising;
938 lp->partner = partner; 934 lp->partner = partner;
939 printk(KERN_INFO "%s: autonegotiation complete: " 935 netdev_info(dev, "autonegotiation complete: "
940 "%sbaseT-%cD selected\n", dev->name, 936 "%dbaseT-%cD selected\n",
941 ((partner & 0x0180) ? "100" : "10"), 937 (partner & 0x0180) ? 100 : 10,
942 ((partner & 0x0140) ? 'F' : 'H')); 938 (partner & 0x0140) ? 'F' : 'H');
943 } else { 939 } else {
944 printk(KERN_INFO "%s: link partner did not autonegotiate\n", 940 netdev_info(dev, "link partner did not autonegotiate\n");
945 dev->name);
946 } 941 }
947 942
948 EL3WINDOW(3); 943 EL3WINDOW(3);
@@ -952,10 +947,9 @@ static void media_check(unsigned long arg)
952 947
953 } 948 }
954 if (media & 0x0010) 949 if (media & 0x0010)
955 printk(KERN_INFO "%s: remote fault detected\n", 950 netdev_info(dev, "remote fault detected\n");
956 dev->name);
957 if (media & 0x0002) 951 if (media & 0x0002)
958 printk(KERN_INFO "%s: jabber detected\n", dev->name); 952 netdev_info(dev, "jabber detected\n");
959 lp->media_status = media; 953 lp->media_status = media;
960 } 954 }
961 spin_unlock_irqrestore(&lp->window_lock, flags); 955 spin_unlock_irqrestore(&lp->window_lock, flags);
@@ -1065,16 +1059,6 @@ static int el3_rx(struct net_device *dev, int worklimit)
1065 return worklimit; 1059 return worklimit;
1066} 1060}
1067 1061
1068static void netdev_get_drvinfo(struct net_device *dev,
1069 struct ethtool_drvinfo *info)
1070{
1071 strcpy(info->driver, "3c574_cs");
1072}
1073
1074static const struct ethtool_ops netdev_ethtool_ops = {
1075 .get_drvinfo = netdev_get_drvinfo,
1076};
1077
1078/* Provide ioctl() calls to examine the MII xcvr state. */ 1062/* Provide ioctl() calls to examine the MII xcvr state. */
1079static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1063static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1080{ 1064{
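
The pr_fmt() definition added at the top of this file (and of 3c589_cs and axnet_cs below) is what lets the conversions drop the hand-written "3c574_cs: " prefixes: every pr_*() macro wraps its format string in pr_fmt() before handing it to printk(). For example:

	/* pr_notice(fmt, ...) expands to
	 *	printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
	 * so, with pr_fmt defined as KBUILD_MODNAME ": " fmt, */
	pr_notice("register_netdev() failed\n");
	/* prints "3c574_cs: register_netdev() failed\n". */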
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 61f9cf2100ff..7f2baf5eae26 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -19,6 +19,8 @@
19 19
20======================================================================*/ 20======================================================================*/
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#define DRV_NAME "3c589_cs" 24#define DRV_NAME "3c589_cs"
23#define DRV_VERSION "1.162-ac" 25#define DRV_VERSION "1.162-ac"
24 26
@@ -273,8 +275,7 @@ static int tc589_config(struct pcmcia_device *link)
273 phys_addr = (__be16 *)dev->dev_addr; 275 phys_addr = (__be16 *)dev->dev_addr;
274 /* Is this a 3c562? */ 276 /* Is this a 3c562? */
275 if (link->manf_id != MANFID_3COM) 277 if (link->manf_id != MANFID_3COM)
276 printk(KERN_INFO "3c589_cs: hmmm, is this really a " 278 dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
277 "3Com card??\n");
278 multi = (link->card_id == PRODID_3COM_3C562); 279 multi = (link->card_id == PRODID_3COM_3C562);
279 280
280 link->io_lines = 16; 281 link->io_lines = 16;
@@ -315,8 +316,8 @@ static int tc589_config(struct pcmcia_device *link)
315 for (i = 0; i < 3; i++) 316 for (i = 0; i < 3; i++)
316 phys_addr[i] = htons(read_eeprom(ioaddr, i)); 317 phys_addr[i] = htons(read_eeprom(ioaddr, i));
317 if (phys_addr[0] == htons(0x6060)) { 318 if (phys_addr[0] == htons(0x6060)) {
318 printk(KERN_ERR "3c589_cs: IO port conflict at 0x%03lx" 319 dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
319 "-0x%03lx\n", dev->base_addr, dev->base_addr+15); 320 dev->base_addr, dev->base_addr+15);
320 goto failed; 321 goto failed;
321 } 322 }
322 } 323 }
@@ -330,12 +331,12 @@ static int tc589_config(struct pcmcia_device *link)
330 if ((if_port >= 0) && (if_port <= 3)) 331 if ((if_port >= 0) && (if_port <= 3))
331 dev->if_port = if_port; 332 dev->if_port = if_port;
332 else 333 else
333 printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); 334 dev_err(&link->dev, "invalid if_port requested\n");
334 335
335 SET_NETDEV_DEV(dev, &link->dev); 336 SET_NETDEV_DEV(dev, &link->dev);
336 337
337 if (register_netdev(dev) != 0) { 338 if (register_netdev(dev) != 0) {
338 printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); 339 dev_err(&link->dev, "register_netdev() failed\n");
339 goto failed; 340 goto failed;
340 } 341 }
341 342
@@ -537,7 +538,7 @@ static int el3_open(struct net_device *dev)
537 538
538 tc589_reset(dev); 539 tc589_reset(dev);
539 init_timer(&lp->media); 540 init_timer(&lp->media);
540 lp->media.function = &media_check; 541 lp->media.function = media_check;
541 lp->media.data = (unsigned long) dev; 542 lp->media.data = (unsigned long) dev;
542 lp->media.expires = jiffies + HZ; 543 lp->media.expires = jiffies + HZ;
543 add_timer(&lp->media); 544 add_timer(&lp->media);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 5f05ffb240cc..3f61fde70d73 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -24,6 +24,8 @@
24 24
25======================================================================*/ 25======================================================================*/
26 26
27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28
27#include <linux/kernel.h> 29#include <linux/kernel.h>
28#include <linux/module.h> 30#include <linux/module.h>
29#include <linux/init.h> 31#include <linux/init.h>
@@ -32,7 +34,6 @@
32#include <linux/timer.h> 34#include <linux/timer.h>
33#include <linux/delay.h> 35#include <linux/delay.h>
34#include <linux/spinlock.h> 36#include <linux/spinlock.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h> 37#include <linux/netdevice.h>
37#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
38#include <linux/crc32.h> 39#include <linux/crc32.h>
@@ -86,7 +87,6 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
86static struct net_device_stats *get_stats(struct net_device *dev); 87static struct net_device_stats *get_stats(struct net_device *dev);
87static void set_multicast_list(struct net_device *dev); 88static void set_multicast_list(struct net_device *dev);
88static void axnet_tx_timeout(struct net_device *dev); 89static void axnet_tx_timeout(struct net_device *dev);
89static const struct ethtool_ops netdev_ethtool_ops;
90static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); 90static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
91static void ei_watchdog(u_long arg); 91static void ei_watchdog(u_long arg);
92static void axnet_reset_8390(struct net_device *dev); 92static void axnet_reset_8390(struct net_device *dev);
@@ -171,7 +171,6 @@ static int axnet_probe(struct pcmcia_device *link)
171 171
172 dev->netdev_ops = &axnet_netdev_ops; 172 dev->netdev_ops = &axnet_netdev_ops;
173 173
174 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
175 dev->watchdog_timeo = TX_TIMEOUT; 174 dev->watchdog_timeo = TX_TIMEOUT;
176 175
177 return axnet_config(link); 176 return axnet_config(link);
@@ -347,8 +346,8 @@ static int axnet_config(struct pcmcia_device *link)
347 dev->base_addr = link->resource[0]->start; 346 dev->base_addr = link->resource[0]->start;
348 347
349 if (!get_prom(link)) { 348 if (!get_prom(link)) {
350 printk(KERN_NOTICE "axnet_cs: this is not an AX88190 card!\n"); 349 pr_notice("this is not an AX88190 card!\n");
351 printk(KERN_NOTICE "axnet_cs: use pcnet_cs instead.\n"); 350 pr_notice("use pcnet_cs instead.\n");
352 goto failed; 351 goto failed;
353 } 352 }
354 353
@@ -357,10 +356,10 @@ static int axnet_config(struct pcmcia_device *link)
357 ei_status.tx_start_page = AXNET_START_PG; 356 ei_status.tx_start_page = AXNET_START_PG;
358 ei_status.rx_start_page = AXNET_START_PG + TX_PAGES; 357 ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
359 ei_status.stop_page = AXNET_STOP_PG; 358 ei_status.stop_page = AXNET_STOP_PG;
360 ei_status.reset_8390 = &axnet_reset_8390; 359 ei_status.reset_8390 = axnet_reset_8390;
361 ei_status.get_8390_hdr = &get_8390_hdr; 360 ei_status.get_8390_hdr = get_8390_hdr;
362 ei_status.block_input = &block_input; 361 ei_status.block_input = block_input;
363 ei_status.block_output = &block_output; 362 ei_status.block_output = block_output;
364 363
365 if (inb(dev->base_addr + AXNET_TEST) != 0) 364 if (inb(dev->base_addr + AXNET_TEST) != 0)
366 info->flags |= IS_AX88790; 365 info->flags |= IS_AX88790;
@@ -393,19 +392,18 @@ static int axnet_config(struct pcmcia_device *link)
393 SET_NETDEV_DEV(dev, &link->dev); 392 SET_NETDEV_DEV(dev, &link->dev);
394 393
395 if (register_netdev(dev) != 0) { 394 if (register_netdev(dev) != 0) {
396 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); 395 pr_notice("register_netdev() failed\n");
397 goto failed; 396 goto failed;
398 } 397 }
399 398
400 printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, " 399 netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n",
401 "hw_addr %pM\n", 400 ((info->flags & IS_AX88790) ? 7 : 1),
402 dev->name, ((info->flags & IS_AX88790) ? 7 : 1), 401 dev->base_addr, dev->irq, dev->dev_addr);
403 dev->base_addr, dev->irq,
404 dev->dev_addr);
405 if (info->phy_id != -1) { 402 if (info->phy_id != -1) {
406 dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", info->phy_id, j); 403 netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
404 info->phy_id, j);
407 } else { 405 } else {
408 printk(KERN_NOTICE " No MII transceivers found!\n"); 406 netdev_notice(dev, " No MII transceivers found!\n");
409 } 407 }
410 return 0; 408 return 0;
411 409
@@ -532,7 +530,7 @@ static int axnet_open(struct net_device *dev)
532 530
533 info->link_status = 0x00; 531 info->link_status = 0x00;
534 init_timer(&info->watchdog); 532 init_timer(&info->watchdog);
535 info->watchdog.function = &ei_watchdog; 533 info->watchdog.function = ei_watchdog;
536 info->watchdog.data = (u_long)dev; 534 info->watchdog.data = (u_long)dev;
537 info->watchdog.expires = jiffies + HZ; 535 info->watchdog.expires = jiffies + HZ;
538 add_timer(&info->watchdog); 536 add_timer(&info->watchdog);
@@ -585,8 +583,7 @@ static void axnet_reset_8390(struct net_device *dev)
585 outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ 583 outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
586 584
587 if (i == 100) 585 if (i == 100)
588 printk(KERN_ERR "%s: axnet_reset_8390() did not complete.\n", 586 netdev_err(dev, "axnet_reset_8390() did not complete\n");
589 dev->name);
590 587
591} /* axnet_reset_8390 */ 588} /* axnet_reset_8390 */
592 589
@@ -613,7 +610,7 @@ static void ei_watchdog(u_long arg)
613 this, we can limp along even if the interrupt is blocked */ 610 this, we can limp along even if the interrupt is blocked */
614 if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { 611 if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
615 if (!info->fast_poll) 612 if (!info->fast_poll)
616 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 613 netdev_info(dev, "interrupt(s) dropped!\n");
617 ei_irq_wrapper(dev->irq, dev); 614 ei_irq_wrapper(dev->irq, dev);
618 info->fast_poll = HZ; 615 info->fast_poll = HZ;
619 } 616 }
@@ -628,7 +625,7 @@ static void ei_watchdog(u_long arg)
628 goto reschedule; 625 goto reschedule;
629 link = mdio_read(mii_addr, info->phy_id, 1); 626 link = mdio_read(mii_addr, info->phy_id, 1);
630 if (!link || (link == 0xffff)) { 627 if (!link || (link == 0xffff)) {
631 printk(KERN_INFO "%s: MII is missing!\n", dev->name); 628 netdev_info(dev, "MII is missing!\n");
632 info->phy_id = -1; 629 info->phy_id = -1;
633 goto reschedule; 630 goto reschedule;
634 } 631 }
@@ -636,18 +633,14 @@ static void ei_watchdog(u_long arg)
636 link &= 0x0004; 633 link &= 0x0004;
637 if (link != info->link_status) { 634 if (link != info->link_status) {
638 u_short p = mdio_read(mii_addr, info->phy_id, 5); 635 u_short p = mdio_read(mii_addr, info->phy_id, 5);
639 printk(KERN_INFO "%s: %s link beat\n", dev->name, 636 netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
640 (link) ? "found" : "lost");
641 if (link) { 637 if (link) {
642 info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00; 638 info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
643 if (p) 639 if (p)
644 printk(KERN_INFO "%s: autonegotiation complete: " 640 netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n",
645 "%sbaseT-%cD selected\n", dev->name, 641 (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H');
646 ((p & 0x0180) ? "100" : "10"),
647 ((p & 0x0140) ? 'F' : 'H'));
648 else 642 else
649 printk(KERN_INFO "%s: link partner did not autonegotiate\n", 643 netdev_info(dev, "link partner did not autonegotiate\n");
650 dev->name);
651 AX88190_init(dev, 1); 644 AX88190_init(dev, 1);
652 } 645 }
653 info->link_status = link; 646 info->link_status = link;
@@ -658,16 +651,6 @@ reschedule:
658 add_timer(&info->watchdog); 651 add_timer(&info->watchdog);
659} 652}
660 653
661static void netdev_get_drvinfo(struct net_device *dev,
662 struct ethtool_drvinfo *info)
663{
664 strcpy(info->driver, "axnet_cs");
665}
666
667static const struct ethtool_ops netdev_ethtool_ops = {
668 .get_drvinfo = netdev_get_drvinfo,
669};
670
671/*====================================================================*/ 654/*====================================================================*/
672 655
673static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 656static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -855,9 +838,6 @@ module_exit(exit_axnet_cs);
855 838
856 */ 839 */
857 840
858static const char version_8390[] = KERN_INFO \
859 "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@scyld.com)\n";
860
861#include <linux/bitops.h> 841#include <linux/bitops.h>
862#include <asm/irq.h> 842#include <asm/irq.h>
863#include <linux/fcntl.h> 843#include <linux/fcntl.h>
@@ -1004,9 +984,11 @@ static void axnet_tx_timeout(struct net_device *dev)
1004 isr = inb(e8390_base+EN0_ISR); 984 isr = inb(e8390_base+EN0_ISR);
1005 spin_unlock_irqrestore(&ei_local->page_lock, flags); 985 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1006 986
1007 printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n", 987 netdev_printk(KERN_DEBUG, dev,
1008 dev->name, (txsr & ENTSR_ABT) ? "excess collisions." : 988 "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
1009 (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar); 989 (txsr & ENTSR_ABT) ? "excess collisions." :
990 (isr) ? "lost interrupt?" : "cable problem?",
991 txsr, isr, tickssofar);
1010 992
1011 if (!isr && !dev->stats.tx_packets) 993 if (!isr && !dev->stats.tx_packets)
1012 { 994 {
@@ -1076,22 +1058,28 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
1076 output_page = ei_local->tx_start_page; 1058 output_page = ei_local->tx_start_page;
1077 ei_local->tx1 = send_length; 1059 ei_local->tx1 = send_length;
1078 if (ei_debug && ei_local->tx2 > 0) 1060 if (ei_debug && ei_local->tx2 > 0)
1079 printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n", 1061 netdev_printk(KERN_DEBUG, dev,
1080 dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing); 1062 "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
1063 ei_local->tx2, ei_local->lasttx,
1064 ei_local->txing);
1081 } 1065 }
1082 else if (ei_local->tx2 == 0) 1066 else if (ei_local->tx2 == 0)
1083 { 1067 {
 	output_page = ei_local->tx_start_page + TX_PAGES/2;
 	ei_local->tx2 = send_length;
 	if (ei_debug && ei_local->tx1 > 0)
-		printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
-			dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+		netdev_printk(KERN_DEBUG, dev,
+			      "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+			      ei_local->tx1, ei_local->lasttx,
+			      ei_local->txing);
     }
     else
     {	/* We should never get here. */
 	if (ei_debug)
-		printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
-			dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+		netdev_printk(KERN_DEBUG, dev,
+			      "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+			      ei_local->tx1, ei_local->tx2,
+			      ei_local->lasttx);
 	ei_local->irqlock = 0;
 	netif_stop_queue(dev);
 	outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1179,23 +1167,26 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
 
 	spin_lock_irqsave(&ei_local->page_lock, flags);
 
-	if (ei_local->irqlock)
-	{
+	if (ei_local->irqlock) {
 #if 1 /* This might just be an interrupt for a PCI device sharing this line */
+		const char *msg;
 		/* The "irqlock" check is only for testing. */
-		printk(ei_local->irqlock
-			   ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
-			   : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
-			   dev->name, inb_p(e8390_base + EN0_ISR),
-			   inb_p(e8390_base + EN0_IMR));
+		if (ei_local->irqlock)
+			msg = "Interrupted while interrupts are masked!";
+		else
+			msg = "Reentering the interrupt handler!";
+		netdev_info(dev, "%s, isr=%#2x imr=%#2x\n",
+			    msg,
+			    inb_p(e8390_base + EN0_ISR),
+			    inb_p(e8390_base + EN0_IMR));
 #endif
 		spin_unlock_irqrestore(&ei_local->page_lock, flags);
 		return IRQ_NONE;
 	}
 
 	if (ei_debug > 3)
-		printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
-			   inb_p(e8390_base + EN0_ISR));
+		netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
+			      inb_p(e8390_base + EN0_ISR));
 
 	outb_p(0x00, e8390_base + EN0_ISR);
 	ei_local->irqlock = 1;
@@ -1206,7 +1197,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
 	{
 		if (!netif_running(dev) || (interrupts == 0xff)) {
 			if (ei_debug > 1)
-				printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+				netdev_warn(dev,
+					    "interrupt from stopped card\n");
 			outb_p(interrupts, e8390_base + EN0_ISR);
 			interrupts = 0;
 			break;
@@ -1249,11 +1241,12 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
 	{
 		/* 0xFF is valid for a card removal */
 		if(interrupts!=0xFF)
-			printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
-			   dev->name, interrupts);
+			netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+				    interrupts);
 		outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
 	} else {
-		printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+		netdev_warn(dev, "unknown interrupt %#2x\n",
+			    interrupts);
 		outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
 	}
 }
@@ -1287,18 +1280,19 @@ static void ei_tx_err(struct net_device *dev)
 	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
 
 #ifdef VERBOSE_ERROR_DUMP
-	printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+	netdev_printk(KERN_DEBUG, dev,
+		      "transmitter error (%#2x):", txsr);
 	if (txsr & ENTSR_ABT)
-		printk("excess-collisions ");
+		pr_cont(" excess-collisions");
 	if (txsr & ENTSR_ND)
-		printk("non-deferral ");
+		pr_cont(" non-deferral");
 	if (txsr & ENTSR_CRS)
-		printk("lost-carrier ");
+		pr_cont(" lost-carrier");
 	if (txsr & ENTSR_FU)
-		printk("FIFO-underrun ");
+		pr_cont(" FIFO-underrun");
 	if (txsr & ENTSR_CDH)
-		printk("lost-heartbeat ");
-	printk("\n");
+		pr_cont(" lost-heartbeat");
+	pr_cont("\n");
 #endif
 
 	if (tx_was_aborted)
@@ -1335,8 +1329,9 @@ static void ei_tx_intr(struct net_device *dev)
 	if (ei_local->tx1 < 0)
 	{
 		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
-			printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
-				ei_local->name, ei_local->lasttx, ei_local->tx1);
+			netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n",
+				   ei_local->name, ei_local->lasttx,
+				   ei_local->tx1);
 		ei_local->tx1 = 0;
 		if (ei_local->tx2 > 0)
 		{
@@ -1351,8 +1346,9 @@ static void ei_tx_intr(struct net_device *dev)
 	else if (ei_local->tx2 < 0)
 	{
 		if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
-			printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
-				ei_local->name, ei_local->lasttx, ei_local->tx2);
+			netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+				    ei_local->name, ei_local->lasttx,
+				    ei_local->tx2);
 		ei_local->tx2 = 0;
 		if (ei_local->tx1 > 0)
 		{
@@ -1365,8 +1361,9 @@ static void ei_tx_intr(struct net_device *dev)
 	else
 		ei_local->lasttx = 10, ei_local->txing = 0;
 	}
-//	else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
-//			dev->name, ei_local->lasttx);
+//	else
+//		netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
+//			    ei_local->lasttx);
 
 	/* Minimize Tx latency: update the statistics after we restart TXing. */
 	if (status & ENTSR_COL)
@@ -1429,8 +1426,8 @@ static void ei_receive(struct net_device *dev)
 	   is that some clones crash in roughly the same way.
 	 */
 	if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
-		printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
-			   dev->name, this_frame, ei_local->current_page);
+		netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+			   this_frame, ei_local->current_page);
 
 	if (this_frame == rxing_page)	/* Read all the frames? */
 		break;			/* Done for now */
@@ -1446,9 +1443,10 @@ static void ei_receive(struct net_device *dev)
 	if (pkt_len < 60 || pkt_len > 1518)
 	{
 		if (ei_debug)
-			printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
-				   dev->name, rx_frame.count, rx_frame.status,
-				   rx_frame.next);
+			netdev_printk(KERN_DEBUG, dev,
+				      "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+				      rx_frame.count, rx_frame.status,
+				      rx_frame.next);
 		dev->stats.rx_errors++;
 		dev->stats.rx_length_errors++;
 	}
@@ -1460,8 +1458,9 @@ static void ei_receive(struct net_device *dev)
 		if (skb == NULL)
 		{
 			if (ei_debug > 1)
-				printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
-					   dev->name, pkt_len);
+				netdev_printk(KERN_DEBUG, dev,
+					      "Couldn't allocate a sk_buff of size %d\n",
+					      pkt_len);
 			dev->stats.rx_dropped++;
 			break;
 		}
@@ -1481,9 +1480,10 @@ static void ei_receive(struct net_device *dev)
 	else
 	{
 		if (ei_debug)
-			printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
-				   dev->name, rx_frame.status, rx_frame.next,
-				   rx_frame.count);
+			netdev_printk(KERN_DEBUG, dev,
+				      "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+				      rx_frame.status, rx_frame.next,
+				      rx_frame.count);
 		dev->stats.rx_errors++;
 		/* NB: The NIC counts CRC, frame and missed errors. */
 		if (pkt_stat & ENRSR_FO)
@@ -1493,8 +1493,8 @@ static void ei_receive(struct net_device *dev)
 
 	/* This _should_ never happen: it's here for avoiding bad clones. */
 	if (next_frame >= ei_local->stop_page) {
-		printk("%s: next frame inconsistency, %#2x\n", dev->name,
-			   next_frame);
+		netdev_info(dev, "next frame inconsistency, %#2x\n",
+			    next_frame);
 		next_frame = ei_local->rx_start_page;
 	}
 	ei_local->current_page = next_frame;
@@ -1529,7 +1529,7 @@ static void ei_rx_overrun(struct net_device *dev)
 	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
 
 	if (ei_debug > 1)
-		printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+		netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
 	dev->stats.rx_over_errors++;
 
 	/*
@@ -1726,7 +1726,7 @@ static void AX88190_init(struct net_device *dev, int startp)
 	{
 		outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
 		if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
-			printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
+			netdev_err(dev, "Hw. address read/write mismap %d\n", i);
 	}
 
 	outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
@@ -1763,8 +1763,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
 
 	if (inb_p(e8390_base) & E8390_TRANS)
 	{
-		printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
-			dev->name);
+		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
 		return;
 	}
 	outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
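
All of the hunks above follow one conversion pattern: a printk() that carried a level marker plus a hand-written "%s: ..." / dev->name pair becomes the matching netdev_<level>() helper, which derives the driver and interface prefix from the net_device itself. A minimal sketch of the shape of the change; netdev_warn() is the real helper from <linux/netdevice.h>, while the surrounding function is hypothetical and only for illustration:

    #include <linux/netdevice.h>

    /* hypothetical illustration, not part of the patch */
    static void report_unknown_irq(struct net_device *dev, int isr)
    {
        /* old style: the caller spells out the device name by hand */
        printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, isr);

        /* new style: a "driver ethN: " prefix is generated automatically */
        netdev_warn(dev, "unknown interrupt %#2x\n", isr);
    }
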
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 3c400cfa82ae..f065c35cd4b7 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -52,23 +52,23 @@
 
 #define VERSION "arcnet: COM20020 PCMCIA support loaded.\n"
 
-#ifdef DEBUG
 
 static void regdump(struct net_device *dev)
 {
+#ifdef DEBUG
     int ioaddr = dev->base_addr;
     int count;
 
-    printk("com20020 register dump:\n");
+    netdev_dbg(dev, "register dump:\n");
     for (count = ioaddr; count < ioaddr + 16; count++)
     {
 	if (!(count % 16))
-	    printk("\n%04X: ", count);
-	printk("%02X ", inb(count));
+	    pr_cont("%04X:", count);
+	pr_cont(" %02X", inb(count));
     }
-    printk("\n");
+    pr_cont("\n");
 
-    printk("buffer0 dump:\n");
+    netdev_dbg(dev, "buffer0 dump:\n");
     /* set up the address register */
     count = 0;
     outb((count >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
@@ -77,19 +77,15 @@ static void regdump(struct net_device *dev)
     for (count = 0; count < 256+32; count++)
     {
 	if (!(count % 16))
-	    printk("\n%04X: ", count);
+	    pr_cont("%04X:", count);
 
 	/* copy the data */
-	printk("%02X ", inb(_MEMDATA));
+	pr_cont(" %02X", inb(_MEMDATA));
     }
-    printk("\n");
+    pr_cont("\n");
+#endif
 }
 
-#else
-
-static inline void regdump(struct net_device *dev) { }
-
-#endif
 
 
 /*====================================================================*/
@@ -301,13 +297,13 @@ static int com20020_config(struct pcmcia_device *link)
     i = com20020_found(dev, 0);	/* calls register_netdev */
 
     if (i != 0) {
-	dev_printk(KERN_NOTICE, &link->dev,
-		"com20020_cs: com20020_found() failed\n");
+	dev_notice(&link->dev,
+		   "com20020_found() failed\n");
 	goto failed;
     }
 
-    dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
-	    dev->name, dev->base_addr, dev->irq);
+    netdev_dbg(dev, "port %#3lx, irq %d\n",
+	       dev->base_addr, dev->irq);
     return 0;
 
 failed:
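
The regdump() rework above also shows the continuation idiom used throughout this series: the first fragment of a multi-part line is emitted with a prefixed helper such as netdev_dbg(), and every following fragment uses pr_cont() so that no new log level or prefix is injected mid-line. A minimal sketch of the same idiom; dump_row() and its arguments are hypothetical:

    /* hypothetical sketch of the continuation-line idiom */
    static void dump_row(struct net_device *dev, const u8 *buf, int len)
    {
        int i;

        netdev_dbg(dev, "register dump:");  /* opens the line with a prefix */
        for (i = 0; i < len; i++)
            pr_cont(" %02X", buf[i]);       /* appends, no new prefix/level */
        pr_cont("\n");                      /* terminates the line */
    }
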
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 98fffb03ecd7..8f26d548d1bb 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -28,6 +28,8 @@
 
 ======================================================================*/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define DRV_NAME	"fmvj18x_cs"
 #define DRV_VERSION	"2.9"
 
@@ -291,7 +293,7 @@ static int mfc_try_io_port(struct pcmcia_device *link)
 	link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
 	if (link->resource[1]->start == 0) {
 	    link->resource[1]->end = 0;
-	    printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n");
+	    pr_notice("out of resource for serial\n");
 	}
 	ret = pcmcia_request_io(link);
 	if (ret == 0)
@@ -503,7 +505,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
     case XXX10304:
 	/* Read MACID from Buggy CIS */
 	if (fmvj18x_get_hwinfo(link, buggybuf) == -1) {
-	    printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n");
+	    pr_notice("unable to read hardware net address\n");
 	    goto failed;
 	}
 	for (i = 0 ; i < 6; i++) {
@@ -524,15 +526,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
     SET_NETDEV_DEV(dev, &link->dev);
 
     if (register_netdev(dev) != 0) {
-	printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
+	pr_notice("register_netdev() failed\n");
 	goto failed;
     }
 
     /* print current configuration */
-    printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
-	   "hw_addr %pM\n",
-	   dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
-	   dev->base_addr, dev->irq, dev->dev_addr);
+    netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n",
+		card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
+		dev->base_addr, dev->irq, dev->dev_addr);
 
     return 0;
 
@@ -606,7 +607,7 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
 
     lp->base = ioremap(req.Base, req.Size);
     if (lp->base == NULL) {
-	printk(KERN_NOTICE "fmvj18x_cs: ioremap failed\n");
+	netdev_notice(dev, "ioremap failed\n");
 	return -1;
     }
 
@@ -800,17 +801,16 @@ static void fjn_tx_timeout(struct net_device *dev)
     struct local_info_t *lp = netdev_priv(dev);
     unsigned int ioaddr = dev->base_addr;
 
-    printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n",
-	   dev->name, htons(inw(ioaddr + TX_STATUS)),
-	   inb(ioaddr + TX_STATUS) & F_TMT_RDY
-	   ? "IRQ conflict" : "network cable problem");
-    printk(KERN_NOTICE "%s: timeout registers: %04x %04x %04x "
-	   "%04x %04x %04x %04x %04x.\n",
-	   dev->name, htons(inw(ioaddr + 0)),
-	   htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
-	   htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
-	   htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
-	   htons(inw(ioaddr +14)));
+    netdev_notice(dev, "transmit timed out with status %04x, %s?\n",
+		  htons(inw(ioaddr + TX_STATUS)),
+		  inb(ioaddr + TX_STATUS) & F_TMT_RDY
+		  ? "IRQ conflict" : "network cable problem");
+    netdev_notice(dev, "timeout registers: %04x %04x %04x "
+		  "%04x %04x %04x %04x %04x.\n",
+		  htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)),
+		  htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)),
+		  htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)),
+		  htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14)));
     dev->stats.tx_errors++;
     /* ToDo: We should try to restart the adaptor... */
     local_irq_disable();
@@ -845,13 +845,13 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
     unsigned char *buf = skb->data;
 
     if (length > ETH_FRAME_LEN) {
-	printk(KERN_NOTICE "%s: Attempting to send a large packet"
-	       " (%d bytes).\n", dev->name, length);
+	netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n",
+		      length);
 	return NETDEV_TX_BUSY;
     }
 
-    pr_debug("%s: Transmitting a packet of length %lu.\n",
-	     dev->name, (unsigned long)skb->len);
+    netdev_dbg(dev, "Transmitting a packet of length %lu\n",
+	       (unsigned long)skb->len);
     dev->stats.tx_bytes += skb->len;
 
     /* Disable both interrupts. */
@@ -904,7 +904,7 @@ static void fjn_reset(struct net_device *dev)
     unsigned int ioaddr = dev->base_addr;
     int i;
 
-    pr_debug("fjn_reset(%s) called.\n",dev->name);
+    netdev_dbg(dev, "fjn_reset() called\n");
 
     /* Reset controller */
     if( sram_config == 0 )
@@ -988,8 +988,8 @@ static void fjn_rx(struct net_device *dev)
     while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
 	u_short status = inw(ioaddr + DATAPORT);
 
-	pr_debug("%s: Rxing packet mode %02x status %04x.\n",
-		 dev->name, inb(ioaddr + RX_MODE), status);
+	netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n",
+		   inb(ioaddr + RX_MODE), status);
 #ifndef final_version
 	if (status == 0) {
 	    outb(F_SKP_PKT, ioaddr + RX_SKIP);
@@ -1008,16 +1008,16 @@ static void fjn_rx(struct net_device *dev)
 	    struct sk_buff *skb;
 
 	    if (pkt_len > 1550) {
-		printk(KERN_NOTICE "%s: The FMV-18x claimed a very "
-		       "large packet, size %d.\n", dev->name, pkt_len);
+		netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n",
+			      pkt_len);
 		outb(F_SKP_PKT, ioaddr + RX_SKIP);
 		dev->stats.rx_errors++;
 		break;
 	    }
 	    skb = dev_alloc_skb(pkt_len+2);
 	    if (skb == NULL) {
-		printk(KERN_NOTICE "%s: Memory squeeze, dropping "
-		       "packet (len %d).\n", dev->name, pkt_len);
+		netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
+			      pkt_len);
 		outb(F_SKP_PKT, ioaddr + RX_SKIP);
 		dev->stats.rx_dropped++;
 		break;
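
The pr_fmt() define added at the top of fmvj18x_cs.c is what lets the converted pr_notice()/pr_info() calls drop their hand-written "fmvj18x_cs: " prefixes: the pr_<level> macros paste pr_fmt(fmt) around the format string at compile time, and KBUILD_MODNAME expands to the module name. A sketch of the expansion:

    /* with this at the top of the file ... */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    /* ... a call such as ... */
    pr_notice("out of resource for serial\n");

    /* ... compiles as if it had been written: */
    printk(KERN_NOTICE KBUILD_MODNAME ": " "out of resource for serial\n");
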
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index b0d06a3d962f..dc85282193bf 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -45,6 +45,8 @@
 
 ======================================================================*/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/ptrace.h>
@@ -52,7 +54,6 @@
 #include <linux/string.h>
 #include <linux/timer.h>
 #include <linux/module.h>
-#include <linux/ethtool.h>
 #include <linux/netdevice.h>
 #include <linux/trdevice.h>
 #include <linux/ibmtr.h>
@@ -107,16 +108,6 @@ typedef struct ibmtr_dev_t {
     struct tok_info *ti;
 } ibmtr_dev_t;
 
-static void netdev_get_drvinfo(struct net_device *dev,
-			       struct ethtool_drvinfo *info)
-{
-	strcpy(info->driver, "ibmtr_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
-	.get_drvinfo = netdev_get_drvinfo,
-};
-
 static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
     ibmtr_dev_t *info = dev_id;
     struct net_device *dev = info->dev;
@@ -159,8 +150,6 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
 
     info->dev = dev;
 
-    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
-
     return ibmtr_config(link);
 } /* ibmtr_attach */
 
@@ -285,15 +274,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
 
     i = ibmtr_probe_card(dev);
     if (i != 0) {
-	printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
+	pr_notice("register_netdev() failed\n");
 	goto failed;
     }
 
-    printk(KERN_INFO
-	   "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
-	   dev->name, dev->base_addr, dev->irq,
-	   (u_long)ti->mmio, (u_long)(ti->sram_base << 12),
-	   dev->dev_addr);
+    netdev_info(dev, "port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
+		dev->base_addr, dev->irq,
+		(u_long)ti->mmio, (u_long)(ti->sram_base << 12),
+		dev->dev_addr);
     return 0;
 
 failed:
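
Besides the logging conversion, ibmtr_cs (and pcnet_cs below) deletes an ethtool_ops whose only member echoed a constant driver name, presumably because that duplicates what userspace can already read from the driver symlink under /sys/class/net/<dev>/device. The deleted boilerplate has this generic shape; the "example" names here are invented for illustration:

    /* generic shape of the deleted boilerplate; "example" names are invented */
    static void example_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *info)
    {
        strcpy(info->driver, "example_cs");  /* the only payload */
    }

    static const struct ethtool_ops example_ethtool_ops = {
        .get_drvinfo = example_get_drvinfo,
    };
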
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 68f2deeb3ade..89cf63bb8c91 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -111,6 +111,8 @@ Log: nmclan_cs.c,v
 
 ---------------------------------------------------------------------------- */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define DRV_NAME	"nmclan_cs"
 #define DRV_VERSION	"0.16"
 
@@ -563,7 +565,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
 	/* Wait for reset bit to be cleared automatically after <= 200ns */;
 	if(++ct > 500)
 	{
-		printk(KERN_ERR "mace: reset failed, card removed ?\n");
+		pr_err("reset failed, card removed?\n");
 		return -1;
 	}
 	udelay(1);
@@ -610,7 +612,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
 	{
 		if(++ ct > 500)
 		{
-			printk(KERN_ERR "mace: ADDRCHG timeout, card removed ?\n");
+			pr_err("ADDRCHG timeout, card removed?\n");
 			return -1;
 		}
 	}
@@ -678,8 +680,8 @@ static int nmclan_config(struct pcmcia_device *link)
       dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
 	    sig[0], sig[1]);
     } else {
-      printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
-	     " be 0x40 0x?9\n", sig[0], sig[1]);
+      pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
+		sig[0], sig[1]);
       return -ENODEV;
     }
   }
@@ -691,20 +693,18 @@ static int nmclan_config(struct pcmcia_device *link)
     if (if_port <= 2)
       dev->if_port = if_port;
     else
-      printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
+      pr_notice("invalid if_port requested\n");
 
   SET_NETDEV_DEV(dev, &link->dev);
 
   i = register_netdev(dev);
   if (i != 0) {
-    printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
+    pr_notice("register_netdev() failed\n");
     goto failed;
   }
 
-  printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
-	 " hw_addr %pM\n",
-	 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
-	 dev->dev_addr);
+  netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n",
+	      dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr);
   return 0;
 
 failed:
@@ -798,8 +798,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map)
   if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
     if (map->port <= 2) {
       dev->if_port = map->port;
-      printk(KERN_INFO "%s: switched to %s port\n", dev->name,
-	     if_names[dev->if_port]);
+      netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
     } else
       return -EINVAL;
   }
@@ -878,12 +877,12 @@ static void mace_tx_timeout(struct net_device *dev)
   mace_private *lp = netdev_priv(dev);
   struct pcmcia_device *link = lp->p_dev;
 
-  printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name);
+  netdev_notice(dev, "transmit timed out -- ");
 #if RESET_ON_TIMEOUT
-  printk("resetting card\n");
+  pr_cont("resetting card\n");
   pcmcia_reset_card(link->socket);
 #else /* #if RESET_ON_TIMEOUT */
-  printk("NOT resetting card\n");
+  pr_cont("NOT resetting card\n");
 #endif /* #if RESET_ON_TIMEOUT */
   dev->trans_start = jiffies; /* prevent tx timeout */
   netif_wake_queue(dev);
@@ -965,22 +964,21 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
   ioaddr = dev->base_addr;
 
   if (lp->tx_irq_disabled) {
-    printk(
-      (lp->tx_irq_disabled?
-       KERN_NOTICE "%s: Interrupt with tx_irq_disabled "
-       "[isr=%02X, imr=%02X]\n":
-       KERN_NOTICE "%s: Re-entering the interrupt handler "
-       "[isr=%02X, imr=%02X]\n"),
-      dev->name,
-      inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
-      inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)
-    );
+    const char *msg;
+    if (lp->tx_irq_disabled)
+      msg = "Interrupt with tx_irq_disabled";
+    else
+      msg = "Re-entering the interrupt handler";
+    netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n",
+		  msg,
+		  inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
+		  inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
     /* WARNING: MACE_IR has been read! */
     return IRQ_NONE;
   }
 
   if (!netif_device_present(dev)) {
-    pr_debug("%s: interrupt from dead card\n", dev->name);
+    netdev_dbg(dev, "interrupt from dead card\n");
     return IRQ_NONE;
   }
 
@@ -1378,8 +1376,8 @@ static void BuildLAF(int *ladrf, int *adr)
   printk(KERN_DEBUG "  adr =%pM\n", adr);
   printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
   for (i = 0; i < 8; i++)
-    printk(KERN_CONT " %02X", ladrf[i]);
-  printk(KERN_CONT "\n");
+    pr_cont(" %02X", ladrf[i]);
+  pr_cont("\n");
 #endif
 } /* BuildLAF */
 
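
The mace_interrupt() rewrite above also explains an oddity of the old code: KERN_* markers are plain string-literal prefixes pasted onto the format (in the classic encoding KERN_NOTICE is "<5>"), which is why each arm of the old ternary had to carry its own KERN_NOTICE. Selecting the message into a const char * first leaves a single call site with a single level. A sketch of how the level markers compose:

    /* KERN_* markers are string literals concatenated with the format */
    printk(KERN_NOTICE "a complete line\n");   /* level chosen per line */
    printk("default-level line\n");            /* no marker: default level */
    pr_cont(" appended to the previous line"); /* KERN_CONT continuation */
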
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 49279b0ee526..e180832c278f 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -28,6 +28,8 @@
 
 ======================================================================*/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -35,7 +37,6 @@
 #include <linux/string.h>
 #include <linux/timer.h>
 #include <linux/delay.h>
-#include <linux/ethtool.h>
 #include <linux/netdevice.h>
 #include <linux/log2.h>
 #include <linux/etherdevice.h>
@@ -100,7 +101,6 @@ static void pcnet_release(struct pcmcia_device *link);
 static int pcnet_open(struct net_device *dev);
 static int pcnet_close(struct net_device *dev);
 static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops netdev_ethtool_ops;
 static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
 static void ei_watchdog(u_long arg);
 static void pcnet_reset_8390(struct net_device *dev);
@@ -434,8 +434,6 @@ static hw_info_t *get_ax88190(struct pcmcia_device *link)
 	dev->dev_addr[i] = j & 0xff;
 	dev->dev_addr[i+1] = j >> 8;
     }
-    printk(KERN_NOTICE "pcnet_cs: this is an AX88190 card!\n");
-    printk(KERN_NOTICE "pcnet_cs: use axnet_cs instead.\n");
     return NULL;
 }
 
@@ -570,15 +568,15 @@ static int pcnet_config(struct pcmcia_device *link)
 	if ((if_port == 1) || (if_port == 2))
 	    dev->if_port = if_port;
 	else
-	    printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
+	    pr_notice("invalid if_port requested\n");
     } else {
 	dev->if_port = 0;
     }
 
     if ((link->conf.ConfigBase == 0x03c0) &&
 	(link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
-	printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
-	printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
+	pr_notice("this is an AX88190 card!\n");
+	pr_notice("use axnet_cs instead.\n");
 	goto failed;
     }
 
@@ -593,8 +591,8 @@ static int pcnet_config(struct pcmcia_device *link)
     local_hw_info = get_hwired(link);
 
     if (local_hw_info == NULL) {
-	printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
-	       " address for io base %#3lx\n", dev->base_addr);
+	pr_notice("unable to read hardware net address for io base %#3lx\n",
+		  dev->base_addr);
 	goto failed;
     }
 
@@ -626,9 +624,7 @@ static int pcnet_config(struct pcmcia_device *link)
 
     ei_status.name = "NE2000";
     ei_status.word16 = 1;
-    ei_status.reset_8390 = &pcnet_reset_8390;
-
-    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+    ei_status.reset_8390 = pcnet_reset_8390;
 
     if (info->flags & (IS_DL10019|IS_DL10022))
 	mii_phy_probe(dev);
@@ -636,25 +632,25 @@ static int pcnet_config(struct pcmcia_device *link)
     SET_NETDEV_DEV(dev, &link->dev);
 
     if (register_netdev(dev) != 0) {
-	printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
+	pr_notice("register_netdev() failed\n");
 	goto failed;
     }
 
     if (info->flags & (IS_DL10019|IS_DL10022)) {
 	u_char id = inb(dev->base_addr + 0x1a);
-	printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
-	       dev->name, ((info->flags & IS_DL10022) ? 22 : 19), id);
+	netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
+		    (info->flags & IS_DL10022) ? 22 : 19, id);
 	if (info->pna_phy)
-	    printk("PNA, ");
+	    pr_cont("PNA, ");
     } else {
-	printk(KERN_INFO "%s: NE2000 Compatible: ", dev->name);
+	netdev_info(dev, "NE2000 Compatible: ");
     }
-    printk("io %#3lx, irq %d,", dev->base_addr, dev->irq);
+    pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq);
     if (info->flags & USE_SHMEM)
-	printk (" mem %#5lx,", dev->mem_start);
+	pr_cont(" mem %#5lx,", dev->mem_start);
     if (info->flags & HAS_MISC_REG)
-	printk(" %s xcvr,", if_names[dev->if_port]);
-    printk(" hw_addr %pM\n", dev->dev_addr);
+	pr_cont(" %s xcvr,", if_names[dev->if_port]);
+    pr_cont(" hw_addr %pM\n", dev->dev_addr);
     return 0;
 
 failed:
@@ -928,7 +924,7 @@ static void mii_phy_probe(struct net_device *dev)
 	phyid = tmp << 16;
 	phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
 	phyid &= MII_PHYID_REV_MASK;
-	pr_debug("%s: MII at %d is 0x%08x\n", dev->name, i, phyid);
+	netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid);
 	if (phyid == AM79C9XX_HOME_PHY) {
 	    info->pna_phy = i;
 	} else if (phyid != AM79C9XX_ETH_PHY) {
@@ -961,7 +957,7 @@ static int pcnet_open(struct net_device *dev)
     info->phy_id = info->eth_phy;
     info->link_status = 0x00;
     init_timer(&info->watchdog);
-    info->watchdog.function = &ei_watchdog;
+    info->watchdog.function = ei_watchdog;
     info->watchdog.data = (u_long)dev;
     info->watchdog.expires = jiffies + HZ;
     add_timer(&info->watchdog);
@@ -1014,8 +1010,8 @@ static void pcnet_reset_8390(struct net_device *dev)
     outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
 
     if (i == 100)
-	printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
-	       dev->name);
+	netdev_err(dev, "pcnet_reset_8390() did not complete.\n");
+
     set_misc_reg(dev);
 
 } /* pcnet_reset_8390 */
@@ -1031,8 +1027,7 @@ static int set_config(struct net_device *dev, struct ifmap *map)
 	else if ((map->port < 1) || (map->port > 2))
 	    return -EINVAL;
 	dev->if_port = map->port;
-	printk(KERN_INFO "%s: switched to %s port\n",
-	       dev->name, if_names[dev->if_port]);
+	netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
 	NS8390_init(dev, 1);
     }
     return 0;
@@ -1067,7 +1062,7 @@ static void ei_watchdog(u_long arg)
        this, we can limp along even if the interrupt is blocked */
     if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
 	if (!info->fast_poll)
-	    printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+	    netdev_info(dev, "interrupt(s) dropped!\n");
 	ei_irq_wrapper(dev->irq, dev);
 	info->fast_poll = HZ;
     }
@@ -1087,7 +1082,7 @@ static void ei_watchdog(u_long arg)
 	if (info->eth_phy) {
 	    info->phy_id = info->eth_phy = 0;
 	} else {
-	    printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+	    netdev_info(dev, "MII is missing!\n");
 	    info->flags &= ~HAS_MII;
 	}
 	goto reschedule;
@@ -1096,8 +1091,7 @@ static void ei_watchdog(u_long arg)
     link &= 0x0004;
     if (link != info->link_status) {
 	u_short p = mdio_read(mii_addr, info->phy_id, 5);
-	printk(KERN_INFO "%s: %s link beat\n", dev->name,
-	       (link) ? "found" : "lost");
+	netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
 	if (link && (info->flags & IS_DL10022)) {
 	    /* Disable collision detection on full duplex links */
 	    outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
@@ -1108,13 +1102,12 @@ static void ei_watchdog(u_long arg)
 	if (link) {
 	    if (info->phy_id == info->eth_phy) {
 		if (p)
-		    printk(KERN_INFO "%s: autonegotiation complete: "
-			   "%sbaseT-%cD selected\n", dev->name,
-			   ((p & 0x0180) ? "100" : "10"),
-			   ((p & 0x0140) ? 'F' : 'H'));
+		    netdev_info(dev, "autonegotiation complete: "
+				"%sbaseT-%cD selected\n",
+				((p & 0x0180) ? "100" : "10"),
+				((p & 0x0140) ? 'F' : 'H'));
 		else
-		    printk(KERN_INFO "%s: link partner did not "
-			   "autonegotiate\n", dev->name);
+		    netdev_info(dev, "link partner did not autonegotiate\n");
 	    }
 	    NS8390_init(dev, 1);
 	}
@@ -1127,7 +1120,7 @@ static void ei_watchdog(u_long arg)
 	/* isolate this MII and try flipping to the other one */
 	mdio_write(mii_addr, info->phy_id, 0, 0x0400);
 	info->phy_id ^= info->pna_phy ^ info->eth_phy;
-	printk(KERN_INFO "%s: switched to %s transceiver\n", dev->name,
+	netdev_info(dev, "switched to %s transceiver\n",
 	       (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
 	mdio_write(mii_addr, info->phy_id, 0,
 		   (info->phy_id == info->eth_phy) ? 0x1000 : 0);
@@ -1143,18 +1136,6 @@ reschedule:
 
 /*====================================================================*/
 
-static void netdev_get_drvinfo(struct net_device *dev,
-			       struct ethtool_drvinfo *info)
-{
-	strcpy(info->driver, "pcnet_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
-	.get_drvinfo = netdev_get_drvinfo,
-};
-
-/*====================================================================*/
-
 
 static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
@@ -1187,9 +1168,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
     unsigned int nic_base = dev->base_addr;
 
     if (ei_status.dmaing) {
-	printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
-	       "[DMAstat:%1x][irqlock:%1x]\n",
-	       dev->name, ei_status.dmaing, ei_status.irqlock);
+	netdev_notice(dev, "DMAing conflict in dma_block_input."
+		      "[DMAstat:%1x][irqlock:%1x]\n",
+		      ei_status.dmaing, ei_status.irqlock);
	return;
     }
 
@@ -1220,11 +1201,11 @@ static void dma_block_input(struct net_device *dev, int count,
     char *buf = skb->data;
 
     if ((ei_debug > 4) && (count != 4))
-	pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+	netdev_dbg(dev, "[bi=%d]\n", count+4);
     if (ei_status.dmaing) {
-	printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
-	       "[DMAstat:%1x][irqlock:%1x]\n",
-	       dev->name, ei_status.dmaing, ei_status.irqlock);
+	netdev_notice(dev, "DMAing conflict in dma_block_input."
+		      "[DMAstat:%1x][irqlock:%1x]\n",
+		      ei_status.dmaing, ei_status.irqlock);
 	return;
     }
     ei_status.dmaing |= 0x01;
@@ -1254,9 +1235,9 @@ static void dma_block_input(struct net_device *dev, int count,
 		break;
 	} while (--tries > 0);
 	if (tries <= 0)
-	    printk(KERN_NOTICE "%s: RX transfer address mismatch,"
-		   "%#4.4x (expected) vs. %#4.4x (actual).\n",
-		   dev->name, ring_offset + xfer_count, addr);
+	    netdev_notice(dev, "RX transfer address mismatch,"
+			  "%#4.4x (expected) vs. %#4.4x (actual).\n",
+			  ring_offset + xfer_count, addr);
     }
 #endif
     outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
@@ -1277,7 +1258,7 @@ static void dma_block_output(struct net_device *dev, int count,
 
 #ifdef PCMCIA_DEBUG
     if (ei_debug > 4)
-	printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+	netdev_dbg(dev, "[bo=%d]\n", count);
 #endif
 
     /* Round the count up for word writes. Do we need to do this?
@@ -1286,9 +1267,9 @@ static void dma_block_output(struct net_device *dev, int count,
     if (count & 0x01)
 	count++;
     if (ei_status.dmaing) {
-	printk(KERN_NOTICE "%s: DMAing conflict in dma_block_output."
-	       "[DMAstat:%1x][irqlock:%1x]\n",
-	       dev->name, ei_status.dmaing, ei_status.irqlock);
+	netdev_notice(dev, "DMAing conflict in dma_block_output."
+		      "[DMAstat:%1x][irqlock:%1x]\n",
+		      ei_status.dmaing, ei_status.irqlock);
 	return;
     }
     ei_status.dmaing |= 0x01;
@@ -1325,9 +1306,9 @@ static void dma_block_output(struct net_device *dev, int count,
 	    break;
 	} while (--tries > 0);
 	if (tries <= 0) {
-	    printk(KERN_NOTICE "%s: Tx packet transfer address mismatch,"
-		   "%#4.4x (expected) vs. %#4.4x (actual).\n",
-		   dev->name, (start_page << 8) + count, addr);
+	    netdev_notice(dev, "Tx packet transfer address mismatch,"
+			  "%#4.4x (expected) vs. %#4.4x (actual).\n",
+			  (start_page << 8) + count, addr);
 	    if (retries++ == 0)
 		goto retry;
 	}
@@ -1336,8 +1317,7 @@ static void dma_block_output(struct net_device *dev, int count,
 
     while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
 	if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
-	    printk(KERN_NOTICE "%s: timeout waiting for Tx RDC.\n",
-		   dev->name);
+	    netdev_notice(dev, "timeout waiting for Tx RDC.\n");
 	    pcnet_reset_8390(dev);
 	    NS8390_init(dev, 1);
 	    break;
@@ -1361,9 +1341,9 @@ static int setup_dma_config(struct pcmcia_device *link, int start_pg,
     ei_status.stop_page = stop_pg;
 
     /* set up block i/o functions */
-    ei_status.get_8390_hdr = &dma_get_8390_hdr;
-    ei_status.block_input = &dma_block_input;
-    ei_status.block_output = &dma_block_output;
+    ei_status.get_8390_hdr = dma_get_8390_hdr;
+    ei_status.block_input = dma_block_input;
+    ei_status.block_output = dma_block_output;
 
     return 0;
 }
@@ -1509,9 +1489,9 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
     ei_status.stop_page = start_pg + ((req.Size - offset) >> 8);
 
     /* set up block i/o functions */
-    ei_status.get_8390_hdr = &shmem_get_8390_hdr;
-    ei_status.block_input = &shmem_block_input;
-    ei_status.block_output = &shmem_block_output;
+    ei_status.get_8390_hdr = shmem_get_8390_hdr;
+    ei_status.block_input = shmem_block_input;
+    ei_status.block_output = shmem_block_output;
 
     info->flags |= USE_SHMEM;
     return 0;
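
A second, purely cosmetic change in pcnet_cs: stray '&' operators are dropped when assigning function pointers (ei_status.block_input = dma_block_input; and the watchdog timer setter). In C a function designator decays to a pointer to the function, so func and &func yield the same value; the patch merely standardizes on the bare form. A standalone sketch of the equivalence:

    #include <stdio.h>

    static void handler(void) { puts("called"); }

    int main(void)
    {
        void (*f1)(void) = handler;   /* name decays to a pointer */
        void (*f2)(void) = &handler;  /* explicit & gives the same value */

        f1();
        f2();
        return (f1 == f2) ? 0 : 1;    /* always returns 0 */
    }
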
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 377367d03b41..3d1c549b7038 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -25,6 +25,8 @@
25 25
26======================================================================*/ 26======================================================================*/
27 27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
28#include <linux/module.h> 30#include <linux/module.h>
29#include <linux/kernel.h> 31#include <linux/kernel.h>
30#include <linux/init.h> 32#include <linux/init.h>
@@ -294,7 +296,7 @@ static const struct net_device_ops smc_netdev_ops = {
294 .ndo_tx_timeout = smc_tx_timeout, 296 .ndo_tx_timeout = smc_tx_timeout,
295 .ndo_set_config = s9k_config, 297 .ndo_set_config = s9k_config,
296 .ndo_set_multicast_list = set_rx_mode, 298 .ndo_set_multicast_list = set_rx_mode,
297 .ndo_do_ioctl = &smc_ioctl, 299 .ndo_do_ioctl = smc_ioctl,
298 .ndo_change_mtu = eth_change_mtu, 300 .ndo_change_mtu = eth_change_mtu,
299 .ndo_set_mac_address = eth_mac_addr, 301 .ndo_set_mac_address = eth_mac_addr,
300 .ndo_validate_addr = eth_validate_addr, 302 .ndo_validate_addr = eth_validate_addr,
@@ -820,7 +822,7 @@ static int check_sig(struct pcmcia_device *link)
820 modconf_t mod = { 822 modconf_t mod = {
821 .Attributes = CONF_IO_CHANGE_WIDTH, 823 .Attributes = CONF_IO_CHANGE_WIDTH,
822 }; 824 };
823 printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n"); 825 pr_info("using 8-bit IO window\n");
824 826
825 smc91c92_suspend(link); 827 smc91c92_suspend(link);
826 pcmcia_modify_configuration(link, &mod); 828 pcmcia_modify_configuration(link, &mod);
@@ -881,7 +883,7 @@ static int smc91c92_config(struct pcmcia_device *link)
881 if ((if_port >= 0) && (if_port <= 2)) 883 if ((if_port >= 0) && (if_port <= 2))
882 dev->if_port = if_port; 884 dev->if_port = if_port;
883 else 885 else
884 printk(KERN_NOTICE "smc91c92_cs: invalid if_port requested\n"); 886 dev_notice(&link->dev, "invalid if_port requested\n");
885 887
886 switch (smc->manfid) { 888 switch (smc->manfid) {
887 case MANFID_OSITECH: 889 case MANFID_OSITECH:
@@ -899,7 +901,7 @@ static int smc91c92_config(struct pcmcia_device *link)
899 } 901 }
900 902
901 if (i != 0) { 903 if (i != 0) {
902 printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); 904 dev_notice(&link->dev, "Unable to find hardware address.\n");
903 goto config_failed; 905 goto config_failed;
904 } 906 }
905 907
@@ -952,30 +954,28 @@ static int smc91c92_config(struct pcmcia_device *link)
952 SET_NETDEV_DEV(dev, &link->dev); 954 SET_NETDEV_DEV(dev, &link->dev);
953 955
954 if (register_netdev(dev) != 0) { 956 if (register_netdev(dev) != 0) {
955 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); 957 dev_err(&link->dev, "register_netdev() failed\n");
956 goto config_undo; 958 goto config_undo;
957 } 959 }
958 960
959 printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, " 961 netdev_info(dev, "smc91c%s rev %d: io %#3lx, irq %d, hw_addr %pM\n",
960 "hw_addr %pM\n", 962 name, (rev & 0x0f), dev->base_addr, dev->irq, dev->dev_addr);
961 dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
962 dev->dev_addr);
963 963
964 if (rev > 0) { 964 if (rev > 0) {
965 if (mir & 0x3ff) 965 if (mir & 0x3ff)
966 printk(KERN_INFO " %lu byte", mir); 966 netdev_info(dev, " %lu byte", mir);
967 else 967 else
968 printk(KERN_INFO " %lu kb", mir>>10); 968 netdev_info(dev, " %lu kb", mir>>10);
969 printk(" buffer, %s xcvr\n", (smc->cfg & CFG_MII_SELECT) ? 969 pr_cont(" buffer, %s xcvr\n",
970 "MII" : if_names[dev->if_port]); 970 (smc->cfg & CFG_MII_SELECT) ? "MII" : if_names[dev->if_port]);
971 } 971 }
972 972
973 if (smc->cfg & CFG_MII_SELECT) { 973 if (smc->cfg & CFG_MII_SELECT) {
974 if (smc->mii_if.phy_id != -1) { 974 if (smc->mii_if.phy_id != -1) {
975 dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", 975 netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
976 smc->mii_if.phy_id, j); 976 smc->mii_if.phy_id, j);
977 } else { 977 } else {
978 printk(KERN_NOTICE " No MII transceivers found!\n"); 978 netdev_notice(dev, " No MII transceivers found!\n");
979 } 979 }
980 } 980 }
981 return 0; 981 return 0;
@@ -1081,10 +1081,10 @@ static void smc_dump(struct net_device *dev)
1081 save = inw(ioaddr + BANK_SELECT); 1081 save = inw(ioaddr + BANK_SELECT);
1082 for (w = 0; w < 4; w++) { 1082 for (w = 0; w < 4; w++) {
1083 SMC_SELECT_BANK(w); 1083 SMC_SELECT_BANK(w);
1084 printk(KERN_DEBUG "bank %d: ", w); 1084 netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
1085 for (i = 0; i < 14; i += 2) 1085 for (i = 0; i < 14; i += 2)
1086 printk(" %04x", inw(ioaddr + i)); 1086 pr_cont(" %04x", inw(ioaddr + i));
1087 printk("\n"); 1087 pr_cont("\n");
1088 } 1088 }
1089 outw(save, ioaddr + BANK_SELECT); 1089 outw(save, ioaddr + BANK_SELECT);
1090} 1090}
@@ -1106,7 +1106,7 @@ static int smc_open(struct net_device *dev)
1106 return -ENODEV; 1106 return -ENODEV;
1107 /* Physical device present signature. */ 1107 /* Physical device present signature. */
1108 if (check_sig(link) < 0) { 1108 if (check_sig(link) < 0) {
1109 printk("smc91c92_cs: Yikes! Bad chip signature!\n"); 1109 netdev_info(dev, "Yikes! Bad chip signature!\n");
1110 return -ENODEV; 1110 return -ENODEV;
1111 } 1111 }
1112 link->open++; 1112 link->open++;
@@ -1117,7 +1117,7 @@ static int smc_open(struct net_device *dev)
1117 1117
1118 smc_reset(dev); 1118 smc_reset(dev);
1119 init_timer(&smc->media); 1119 init_timer(&smc->media);
1120 smc->media.function = &media_check; 1120 smc->media.function = media_check;
1121 smc->media.data = (u_long) dev; 1121 smc->media.data = (u_long) dev;
1122 smc->media.expires = jiffies + HZ; 1122 smc->media.expires = jiffies + HZ;
1123 add_timer(&smc->media); 1123 add_timer(&smc->media);
@@ -1172,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
1172 u_char packet_no; 1172 u_char packet_no;
1173 1173
1174 if (!skb) { 1174 if (!skb) {
1175 printk(KERN_ERR "%s: In XMIT with no packet to send.\n", dev->name); 1175 netdev_err(dev, "In XMIT with no packet to send\n");
1176 return; 1176 return;
1177 } 1177 }
1178 1178
@@ -1180,8 +1180,8 @@ static void smc_hardware_send_packet(struct net_device * dev)
1180 packet_no = inw(ioaddr + PNR_ARR) >> 8; 1180 packet_no = inw(ioaddr + PNR_ARR) >> 8;
1181 if (packet_no & 0x80) { 1181 if (packet_no & 0x80) {
1182 /* If not, there is a hardware problem! Likely an ejected card. */ 1182 /* If not, there is a hardware problem! Likely an ejected card. */
1183 printk(KERN_WARNING "%s: 91c92 hardware Tx buffer allocation" 1183 netdev_warn(dev, "hardware Tx buffer allocation failed, status %#2.2x\n",
1184 " failed, status %#2.2x.\n", dev->name, packet_no); 1184 packet_no);
1185 dev_kfree_skb_irq(skb); 1185 dev_kfree_skb_irq(skb);
1186 smc->saved_skb = NULL; 1186 smc->saved_skb = NULL;
1187 netif_start_queue(dev); 1187 netif_start_queue(dev);
@@ -1200,8 +1200,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
1200 u_char *buf = skb->data; 1200 u_char *buf = skb->data;
1201 u_int length = skb->len; /* The chip will pad to ethernet min. */ 1201 u_int length = skb->len; /* The chip will pad to ethernet min. */
1202 1202
1203 pr_debug("%s: Trying to xmit packet of length %d.\n", 1203 netdev_dbg(dev, "Trying to xmit packet of length %d\n", length);
1204 dev->name, length);
1205 1204
1206 /* send the packet length: +6 for status word, length, and ctl */ 1205 /* send the packet length: +6 for status word, length, and ctl */
1207 outw(0, ioaddr + DATA_1); 1206 outw(0, ioaddr + DATA_1);
@@ -1233,9 +1232,8 @@ static void smc_tx_timeout(struct net_device *dev)
1233 struct smc_private *smc = netdev_priv(dev); 1232 struct smc_private *smc = netdev_priv(dev);
1234 unsigned int ioaddr = dev->base_addr; 1233 unsigned int ioaddr = dev->base_addr;
1235 1234
1236 printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, " 1235 netdev_notice(dev, "transmit timed out, Tx_status %2.2x status %4.4x.\n",
1237 "Tx_status %2.2x status %4.4x.\n", 1236 inw(ioaddr)&0xff, inw(ioaddr + 2));
1238 dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
1239 dev->stats.tx_errors++; 1237 dev->stats.tx_errors++;
1240 smc_reset(dev); 1238 smc_reset(dev);
1241 dev->trans_start = jiffies; /* prevent tx timeout */ 1239 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1254,14 +1252,14 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
1254 1252
1255 netif_stop_queue(dev); 1253 netif_stop_queue(dev);
1256 1254
1257 pr_debug("%s: smc_start_xmit(length = %d) called," 1255 netdev_dbg(dev, "smc_start_xmit(length = %d) called, status %04x\n",
1258 " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2)); 1256 skb->len, inw(ioaddr + 2));
1259 1257
1260 if (smc->saved_skb) { 1258 if (smc->saved_skb) {
1261 /* THIS SHOULD NEVER HAPPEN. */ 1259 /* THIS SHOULD NEVER HAPPEN. */
1262 dev->stats.tx_aborted_errors++; 1260 dev->stats.tx_aborted_errors++;
1263 printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n", 1261 netdev_printk(KERN_DEBUG, dev,
1264 dev->name); 1262 "Internal error -- sent packet while busy\n");
1265 return NETDEV_TX_BUSY; 1263 return NETDEV_TX_BUSY;
1266 } 1264 }
1267 smc->saved_skb = skb; 1265 smc->saved_skb = skb;
@@ -1269,7 +1267,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
1269 num_pages = skb->len >> 8; 1267 num_pages = skb->len >> 8;
1270 1268
1271 if (num_pages > 7) { 1269 if (num_pages > 7) {
1272 printk(KERN_ERR "%s: Far too big packet error.\n", dev->name); 1270 netdev_err(dev, "Far too big packet error: %d pages\n", num_pages);
1273 dev_kfree_skb (skb); 1271 dev_kfree_skb (skb);
1274 smc->saved_skb = NULL; 1272 smc->saved_skb = NULL;
1275 dev->stats.tx_dropped++; 1273 dev->stats.tx_dropped++;
@@ -1339,8 +1337,7 @@ static void smc_tx_err(struct net_device * dev)
1339 } 1337 }
1340 1338
1341 if (tx_status & TS_SUCCESS) { 1339 if (tx_status & TS_SUCCESS) {
1342 printk(KERN_NOTICE "%s: Successful packet caused error " 1340 netdev_notice(dev, "Successful packet caused error interrupt?\n");
1343 "interrupt?\n", dev->name);
1344 } 1341 }
1345 /* re-enable transmit */ 1342 /* re-enable transmit */
1346 SMC_SELECT_BANK(0); 1343 SMC_SELECT_BANK(0);
@@ -1530,8 +1527,7 @@ static void smc_rx(struct net_device *dev)
1530 /* Assertion: we are in Window 2. */ 1527 /* Assertion: we are in Window 2. */
1531 1528
1532 if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) { 1529 if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) {
1533 printk(KERN_ERR "%s: smc_rx() with nothing on Rx FIFO.\n", 1530 netdev_err(dev, "smc_rx() with nothing on Rx FIFO\n");
1534 dev->name);
1535 return; 1531 return;
1536 } 1532 }
1537 1533
@@ -1646,8 +1642,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
1646 else if (map->port > 2) 1642 else if (map->port > 2)
1647 return -EINVAL; 1643 return -EINVAL;
1648 dev->if_port = map->port; 1644 dev->if_port = map->port;
1649 printk(KERN_INFO "%s: switched to %s port\n", 1645 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
1650 dev->name, if_names[dev->if_port]);
1651 smc_reset(dev); 1646 smc_reset(dev);
1652 } 1647 }
1653 return 0; 1648 return 0;
@@ -1798,7 +1793,7 @@ static void media_check(u_long arg)
1798 this, we can limp along even if the interrupt is blocked */ 1793 this, we can limp along even if the interrupt is blocked */
1799 if (smc->watchdog++ && ((i>>8) & i)) { 1794 if (smc->watchdog++ && ((i>>8) & i)) {
1800 if (!smc->fast_poll) 1795 if (!smc->fast_poll)
1801 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 1796 netdev_info(dev, "interrupt(s) dropped!\n");
1802 local_irq_save(flags); 1797 local_irq_save(flags);
1803 smc_interrupt(dev->irq, dev); 1798 smc_interrupt(dev->irq, dev);
1804 local_irq_restore(flags); 1799 local_irq_restore(flags);
@@ -1822,7 +1817,7 @@ static void media_check(u_long arg)
1822 SMC_SELECT_BANK(3); 1817 SMC_SELECT_BANK(3);
1823 link = mdio_read(dev, smc->mii_if.phy_id, 1); 1818 link = mdio_read(dev, smc->mii_if.phy_id, 1);
1824 if (!link || (link == 0xffff)) { 1819 if (!link || (link == 0xffff)) {
1825 printk(KERN_INFO "%s: MII is missing!\n", dev->name); 1820 netdev_info(dev, "MII is missing!\n");
1826 smc->mii_if.phy_id = -1; 1821 smc->mii_if.phy_id = -1;
1827 goto reschedule; 1822 goto reschedule;
1828 } 1823 }
@@ -1830,15 +1825,13 @@ static void media_check(u_long arg)
1830 link &= 0x0004; 1825 link &= 0x0004;
1831 if (link != smc->link_status) { 1826 if (link != smc->link_status) {
1832 u_short p = mdio_read(dev, smc->mii_if.phy_id, 5); 1827 u_short p = mdio_read(dev, smc->mii_if.phy_id, 5);
1833 printk(KERN_INFO "%s: %s link beat\n", dev->name, 1828 netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
1834 (link) ? "found" : "lost");
1835 smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40)) 1829 smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
1836 ? TCR_FDUPLX : 0); 1830 ? TCR_FDUPLX : 0);
1837 if (link) { 1831 if (link) {
1838 printk(KERN_INFO "%s: autonegotiation complete: " 1832 netdev_info(dev, "autonegotiation complete: "
1839 "%sbaseT-%cD selected\n", dev->name, 1833 "%dbaseT-%cD selected\n",
1840 ((p & 0x0180) ? "100" : "10"), 1834 (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H');
1841 (smc->duplex ? 'F' : 'H'));
1842 } 1835 }
1843 SMC_SELECT_BANK(0); 1836 SMC_SELECT_BANK(0);
1844 outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR); 1837 outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR);
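Note: the duplex decision above decodes MII register 5 (the link partner
ability word read into "p"), whose relevant bits are the standard IEEE
802.3 autonegotiation ability bits. A sketch of the decoding, assuming a
conventional PHY:

    /* p = mdio_read(dev, phy_id, 5): link partner ability (MII reg 5)
     *   0x0100  100BASE-TX full duplex
     *   0x0080  100BASE-TX half duplex
     *   0x0040  10BASE-T  full duplex
     *   0x0020  10BASE-T  half duplex
     *
     * (p & 0x0100)          -> 100 Mb/s full duplex was negotiated
     * ((p & 0x1c0) == 0x40) -> partner offers 10FD but no 100 Mb/s mode
     * either case           -> smc->duplex = TCR_FDUPLX
     * (p & 0x0180)          -> any 100 Mb/s ability: log "100baseT"
     */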
@@ -1857,25 +1850,23 @@ static void media_check(u_long arg)
1857 if (media != smc->media_status) { 1850 if (media != smc->media_status) {
1858 if ((media & smc->media_status & 1) && 1851 if ((media & smc->media_status & 1) &&
1859 ((smc->media_status ^ media) & EPH_LINK_OK)) 1852 ((smc->media_status ^ media) & EPH_LINK_OK))
1860 printk(KERN_INFO "%s: %s link beat\n", dev->name, 1853 netdev_info(dev, "%s link beat\n",
1861 (smc->media_status & EPH_LINK_OK ? "lost" : "found")); 1854 smc->media_status & EPH_LINK_OK ? "lost" : "found");
1862 else if ((media & smc->media_status & 2) && 1855 else if ((media & smc->media_status & 2) &&
1863 ((smc->media_status ^ media) & EPH_16COL)) 1856 ((smc->media_status ^ media) & EPH_16COL))
1864 printk(KERN_INFO "%s: coax cable %s\n", dev->name, 1857 netdev_info(dev, "coax cable %s\n",
1865 (media & EPH_16COL ? "problem" : "ok")); 1858 media & EPH_16COL ? "problem" : "ok");
1866 if (dev->if_port == 0) { 1859 if (dev->if_port == 0) {
1867 if (media & 1) { 1860 if (media & 1) {
1868 if (media & EPH_LINK_OK) 1861 if (media & EPH_LINK_OK)
1869 printk(KERN_INFO "%s: flipped to 10baseT\n", 1862 netdev_info(dev, "flipped to 10baseT\n");
1870 dev->name);
1871 else 1863 else
1872 smc_set_xcvr(dev, 2); 1864 smc_set_xcvr(dev, 2);
1873 } else { 1865 } else {
1874 if (media & EPH_16COL) 1866 if (media & EPH_16COL)
1875 smc_set_xcvr(dev, 1); 1867 smc_set_xcvr(dev, 1);
1876 else 1868 else
1877 printk(KERN_INFO "%s: flipped to 10base2\n", 1869 netdev_info(dev, "flipped to 10base2\n");
1878 dev->name);
1879 } 1870 }
1880 } 1871 }
1881 smc->media_status = media; 1872 smc->media_status = media;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index f5819526b5ee..d858b5e4c4a7 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -63,6 +63,8 @@
63 * OF THE POSSIBILITY OF SUCH DAMAGE. 63 * OF THE POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
67
66#include <linux/module.h> 68#include <linux/module.h>
67#include <linux/kernel.h> 69#include <linux/kernel.h>
68#include <linux/init.h> 70#include <linux/init.h>
@@ -210,13 +212,6 @@ enum xirc_cmd { /* Commands */
210 212
211static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; 213static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
212 214
213
214#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
215#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
216#define KWRN_XIRC KERN_WARNING "xirc2ps_cs: "
217#define KNOT_XIRC KERN_NOTICE "xirc2ps_cs: "
218#define KINF_XIRC KERN_INFO "xirc2ps_cs: "
219
220/* card types */ 215/* card types */
221#define XIR_UNKNOWN 0 /* unknown: not supported */ 216#define XIR_UNKNOWN 0 /* unknown: not supported */
222#define XIR_CE 1 /* (prodid 1) different hardware: not supported */ 217#define XIR_CE 1 /* (prodid 1) different hardware: not supported */
@@ -350,26 +345,26 @@ PrintRegisters(struct net_device *dev)
350 if (pc_debug > 1) { 345 if (pc_debug > 1) {
351 int i, page; 346 int i, page;
352 347
353 printk(KDBG_XIRC "Register common: "); 348 printk(KERN_DEBUG pr_fmt("Register common: "));
354 for (i = 0; i < 8; i++) 349 for (i = 0; i < 8; i++)
355 printk(" %2.2x", GetByte(i)); 350 pr_cont(" %2.2x", GetByte(i));
356 printk("\n"); 351 pr_cont("\n");
357 for (page = 0; page <= 8; page++) { 352 for (page = 0; page <= 8; page++) {
358 printk(KDBG_XIRC "Register page %2x: ", page); 353 printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
359 SelectPage(page); 354 SelectPage(page);
360 for (i = 8; i < 16; i++) 355 for (i = 8; i < 16; i++)
361 printk(" %2.2x", GetByte(i)); 356 pr_cont(" %2.2x", GetByte(i));
362 printk("\n"); 357 pr_cont("\n");
363 } 358 }
364 for (page=0x40 ; page <= 0x5f; page++) { 359 for (page=0x40 ; page <= 0x5f; page++) {
365 if (page == 0x43 || (page >= 0x46 && page <= 0x4f) || 360 if (page == 0x43 || (page >= 0x46 && page <= 0x4f) ||
366 (page >= 0x51 && page <=0x5e)) 361 (page >= 0x51 && page <=0x5e))
367 continue; 362 continue;
368 printk(KDBG_XIRC "Register page %2x: ", page); 363 printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
369 SelectPage(page); 364 SelectPage(page);
370 for (i = 8; i < 16; i++) 365 for (i = 8; i < 16; i++)
371 printk(" %2.2x", GetByte(i)); 366 pr_cont(" %2.2x", GetByte(i));
372 printk("\n"); 367 pr_cont("\n");
373 } 368 }
374 } 369 }
375} 370}
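Note: the conversion above leans on two standard kernel logging
facilities: a pr_fmt() macro defined before the first include is pasted
into every subsequent pr_*() call in the file, and pr_cont() appends to
the current line without emitting a new prefix or log level. A minimal
sketch of the pattern (illustrative module code, not part of this patch):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/types.h>
    #include <linux/kernel.h>
    #include <linux/module.h>

    static void demo_dump(const u8 *buf, int n)
    {
    	int i;

    	/* emits one line: "mymodule: register dump: aa bb ..." */
    	pr_notice("register dump:");
    	for (i = 0; i < n; i++)
    		pr_cont(" %02x", buf[i]);
    	pr_cont("\n");
    }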
@@ -608,11 +603,11 @@ set_card_type(struct pcmcia_device *link)
608 local->modem = 0; 603 local->modem = 0;
609 local->card_type = XIR_UNKNOWN; 604 local->card_type = XIR_UNKNOWN;
610 if (!(prodid & 0x40)) { 605 if (!(prodid & 0x40)) {
611 printk(KNOT_XIRC "Ooops: Not a creditcard\n"); 606 pr_notice("Oops: Not a creditcard\n");
612 return 0; 607 return 0;
613 } 608 }
614 if (!(mediaid & 0x01)) { 609 if (!(mediaid & 0x01)) {
615 printk(KNOT_XIRC "Not an Ethernet card\n"); 610 pr_notice("Not an Ethernet card\n");
616 return 0; 611 return 0;
617 } 612 }
618 if (mediaid & 0x10) { 613 if (mediaid & 0x10) {
@@ -643,12 +638,11 @@ set_card_type(struct pcmcia_device *link)
643 } 638 }
644 } 639 }
645 if (local->card_type == XIR_CE || local->card_type == XIR_CEM) { 640 if (local->card_type == XIR_CE || local->card_type == XIR_CEM) {
646 printk(KNOT_XIRC "Sorry, this is an old CE card\n"); 641 pr_notice("Sorry, this is an old CE card\n");
647 return 0; 642 return 0;
648 } 643 }
649 if (local->card_type == XIR_UNKNOWN) 644 if (local->card_type == XIR_UNKNOWN)
650 printk(KNOT_XIRC "unknown card (mediaid=%02x prodid=%02x)\n", 645 pr_notice("unknown card (mediaid=%02x prodid=%02x)\n", mediaid, prodid);
651 mediaid, prodid);
652 646
653 return 1; 647 return 1;
654} 648}
@@ -748,7 +742,7 @@ xirc2ps_config(struct pcmcia_device * link)
748 742
749 /* Is this a valid card */ 743 /* Is this a valid card */
750 if (link->has_manf_id == 0) { 744 if (link->has_manf_id == 0) {
751 printk(KNOT_XIRC "manfid not found in CIS\n"); 745 pr_notice("manfid not found in CIS\n");
752 goto failure; 746 goto failure;
753 } 747 }
754 748
@@ -770,14 +764,14 @@ xirc2ps_config(struct pcmcia_device * link)
770 local->manf_str = "Toshiba"; 764 local->manf_str = "Toshiba";
771 break; 765 break;
772 default: 766 default:
773 printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n", 767 pr_notice("Unknown Card Manufacturer ID: 0x%04x\n",
774 (unsigned)link->manf_id); 768 (unsigned)link->manf_id);
775 goto failure; 769 goto failure;
776 } 770 }
777 dev_dbg(&link->dev, "found %s card\n", local->manf_str); 771 dev_dbg(&link->dev, "found %s card\n", local->manf_str);
778 772
779 if (!set_card_type(link)) { 773 if (!set_card_type(link)) {
780 printk(KNOT_XIRC "this card is not supported\n"); 774 pr_notice("this card is not supported\n");
781 goto failure; 775 goto failure;
782 } 776 }
783 777
@@ -803,7 +797,7 @@ xirc2ps_config(struct pcmcia_device * link)
803 err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev); 797 err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev);
804 798
805 if (err) { 799 if (err) {
806 printk(KNOT_XIRC "node-id not found in CIS\n"); 800 pr_notice("node-id not found in CIS\n");
807 goto failure; 801 goto failure;
808 } 802 }
809 803
@@ -838,7 +832,7 @@ xirc2ps_config(struct pcmcia_device * link)
838 * try to configure as Ethernet only. 832 * try to configure as Ethernet only.
839 * .... */ 833 * .... */
840 } 834 }
841 printk(KNOT_XIRC "no ports available\n"); 835 pr_notice("no ports available\n");
842 } else { 836 } else {
843 link->resource[0]->end = 16; 837 link->resource[0]->end = 16;
844 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { 838 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
@@ -911,24 +905,24 @@ xirc2ps_config(struct pcmcia_device * link)
911 #if 0 905 #if 0
912 { 906 {
913 u_char tmp; 907 u_char tmp;
914 printk(KERN_INFO "ECOR:"); 908 pr_info("ECOR:");
915 for (i=0; i < 7; i++) { 909 for (i=0; i < 7; i++) {
916 tmp = readb(local->dingo_ccr + i*2); 910 tmp = readb(local->dingo_ccr + i*2);
917 printk(" %02x", tmp); 911 pr_cont(" %02x", tmp);
918 } 912 }
919 printk("\n"); 913 pr_cont("\n");
920 printk(KERN_INFO "DCOR:"); 914 pr_info("DCOR:");
921 for (i=0; i < 4; i++) { 915 for (i=0; i < 4; i++) {
922 tmp = readb(local->dingo_ccr + 0x20 + i*2); 916 tmp = readb(local->dingo_ccr + 0x20 + i*2);
923 printk(" %02x", tmp); 917 pr_cont(" %02x", tmp);
924 } 918 }
925 printk("\n"); 919 pr_cont("\n");
926 printk(KERN_INFO "SCOR:"); 920 pr_info("SCOR:");
927 for (i=0; i < 10; i++) { 921 for (i=0; i < 10; i++) {
928 tmp = readb(local->dingo_ccr + 0x40 + i*2); 922 tmp = readb(local->dingo_ccr + 0x40 + i*2);
929 printk(" %02x", tmp); 923 pr_cont(" %02x", tmp);
930 } 924 }
931 printk("\n"); 925 pr_cont("\n");
932 } 926 }
933 #endif 927 #endif
934 928
@@ -947,7 +941,7 @@ xirc2ps_config(struct pcmcia_device * link)
947 (local->mohawk && if_port==4)) 941 (local->mohawk && if_port==4))
948 dev->if_port = if_port; 942 dev->if_port = if_port;
949 else 943 else
950 printk(KNOT_XIRC "invalid if_port requested\n"); 944 pr_notice("invalid if_port requested\n");
951 945
952 /* we can now register the device with the net subsystem */ 946 /* we can now register the device with the net subsystem */
953 dev->irq = link->irq; 947 dev->irq = link->irq;
@@ -959,14 +953,14 @@ xirc2ps_config(struct pcmcia_device * link)
959 SET_NETDEV_DEV(dev, &link->dev); 953 SET_NETDEV_DEV(dev, &link->dev);
960 954
961 if ((err=register_netdev(dev))) { 955 if ((err=register_netdev(dev))) {
962 printk(KNOT_XIRC "register_netdev() failed\n"); 956 pr_notice("register_netdev() failed\n");
963 goto config_error; 957 goto config_error;
964 } 958 }
965 959
 966 /* give some info about the hardware */ 960 /* give some info about the hardware */
967 printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n", 961 netdev_info(dev, "%s: port %#3lx, irq %d, hwaddr %pM\n",
968 dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq, 962 local->manf_str, (u_long)dev->base_addr, (int)dev->irq,
969 dev->dev_addr); 963 dev->dev_addr);
970 964
971 return 0; 965 return 0;
972 966
@@ -1098,8 +1092,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1098 1092
1099 skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */ 1093 skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
1100 if (!skb) { 1094 if (!skb) {
1101 printk(KNOT_XIRC "low memory, packet dropped (size=%u)\n", 1095 pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
1102 pktlen);
1103 dev->stats.rx_dropped++; 1096 dev->stats.rx_dropped++;
1104 } else { /* okay get the packet */ 1097 } else { /* okay get the packet */
1105 skb_reserve(skb, 2); 1098 skb_reserve(skb, 2);
@@ -1268,7 +1261,7 @@ xirc_tx_timeout(struct net_device *dev)
1268{ 1261{
1269 local_info_t *lp = netdev_priv(dev); 1262 local_info_t *lp = netdev_priv(dev);
1270 dev->stats.tx_errors++; 1263 dev->stats.tx_errors++;
1271 printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); 1264 netdev_notice(dev, "transmit timed out\n");
1272 schedule_work(&lp->tx_timeout_task); 1265 schedule_work(&lp->tx_timeout_task);
1273} 1266}
1274 1267
@@ -1435,8 +1428,7 @@ do_config(struct net_device *dev, struct ifmap *map)
1435 local->probe_port = 0; 1428 local->probe_port = 0;
1436 dev->if_port = map->port; 1429 dev->if_port = map->port;
1437 } 1430 }
1438 printk(KERN_INFO "%s: switching to %s port\n", 1431 netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]);
1439 dev->name, if_names[dev->if_port]);
1440 do_reset(dev,1); /* not the fine way :-) */ 1432 do_reset(dev,1); /* not the fine way :-) */
1441 } 1433 }
1442 return 0; 1434 return 0;
@@ -1576,7 +1568,7 @@ do_reset(struct net_device *dev, int full)
1576 { 1568 {
1577 SelectPage(0); 1569 SelectPage(0);
1578 value = GetByte(XIRCREG_ESR); /* read the ESR */ 1570 value = GetByte(XIRCREG_ESR); /* read the ESR */
1579 printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value); 1571 pr_debug("%s: ESR is: %#02x\n", dev->name, value);
1580 } 1572 }
1581 #endif 1573 #endif
1582 1574
@@ -1626,13 +1618,12 @@ do_reset(struct net_device *dev, int full)
1626 1618
1627 if (full && local->mohawk && init_mii(dev)) { 1619 if (full && local->mohawk && init_mii(dev)) {
1628 if (dev->if_port == 4 || local->dingo || local->new_mii) { 1620 if (dev->if_port == 4 || local->dingo || local->new_mii) {
1629 printk(KERN_INFO "%s: MII selected\n", dev->name); 1621 netdev_info(dev, "MII selected\n");
1630 SelectPage(2); 1622 SelectPage(2);
1631 PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08); 1623 PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08);
1632 msleep(20); 1624 msleep(20);
1633 } else { 1625 } else {
1634 printk(KERN_INFO "%s: MII detected; using 10mbs\n", 1626 netdev_info(dev, "MII detected; using 10mbs\n");
1635 dev->name);
1636 SelectPage(0x42); 1627 SelectPage(0x42);
1637 if (dev->if_port == 2) /* enable 10Base2 */ 1628 if (dev->if_port == 2) /* enable 10Base2 */
1638 PutByte(XIRCREG42_SWC1, 0xC0); 1629 PutByte(XIRCREG42_SWC1, 0xC0);
@@ -1677,8 +1668,8 @@ do_reset(struct net_device *dev, int full)
1677 } 1668 }
1678 1669
1679 if (full) 1670 if (full)
1680 printk(KERN_INFO "%s: media %s, silicon revision %d\n", 1671 netdev_info(dev, "media %s, silicon revision %d\n",
1681 dev->name, if_names[dev->if_port], local->silicon); 1672 if_names[dev->if_port], local->silicon);
1682 /* We should switch back to page 0 to avoid a bug in revision 0 1673 /* We should switch back to page 0 to avoid a bug in revision 0
1683 * where regs with offset below 8 can't be read after an access 1674 * where regs with offset below 8 can't be read after an access
1684 * to the MAC registers */ 1675 * to the MAC registers */
@@ -1720,8 +1711,7 @@ init_mii(struct net_device *dev)
1720 control = mii_rd(ioaddr, 0, 0); 1711 control = mii_rd(ioaddr, 0, 0);
1721 1712
1722 if (control & 0x0400) { 1713 if (control & 0x0400) {
1723 printk(KERN_NOTICE "%s can't take PHY out of isolation mode\n", 1714 netdev_notice(dev, "can't take PHY out of isolation mode\n");
1724 dev->name);
1725 local->probe_port = 0; 1715 local->probe_port = 0;
1726 return 0; 1716 return 0;
1727 } 1717 }
@@ -1739,8 +1729,7 @@ init_mii(struct net_device *dev)
1739 } 1729 }
1740 1730
1741 if (!(status & 0x0020)) { 1731 if (!(status & 0x0020)) {
1742 printk(KERN_INFO "%s: autonegotiation failed;" 1732 netdev_info(dev, "autonegotiation failed; using 10mbs\n");
1743 " using 10mbs\n", dev->name);
1744 if (!local->new_mii) { 1733 if (!local->new_mii) {
1745 control = 0x0000; 1734 control = 0x0000;
1746 mii_wr(ioaddr, 0, 0, control, 16); 1735 mii_wr(ioaddr, 0, 0, control, 16);
@@ -1750,8 +1739,7 @@ init_mii(struct net_device *dev)
1750 } 1739 }
1751 } else { 1740 } else {
1752 linkpartner = mii_rd(ioaddr, 0, 5); 1741 linkpartner = mii_rd(ioaddr, 0, 5);
1753 printk(KERN_INFO "%s: MII link partner: %04x\n", 1742 netdev_info(dev, "MII link partner: %04x\n", linkpartner);
1754 dev->name, linkpartner);
1755 if (linkpartner & 0x0080) { 1743 if (linkpartner & 0x0080) {
1756 dev->if_port = 4; 1744 dev->if_port = 4;
1757 } else 1745 } else
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index ec0349e84a8a..7e82a82422cf 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -1279,7 +1279,6 @@ static void plip_attach (struct parport *port)
1279 if (!nl->pardev) { 1279 if (!nl->pardev) {
1280 printk(KERN_ERR "%s: parport_register failed\n", name); 1280 printk(KERN_ERR "%s: parport_register failed\n", name);
1281 goto err_free_dev; 1281 goto err_free_dev;
1282 return;
1283 } 1282 }
1284 1283
1285 plip_init_netdev(dev); 1284 plip_init_netdev(dev);
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
new file mode 100644
index 000000000000..761f0eced724
--- /dev/null
+++ b/drivers/net/pptp.c
@@ -0,0 +1,726 @@
1/*
2 * Point-to-Point Tunneling Protocol for Linux
3 *
4 * Authors: Dmitry Kozlov <xeb@mail.ru>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/string.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/errno.h>
18#include <linux/netdevice.h>
19#include <linux/net.h>
20#include <linux/skbuff.h>
21#include <linux/vmalloc.h>
22#include <linux/init.h>
23#include <linux/ppp_channel.h>
24#include <linux/ppp_defs.h>
25#include <linux/if_pppox.h>
26#include <linux/if_ppp.h>
27#include <linux/notifier.h>
28#include <linux/file.h>
29#include <linux/in.h>
30#include <linux/ip.h>
31#include <linux/netfilter.h>
32#include <linux/netfilter_ipv4.h>
33#include <linux/version.h>
34#include <linux/rcupdate.h>
35#include <linux/spinlock.h>
36
37#include <net/sock.h>
38#include <net/protocol.h>
39#include <net/ip.h>
40#include <net/icmp.h>
41#include <net/route.h>
42#include <net/gre.h>
43
44#include <linux/uaccess.h>
45
46#define PPTP_DRIVER_VERSION "0.8.5"
47
48#define MAX_CALLID 65535
49
50static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
51static struct pppox_sock **callid_sock;
52
53static DEFINE_SPINLOCK(chan_lock);
54
55static struct proto pptp_sk_proto __read_mostly;
56static struct ppp_channel_ops pptp_chan_ops;
57static const struct proto_ops pptp_ops;
58
59#define PPP_LCP_ECHOREQ 0x09
60#define PPP_LCP_ECHOREP 0x0A
61#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
62
63#define MISSING_WINDOW 20
64#define WRAPPED(curseq, lastseq)\
65 ((((curseq) & 0xffffff00) == 0) &&\
66 (((lastseq) & 0xffffff00) == 0xffffff00))
67
68#define PPTP_GRE_PROTO 0x880B
69#define PPTP_GRE_VER 0x1
70
71#define PPTP_GRE_FLAG_C 0x80
72#define PPTP_GRE_FLAG_R 0x40
73#define PPTP_GRE_FLAG_K 0x20
74#define PPTP_GRE_FLAG_S 0x10
75#define PPTP_GRE_FLAG_A 0x80
76
77#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C)
78#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R)
79#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K)
80#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S)
81#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A)
82
83#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
84struct pptp_gre_header {
85 u8 flags;
86 u8 ver;
87 u16 protocol;
88 u16 payload_len;
89 u16 call_id;
90 u32 seq;
91 u32 ack;
92} __packed;
93
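Note: the structure above is the "enhanced GRE" header of RFC 2637; the
seq and ack words are only present on the wire when the S and A flag
bits are set, which is why both the transmit and receive paths below
adjust the header size. The WRAPPED() macro treats curseq as having
wrapped past lastseq when the two sit within 256 values of opposite
ends of the 32-bit space; a self-contained user-space sketch of the
same test (hypothetical values):

    #include <stdint.h>
    #include <stdio.h>

    #define WRAPPED(curseq, lastseq) \
    	((((curseq) & 0xffffff00) == 0) && \
    	 (((lastseq) & 0xffffff00) == 0xffffff00))

    int main(void)
    {
    	/* counter rolled over: ... 0xfffffffd, 0xfffffffe, 0, 1, 2, 3 */
    	printf("%d\n", WRAPPED(0x00000003u, 0xfffffffdu)); /* 1: wrapped */
    	/* ordinary forward motion, no wrap */
    	printf("%d\n", WRAPPED(0x00001000u, 0x00000fffu)); /* 0 */
    	return 0;
    }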
94static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
95{
96 struct pppox_sock *sock;
97 struct pptp_opt *opt;
98
99 rcu_read_lock();
100 sock = rcu_dereference(callid_sock[call_id]);
101 if (sock) {
102 opt = &sock->proto.pptp;
103 if (opt->dst_addr.sin_addr.s_addr != s_addr)
104 sock = NULL;
105 else
106 sock_hold(sk_pppox(sock));
107 }
108 rcu_read_unlock();
109
110 return sock;
111}
112
113static int lookup_chan_dst(u16 call_id, __be32 d_addr)
114{
115 struct pppox_sock *sock;
116 struct pptp_opt *opt;
117 int i;
118
119 rcu_read_lock();
120 for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID;
121 i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) {
122 sock = rcu_dereference(callid_sock[i]);
123 if (!sock)
124 continue;
125 opt = &sock->proto.pptp;
126 if (opt->dst_addr.call_id == call_id &&
127 opt->dst_addr.sin_addr.s_addr == d_addr)
128 break;
129 }
130 rcu_read_unlock();
131
132 return i < MAX_CALLID;
133}
134
135static int add_chan(struct pppox_sock *sock)
136{
137 static int call_id;
138
139 spin_lock(&chan_lock);
140 if (!sock->proto.pptp.src_addr.call_id) {
141 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
142 if (call_id == MAX_CALLID) {
143 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
144 if (call_id == MAX_CALLID)
145 goto out_err;
146 }
147 sock->proto.pptp.src_addr.call_id = call_id;
148 } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
149 goto out_err;
150
151 set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
152 rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
153 spin_unlock(&chan_lock);
154
155 return 0;
156
157out_err:
158 spin_unlock(&chan_lock);
159 return -1;
160}
161
162static void del_chan(struct pppox_sock *sock)
163{
164 spin_lock(&chan_lock);
165 clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
166 rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
167 spin_unlock(&chan_lock);
168 synchronize_rcu();
169}
170
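Note: the four helpers above form a classic RCU-protected lookup table.
The receive path resolves a call id to a socket without taking any lock,
while add_chan()/del_chan() serialize on chan_lock and publish through
rcu_assign_pointer(); the synchronize_rcu() in del_chan() guarantees no
reader still holds the stale pointer by the time the socket is torn
down. A generic skeleton of the pattern (get_ref() is a placeholder for
whatever pins the object; in this driver it is sock_hold()):

    static struct obj __rcu **table;	/* published entries */
    static DEFINE_SPINLOCK(table_lock);	/* serializes writers only */

    struct obj *reader_lookup(unsigned int idx)
    {
    	struct obj *o;

    	rcu_read_lock();
    	o = rcu_dereference(table[idx]);	/* lockless read */
    	if (o)
    		get_ref(o);			/* pin before unlock */
    	rcu_read_unlock();
    	return o;
    }

    void writer_remove(unsigned int idx)
    {
    	spin_lock(&table_lock);
    	rcu_assign_pointer(table[idx], NULL);
    	spin_unlock(&table_lock);
    	synchronize_rcu();	/* wait out every current reader */
    }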
171static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
172{
173 struct sock *sk = (struct sock *) chan->private;
174 struct pppox_sock *po = pppox_sk(sk);
175 struct pptp_opt *opt = &po->proto.pptp;
176 struct pptp_gre_header *hdr;
177 unsigned int header_len = sizeof(*hdr);
178 int err = 0;
179 int islcp;
180 int len;
181 unsigned char *data;
182 __u32 seq_recv;
183
184
185 struct rtable *rt;
186 struct net_device *tdev;
187 struct iphdr *iph;
188 int max_headroom;
189
190 if (sk_pppox(po)->sk_state & PPPOX_DEAD)
191 goto tx_error;
192
193 {
194 struct flowi fl = { .oif = 0,
195 .nl_u = {
196 .ip4_u = {
197 .daddr = opt->dst_addr.sin_addr.s_addr,
198 .saddr = opt->src_addr.sin_addr.s_addr,
199 .tos = RT_TOS(0) } },
200 .proto = IPPROTO_GRE };
201 err = ip_route_output_key(&init_net, &rt, &fl);
202 if (err)
203 goto tx_error;
204 }
205 tdev = rt->dst.dev;
206
207 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
208
209 if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
210 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
211 if (!new_skb) {
212 ip_rt_put(rt);
213 goto tx_error;
214 }
215 if (skb->sk)
216 skb_set_owner_w(new_skb, skb->sk);
217 kfree_skb(skb);
218 skb = new_skb;
219 }
220
221 data = skb->data;
222 islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
223
224 /* compress protocol field */
225 if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
226 skb_pull(skb, 1);
227
228 /* Put in the address/control bytes if necessary */
229 if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
230 data = skb_push(skb, 2);
231 data[0] = PPP_ALLSTATIONS;
232 data[1] = PPP_UI;
233 }
234
235 len = skb->len;
236
237 seq_recv = opt->seq_recv;
238
239 if (opt->ack_sent == seq_recv)
240 header_len -= sizeof(hdr->ack);
241
242 /* Push down and install GRE header */
243 skb_push(skb, header_len);
244 hdr = (struct pptp_gre_header *)(skb->data);
245
246 hdr->flags = PPTP_GRE_FLAG_K;
247 hdr->ver = PPTP_GRE_VER;
248 hdr->protocol = htons(PPTP_GRE_PROTO);
249 hdr->call_id = htons(opt->dst_addr.call_id);
250
251 hdr->flags |= PPTP_GRE_FLAG_S;
252 hdr->seq = htonl(++opt->seq_sent);
253 if (opt->ack_sent != seq_recv) {
254 /* send ack with this message */
255 hdr->ver |= PPTP_GRE_FLAG_A;
256 hdr->ack = htonl(seq_recv);
257 opt->ack_sent = seq_recv;
258 }
259 hdr->payload_len = htons(len);
260
261 /* Push down and install the IP header. */
262
263 skb_reset_transport_header(skb);
264 skb_push(skb, sizeof(*iph));
265 skb_reset_network_header(skb);
266 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
267 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
268
269 iph = ip_hdr(skb);
270 iph->version = 4;
271 iph->ihl = sizeof(struct iphdr) >> 2;
272 if (ip_dont_fragment(sk, &rt->dst))
273 iph->frag_off = htons(IP_DF);
274 else
275 iph->frag_off = 0;
276 iph->protocol = IPPROTO_GRE;
277 iph->tos = 0;
278 iph->daddr = rt->rt_dst;
279 iph->saddr = rt->rt_src;
280 iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
281 iph->tot_len = htons(skb->len);
282
283 skb_dst_drop(skb);
284 skb_dst_set(skb, &rt->dst);
285
286 nf_reset(skb);
287
288 skb->ip_summed = CHECKSUM_NONE;
289 ip_select_ident(iph, &rt->dst, NULL);
290 ip_send_check(iph);
291
292 ip_local_out(skb);
293
294tx_error:
295 return 1;
296}
297
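Note: pptp_xmit() sizes the GRE header dynamically -- the ack word is
carried only while an acknowledgement still needs to be piggybacked, so
the on-the-wire header is either 12 or 16 bytes. A hypothetical helper
expressing the same rule:

    static unsigned int pptp_wire_hdr_len(__u32 seq_recv, __u32 ack_sent)
    {
    	unsigned int len = sizeof(struct pptp_gre_header);	/* 16 bytes */

    	if (ack_sent == seq_recv)	/* nothing left to acknowledge */
    		len -= sizeof(((struct pptp_gre_header *)0)->ack);
    	return len;			/* 12 without ack, 16 with */
    }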
298static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
299{
300 struct pppox_sock *po = pppox_sk(sk);
301 struct pptp_opt *opt = &po->proto.pptp;
302 int headersize, payload_len, seq;
303 __u8 *payload;
304 struct pptp_gre_header *header;
305
306 if (!(sk->sk_state & PPPOX_CONNECTED)) {
307 if (sock_queue_rcv_skb(sk, skb))
308 goto drop;
309 return NET_RX_SUCCESS;
310 }
311
312 header = (struct pptp_gre_header *)(skb->data);
313
314 /* test if acknowledgement present */
315 if (PPTP_GRE_IS_A(header->ver)) {
316 __u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
317 header->ack : header->seq; /* ack in different place if S = 0 */
318
319 ack = ntohl(ack);
320
321 if (ack > opt->ack_recv)
322 opt->ack_recv = ack;
323 /* also handle sequence number wrap-around */
324 if (WRAPPED(ack, opt->ack_recv))
325 opt->ack_recv = ack;
326 }
327
328 /* test if payload present */
329 if (!PPTP_GRE_IS_S(header->flags))
330 goto drop;
331
332 headersize = sizeof(*header);
333 payload_len = ntohs(header->payload_len);
334 seq = ntohl(header->seq);
335
336 /* no ack present? */
337 if (!PPTP_GRE_IS_A(header->ver))
338 headersize -= sizeof(header->ack);
339 /* check for incomplete packet (length smaller than expected) */
340 if (skb->len - headersize < payload_len)
341 goto drop;
342
343 payload = skb->data + headersize;
344 /* check for expected sequence number */
345 if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
346 if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
347 (PPP_PROTOCOL(payload) == PPP_LCP) &&
348 ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
349 goto allow_packet;
350 } else {
351 opt->seq_recv = seq;
352allow_packet:
353 skb_pull(skb, headersize);
354
355 if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
356 /* chop off address/control */
357 if (skb->len < 3)
358 goto drop;
359 skb_pull(skb, 2);
360 }
361
362 if ((*skb->data) & 1) {
363 /* protocol is compressed */
364 skb_push(skb, 1)[0] = 0;
365 }
366
367 skb->ip_summed = CHECKSUM_NONE;
368 skb_set_network_header(skb, skb->head-skb->data);
369 ppp_input(&po->chan, skb);
370
371 return NET_RX_SUCCESS;
372 }
373drop:
374 kfree_skb(skb);
375 return NET_RX_DROP;
376}
377
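Note: worked examples (hypothetical numbers) of the acceptance test in
pptp_rcv_core() above:

    /* with opt->seq_recv == 1000:
     *   seq >= 1001 : in order, accepted, seq_recv advances
     *   seq <= 1000 : stale/duplicate, dropped -- unless the payload is
     *                 an LCP echo request/reply, which is always passed
     *                 up so keepalives survive reordering
     *
     * with opt->seq_recv == 5 just after the counter wrapped:
     *   seq == 0xfffffffe : numerically larger, but WRAPPED(5, seq) is
     *                       true, so it is correctly treated as stale
     *                       pre-wrap traffic
     */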
378static int pptp_rcv(struct sk_buff *skb)
379{
380 struct pppox_sock *po;
381 struct pptp_gre_header *header;
382 struct iphdr *iph;
383
384 if (skb->pkt_type != PACKET_HOST)
385 goto drop;
386
387 if (!pskb_may_pull(skb, 12))
388 goto drop;
389
390 iph = ip_hdr(skb);
391
392 header = (struct pptp_gre_header *)skb->data;
393
394 if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */
395 PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */
396 PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */
397 !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */
398 (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */
399 /* if invalid, discard this packet */
400 goto drop;
401
402 po = lookup_chan(htons(header->call_id), iph->saddr);
403 if (po) {
404 skb_dst_drop(skb);
405 nf_reset(skb);
406 return sk_receive_skb(sk_pppox(po), skb, 0);
407 }
408drop:
409 kfree_skb(skb);
410 return NET_RX_DROP;
411}
412
413static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
414 int sockaddr_len)
415{
416 struct sock *sk = sock->sk;
417 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
418 struct pppox_sock *po = pppox_sk(sk);
419 struct pptp_opt *opt = &po->proto.pptp;
420 int error = 0;
421
422 lock_sock(sk);
423
424 opt->src_addr = sp->sa_addr.pptp;
425 if (add_chan(po)) {
 426 /* no release_sock() here: the lock is dropped exactly once below */
427 error = -EBUSY;
428 }
429
430 release_sock(sk);
431 return error;
432}
433
434static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
435 int sockaddr_len, int flags)
436{
437 struct sock *sk = sock->sk;
438 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
439 struct pppox_sock *po = pppox_sk(sk);
440 struct pptp_opt *opt = &po->proto.pptp;
441 struct rtable *rt;
442 int error = 0;
443
444 if (sp->sa_protocol != PX_PROTO_PPTP)
445 return -EINVAL;
446
447 if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
448 return -EALREADY;
449
450 lock_sock(sk);
451 /* Check for already bound sockets */
452 if (sk->sk_state & PPPOX_CONNECTED) {
453 error = -EBUSY;
454 goto end;
455 }
456
457 /* Check for already disconnected sockets, on attempts to disconnect */
458 if (sk->sk_state & PPPOX_DEAD) {
459 error = -EALREADY;
460 goto end;
461 }
462
463 if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
464 error = -EINVAL;
465 goto end;
466 }
467
468 po->chan.private = sk;
469 po->chan.ops = &pptp_chan_ops;
470
471 {
472 struct flowi fl = {
473 .nl_u = {
474 .ip4_u = {
 475 .daddr = sp->sa_addr.pptp.sin_addr.s_addr, /* opt->dst_addr not set until below */
476 .saddr = opt->src_addr.sin_addr.s_addr,
477 .tos = RT_CONN_FLAGS(sk) } },
478 .proto = IPPROTO_GRE };
479 security_sk_classify_flow(sk, &fl);
480 if (ip_route_output_key(&init_net, &rt, &fl)) {
481 error = -EHOSTUNREACH;
482 goto end;
483 }
484 sk_setup_caps(sk, &rt->dst);
485 }
486 po->chan.mtu = dst_mtu(&rt->dst);
487 if (!po->chan.mtu)
488 po->chan.mtu = PPP_MTU;
489 ip_rt_put(rt);
490 po->chan.mtu -= PPTP_HEADER_OVERHEAD;
491
492 po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
493 error = ppp_register_channel(&po->chan);
494 if (error) {
495 pr_err("PPTP: failed to register PPP channel (%d)\n", error);
496 goto end;
497 }
498
499 opt->dst_addr = sp->sa_addr.pptp;
500 sk->sk_state = PPPOX_CONNECTED;
501
502 end:
503 release_sock(sk);
504 return error;
505}
506
507static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
508 int *usockaddr_len, int peer)
509{
510 int len = sizeof(struct sockaddr_pppox);
511 struct sockaddr_pppox sp;
 512 memset(&sp, 0, len); /* zero first: don't leak kernel stack to user space */
513 sp.sa_family = AF_PPPOX;
514 sp.sa_protocol = PX_PROTO_PPTP;
515 sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
516
517 memcpy(uaddr, &sp, len);
518
519 *usockaddr_len = len;
520
521 return 0;
522}
523
524static int pptp_release(struct socket *sock)
525{
526 struct sock *sk = sock->sk;
527 struct pppox_sock *po;
528 struct pptp_opt *opt;
529 int error = 0;
530
531 if (!sk)
532 return 0;
533
534 lock_sock(sk);
535
536 if (sock_flag(sk, SOCK_DEAD)) {
537 release_sock(sk);
538 return -EBADF;
539 }
540
541 po = pppox_sk(sk);
542 opt = &po->proto.pptp;
543 del_chan(po);
544
545 pppox_unbind_sock(sk);
546 sk->sk_state = PPPOX_DEAD;
547
548 sock_orphan(sk);
549 sock->sk = NULL;
550
551 release_sock(sk);
552 sock_put(sk);
553
554 return error;
555}
556
557static void pptp_sock_destruct(struct sock *sk)
558{
559 if (!(sk->sk_state & PPPOX_DEAD)) {
560 del_chan(pppox_sk(sk));
561 pppox_unbind_sock(sk);
562 }
563 skb_queue_purge(&sk->sk_receive_queue);
564}
565
566static int pptp_create(struct net *net, struct socket *sock)
567{
568 int error = -ENOMEM;
569 struct sock *sk;
570 struct pppox_sock *po;
571 struct pptp_opt *opt;
572
573 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
574 if (!sk)
575 goto out;
576
577 sock_init_data(sock, sk);
578
579 sock->state = SS_UNCONNECTED;
580 sock->ops = &pptp_ops;
581
582 sk->sk_backlog_rcv = pptp_rcv_core;
583 sk->sk_state = PPPOX_NONE;
584 sk->sk_type = SOCK_STREAM;
585 sk->sk_family = PF_PPPOX;
586 sk->sk_protocol = PX_PROTO_PPTP;
587 sk->sk_destruct = pptp_sock_destruct;
588
589 po = pppox_sk(sk);
590 opt = &po->proto.pptp;
591
592 opt->seq_sent = 0; opt->seq_recv = 0;
593 opt->ack_recv = 0; opt->ack_sent = 0;
594
595 error = 0;
596out:
597 return error;
598}
599
600static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
601 unsigned long arg)
602{
603 struct sock *sk = (struct sock *) chan->private;
604 struct pppox_sock *po = pppox_sk(sk);
605 struct pptp_opt *opt = &po->proto.pptp;
606 void __user *argp = (void __user *)arg;
607 int __user *p = argp;
608 int err, val;
609
610 err = -EFAULT;
611 switch (cmd) {
612 case PPPIOCGFLAGS:
613 val = opt->ppp_flags;
614 if (put_user(val, p))
615 break;
616 err = 0;
617 break;
618 case PPPIOCSFLAGS:
619 if (get_user(val, p))
620 break;
621 opt->ppp_flags = val & ~SC_RCV_BITS;
622 err = 0;
623 break;
624 default:
625 err = -ENOTTY;
626 }
627
628 return err;
629}
630
631static struct ppp_channel_ops pptp_chan_ops = {
632 .start_xmit = pptp_xmit,
633 .ioctl = pptp_ppp_ioctl,
634};
635
636static struct proto pptp_sk_proto __read_mostly = {
637 .name = "PPTP",
638 .owner = THIS_MODULE,
639 .obj_size = sizeof(struct pppox_sock),
640};
641
642static const struct proto_ops pptp_ops = {
643 .family = AF_PPPOX,
644 .owner = THIS_MODULE,
645 .release = pptp_release,
646 .bind = pptp_bind,
647 .connect = pptp_connect,
648 .socketpair = sock_no_socketpair,
649 .accept = sock_no_accept,
650 .getname = pptp_getname,
651 .poll = sock_no_poll,
652 .listen = sock_no_listen,
653 .shutdown = sock_no_shutdown,
654 .setsockopt = sock_no_setsockopt,
655 .getsockopt = sock_no_getsockopt,
656 .sendmsg = sock_no_sendmsg,
657 .recvmsg = sock_no_recvmsg,
658 .mmap = sock_no_mmap,
659 .ioctl = pppox_ioctl,
660};
661
662static struct pppox_proto pppox_pptp_proto = {
663 .create = pptp_create,
664 .owner = THIS_MODULE,
665};
666
667static struct gre_protocol gre_pptp_protocol = {
668 .handler = pptp_rcv,
669};
670
671static int __init pptp_init_module(void)
672{
673 int err = 0;
674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
675
676 callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
677 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
678 if (!callid_sock) {
 679 pr_err("PPTP: can't allocate memory\n");
680 return -ENOMEM;
681 }
682
683 err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
684 if (err) {
685 pr_err("PPTP: can't add gre protocol\n");
686 goto out_mem_free;
687 }
688
689 err = proto_register(&pptp_sk_proto, 0);
690 if (err) {
691 pr_err("PPTP: can't register sk_proto\n");
692 goto out_gre_del_protocol;
693 }
694
695 err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
696 if (err) {
697 pr_err("PPTP: can't register pppox_proto\n");
698 goto out_unregister_sk_proto;
699 }
700
701 return 0;
702
703out_unregister_sk_proto:
704 proto_unregister(&pptp_sk_proto);
705out_gre_del_protocol:
706 gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
707out_mem_free:
708 vfree(callid_sock);
709
710 return err;
711}
712
713static void __exit pptp_exit_module(void)
714{
715 unregister_pppox_proto(PX_PROTO_PPTP);
716 proto_unregister(&pptp_sk_proto);
717 gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
718 vfree(callid_sock);
719}
720
721module_init(pptp_init_module);
722module_exit(pptp_exit_module);
723
724MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
725MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
726MODULE_LICENSE("GPL");
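Note: the proto_ops above plug PPTP into the generic PPPoX socket
family, so a user-space daemon drives a data channel roughly as in the
hypothetical sketch below (call ids and addresses come from the separate
TCP control connection; error handling omitted). The channel index of
the resulting socket can then be read with ioctl(fd, PPPIOCGCHAN, &idx)
and attached to a PPP unit via /dev/ppp:

    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/if_pppox.h>

    static int open_pptp_channel(uint16_t local_cid, uint16_t peer_cid,
    			     struct in_addr local, struct in_addr peer)
    {
    	struct sockaddr_pppox src, dst;
    	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP);

    	memset(&src, 0, sizeof(src));
    	src.sa_family = AF_PPPOX;
    	src.sa_protocol = PX_PROTO_PPTP;
    	src.sa_addr.pptp.call_id = local_cid;	/* 0: kernel allocates one */
    	src.sa_addr.pptp.sin_addr = local;
    	bind(fd, (struct sockaddr *)&src, sizeof(src));

    	memset(&dst, 0, sizeof(dst));
    	dst.sa_family = AF_PPPOX;
    	dst.sa_protocol = PX_PROTO_PPTP;
    	dst.sa_addr.pptp.call_id = peer_cid;
    	dst.sa_addr.pptp.sin_addr = peer;
    	connect(fd, (struct sockaddr *)&dst, sizeof(dst));

    	return fd;
    }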
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 87d6b8f36304..5526ab4895e6 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -956,9 +956,9 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
956 (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK))) 956 (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
957 skb->ip_summed = CHECKSUM_UNNECESSARY; 957 skb->ip_summed = CHECKSUM_UNNECESSARY;
958 else 958 else
959 skb->ip_summed = CHECKSUM_NONE; 959 skb_checksum_none_assert(skb);
960 } else 960 } else
961 skb->ip_summed = CHECKSUM_NONE; 961 skb_checksum_none_assert(skb);
962 962
963 /* update netdevice statistics */ 963 /* update netdevice statistics */
964 netdev->stats.rx_packets++; 964 netdev->stats.rx_packets++;
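Note: skb_checksum_none_assert() was introduced for exactly the kind of
assignment replaced above -- a freshly allocated skb already has
ip_summed == CHECKSUM_NONE, so the helper documents (and on debug builds
verifies) the invariant instead of re-assigning it. Its definition at
the time of this series is roughly:

    static inline void skb_checksum_none_assert(struct sk_buff *skb)
    {
    #ifdef DEBUG
    	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
    #endif
    }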
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 85eddda276bd..75c2ff99d66d 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -42,8 +42,6 @@
42#include <linux/types.h> 42#include <linux/types.h>
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/system.h> 44#include <asm/system.h>
45#include <linux/delay.h>
46#include <linux/dma-mapping.h>
47#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
48#include <linux/pxa168_eth.h> 46#include <linux/pxa168_eth.h>
49 47
@@ -850,7 +848,6 @@ static int rxq_process(struct net_device *dev, int budget)
850 skb->protocol = eth_type_trans(skb, dev); 848 skb->protocol = eth_type_trans(skb, dev);
851 netif_receive_skb(skb); 849 netif_receive_skb(skb);
852 } 850 }
853 dev->last_rx = jiffies;
854 } 851 }
855 /* Fill RX ring with skb's */ 852 /* Fill RX ring with skb's */
856 rxq_refill(dev); 853 rxq_refill(dev);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 6168a130f33f..7496ed2c34ab 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2029,7 +2029,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2029 dma_unmap_len(lrg_buf_cb2, maplen), 2029 dma_unmap_len(lrg_buf_cb2, maplen),
2030 PCI_DMA_FROMDEVICE); 2030 PCI_DMA_FROMDEVICE);
2031 prefetch(skb->data); 2031 prefetch(skb->data);
2032 skb->ip_summed = CHECKSUM_NONE; 2032 skb_checksum_none_assert(skb);
2033 skb->protocol = eth_type_trans(skb, qdev->ndev); 2033 skb->protocol = eth_type_trans(skb, qdev->ndev);
2034 2034
2035 netif_receive_skb(skb); 2035 netif_receive_skb(skb);
@@ -2076,7 +2076,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2076 PCI_DMA_FROMDEVICE); 2076 PCI_DMA_FROMDEVICE);
2077 prefetch(skb2->data); 2077 prefetch(skb2->data);
2078 2078
2079 skb2->ip_summed = CHECKSUM_NONE; 2079 skb_checksum_none_assert(skb2);
2080 if (qdev->device_id == QL3022_DEVICE_ID) { 2080 if (qdev->device_id == QL3022_DEVICE_ID) {
2081 /* 2081 /*
2082 * Copy the ethhdr from first buffer to second. This 2082 * Copy the ethhdr from first buffer to second. This
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 970389331bbc..cc8385a6727e 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,9 +51,11 @@
51 51
52#define _QLCNIC_LINUX_MAJOR 5 52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 7 54#define _QLCNIC_LINUX_SUBVERSION 9
55#define QLCNIC_LINUX_VERSIONID "5.0.7" 55#define QLCNIC_LINUX_VERSIONID "5.0.9"
56#define QLCNIC_DRV_IDC_VER 0x01 56#define QLCNIC_DRV_IDC_VER 0x01
57#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
58 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
57 59
58#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 60#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
59#define _major(v) (((v) >> 24) & 0xff) 61#define _major(v) (((v) >> 24) & 0xff)
@@ -148,6 +150,7 @@
148 150
149#define DEFAULT_RCV_DESCRIPTORS_1G 2048 151#define DEFAULT_RCV_DESCRIPTORS_1G 2048
150#define DEFAULT_RCV_DESCRIPTORS_10G 4096 152#define DEFAULT_RCV_DESCRIPTORS_10G 4096
153#define MAX_RDS_RINGS 2
151 154
152#define get_next_index(index, length) \ 155#define get_next_index(index, length) \
153 (((index) + 1) & ((length) - 1)) 156 (((index) + 1) & ((length) - 1))
@@ -172,7 +175,7 @@
172 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)) 175 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
173 176
174#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \ 177#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
175 ((_desc)->flags_opcode = \ 178 ((_desc)->flags_opcode |= \
176 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))) 179 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
177 180
178#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \ 181#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
@@ -221,7 +224,8 @@ struct rcv_desc {
221#define QLCNIC_LRO_DESC 0x12 224#define QLCNIC_LRO_DESC 0x12
222 225
223/* for status field in status_desc */ 226/* for status field in status_desc */
224#define STATUS_CKSUM_OK (2) 227#define STATUS_CKSUM_LOOP 0
228#define STATUS_CKSUM_OK 2
225 229
226/* owner bits of status_desc */ 230/* owner bits of status_desc */
227#define STATUS_OWNER_HOST (0x1ULL << 56) 231#define STATUS_OWNER_HOST (0x1ULL << 56)
@@ -555,6 +559,8 @@ struct qlcnic_recv_context {
555#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 559#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
556#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 560#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
557#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 561#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
562#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
563#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
558 564
559#define QLCNIC_RCODE_SUCCESS 0 565#define QLCNIC_RCODE_SUCCESS 0
560#define QLCNIC_RCODE_TIMEOUT 17 566#define QLCNIC_RCODE_TIMEOUT 17
@@ -717,6 +723,8 @@ struct qlcnic_cardrsp_tx_ctx {
717#define QLCNIC_MAC_NOOP 0 723#define QLCNIC_MAC_NOOP 0
718#define QLCNIC_MAC_ADD 1 724#define QLCNIC_MAC_ADD 1
719#define QLCNIC_MAC_DEL 2 725#define QLCNIC_MAC_DEL 2
726#define QLCNIC_MAC_VLAN_ADD 3
727#define QLCNIC_MAC_VLAN_DEL 4
720 728
721struct qlcnic_mac_list_s { 729struct qlcnic_mac_list_s {
722 struct list_head list; 730 struct list_head list;
@@ -893,9 +901,14 @@ struct qlcnic_mac_req {
893#define QLCNIC_MSI_ENABLED 0x02 901#define QLCNIC_MSI_ENABLED 0x02
894#define QLCNIC_MSIX_ENABLED 0x04 902#define QLCNIC_MSIX_ENABLED 0x04
895#define QLCNIC_LRO_ENABLED 0x08 903#define QLCNIC_LRO_ENABLED 0x08
904#define QLCNIC_LRO_DISABLED 0x00
896#define QLCNIC_BRIDGE_ENABLED 0X10 905#define QLCNIC_BRIDGE_ENABLED 0X10
897#define QLCNIC_DIAG_ENABLED 0x20 906#define QLCNIC_DIAG_ENABLED 0x20
898#define QLCNIC_ESWITCH_ENABLED 0x40 907#define QLCNIC_ESWITCH_ENABLED 0x40
908#define QLCNIC_ADAPTER_INITIALIZED 0x80
909#define QLCNIC_TAGGING_ENABLED 0x100
910#define QLCNIC_MACSPOOF 0x200
911#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
899#define QLCNIC_IS_MSI_FAMILY(adapter) \ 912#define QLCNIC_IS_MSI_FAMILY(adapter) \
900 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 913 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
901 914
@@ -916,6 +929,22 @@ struct qlcnic_mac_req {
916#define QLCNIC_INTERRUPT_TEST 1 929#define QLCNIC_INTERRUPT_TEST 1
917#define QLCNIC_LOOPBACK_TEST 2 930#define QLCNIC_LOOPBACK_TEST 2
918 931
932#define QLCNIC_FILTER_AGE 80
933#define QLCNIC_LB_MAX_FILTERS 64
934
935struct qlcnic_filter {
936 struct hlist_node fnode;
937 u8 faddr[ETH_ALEN];
938 u16 vlan_id;
939 unsigned long ftime;
940};
941
942struct qlcnic_filter_hash {
943 struct hlist_head *fhead;
944 u8 fnum;
945 u8 fmax;
946};
947
919struct qlcnic_adapter { 948struct qlcnic_adapter {
920 struct qlcnic_hardware_context ahw; 949 struct qlcnic_hardware_context ahw;
921 950
@@ -924,6 +953,7 @@ struct qlcnic_adapter {
924 struct list_head mac_list; 953 struct list_head mac_list;
925 954
926 spinlock_t tx_clean_lock; 955 spinlock_t tx_clean_lock;
956 spinlock_t mac_learn_lock;
927 957
928 u16 num_txd; 958 u16 num_txd;
929 u16 num_rxd; 959 u16 num_rxd;
@@ -931,7 +961,6 @@ struct qlcnic_adapter {
931 961
932 u8 max_rds_rings; 962 u8 max_rds_rings;
933 u8 max_sds_rings; 963 u8 max_sds_rings;
934 u8 driver_mismatch;
935 u8 msix_supported; 964 u8 msix_supported;
936 u8 rx_csum; 965 u8 rx_csum;
937 u8 portnum; 966 u8 portnum;
@@ -961,6 +990,7 @@ struct qlcnic_adapter {
961 u16 max_tx_ques; 990 u16 max_tx_ques;
962 u16 max_rx_ques; 991 u16 max_rx_ques;
963 u16 max_mtu; 992 u16 max_mtu;
993 u16 pvid;
964 994
965 u32 fw_hal_version; 995 u32 fw_hal_version;
966 u32 capabilities; 996 u32 capabilities;
@@ -969,7 +999,7 @@ struct qlcnic_adapter {
969 u32 temp; 999 u32 temp;
970 1000
971 u32 int_vec_bit; 1001 u32 int_vec_bit;
972 u32 heartbit; 1002 u32 heartbeat;
973 1003
974 u8 max_mac_filters; 1004 u8 max_mac_filters;
975 u8 dev_state; 1005 u8 dev_state;
@@ -1003,6 +1033,8 @@ struct qlcnic_adapter {
1003 1033
1004 struct qlcnic_nic_intr_coalesce coal; 1034 struct qlcnic_nic_intr_coalesce coal;
1005 1035
1036 struct qlcnic_filter_hash fhash;
1037
1006 unsigned long state; 1038 unsigned long state;
1007 __le32 file_prd_off; /*File fw product offset*/ 1039 __le32 file_prd_off; /*File fw product offset*/
1008 u32 fw_version; 1040 u32 fw_version;
@@ -1042,7 +1074,7 @@ struct qlcnic_pci_info {
1042}; 1074};
1043 1075
1044struct qlcnic_npar_info { 1076struct qlcnic_npar_info {
1045 u16 vlan_id; 1077 u16 pvid;
1046 u16 min_bw; 1078 u16 min_bw;
1047 u16 max_bw; 1079 u16 max_bw;
1048 u8 phy_port; 1080 u8 phy_port;
@@ -1050,11 +1082,13 @@ struct qlcnic_npar_info {
1050 u8 active; 1082 u8 active;
1051 u8 enable_pm; 1083 u8 enable_pm;
1052 u8 dest_npar; 1084 u8 dest_npar;
1053 u8 host_vlan_tag;
1054 u8 promisc_mode;
1055 u8 discard_tagged; 1085 u8 discard_tagged;
1056 u8 mac_learning; 1086 u8 mac_override;
1087 u8 mac_anti_spoof;
1088 u8 promisc_mode;
1089 u8 offload_flags;
1057}; 1090};
1091
1058struct qlcnic_eswitch { 1092struct qlcnic_eswitch {
1059 u8 port; 1093 u8 port;
1060 u8 active_vports; 1094 u8 active_vports;
@@ -1086,7 +1120,6 @@ struct qlcnic_eswitch {
1086#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW) 1120#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
1087#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) 1121#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1088#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) 1122#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1089#define IS_VALID_MODE(mode) (mode == 0 || mode == 1)
1090 1123
1091struct qlcnic_pci_func_cfg { 1124struct qlcnic_pci_func_cfg {
1092 u16 func_type; 1125 u16 func_type;
@@ -1118,12 +1151,41 @@ struct qlcnic_pm_func_cfg {
1118 1151
1119struct qlcnic_esw_func_cfg { 1152struct qlcnic_esw_func_cfg {
1120 u16 vlan_id; 1153 u16 vlan_id;
1154 u8 op_mode;
1155 u8 op_type;
1121 u8 pci_func; 1156 u8 pci_func;
1122 u8 host_vlan_tag; 1157 u8 host_vlan_tag;
1123 u8 promisc_mode; 1158 u8 promisc_mode;
1124 u8 discard_tagged; 1159 u8 discard_tagged;
1125 u8 mac_learning; 1160 u8 mac_override;
1126 u8 reserved; 1161 u8 mac_anti_spoof;
1162 u8 offload_flags;
1163 u8 reserved[5];
1164};
1165
1166#define QLCNIC_STATS_VERSION 1
1167#define QLCNIC_STATS_PORT 1
1168#define QLCNIC_STATS_ESWITCH 2
1169#define QLCNIC_QUERY_RX_COUNTER 0
1170#define QLCNIC_QUERY_TX_COUNTER 1
1171struct __qlcnic_esw_statistics {
1172 __le16 context_id;
1173 __le16 version;
1174 __le16 size;
1175 __le16 unused;
1176 __le64 unicast_frames;
1177 __le64 multicast_frames;
1178 __le64 broadcast_frames;
1179 __le64 dropped_frames;
1180 __le64 errors;
1181 __le64 local_frames;
1182 __le64 numbytes;
1183 __le64 rsvd[3];
1184};
1185
1186struct qlcnic_esw_statistics {
1187 struct __qlcnic_esw_statistics rx;
1188 struct __qlcnic_esw_statistics tx;
1127}; 1189};
1128 1190
1129int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val); 1191int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
@@ -1171,6 +1233,8 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
1171int qlcnic_get_board_info(struct qlcnic_adapter *adapter); 1233int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1172int qlcnic_wol_supported(struct qlcnic_adapter *adapter); 1234int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1173int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); 1235int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
1236void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
1237void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
1174 1238
1175/* Functions from qlcnic_init.c */ 1239/* Functions from qlcnic_init.c */
1176int qlcnic_load_firmware(struct qlcnic_adapter *adapter); 1240int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
@@ -1199,7 +1263,7 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
1199void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); 1263void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1200void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); 1264void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1201 1265
1202int qlcnic_init_firmware(struct qlcnic_adapter *adapter); 1266int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
1203void qlcnic_watchdog_task(struct work_struct *work); 1267void qlcnic_watchdog_task(struct work_struct *work);
1204void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid, 1268void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1205 struct qlcnic_host_rds_ring *rds_ring); 1269 struct qlcnic_host_rds_ring *rds_ring);
@@ -1220,7 +1284,6 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1220int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1284int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1221void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 1285void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1222 struct qlcnic_host_tx_ring *tx_ring); 1286 struct qlcnic_host_tx_ring *tx_ring);
1223int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
1224void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter); 1287void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1225int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter); 1288int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1226void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); 1289void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
@@ -1249,9 +1312,16 @@ int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
1249int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8, 1312int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
1250 struct qlcnic_eswitch *); 1313 struct qlcnic_eswitch *);
1251int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8); 1314int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
1252int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8, 1315int qlcnic_config_switch_port(struct qlcnic_adapter *,
1253 u8, u8, u16); 1316 struct qlcnic_esw_func_cfg *);
1317int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
1318 struct qlcnic_esw_func_cfg *);
1254int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); 1319int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
1320int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
1321 struct __qlcnic_esw_statistics *);
1322int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
1323 struct __qlcnic_esw_statistics *);
1324int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
1255extern int qlcnic_config_tso; 1325extern int qlcnic_config_tso;
1256 1326
1257/* 1327/*
@@ -1280,6 +1350,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
1280 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"}, 1350 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
1281 {0x1077, 0x8020, 0x1077, 0x20f, 1351 {0x1077, 0x8020, 0x1077, 0x20f,
1282 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, 1352 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1353 {0x1077, 0x8020, 0x103c, 0x3733,
1354 "NC523SFP 10Gb 2-port Flex-10 Server Adapter"},
1283 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, 1355 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1284}; 1356};
1285 1357
@@ -1298,7 +1370,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1298extern const struct ethtool_ops qlcnic_ethtool_ops; 1370extern const struct ethtool_ops qlcnic_ethtool_ops;
1299 1371
1300struct qlcnic_nic_template { 1372struct qlcnic_nic_template {
1301 int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
1302 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1373 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1303 int (*config_led) (struct qlcnic_adapter *, u32, u32); 1374 int (*config_led) (struct qlcnic_adapter *, u32, u32);
1304 int (*start_firmware) (struct qlcnic_adapter *); 1375 int (*start_firmware) (struct qlcnic_adapter *);
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index cc5d861d9a12..95a821e0b66f 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -813,9 +813,8 @@ int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
813 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); 813 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
814 814
815 eswitch->port = arg1 & 0xf; 815 eswitch->port = arg1 & 0xf;
816 eswitch->active_vports = LSB(arg2); 816 eswitch->max_ucast_filters = LSW(arg2);
817 eswitch->max_ucast_filters = MSB(arg2); 817 eswitch->max_active_vlans = MSW(arg2) & 0xfff;
818 eswitch->max_active_vlans = LSB(MSW(arg2));
819 if (arg1 & BIT_6) 818 if (arg1 & BIT_6)
820 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING; 819 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
821 if (arg1 & BIT_7) 820 if (arg1 & BIT_7)
@@ -943,43 +942,271 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
943 return err; 942 return err;
944} 943}
945 944
946/* Configure eSwitch port */ 945int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
947int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id, 946 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
948 int vlan_tagging, u8 discard_tagged, u8 promsc_mode, 947
949 u8 mac_learn, u8 pci_func, u16 vlan_id) 948 size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
949 struct __qlcnic_esw_statistics *stats;
950 dma_addr_t stats_dma_t;
951 void *stats_addr;
952 u32 arg1;
953 int err;
954
955 if (esw_stats == NULL)
956 return -ENOMEM;
957
958 if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
959 func != adapter->ahw.pci_func) {
960 dev_err(&adapter->pdev->dev,
961 "Not privilege to query stats for func=%d", func);
962 return -EIO;
963 }
964
965 stats_addr = pci_alloc_consistent(adapter->pdev, stats_size,
966 &stats_dma_t);
967 if (!stats_addr) {
968 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
969 return -ENOMEM;
970 }
971 memset(stats_addr, 0, stats_size);
972
973 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
974 arg1 |= rx_tx << 15 | stats_size << 16;
975
976 err = qlcnic_issue_cmd(adapter,
977 adapter->ahw.pci_func,
978 adapter->fw_hal_version,
979 arg1,
980 MSD(stats_dma_t),
981 LSD(stats_dma_t),
982 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
983
984 if (!err) {
985 stats = (struct __qlcnic_esw_statistics *)stats_addr;
986 esw_stats->context_id = le16_to_cpu(stats->context_id);
987 esw_stats->version = le16_to_cpu(stats->version);
988 esw_stats->size = le16_to_cpu(stats->size);
989 esw_stats->multicast_frames =
990 le64_to_cpu(stats->multicast_frames);
991 esw_stats->broadcast_frames =
992 le64_to_cpu(stats->broadcast_frames);
993 esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
994 esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
995 esw_stats->local_frames = le64_to_cpu(stats->local_frames);
996 esw_stats->errors = le64_to_cpu(stats->errors);
997 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
998 }
999
1000 pci_free_consistent(adapter->pdev, stats_size, stats_addr,
1001 stats_dma_t);
1002 return err;
1003}
1004
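Note: the arg1 word handed to qlcnic_issue_cmd() above is a packed
request descriptor; from the shifts used in this patch its layout
appears to be: bits 0-7 pci function (or port), bits 8-11 stats version,
bits 12-14 counter type (port vs eswitch), bit 15 rx/tx select, and
bits 16-31 the DMA buffer size. A hypothetical helper making the packing
explicit:

    static u32 qlcnic_stats_arg1(u8 port, u8 type, u8 rx_tx, u16 size)
    {
    	return port | (QLCNIC_STATS_VERSION << 8) | (type << 12) |
    	       ((u32)rx_tx << 15) | ((u32)size << 16);
    }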
1005int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
1006 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
1007
1008 struct __qlcnic_esw_statistics port_stats;
1009 u8 i;
1010 int ret = -EIO;
1011
1012 if (esw_stats == NULL)
1013 return -ENOMEM;
1014 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
1015 return -EIO;
1016 if (adapter->npars == NULL)
1017 return -EIO;
1018
1019 memset(esw_stats, 0, sizeof(struct __qlcnic_esw_statistics));
1020 esw_stats->context_id = eswitch;
1021
1022 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
1023 if (adapter->npars[i].phy_port != eswitch)
1024 continue;
1025
1026 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
1027 if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
1028 continue;
1029
1030 esw_stats->size = port_stats.size;
1031 esw_stats->version = port_stats.version;
1032 esw_stats->unicast_frames += port_stats.unicast_frames;
1033 esw_stats->multicast_frames += port_stats.multicast_frames;
1034 esw_stats->broadcast_frames += port_stats.broadcast_frames;
1035 esw_stats->dropped_frames += port_stats.dropped_frames;
1036 esw_stats->errors += port_stats.errors;
1037 esw_stats->local_frames += port_stats.local_frames;
1038 esw_stats->numbytes += port_stats.numbytes;
1039
1040 ret = 0;
1041 }
1042 return ret;
1043}
1044
1045int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
1046 const u8 port, const u8 rx_tx)
950{ 1047{
951 int err = -EIO; 1048
952 u32 arg1; 1049 u32 arg1;
953 struct qlcnic_eswitch *eswitch;
954 1050
955 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 1051 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
956 return err; 1052 return -EIO;
957 1053
958 eswitch = &adapter->eswitch[id]; 1054 if (func_esw == QLCNIC_STATS_PORT) {
959 if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE)) 1055 if (port >= QLCNIC_MAX_PCI_FUNC)
1056 goto err_ret;
1057 } else if (func_esw == QLCNIC_STATS_ESWITCH) {
1058 if (port >= QLCNIC_NIU_MAX_XG_PORTS)
1059 goto err_ret;
1060 } else {
1061 goto err_ret;
1062 }
1063
1064 if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
1065 goto err_ret;
1066
1067 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
1068 arg1 |= BIT_14 | rx_tx << 15;
1069
1070 return qlcnic_issue_cmd(adapter,
1071 adapter->ahw.pci_func,
1072 adapter->fw_hal_version,
1073 arg1,
1074 0,
1075 0,
1076 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
1077
1078err_ret:
1079 dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
1080 "rx_ctx=%d\n", func_esw, port, rx_tx);
1081 return -EIO;
1082}
1083
1084static int
1085__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1086 u32 *arg1, u32 *arg2)
1087{
1088 int err = -EIO;
1089 u8 pci_func;
1090 pci_func = (*arg1 >> 8);
1091 err = qlcnic_issue_cmd(adapter,
1092 adapter->ahw.pci_func,
1093 adapter->fw_hal_version,
1094 *arg1,
1095 0,
1096 0,
1097 QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);
1098
1099 if (err == QLCNIC_RCODE_SUCCESS) {
1100 *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
1101 *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
1102 dev_info(&adapter->pdev->dev,
1103 "eSwitch port config for pci func %d\n", pci_func);
1104 } else {
1105 dev_err(&adapter->pdev->dev,
1106 "Failed to get eswitch port config for pci func %d\n",
1107 pci_func);
1108 }
1109 return err;
1110}
1111/* Configure eSwitch port
1112 * op_mode = 0 for setting default port behavior
1113 * op_mode = 1 for setting vlan id
1114 * op_mode = 2 for deleting vlan id
1115 * op_type = 0 for vlan_id
1116 * op_type = 1 for port vlan_id
1117 */
1118int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1119 struct qlcnic_esw_func_cfg *esw_cfg)
1120{
1121 int err = -EIO;
1122 u32 arg1, arg2 = 0;
1123 u8 pci_func;
1124
1125 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
960 return err; 1126 return err;
1127 pci_func = esw_cfg->pci_func;
1128 arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
1129 arg1 |= (pci_func << 8);
961 1130
962 arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0); 1131 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
963 arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0); 1132 return err;
964 arg1 |= pci_func << 8; 1133 arg1 &= ~(0x0ff << 8);
965 if (vlan_tagging) 1134 arg1 |= (pci_func << 8);
966 arg1 |= BIT_5 | (vlan_id << 16); 1135 arg1 &= ~(BIT_2 | BIT_3);
1136 switch (esw_cfg->op_mode) {
1137 case QLCNIC_PORT_DEFAULTS:
1138 arg1 |= (BIT_4 | BIT_6 | BIT_7);
1139 arg2 |= (BIT_0 | BIT_1);
1140 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1141 arg2 |= (BIT_2 | BIT_3);
1142 if (!(esw_cfg->discard_tagged))
1143 arg1 &= ~BIT_4;
1144 if (!(esw_cfg->promisc_mode))
1145 arg1 &= ~BIT_6;
1146 if (!(esw_cfg->mac_override))
1147 arg1 &= ~BIT_7;
1148 if (!(esw_cfg->mac_anti_spoof))
1149 arg2 &= ~BIT_0;
1150 if (!(esw_cfg->offload_flags & BIT_0))
1151 arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
1152 if (!(esw_cfg->offload_flags & BIT_1))
1153 arg2 &= ~BIT_2;
1154 if (!(esw_cfg->offload_flags & BIT_2))
1155 arg2 &= ~BIT_3;
1156 break;
1157 case QLCNIC_ADD_VLAN:
1158 arg1 |= (BIT_2 | BIT_5);
1159 arg1 |= (esw_cfg->vlan_id << 16);
1160 break;
1161 case QLCNIC_DEL_VLAN:
1162 arg1 |= (BIT_3 | BIT_5);
1163 arg1 &= ~(0x0ffff << 16);
1164 break;
1165 default:
1166 return err;
1167 }
967 1168
968 err = qlcnic_issue_cmd(adapter, 1169 err = qlcnic_issue_cmd(adapter,
969 adapter->ahw.pci_func, 1170 adapter->ahw.pci_func,
970 adapter->fw_hal_version, 1171 adapter->fw_hal_version,
971 arg1, 1172 arg1,
972 0, 1173 arg2,
973 0, 1174 0,
974 QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH); 1175 QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
975 1176
976 if (err != QLCNIC_RCODE_SUCCESS) { 1177 if (err != QLCNIC_RCODE_SUCCESS) {
977 dev_err(&adapter->pdev->dev, 1178 dev_err(&adapter->pdev->dev,
978 "Failed to configure eswitch port%d\n", eswitch->port); 1179 "Failed to configure eswitch pci func %d\n", pci_func);
979 } else { 1180 } else {
980 dev_info(&adapter->pdev->dev, 1181 dev_info(&adapter->pdev->dev,
981 "Configured eSwitch for port %d\n", eswitch->port); 1182 "Configured eSwitch for pci func %d\n", pci_func);
982 } 1183 }
983 1184
984 return err; 1185 return err;
985} 1186}
1187
1188int
1189qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1190 struct qlcnic_esw_func_cfg *esw_cfg)
1191{
1192 u32 arg1, arg2;
1193 u8 phy_port;
1194 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
1195 phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
1196 else
1197 phy_port = adapter->physical_port;
1198 arg1 = phy_port;
1199 arg1 |= (esw_cfg->pci_func << 8);
1200 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1201 return -EIO;
1202
1203 esw_cfg->discard_tagged = !!(arg1 & BIT_4);
1204 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
1205 esw_cfg->promisc_mode = !!(arg1 & BIT_6);
1206 esw_cfg->mac_override = !!(arg1 & BIT_7);
1207 esw_cfg->vlan_id = LSW(arg1 >> 16);
1208 esw_cfg->mac_anti_spoof = (arg2 & 0x1);
1209 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
1210
1211 return 0;
1212}
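
A minimal caller sketch for the new qlcnic_config_switch_port() interface above, assuming a driver context where adapter is in scope and setting only the qlcnic_esw_func_cfg fields the function shown above actually reads (pci_func, op_mode, vlan_id); the values are illustrative and error handling is elided:

	struct qlcnic_esw_func_cfg cfg = {
		.pci_func = 2,			/* illustrative PCI function */
		.op_mode  = QLCNIC_ADD_VLAN,	/* op_mode = 1: set vlan id */
		.vlan_id  = 100,		/* illustrative vlan */
	};

	if (qlcnic_config_switch_port(adapter, &cfg))
		dev_err(&adapter->pdev->dev, "failed to add vlan %d\n",
			cfg.vlan_id);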
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 9328d59e21e0..cb9463bd6b1e 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -99,7 +99,7 @@ static const u32 diag_registers[] = {
99 CRB_XG_STATE_P3, 99 CRB_XG_STATE_P3,
100 CRB_FW_CAPABILITIES_1, 100 CRB_FW_CAPABILITIES_1,
101 ISR_INT_STATE_REG, 101 ISR_INT_STATE_REG,
102 QLCNIC_CRB_DEV_REF_COUNT, 102 QLCNIC_CRB_DRV_ACTIVE,
103 QLCNIC_CRB_DEV_STATE, 103 QLCNIC_CRB_DEV_STATE,
104 QLCNIC_CRB_DRV_STATE, 104 QLCNIC_CRB_DRV_STATE,
105 QLCNIC_CRB_DRV_SCRATCH, 105 QLCNIC_CRB_DRV_SCRATCH,
@@ -115,9 +115,13 @@ static const u32 diag_registers[] = {
115 -1 115 -1
116}; 116};
117 117
118#define QLCNIC_MGMT_API_VERSION 2
119#define QLCNIC_DEV_INFO_SIZE 1
120#define QLCNIC_ETHTOOL_REGS_VER 2
118static int qlcnic_get_regs_len(struct net_device *dev) 121static int qlcnic_get_regs_len(struct net_device *dev)
119{ 122{
120 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN; 123 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
124 QLCNIC_DEV_INFO_SIZE + 1;
121} 125}
122 126
123static int qlcnic_get_eeprom_len(struct net_device *dev) 127static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -342,10 +346,13 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
342 int ring, i = 0; 346 int ring, i = 0;
343 347
344 memset(p, 0, qlcnic_get_regs_len(dev)); 348 memset(p, 0, qlcnic_get_regs_len(dev));
345 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | 349 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
346 (adapter->pdev)->device; 350 (adapter->ahw.revision_id << 16) | (adapter->pdev)->device;
347 351
348 for (i = 0; diag_registers[i] != -1; i++) 352 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
353 regs_buff[1] = QLCNIC_MGMT_API_VERSION;
354
355 for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[i] != -1; i++)
349 regs_buff[i] = QLCRD32(adapter, diag_registers[i]); 356 regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
350 357
351 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 358 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
@@ -747,6 +754,14 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
747{ 754{
748 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN); 755 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
749 756
757 data[0] = qlcnic_reg_test(dev);
758 if (data[0])
759 eth_test->flags |= ETH_TEST_FL_FAILED;
760
761 data[1] = (u64) qlcnic_test_link(dev);
762 if (data[1])
763 eth_test->flags |= ETH_TEST_FL_FAILED;
764
750 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 765 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
751 data[2] = qlcnic_irq_test(dev); 766 data[2] = qlcnic_irq_test(dev);
752 if (data[2]) 767 if (data[2])
@@ -757,15 +772,6 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
757 eth_test->flags |= ETH_TEST_FL_FAILED; 772 eth_test->flags |= ETH_TEST_FL_FAILED;
758 773
759 } 774 }
760
761 data[0] = qlcnic_reg_test(dev);
762 if (data[0])
763 eth_test->flags |= ETH_TEST_FL_FAILED;
764
765 /* link test */
766 data[1] = (u64) qlcnic_test_link(dev);
767 if (data[1])
768 eth_test->flags |= ETH_TEST_FL_FAILED;
769} 775}
770 776
771static void 777static void
@@ -805,6 +811,20 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
805 } 811 }
806} 812}
807 813
814static int qlcnic_set_tx_csum(struct net_device *dev, u32 data)
815{
816 struct qlcnic_adapter *adapter = netdev_priv(dev);
817
818 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
819 return -EOPNOTSUPP;
820 if (data)
821 dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
822 else
823 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
824
825 return 0;
826
827}
808static u32 qlcnic_get_tx_csum(struct net_device *dev) 828static u32 qlcnic_get_tx_csum(struct net_device *dev)
809{ 829{
810 return dev->features & NETIF_F_IP_CSUM; 830 return dev->features & NETIF_F_IP_CSUM;
@@ -819,7 +839,23 @@ static u32 qlcnic_get_rx_csum(struct net_device *dev)
819static int qlcnic_set_rx_csum(struct net_device *dev, u32 data) 839static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
820{ 840{
821 struct qlcnic_adapter *adapter = netdev_priv(dev); 841 struct qlcnic_adapter *adapter = netdev_priv(dev);
842
843 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
844 return -EOPNOTSUPP;
845 if (!!data) {
846 adapter->rx_csum = !!data;
847 return 0;
848 }
849
850 if (adapter->flags & QLCNIC_LRO_ENABLED) {
851 if (qlcnic_config_hw_lro(adapter, QLCNIC_LRO_DISABLED))
852 return -EIO;
853
854 dev->features &= ~NETIF_F_LRO;
855 qlcnic_send_lro_cleanup(adapter);
856 }
822 adapter->rx_csum = !!data; 857 adapter->rx_csum = !!data;
858 dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
823 return 0; 859 return 0;
824} 860}
825 861
@@ -1002,6 +1038,15 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
1002 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) 1038 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
1003 return -EINVAL; 1039 return -EINVAL;
1004 1040
1041 if (!adapter->rx_csum) {
1042 dev_info(&adapter->pdev->dev, "rx csum is off, "
1043 "cannot toggle lro\n");
1044 return -EINVAL;
1045 }
1046
1047 if ((data & ETH_FLAG_LRO) && (adapter->flags & QLCNIC_LRO_ENABLED))
1048 return 0;
1049
1005 if (data & ETH_FLAG_LRO) { 1050 if (data & ETH_FLAG_LRO) {
1006 hw_lro = QLCNIC_LRO_ENABLED; 1051 hw_lro = QLCNIC_LRO_ENABLED;
1007 netdev->features |= NETIF_F_LRO; 1052 netdev->features |= NETIF_F_LRO;
@@ -1048,7 +1093,7 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1048 .get_pauseparam = qlcnic_get_pauseparam, 1093 .get_pauseparam = qlcnic_get_pauseparam,
1049 .set_pauseparam = qlcnic_set_pauseparam, 1094 .set_pauseparam = qlcnic_set_pauseparam,
1050 .get_tx_csum = qlcnic_get_tx_csum, 1095 .get_tx_csum = qlcnic_get_tx_csum,
1051 .set_tx_csum = ethtool_op_set_tx_csum, 1096 .set_tx_csum = qlcnic_set_tx_csum,
1052 .set_sg = ethtool_op_set_sg, 1097 .set_sg = ethtool_op_set_sg,
1053 .get_tso = qlcnic_get_tso, 1098 .get_tso = qlcnic_get_tso,
1054 .set_tso = qlcnic_set_tso, 1099 .set_tso = qlcnic_set_tso,
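
The regs dump produced by the hunk above now carries a small header: word 0 holds a 0xcafe magic plus the device-info word count, word 1 the management API version, and the diag registers follow. A hypothetical userspace decoder for that layout (a sketch only, not part of the ethtool API):

#include <stdint.h>
#include <stdio.h>

static void parse_qlcnic_regs(const uint32_t *buf, size_t nwords)
{
	uint16_t info_words = buf[0] & 0xffff;	/* QLCNIC_DEV_INFO_SIZE */

	if ((buf[0] & 0xffff0000) != 0xcafe0000)
		return;				/* not a v2 qlcnic dump */
	printf("mgmt API version: %u\n", (unsigned)buf[1]);
	for (size_t i = info_words + 1; i < nwords; i++)
		printf("reg[%zu] = 0x%08x\n", i, (unsigned)buf[i]);
}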
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 15fc32070be3..716203e41dc7 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -698,7 +698,7 @@ enum {
698#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0)) 698#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
699#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8)) 699#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
700#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac)) 700#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
701#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138)) 701#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
702#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) 702#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
703 703
704#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) 704#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
@@ -718,8 +718,9 @@ enum {
718#define QLCNIC_DEV_FAILED 0x6 718#define QLCNIC_DEV_FAILED 0x6
719#define QLCNIC_DEV_QUISCENT 0x7 719#define QLCNIC_DEV_QUISCENT 0x7
720 720
721#define QLCNIC_DEV_NPAR_NOT_RDY 0 721#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
722#define QLCNIC_DEV_NPAR_RDY 1 722#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
 723#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational timeout */
723 724
724#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4))) 725#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
725#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) 726#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
@@ -744,6 +745,15 @@ enum {
744#define FW_POLL_DELAY (1 * HZ) 745#define FW_POLL_DELAY (1 * HZ)
745#define FW_FAIL_THRESH 2 746#define FW_FAIL_THRESH 2
746 747
748#define QLCNIC_RESET_TIMEOUT_SECS 10
749#define QLCNIC_INIT_TIMEOUT_SECS 30
750#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
751#define QLCNIC_RCVPEG_CHECK_DELAY 10
752#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
753#define QLCNIC_CMDPEG_CHECK_DELAY 500
754#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
755#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
756
747#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 757#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
748#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) 758#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
749 759
@@ -770,6 +780,7 @@ struct qlcnic_legacy_intr_set {
770#define QLCNIC_DRV_OP_MODE 0x1b2170 780#define QLCNIC_DRV_OP_MODE 0x1b2170
771#define QLCNIC_MSIX_BASE 0x132110 781#define QLCNIC_MSIX_BASE 0x132110
772#define QLCNIC_MAX_PCI_FUNC 8 782#define QLCNIC_MAX_PCI_FUNC 8
783#define QLCNIC_MAX_VLAN_FILTERS 64
773 784
774/* PCI function operational mode */ 785/* PCI function operational mode */
775enum { 786enum {
@@ -778,6 +789,12 @@ enum {
778 QLCNIC_NON_PRIV_FUNC = 2 789 QLCNIC_NON_PRIV_FUNC = 2
779}; 790};
780 791
792enum {
793 QLCNIC_PORT_DEFAULTS = 0,
794 QLCNIC_ADD_VLAN = 1,
795 QLCNIC_DEL_VLAN = 2
796};
797
781#define QLC_DEV_DRV_DEFAULT 0x11111111 798#define QLC_DEV_DRV_DEFAULT 0x11111111
782 799
783#define LSB(x) ((uint8_t)(x)) 800#define LSB(x) ((uint8_t)(x))
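
The LSB() definition above is the byte-extraction half of the helper family used by the eswitch-capabilities fix at the top of this patch. A sketch of the matching word helpers and the arg2 decoding, assuming LSW/MSW follow the same pattern as LSB:

#include <stdint.h>

#define LSW(x)	((uint16_t)((x) & 0xffff))		/* low 16 bits */
#define MSW(x)	((uint16_t)(((x) >> 16) & 0xffff))	/* high 16 bits */

/* arg2 layout per the qlcnic_get_eswitch_capabilities() fix:
 * bits 0..15 carry the unicast-filter limit, bits 16..27 the
 * active-vlan limit. */
static void decode_esw_caps(uint32_t arg2,
			    uint16_t *max_ucast, uint16_t *max_vlans)
{
	*max_ucast = LSW(arg2);
	*max_vlans = MSW(arg2) & 0xfff;
}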
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e08c8b0556a4..c198df90ff3c 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -297,8 +297,8 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
297 break; 297 break;
298 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { 298 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
299 dev_err(&adapter->pdev->dev, 299 dev_err(&adapter->pdev->dev,
300 "Failed to acquire sem=%d lock;reg_id=%d\n", 300 "Failed to acquire sem=%d lock; holdby=%d\n",
301 sem, id_reg); 301 sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
302 return -EIO; 302 return -EIO;
303 } 303 }
304 msleep(1); 304 msleep(1);
@@ -375,7 +375,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
375 375
376static int 376static int
377qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, 377qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
378 unsigned op) 378 u16 vlan_id, unsigned op)
379{ 379{
380 struct qlcnic_nic_req req; 380 struct qlcnic_nic_req req;
381 struct qlcnic_mac_req *mac_req; 381 struct qlcnic_mac_req *mac_req;
@@ -391,6 +391,8 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
391 mac_req->op = op; 391 mac_req->op = op;
392 memcpy(mac_req->mac_addr, addr, 6); 392 memcpy(mac_req->mac_addr, addr, 6);
393 393
394 req.words[1] = cpu_to_le64(vlan_id);
395
394 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 396 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
395} 397}
396 398
@@ -415,7 +417,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
415 memcpy(cur->mac_addr, addr, ETH_ALEN); 417 memcpy(cur->mac_addr, addr, ETH_ALEN);
416 418
417 if (qlcnic_sre_macaddr_change(adapter, 419 if (qlcnic_sre_macaddr_change(adapter,
418 cur->mac_addr, QLCNIC_MAC_ADD)) { 420 cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
419 kfree(cur); 421 kfree(cur);
420 return -EIO; 422 return -EIO;
421 } 423 }
@@ -485,12 +487,63 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
485 while (!list_empty(head)) { 487 while (!list_empty(head)) {
486 cur = list_entry(head->next, struct qlcnic_mac_list_s, list); 488 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
487 qlcnic_sre_macaddr_change(adapter, 489 qlcnic_sre_macaddr_change(adapter,
488 cur->mac_addr, QLCNIC_MAC_DEL); 490 cur->mac_addr, 0, QLCNIC_MAC_DEL);
489 list_del(&cur->list); 491 list_del(&cur->list);
490 kfree(cur); 492 kfree(cur);
491 } 493 }
492} 494}
493 495
496void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
497{
498 struct qlcnic_filter *tmp_fil;
499 struct hlist_node *tmp_hnode, *n;
500 struct hlist_head *head;
501 int i;
502
503 for (i = 0; i < adapter->fhash.fmax; i++) {
504 head = &(adapter->fhash.fhead[i]);
505
506 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
507 {
508 if (jiffies >
509 (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
510 qlcnic_sre_macaddr_change(adapter,
511 tmp_fil->faddr, tmp_fil->vlan_id,
512 tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
513 QLCNIC_MAC_DEL);
514 spin_lock_bh(&adapter->mac_learn_lock);
515 adapter->fhash.fnum--;
516 hlist_del(&tmp_fil->fnode);
517 spin_unlock_bh(&adapter->mac_learn_lock);
518 kfree(tmp_fil);
519 }
520 }
521 }
522}
523
524void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
525{
526 struct qlcnic_filter *tmp_fil;
527 struct hlist_node *tmp_hnode, *n;
528 struct hlist_head *head;
529 int i;
530
531 for (i = 0; i < adapter->fhash.fmax; i++) {
532 head = &(adapter->fhash.fhead[i]);
533
534 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
535 qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
536 tmp_fil->vlan_id, tmp_fil->vlan_id ?
537 QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
538 spin_lock_bh(&adapter->mac_learn_lock);
539 adapter->fhash.fnum--;
540 hlist_del(&tmp_fil->fnode);
541 spin_unlock_bh(&adapter->mac_learn_lock);
542 kfree(tmp_fil);
543 }
544 }
545}
546
494#define QLCNIC_CONFIG_INTR_COALESCE 3 547#define QLCNIC_CONFIG_INTR_COALESCE 3
495 548
496/* 549/*
@@ -715,19 +768,6 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
715 return rc; 768 return rc;
716} 769}
717 770
718int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
719{
720 u32 crbaddr;
721 int pci_func = adapter->ahw.pci_func;
722
723 crbaddr = CRB_MAC_BLOCK_START +
724 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
725
726 qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
727
728 return 0;
729}
730
731/* 771/*
732 * Changes the CRB window to the specified window. 772 * Changes the CRB window to the specified window.
733 */ 773 */
@@ -1245,4 +1285,5 @@ void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
1245 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 1285 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1246 1286
1247 qlcnic_nic_set_promisc(adapter, mode); 1287 qlcnic_nic_set_promisc(adapter, mode);
1288 msleep(1000);
1248} 1289}
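
One caveat on the filter-aging loop added in qlcnic_prune_lb_filters() above: a raw "jiffies > ftime + age" comparison is not wraparound-safe; the conventional kernel form uses time_after() from <linux/jiffies.h>. An equivalent wrap-safe sketch of that test:

#include <linux/jiffies.h>

/* wrap-safe form of the age check in qlcnic_prune_lb_filters() */
static bool qlcnic_filter_expired(unsigned long ftime)
{
	return time_after(jiffies, ftime + QLCNIC_FILTER_AGE * HZ);
}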
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 75ba744b173c..26a7d6bca5c7 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -25,6 +25,7 @@
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/if_vlan.h>
28#include "qlcnic.h" 29#include "qlcnic.h"
29 30
30struct crb_addr_pair { 31struct crb_addr_pair {
@@ -45,6 +46,9 @@ static void
45qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, 46qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
46 struct qlcnic_host_rds_ring *rds_ring); 47 struct qlcnic_host_rds_ring *rds_ring);
47 48
49static int
 50qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter);
51
48static void crb_addr_transform_setup(void) 52static void crb_addr_transform_setup(void)
49{ 53{
50 crb_addr_transform(XDMA); 54 crb_addr_transform(XDMA);
@@ -136,8 +140,6 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
136 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 140 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
137 rds_ring = &recv_ctx->rds_rings[ring]; 141 rds_ring = &recv_ctx->rds_rings[ring];
138 142
139 spin_lock(&rds_ring->lock);
140
141 INIT_LIST_HEAD(&rds_ring->free_list); 143 INIT_LIST_HEAD(&rds_ring->free_list);
142 144
143 rx_buf = rds_ring->rx_buf_arr; 145 rx_buf = rds_ring->rx_buf_arr;
@@ -146,8 +148,6 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
146 &rds_ring->free_list); 148 &rds_ring->free_list);
147 rx_buf++; 149 rx_buf++;
148 } 150 }
149
150 spin_unlock(&rds_ring->lock);
151 } 151 }
152} 152}
153 153
@@ -439,11 +439,14 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
439 u32 off; 439 u32 off;
440 struct pci_dev *pdev = adapter->pdev; 440 struct pci_dev *pdev = adapter->pdev;
441 441
442 /* resetall */ 442 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
443 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
444
443 qlcnic_rom_lock(adapter); 445 qlcnic_rom_lock(adapter);
444 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); 446 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
445 qlcnic_rom_unlock(adapter); 447 qlcnic_rom_unlock(adapter);
446 448
449 /* Init HW CRB block */
447 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || 450 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
448 qlcnic_rom_fast_read(adapter, 4, &n) != 0) { 451 qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
449 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); 452 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
@@ -524,13 +527,10 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
524 } 527 }
525 kfree(buf); 528 kfree(buf);
526 529
527 /* p2dn replyCount */ 530 /* Initialize protocol process engine */
528 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); 531 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
529 /* disable_peg_cache 0 & 1*/
530 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); 532 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
531 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); 533 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
532
533 /* peg_clr_all */
534 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); 534 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
535 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); 535 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
536 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); 536 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
@@ -539,10 +539,88 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
539 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); 539 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
540 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); 540 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
541 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); 541 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
542 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
543 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
544 msleep(1);
545 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
546 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
547 return 0;
548}
549
550static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
551{
552 u32 val;
553 int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
554
555 do {
556 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
557
558 switch (val) {
559 case PHAN_INITIALIZE_COMPLETE:
560 case PHAN_INITIALIZE_ACK:
561 return 0;
562 case PHAN_INITIALIZE_FAILED:
563 goto out_err;
564 default:
565 break;
566 }
567
568 msleep(QLCNIC_CMDPEG_CHECK_DELAY);
569
570 } while (--retries);
571
572 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
573
574out_err:
575 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
576 "complete, state: 0x%x.\n", val);
577 return -EIO;
578}
579
580static int
581qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
582{
583 u32 val;
584 int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
585
586 do {
587 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
588
589 if (val == PHAN_PEG_RCV_INITIALIZED)
590 return 0;
591
592 msleep(QLCNIC_RCVPEG_CHECK_DELAY);
593
594 } while (--retries);
595
596 if (!retries) {
597 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
598 "complete, state: 0x%x.\n", val);
599 return -EIO;
600 }
601
542 return 0; 602 return 0;
543} 603}
544 604
545int 605int
606qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
607{
608 int err;
609
610 err = qlcnic_cmd_peg_ready(adapter);
611 if (err)
612 return err;
613
614 err = qlcnic_receive_peg_ready(adapter);
615 if (err)
616 return err;
617
618 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
619
620 return err;
621}
622
623int
546qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { 624qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
547 625
548 int timeo; 626 int timeo;
@@ -557,12 +635,12 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
557 } 635 }
558 adapter->physical_port = (val >> 2); 636 adapter->physical_port = (val >> 2);
559 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) 637 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
560 timeo = 30; 638 timeo = QLCNIC_INIT_TIMEOUT_SECS;
561 639
562 adapter->dev_init_timeo = timeo; 640 adapter->dev_init_timeo = timeo;
563 641
564 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) 642 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
565 timeo = 10; 643 timeo = QLCNIC_RESET_TIMEOUT_SECS;
566 644
567 adapter->reset_ack_timeo = timeo; 645 adapter->reset_ack_timeo = timeo;
568 646
@@ -906,38 +984,45 @@ qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
906 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); 984 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
907} 985}
908 986
909int 987static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
910qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
911{ 988{
912 u32 count, old_count; 989 if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
913 u32 val, version, major, minor, build; 990 dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
914 int i, timeout;
915
916 if (adapter->need_fw_reset)
917 return 1;
918 991
919 /* last attempt had failed */ 992 qlcnic_pcie_sem_unlock(adapter, 2);
920 if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) 993}
921 return 1;
922 994
923 old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); 995static int
996qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
997{
998 u32 heartbeat, ret = -EIO;
999 int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
924 1000
925 for (i = 0; i < 10; i++) { 1001 adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
926 1002
927 timeout = msleep_interruptible(200); 1003 do {
928 if (timeout) { 1004 msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
929 QLCWR32(adapter, CRB_CMDPEG_STATE, 1005 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
930 PHAN_INITIALIZE_FAILED); 1006 if (heartbeat != adapter->heartbeat) {
931 return -EINTR; 1007 ret = QLCNIC_RCODE_SUCCESS;
1008 break;
932 } 1009 }
1010 } while (--retries);
933 1011
934 count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); 1012 return ret;
935 if (count != old_count) 1013}
936 break; 1014
1015int
1016qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
1017{
1018 u32 val, version, major, minor, build;
1019
 1020 if (qlcnic_check_fw_heartbeat(adapter)) {
1021 qlcnic_rom_lock_recovery(adapter);
1022 return 1;
937 } 1023 }
938 1024
939 /* firmware is dead */ 1025 if (adapter->need_fw_reset)
940 if (count == old_count)
941 return 1; 1026 return 1;
942 1027
943 /* check if we have got newer or different file firmware */ 1028 /* check if we have got newer or different file firmware */
@@ -1162,78 +1247,6 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
1162 adapter->fw = NULL; 1247 adapter->fw = NULL;
1163} 1248}
1164 1249
1165static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
1166{
1167 u32 val;
1168 int retries = 60;
1169
1170 do {
1171 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
1172
1173 switch (val) {
1174 case PHAN_INITIALIZE_COMPLETE:
1175 case PHAN_INITIALIZE_ACK:
1176 return 0;
1177 case PHAN_INITIALIZE_FAILED:
1178 goto out_err;
1179 default:
1180 break;
1181 }
1182
1183 msleep(500);
1184
1185 } while (--retries);
1186
1187 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1188
1189out_err:
1190 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
1191 "complete, state: 0x%x.\n", val);
1192 return -EIO;
1193}
1194
1195static int
1196qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
1197{
1198 u32 val;
1199 int retries = 2000;
1200
1201 do {
1202 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
1203
1204 if (val == PHAN_PEG_RCV_INITIALIZED)
1205 return 0;
1206
1207 msleep(10);
1208
1209 } while (--retries);
1210
1211 if (!retries) {
1212 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
1213 "complete, state: 0x%x.\n", val);
1214 return -EIO;
1215 }
1216
1217 return 0;
1218}
1219
1220int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
1221{
1222 int err;
1223
1224 err = qlcnic_cmd_peg_ready(adapter);
1225 if (err)
1226 return err;
1227
1228 err = qlcnic_receive_peg_ready(adapter);
1229 if (err)
1230 return err;
1231
1232 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
1233
1234 return err;
1235}
1236
1237static void 1250static void
1238qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, 1251qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1239 struct qlcnic_fw_msg *msg) 1252 struct qlcnic_fw_msg *msg)
@@ -1351,11 +1364,12 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1351 1364
1352 skb = buffer->skb; 1365 skb = buffer->skb;
1353 1366
1354 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { 1367 if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK ||
1368 cksum == STATUS_CKSUM_LOOP))) {
1355 adapter->stats.csummed++; 1369 adapter->stats.csummed++;
1356 skb->ip_summed = CHECKSUM_UNNECESSARY; 1370 skb->ip_summed = CHECKSUM_UNNECESSARY;
1357 } else { 1371 } else {
1358 skb->ip_summed = CHECKSUM_NONE; 1372 skb_checksum_none_assert(skb);
1359 } 1373 }
1360 1374
1361 skb->dev = adapter->netdev; 1375 skb->dev = adapter->netdev;
@@ -1365,6 +1379,27 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1365 return skb; 1379 return skb;
1366} 1380}
1367 1381
1382static int
1383qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb)
1384{
1385 u16 vlan_tag;
1386 struct ethhdr *eth_hdr;
1387
1388 if (!__vlan_get_tag(skb, &vlan_tag)) {
1389 if (vlan_tag == adapter->pvid) {
1390 /* strip the tag from the packet and send it up */
1391 eth_hdr = (struct ethhdr *) skb->data;
1392 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1393 skb_pull(skb, VLAN_HLEN);
1394 return 0;
1395 }
1396 }
1397 if (adapter->flags & QLCNIC_TAGGING_ENABLED)
1398 return 0;
1399
1400 return -EIO;
1401}
1402
1368static struct qlcnic_rx_buffer * 1403static struct qlcnic_rx_buffer *
1369qlcnic_process_rcv(struct qlcnic_adapter *adapter, 1404qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1370 struct qlcnic_host_sds_ring *sds_ring, 1405 struct qlcnic_host_sds_ring *sds_ring,
@@ -1405,6 +1440,15 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1405 skb_pull(skb, pkt_offset); 1440 skb_pull(skb, pkt_offset);
1406 1441
1407 skb->truesize = skb->len + sizeof(struct sk_buff); 1442 skb->truesize = skb->len + sizeof(struct sk_buff);
1443
1444 if (unlikely(adapter->pvid)) {
1445 if (qlcnic_check_rx_tagging(adapter, skb)) {
1446 adapter->stats.rxdropped++;
1447 dev_kfree_skb_any(skb);
1448 return buffer;
1449 }
1450 }
1451
1408 skb->protocol = eth_type_trans(skb, netdev); 1452 skb->protocol = eth_type_trans(skb, netdev);
1409 1453
1410 napi_gro_receive(&sds_ring->napi, skb); 1454 napi_gro_receive(&sds_ring->napi, skb);
@@ -1469,6 +1513,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1469 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); 1513 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1470 1514
1471 skb_pull(skb, l2_hdr_offset); 1515 skb_pull(skb, l2_hdr_offset);
1516
1517 if (unlikely(adapter->pvid)) {
1518 if (qlcnic_check_rx_tagging(adapter, skb)) {
1519 adapter->stats.rxdropped++;
1520 dev_kfree_skb_any(skb);
1521 return buffer;
1522 }
1523 }
1472 skb->protocol = eth_type_trans(skb, netdev); 1524 skb->protocol = eth_type_trans(skb, netdev);
1473 1525
1474 iph = (struct iphdr *)skb->data; 1526 iph = (struct iphdr *)skb->data;
@@ -1587,8 +1639,6 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1587 int producer, count = 0; 1639 int producer, count = 0;
1588 struct list_head *head; 1640 struct list_head *head;
1589 1641
1590 spin_lock(&rds_ring->lock);
1591
1592 producer = rds_ring->producer; 1642 producer = rds_ring->producer;
1593 1643
1594 head = &rds_ring->free_list; 1644 head = &rds_ring->free_list;
@@ -1618,7 +1668,6 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1618 writel((producer-1) & (rds_ring->num_desc-1), 1668 writel((producer-1) & (rds_ring->num_desc-1),
1619 rds_ring->crb_rcv_producer); 1669 rds_ring->crb_rcv_producer);
1620 } 1670 }
1621 spin_unlock(&rds_ring->lock);
1622} 1671}
1623 1672
1624static void 1673static void
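
The rewritten heartbeat check above distills to a sample/wait/resample pattern on the firmware's alive counter: if the counter moves within the retry budget, the firmware is running. A standalone sketch of that pattern using the register and constants from the hunk (the patch's qlcnic_check_fw_heartbeat() additionally caches the sample in adapter->heartbeat):

static int qlcnic_fw_alive(struct qlcnic_adapter *adapter)
{
	u32 before = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
	int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;

	do {
		msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
		if (QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER) != before)
			return 0;	/* counter advanced: firmware alive */
	} while (--retries);

	return -EIO;			/* counter stuck: assume firmware hung */
}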
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 66eea5972020..5fd2abd1eb67 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -50,6 +50,10 @@ static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
50/* Default to restricted 1G auto-neg mode */ 50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5; 51static int wol_port_mode = 5;
52 52
53static int qlcnic_mac_learn;
54module_param(qlcnic_mac_learn, int, 0644);
 55MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");
56
53static int use_msi = 1; 57static int use_msi = 1;
54module_param(use_msi, int, 0644); 58module_param(use_msi, int, 0644);
 55MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); 59MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
@@ -94,7 +98,7 @@ static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); 98static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95 99
96static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); 100static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter); 101static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
98static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); 102static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99 103
100static irqreturn_t qlcnic_tmp_intr(int irq, void *data); 104static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
@@ -106,10 +110,14 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long); 110static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107static int qlcnic_start_firmware(struct qlcnic_adapter *); 111static int qlcnic_start_firmware(struct qlcnic_adapter *);
108 112
113static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
114static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); 115static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); 116static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); 117static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112static int qlcnicvf_start_firmware(struct qlcnic_adapter *); 118static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
119static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
120 struct qlcnic_esw_func_cfg *);
113/* PCI Device ID Table */ 121/* PCI Device ID Table */
114#define ENTRY(device) \ 122#define ENTRY(device) \
115 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ 123 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -320,7 +328,7 @@ qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
320 struct net_device *netdev = adapter->netdev; 328 struct net_device *netdev = adapter->netdev;
321 struct pci_dev *pdev = adapter->pdev; 329 struct pci_dev *pdev = adapter->pdev;
322 330
323 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0) 331 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
324 return -EIO; 332 return -EIO;
325 333
326 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); 334 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
@@ -341,6 +349,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
341 struct qlcnic_adapter *adapter = netdev_priv(netdev); 349 struct qlcnic_adapter *adapter = netdev_priv(netdev);
342 struct sockaddr *addr = p; 350 struct sockaddr *addr = p;
343 351
352 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
353 return -EOPNOTSUPP;
354
344 if (!is_valid_ether_addr(addr->sa_data)) 355 if (!is_valid_ether_addr(addr->sa_data))
345 return -EINVAL; 356 return -EINVAL;
346 357
@@ -376,14 +387,12 @@ static const struct net_device_ops qlcnic_netdev_ops = {
376}; 387};
377 388
378static struct qlcnic_nic_template qlcnic_ops = { 389static struct qlcnic_nic_template qlcnic_ops = {
379 .get_mac_addr = qlcnic_get_mac_address,
380 .config_bridged_mode = qlcnic_config_bridged_mode, 390 .config_bridged_mode = qlcnic_config_bridged_mode,
381 .config_led = qlcnic_config_led, 391 .config_led = qlcnic_config_led,
382 .start_firmware = qlcnic_start_firmware 392 .start_firmware = qlcnic_start_firmware
383}; 393};
384 394
385static struct qlcnic_nic_template qlcnic_vf_ops = { 395static struct qlcnic_nic_template qlcnic_vf_ops = {
386 .get_mac_addr = qlcnic_get_mac_address,
387 .config_bridged_mode = qlcnicvf_config_bridged_mode, 396 .config_bridged_mode = qlcnicvf_config_bridged_mode,
388 .config_led = qlcnicvf_config_led, 397 .config_led = qlcnicvf_config_led,
389 .start_firmware = qlcnicvf_start_firmware 398 .start_firmware = qlcnicvf_start_firmware
@@ -474,7 +483,7 @@ static int
474qlcnic_init_pci_info(struct qlcnic_adapter *adapter) 483qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
475{ 484{
476 struct qlcnic_pci_info *pci_info; 485 struct qlcnic_pci_info *pci_info;
477 int i, ret = 0, err; 486 int i, ret = 0;
478 u8 pfn; 487 u8 pfn;
479 488
480 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); 489 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
@@ -484,14 +493,14 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
484 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * 493 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
485 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL); 494 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
486 if (!adapter->npars) { 495 if (!adapter->npars) {
487 err = -ENOMEM; 496 ret = -ENOMEM;
488 goto err_pci_info; 497 goto err_pci_info;
489 } 498 }
490 499
491 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) * 500 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
492 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); 501 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
493 if (!adapter->eswitch) { 502 if (!adapter->eswitch) {
494 err = -ENOMEM; 503 ret = -ENOMEM;
495 goto err_npars; 504 goto err_npars;
496 } 505 }
497 506
@@ -506,7 +515,6 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
506 adapter->npars[pfn].active = pci_info[i].active; 515 adapter->npars[pfn].active = pci_info[i].active;
507 adapter->npars[pfn].type = pci_info[i].type; 516 adapter->npars[pfn].type = pci_info[i].type;
508 adapter->npars[pfn].phy_port = pci_info[i].default_port; 517 adapter->npars[pfn].phy_port = pci_info[i].default_port;
509 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
510 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; 518 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
511 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; 519 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
512 } 520 }
@@ -539,12 +547,10 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
539 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 547 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
540 548
541 /* If other drivers are not in use set their privilege level */ 549 /* If other drivers are not in use set their privilege level */
542 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 550 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
543 ret = qlcnic_api_lock(adapter); 551 ret = qlcnic_api_lock(adapter);
544 if (ret) 552 if (ret)
545 goto err_lock; 553 goto err_lock;
546 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
547 goto err_npar;
548 554
549 if (qlcnic_config_npars) { 555 if (qlcnic_config_npars) {
550 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 556 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
@@ -562,18 +568,16 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
562 adapter->ahw.pci_func)); 568 adapter->ahw.pci_func));
563 } 569 }
564 writel(data, priv_op); 570 writel(data, priv_op);
565err_npar:
566 qlcnic_api_unlock(adapter); 571 qlcnic_api_unlock(adapter);
567err_lock: 572err_lock:
568 return ret; 573 return ret;
569} 574}
570 575
571static u32 576static void
572qlcnic_get_driver_mode(struct qlcnic_adapter *adapter) 577qlcnic_check_vf(struct qlcnic_adapter *adapter)
573{ 578{
574 void __iomem *msix_base_addr; 579 void __iomem *msix_base_addr;
575 void __iomem *priv_op; 580 void __iomem *priv_op;
576 struct qlcnic_info nic_info;
577 u32 func; 581 u32 func;
578 u32 msix_base; 582 u32 msix_base;
579 u32 op_mode, priv_level; 583 u32 op_mode, priv_level;
@@ -588,20 +592,6 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
588 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; 592 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
589 adapter->ahw.pci_func = func; 593 adapter->ahw.pci_func = func;
590 594
591 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
592 adapter->capabilities = nic_info.capabilities;
593
594 if (adapter->capabilities & BIT_6)
595 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
596 else
597 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
598 }
599
600 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
601 adapter->nic_ops = &qlcnic_ops;
602 return adapter->fw_hal_version;
603 }
604
605 /* Determine function privilege level */ 595 /* Determine function privilege level */
606 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 596 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
607 op_mode = readl(priv_op); 597 op_mode = readl(priv_op);
@@ -610,37 +600,14 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
610 else 600 else
611 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); 601 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
612 602
613 switch (priv_level) { 603 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
614 case QLCNIC_MGMT_FUNC:
615 adapter->op_mode = QLCNIC_MGMT_FUNC;
616 adapter->nic_ops = &qlcnic_ops;
617 qlcnic_init_pci_info(adapter);
618 /* Set privilege level for other functions */
619 qlcnic_set_function_modes(adapter);
620 dev_info(&adapter->pdev->dev,
621 "HAL Version: %d, Management function\n",
622 adapter->fw_hal_version);
623 break;
624 case QLCNIC_PRIV_FUNC:
625 adapter->op_mode = QLCNIC_PRIV_FUNC;
626 dev_info(&adapter->pdev->dev,
627 "HAL Version: %d, Privileged function\n",
628 adapter->fw_hal_version);
629 adapter->nic_ops = &qlcnic_ops;
630 break;
631 case QLCNIC_NON_PRIV_FUNC:
632 adapter->op_mode = QLCNIC_NON_PRIV_FUNC; 604 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
633 dev_info(&adapter->pdev->dev, 605 dev_info(&adapter->pdev->dev,
634 "HAL Version: %d Non Privileged function\n", 606 "HAL Version: %d Non Privileged function\n",
635 adapter->fw_hal_version); 607 adapter->fw_hal_version);
636 adapter->nic_ops = &qlcnic_vf_ops; 608 adapter->nic_ops = &qlcnic_vf_ops;
637 break; 609 } else
638 default: 610 adapter->nic_ops = &qlcnic_ops;
639 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
640 priv_level);
641 return 0;
642 }
643 return adapter->fw_hal_version;
644} 611}
645 612
646static int 613static int
@@ -673,10 +640,7 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
673 adapter->ahw.pci_base0 = mem_ptr0; 640 adapter->ahw.pci_base0 = mem_ptr0;
674 adapter->ahw.pci_len0 = pci_len0; 641 adapter->ahw.pci_len0 = pci_len0;
675 642
676 if (!qlcnic_get_driver_mode(adapter)) { 643 qlcnic_check_vf(adapter);
677 iounmap(adapter->ahw.pci_base0);
678 return -EIO;
679 }
680 644
681 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, 645 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
682 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func))); 646 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
@@ -711,25 +675,7 @@ static void
711qlcnic_check_options(struct qlcnic_adapter *adapter) 675qlcnic_check_options(struct qlcnic_adapter *adapter)
712{ 676{
713 u32 fw_major, fw_minor, fw_build; 677 u32 fw_major, fw_minor, fw_build;
714 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
715 char serial_num[32];
716 int i, offset, val;
717 int *ptr32;
718 struct pci_dev *pdev = adapter->pdev; 678 struct pci_dev *pdev = adapter->pdev;
719 struct qlcnic_info nic_info;
720 adapter->driver_mismatch = 0;
721
722 ptr32 = (int *)&serial_num;
723 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
724 for (i = 0; i < 8; i++) {
725 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
726 dev_err(&pdev->dev, "error reading board info\n");
727 adapter->driver_mismatch = 1;
728 return;
729 }
730 ptr32[i] = cpu_to_le32(val);
731 offset += sizeof(u32);
732 }
733 679
734 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); 680 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
735 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); 681 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
@@ -737,14 +683,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
737 683
738 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); 684 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
739 685
740 if (adapter->portnum == 0) {
741 get_brd_name(adapter, brd_name);
742
743 pr_info("%s: %s Board Chip rev 0x%x\n",
744 module_name(THIS_MODULE),
745 brd_name, adapter->ahw.revision_id);
746 }
747
748 dev_info(&pdev->dev, "firmware v%d.%d.%d\n", 686 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
749 fw_major, fw_minor, fw_build); 687 fw_major, fw_minor, fw_build);
750 688
@@ -758,109 +696,333 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
758 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; 696 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
759 } 697 }
760 698
761 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
762 adapter->physical_port = nic_info.phys_port;
763 adapter->switch_mode = nic_info.switch_mode;
764 adapter->max_tx_ques = nic_info.max_tx_ques;
765 adapter->max_rx_ques = nic_info.max_rx_ques;
766 adapter->capabilities = nic_info.capabilities;
767 adapter->max_mac_filters = nic_info.max_mac_filters;
768 adapter->max_mtu = nic_info.max_mtu;
769 }
770
771 adapter->msix_supported = !!use_msi_x; 699 adapter->msix_supported = !!use_msi_x;
772 adapter->rss_supported = !!use_msi_x; 700 adapter->rss_supported = !!use_msi_x;
773 701
774 adapter->num_txd = MAX_CMD_DESCRIPTORS; 702 adapter->num_txd = MAX_CMD_DESCRIPTORS;
775 703
776 adapter->max_rds_rings = 2; 704 adapter->max_rds_rings = MAX_RDS_RINGS;
705}
706
707static int
708qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
709{
710 int err;
711 struct qlcnic_info nic_info;
712
713 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
714 if (err)
715 return err;
716
717 adapter->physical_port = nic_info.phys_port;
718 adapter->switch_mode = nic_info.switch_mode;
719 adapter->max_tx_ques = nic_info.max_tx_ques;
720 adapter->max_rx_ques = nic_info.max_rx_ques;
721 adapter->capabilities = nic_info.capabilities;
722 adapter->max_mac_filters = nic_info.max_mac_filters;
723 adapter->max_mtu = nic_info.max_mtu;
724
725 if (adapter->capabilities & BIT_6)
726 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
727 else
728 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
729
730 return err;
731}
732
733static void
734qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
735 struct qlcnic_esw_func_cfg *esw_cfg)
736{
737 if (esw_cfg->discard_tagged)
738 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
739 else
740 adapter->flags |= QLCNIC_TAGGING_ENABLED;
741
742 if (esw_cfg->vlan_id)
743 adapter->pvid = esw_cfg->vlan_id;
744 else
745 adapter->pvid = 0;
746}
747
748static void
749qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
750 struct qlcnic_esw_func_cfg *esw_cfg)
751{
752 adapter->flags &= ~QLCNIC_MACSPOOF;
753 adapter->flags &= ~QLCNIC_MAC_OVERRIDE_DISABLED;
754
755 if (esw_cfg->mac_anti_spoof)
756 adapter->flags |= QLCNIC_MACSPOOF;
757
758 if (!esw_cfg->mac_override)
759 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
760
761 qlcnic_set_netdev_features(adapter, esw_cfg);
762}
763
764static int
765qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
766{
767 struct qlcnic_esw_func_cfg esw_cfg;
768
769 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
770 return 0;
771
772 esw_cfg.pci_func = adapter->ahw.pci_func;
773 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
774 return -EIO;
775 qlcnic_set_vlan_config(adapter, &esw_cfg);
776 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
777
778 return 0;
779}
780
781static void
782qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
783 struct qlcnic_esw_func_cfg *esw_cfg)
784{
785 struct net_device *netdev = adapter->netdev;
786 unsigned long features, vlan_features;
787
788 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
789 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
790 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
791 NETIF_F_IPV6_CSUM);
792
793 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
794 features |= (NETIF_F_TSO | NETIF_F_TSO6);
795 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
796 }
797 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
798 features |= NETIF_F_LRO;
799
800 if (esw_cfg->offload_flags & BIT_0) {
801 netdev->features |= features;
802 adapter->rx_csum = 1;
803 if (!(esw_cfg->offload_flags & BIT_1))
804 netdev->features &= ~NETIF_F_TSO;
805 if (!(esw_cfg->offload_flags & BIT_2))
806 netdev->features &= ~NETIF_F_TSO6;
807 } else {
808 netdev->features &= ~features;
809 adapter->rx_csum = 0;
810 }
811
812 netdev->vlan_features = (features & vlan_features);
813}
814
815static int
816qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
817{
818 void __iomem *priv_op;
819 u32 op_mode, priv_level;
820 int err = 0;
821
822 err = qlcnic_initialize_nic(adapter);
823 if (err)
824 return err;
825
826 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
827 return 0;
828
829 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
830 op_mode = readl(priv_op);
831 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
832
833 if (op_mode == QLC_DEV_DRV_DEFAULT)
834 priv_level = QLCNIC_MGMT_FUNC;
835 else
836 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
837
838 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
839 if (priv_level == QLCNIC_MGMT_FUNC) {
840 adapter->op_mode = QLCNIC_MGMT_FUNC;
841 err = qlcnic_init_pci_info(adapter);
842 if (err)
843 return err;
844 /* Set privilege level for other functions */
845 qlcnic_set_function_modes(adapter);
846 dev_info(&adapter->pdev->dev,
847 "HAL Version: %d, Management function\n",
848 adapter->fw_hal_version);
849 } else if (priv_level == QLCNIC_PRIV_FUNC) {
850 adapter->op_mode = QLCNIC_PRIV_FUNC;
851 dev_info(&adapter->pdev->dev,
852 "HAL Version: %d, Privileged function\n",
853 adapter->fw_hal_version);
854 }
855 }
856
857 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
858
859 return err;
860}
861
862static int
863qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
864{
865 struct qlcnic_esw_func_cfg esw_cfg;
866 struct qlcnic_npar_info *npar;
867 u8 i;
868
869 if (adapter->need_fw_reset)
870 return 0;
871
872 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
873 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
874 continue;
875 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
876 esw_cfg.pci_func = i;
877 esw_cfg.offload_flags = BIT_0;
878 esw_cfg.mac_override = BIT_0;
879 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
880 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
881 if (qlcnic_config_switch_port(adapter, &esw_cfg))
882 return -EIO;
883 npar = &adapter->npars[i];
884 npar->pvid = esw_cfg.vlan_id;
885 npar->mac_override = esw_cfg.mac_override;
886 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
887 npar->discard_tagged = esw_cfg.discard_tagged;
888 npar->promisc_mode = esw_cfg.promisc_mode;
889 npar->offload_flags = esw_cfg.offload_flags;
890 }
891
892 return 0;
893}
894
895static int
896qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
897 struct qlcnic_npar_info *npar, int pci_func)
898{
899 struct qlcnic_esw_func_cfg esw_cfg;
900 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
901 esw_cfg.pci_func = pci_func;
902 esw_cfg.vlan_id = npar->pvid;
903 esw_cfg.mac_override = npar->mac_override;
904 esw_cfg.discard_tagged = npar->discard_tagged;
905 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
906 esw_cfg.offload_flags = npar->offload_flags;
907 esw_cfg.promisc_mode = npar->promisc_mode;
908 if (qlcnic_config_switch_port(adapter, &esw_cfg))
909 return -EIO;
910
911 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
912 if (qlcnic_config_switch_port(adapter, &esw_cfg))
913 return -EIO;
914
915 return 0;
777} 916}
778 917
779static int 918static int
780qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) 919qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
781{ 920{
782 int i, err = 0; 921 int i, err;
783 struct qlcnic_npar_info *npar; 922 struct qlcnic_npar_info *npar;
784 struct qlcnic_info nic_info; 923 struct qlcnic_info nic_info;
785 924
786 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || 925 if (!adapter->need_fw_reset)
787 !adapter->need_fw_reset)
788 return 0; 926 return 0;
789 927
790 if (adapter->op_mode == QLCNIC_MGMT_FUNC) { 928 /* Set the NPAR config data after FW reset */
791 /* Set the NPAR config data after FW reset */ 929 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
792 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 930 npar = &adapter->npars[i];
793 npar = &adapter->npars[i]; 931 if (npar->type != QLCNIC_TYPE_NIC)
794 if (npar->type != QLCNIC_TYPE_NIC) 932 continue;
795 continue; 933 err = qlcnic_get_nic_info(adapter, &nic_info, i);
796 err = qlcnic_get_nic_info(adapter, &nic_info, i); 934 if (err)
797 if (err) 935 return err;
798 goto err_out; 936 nic_info.min_tx_bw = npar->min_bw;
799 nic_info.min_tx_bw = npar->min_bw; 937 nic_info.max_tx_bw = npar->max_bw;
800 nic_info.max_tx_bw = npar->max_bw; 938 err = qlcnic_set_nic_info(adapter, &nic_info);
801 err = qlcnic_set_nic_info(adapter, &nic_info); 939 if (err)
940 return err;
941
942 if (npar->enable_pm) {
943 err = qlcnic_config_port_mirroring(adapter,
944 npar->dest_npar, 1, i);
802 if (err) 945 if (err)
803 goto err_out; 946 return err;
947 }
948 err = qlcnic_reset_eswitch_config(adapter, npar, i);
949 if (err)
950 return err;
951 }
952 return 0;
953}
804 954
 805 if (npar->enable_pm) { 955static int qlcnic_check_npar_operational(struct qlcnic_adapter *adapter)
806 err = qlcnic_config_port_mirroring(adapter, 956{
807 npar->dest_npar, 1, i); 957 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
808 if (err) 958 u32 npar_state;
809 goto err_out;
810 959
811 } 960 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
812 npar->mac_learning = DEFAULT_MAC_LEARN; 961 return 0;
813 npar->host_vlan_tag = 0; 962
814 npar->promisc_mode = 0; 963 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
815 npar->discard_tagged = 0; 964 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
816 npar->vlan_id = 0; 965 msleep(1000);
817 } 966 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
818 } 967 }
819err_out: 968 if (!npar_opt_timeo) {
969 dev_err(&adapter->pdev->dev,
 970 "Waiting for NPAR state to become operational timed out\n");
971 return -EIO;
972 }
973 return 0;
974}
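
qlcnic_check_npar_operational() is an instance of a simple poll-until-timeout pattern: re-read a state register once a second until it reaches the wanted value or the counter runs out. Factored out it would look like the sketch below; qlc_poll_state() is illustrative, not a driver function, and it relies on the driver's QLCRD32()/msleep() helpers:

static int qlc_poll_state(struct qlcnic_adapter *adapter, u32 reg,
			  u32 wanted, u8 timeo_sec)
{
	u32 state = QLCRD32(adapter, reg);

	while (state != wanted && --timeo_sec) {
		msleep(1000);			/* one-second granularity */
		state = QLCRD32(adapter, reg);
	}
	return timeo_sec ? 0 : -EIO;
}
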
975
976static int
977qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
978{
979 int err;
980
981 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
982 adapter->op_mode != QLCNIC_MGMT_FUNC)
983 return 0;
984
985 err = qlcnic_set_default_offload_settings(adapter);
986 if (err)
987 return err;
988
989 err = qlcnic_reset_npar_config(adapter);
990 if (err)
991 return err;
992
993 qlcnic_dev_set_npar_ready(adapter);
994
820 return err; 995 return err;
821} 996}
822 997
823static int 998static int
824qlcnic_start_firmware(struct qlcnic_adapter *adapter) 999qlcnic_start_firmware(struct qlcnic_adapter *adapter)
825{ 1000{
826 int val, err, first_boot; 1001 int err;
827 1002
828 err = qlcnic_can_start_firmware(adapter); 1003 err = qlcnic_can_start_firmware(adapter);
829 if (err < 0) 1004 if (err < 0)
830 return err; 1005 return err;
831 else if (!err) 1006 else if (!err)
832 goto wait_init; 1007 goto check_fw_status;
833
834 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
835 if (first_boot == 0x55555555)
836 /* This is the first boot after power up */
837 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
838 1008
839 if (load_fw_file) 1009 if (load_fw_file)
840 qlcnic_request_firmware(adapter); 1010 qlcnic_request_firmware(adapter);
841 else { 1011 else {
842 if (qlcnic_check_flash_fw_ver(adapter)) 1012 err = qlcnic_check_flash_fw_ver(adapter);
1013 if (err)
843 goto err_out; 1014 goto err_out;
844 1015
845 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; 1016 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
846 } 1017 }
847 1018
848 err = qlcnic_need_fw_reset(adapter); 1019 err = qlcnic_need_fw_reset(adapter);
849 if (err < 0)
850 goto err_out;
851 if (err == 0) 1020 if (err == 0)
852 goto wait_init; 1021 goto check_fw_status;
853
854 if (first_boot != 0x55555555) {
855 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
856 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
857 qlcnic_pinit_from_rom(adapter);
858 msleep(1);
859 }
860
861 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
862 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
863 1022
1023 err = qlcnic_pinit_from_rom(adapter);
1024 if (err)
1025 goto err_out;
864 qlcnic_set_port_mode(adapter); 1026 qlcnic_set_port_mode(adapter);
865 1027
866 err = qlcnic_load_firmware(adapter); 1028 err = qlcnic_load_firmware(adapter);
@@ -868,26 +1030,27 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
868 goto err_out; 1030 goto err_out;
869 1031
870 qlcnic_release_firmware(adapter); 1032 qlcnic_release_firmware(adapter);
1033 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
871 1034
872 val = (_QLCNIC_LINUX_MAJOR << 16) 1035check_fw_status:
873 | ((_QLCNIC_LINUX_MINOR << 8)) 1036 err = qlcnic_check_fw_status(adapter);
874 | (_QLCNIC_LINUX_SUBVERSION);
875 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
876
877wait_init:
878 /* Handshake with the card before we register the devices. */
879 err = qlcnic_init_firmware(adapter);
880 if (err) 1037 if (err)
881 goto err_out; 1038 goto err_out;
882 1039
883 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); 1040 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
884 qlcnic_idc_debug_info(adapter, 1); 1041 qlcnic_idc_debug_info(adapter, 1);
885 1042
886 qlcnic_check_options(adapter); 1043 err = qlcnic_check_eswitch_mode(adapter);
887 if (qlcnic_reset_npar_config(adapter)) 1044 if (err) {
1045 dev_err(&adapter->pdev->dev,
1046 "Memory allocation failed for eswitch\n");
1047 goto err_out;
1048 }
1049 err = qlcnic_set_mgmt_operations(adapter);
1050 if (err)
888 goto err_out; 1051 goto err_out;
889 qlcnic_dev_set_npar_ready(adapter);
890 1052
1053 qlcnic_check_options(adapter);
891 adapter->need_fw_reset = 0; 1054 adapter->need_fw_reset = 0;
892 1055
893 qlcnic_release_firmware(adapter); 1056 qlcnic_release_firmware(adapter);
@@ -896,6 +1059,7 @@ wait_init:
896err_out: 1059err_out:
897 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); 1060 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
898 dev_err(&adapter->pdev->dev, "Device state set to failed\n"); 1061 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1062
899 qlcnic_release_firmware(adapter); 1063 qlcnic_release_firmware(adapter);
900 return err; 1064 return err;
901} 1065}
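
The open-coded version word this hunk removes packed major/minor/sub into one register value; the new QLCNIC_DRIVER_VERSION macro presumably expands to the same shape. As a stand-alone sketch (macro name and expansion are assumptions based on the removed code):

#define QLC_VERSION_WORD(maj, min, sub) \
	(((maj) << 16) | ((min) << 8) | (sub))

/* QLC_VERSION_WORD(5, 0, 7) == 0x00050007 */
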
@@ -979,6 +1143,8 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
979 1143
980 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 1144 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
981 return 0; 1145 return 0;
1146 if (qlcnic_set_eswitch_port_config(adapter))
1147 return -EIO;
982 1148
983 if (qlcnic_fw_create_ctx(adapter)) 1149 if (qlcnic_fw_create_ctx(adapter))
984 return -EIO; 1150 return -EIO;
@@ -998,7 +1164,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
998 1164
999 qlcnic_config_intr_coalesce(adapter); 1165 qlcnic_config_intr_coalesce(adapter);
1000 1166
1001 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 1167 if (netdev->features & NETIF_F_LRO)
1002 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); 1168 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1003 1169
1004 qlcnic_napi_enable(adapter); 1170 qlcnic_napi_enable(adapter);
@@ -1041,6 +1207,9 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1041 1207
1042 qlcnic_free_mac_list(adapter); 1208 qlcnic_free_mac_list(adapter);
1043 1209
1210 if (adapter->fhash.fnum)
1211 qlcnic_delete_lb_filters(adapter);
1212
1044 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); 1213 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1045 1214
1046 qlcnic_napi_disable(adapter); 1215 qlcnic_napi_disable(adapter);
@@ -1296,12 +1465,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1296 1465
1297 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 1466 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1298 netdev->features |= NETIF_F_LRO; 1467 netdev->features |= NETIF_F_LRO;
1299
1300 netdev->irq = adapter->msix_entries[0].vector; 1468 netdev->irq = adapter->msix_entries[0].vector;
1301 1469
1302 if (qlcnic_read_mac_addr(adapter))
1303 dev_warn(&pdev->dev, "failed to read mac addr\n");
1304
1305 netif_carrier_off(netdev); 1470 netif_carrier_off(netdev);
1306 netif_stop_queue(netdev); 1471 netif_stop_queue(netdev);
1307 1472
@@ -1338,6 +1503,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1338 int err; 1503 int err;
1339 uint8_t revision_id; 1504 uint8_t revision_id;
1340 uint8_t pci_using_dac; 1505 uint8_t pci_using_dac;
1506 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1341 1507
1342 err = pci_enable_device(pdev); 1508 err = pci_enable_device(pdev);
1343 if (err) 1509 if (err)
@@ -1395,10 +1561,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1395 goto err_out_iounmap; 1561 goto err_out_iounmap;
1396 } 1562 }
1397 1563
1398 if (qlcnic_read_mac_addr(adapter)) 1564 err = qlcnic_setup_idc_param(adapter);
1399 dev_warn(&pdev->dev, "failed to read mac addr\n"); 1565 if (err)
1400
1401 if (qlcnic_setup_idc_param(adapter))
1402 goto err_out_iounmap; 1566 goto err_out_iounmap;
1403 1567
1404 err = adapter->nic_ops->start_firmware(adapter); 1568 err = adapter->nic_ops->start_firmware(adapter);
@@ -1407,6 +1571,17 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1407 goto err_out_decr_ref; 1571 goto err_out_decr_ref;
1408 } 1572 }
1409 1573
1574 if (qlcnic_read_mac_addr(adapter))
1575 dev_warn(&pdev->dev, "failed to read mac addr\n");
1576
1577 if (adapter->portnum == 0) {
1578 get_brd_name(adapter, brd_name);
1579
 1580 pr_info("%s: %s board, chip rev 0x%x\n",
1581 module_name(THIS_MODULE),
1582 brd_name, adapter->ahw.revision_id);
1583 }
1584
1410 qlcnic_clear_stats(adapter); 1585 qlcnic_clear_stats(adapter);
1411 1586
1412 qlcnic_setup_intr(adapter); 1587 qlcnic_setup_intr(adapter);
@@ -1430,6 +1605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1430 break; 1605 break;
1431 } 1606 }
1432 1607
1608 qlcnic_alloc_lb_filters_mem(adapter);
1433 qlcnic_create_diag_entries(adapter); 1609 qlcnic_create_diag_entries(adapter);
1434 1610
1435 return 0; 1611 return 0;
@@ -1438,7 +1614,7 @@ err_out_disable_msi:
1438 qlcnic_teardown_intr(adapter); 1614 qlcnic_teardown_intr(adapter);
1439 1615
1440err_out_decr_ref: 1616err_out_decr_ref:
1441 qlcnic_clr_all_drv_state(adapter); 1617 qlcnic_clr_all_drv_state(adapter, 0);
1442 1618
1443err_out_iounmap: 1619err_out_iounmap:
1444 qlcnic_cleanup_pci_map(adapter); 1620 qlcnic_cleanup_pci_map(adapter);
@@ -1477,10 +1653,12 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1477 if (adapter->eswitch != NULL) 1653 if (adapter->eswitch != NULL)
1478 kfree(adapter->eswitch); 1654 kfree(adapter->eswitch);
1479 1655
1480 qlcnic_clr_all_drv_state(adapter); 1656 qlcnic_clr_all_drv_state(adapter, 0);
1481 1657
1482 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1658 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1483 1659
1660 qlcnic_free_lb_filters_mem(adapter);
1661
1484 qlcnic_teardown_intr(adapter); 1662 qlcnic_teardown_intr(adapter);
1485 1663
1486 qlcnic_remove_diag_entries(adapter); 1664 qlcnic_remove_diag_entries(adapter);
@@ -1509,7 +1687,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
1509 if (netif_running(netdev)) 1687 if (netif_running(netdev))
1510 qlcnic_down(adapter, netdev); 1688 qlcnic_down(adapter, netdev);
1511 1689
1512 qlcnic_clr_all_drv_state(adapter); 1690 qlcnic_clr_all_drv_state(adapter, 0);
1513 1691
1514 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1692 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1515 1693
@@ -1587,9 +1765,6 @@ static int qlcnic_open(struct net_device *netdev)
1587 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1765 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1588 int err; 1766 int err;
1589 1767
1590 if (adapter->driver_mismatch)
1591 return -EIO;
1592
1593 err = qlcnic_attach(adapter); 1768 err = qlcnic_attach(adapter);
1594 if (err) 1769 if (err)
1595 return err; 1770 return err;
@@ -1619,6 +1794,119 @@ static int qlcnic_close(struct net_device *netdev)
1619} 1794}
1620 1795
1621static void 1796static void
1797qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1798{
1799 void *head;
1800 int i;
1801
1802 if (!qlcnic_mac_learn)
1803 return;
1804
1805 spin_lock_init(&adapter->mac_learn_lock);
1806
1807 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1808 GFP_KERNEL);
1809 if (!head)
1810 return;
1811
1812 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1813 adapter->fhash.fhead = (struct hlist_head *)head;
1814
1815 for (i = 0; i < adapter->fhash.fmax; i++)
1816 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1817}
1818
1819static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1820{
1821 if (adapter->fhash.fmax && adapter->fhash.fhead)
1822 kfree(adapter->fhash.fhead);
1823
1824 adapter->fhash.fhead = NULL;
1825 adapter->fhash.fmax = 0;
1826}
1827
1828static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1829 u64 uaddr, u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1830{
1831 struct cmd_desc_type0 *hwdesc;
1832 struct qlcnic_nic_req *req;
1833 struct qlcnic_mac_req *mac_req;
1834 u32 producer;
1835 u64 word;
1836
1837 producer = tx_ring->producer;
1838 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1839
1840 req = (struct qlcnic_nic_req *)hwdesc;
1841 memset(req, 0, sizeof(struct qlcnic_nic_req));
1842 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1843
1844 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1845 req->req_hdr = cpu_to_le64(word);
1846
1847 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
1848 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1849 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1850
1851 req->words[1] = cpu_to_le64(vlan_id);
1852
1853 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1854}
1855
1856#define QLCNIC_MAC_HASH(MAC)\
1857 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
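
QLCNIC_MAC_HASH() folds bits 16-18 and 40-42 of the MAC (memcpy'd into a u64) into a 6-bit bucket index. A worked example, assuming a little-endian host and QLCNIC_LB_MAX_FILTERS == 64:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QLCNIC_MAC_HASH(MAC)\
	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x30, 0x11, 0x22 };
	uint64_t key = 0;

	memcpy(&key, mac, 6);	/* as qlcnic_send_filter() does below */
	/* bits 16-18 come from mac[2] (0x1e -> 6),
	 * bits 40-42 from mac[5] (0x22 -> 2): 6 | (2 << 3) == 22 */
	printf("hindex = %u\n", (unsigned)(QLCNIC_MAC_HASH(key) & 63));
	return 0;
}
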
1858
1859static void
1860qlcnic_send_filter(struct qlcnic_adapter *adapter,
1861 struct qlcnic_host_tx_ring *tx_ring,
1862 struct cmd_desc_type0 *first_desc,
1863 struct sk_buff *skb)
1864{
1865 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1866 struct qlcnic_filter *fil, *tmp_fil;
1867 struct hlist_node *tmp_hnode, *n;
1868 struct hlist_head *head;
1869 u64 src_addr = 0;
1870 u16 vlan_id = 0;
1871 u8 hindex;
1872
1873 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1874 return;
1875
1876 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1877 return;
1878
 1879 /* Only NPAR-capable devices support VLAN-based learning */
1880 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1881 vlan_id = first_desc->vlan_TCI;
1882 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1883 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1884 head = &(adapter->fhash.fhead[hindex]);
1885
1886 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
1887 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1888 tmp_fil->vlan_id == vlan_id) {
1889 tmp_fil->ftime = jiffies;
1890 return;
1891 }
1892 }
1893
1894 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1895 if (!fil)
1896 return;
1897
1898 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
1899
1900 fil->ftime = jiffies;
1901 fil->vlan_id = vlan_id;
1902 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1903 spin_lock(&adapter->mac_learn_lock);
1904 hlist_add_head(&(fil->fnode), head);
1905 adapter->fhash.fnum++;
1906 spin_unlock(&adapter->mac_learn_lock);
1907}
1908
1909static void
1622qlcnic_tso_check(struct net_device *netdev, 1910qlcnic_tso_check(struct net_device *netdev,
1623 struct qlcnic_host_tx_ring *tx_ring, 1911 struct qlcnic_host_tx_ring *tx_ring,
1624 struct cmd_desc_type0 *first_desc, 1912 struct cmd_desc_type0 *first_desc,
@@ -1626,26 +1914,13 @@ qlcnic_tso_check(struct net_device *netdev,
1626{ 1914{
1627 u8 opcode = TX_ETHER_PKT; 1915 u8 opcode = TX_ETHER_PKT;
1628 __be16 protocol = skb->protocol; 1916 __be16 protocol = skb->protocol;
1629 u16 flags = 0, vid = 0; 1917 u16 flags = 0;
1630 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; 1918 int copied, offset, copy_len, hdr_len = 0, tso = 0;
1631 struct cmd_desc_type0 *hwdesc; 1919 struct cmd_desc_type0 *hwdesc;
1632 struct vlan_ethhdr *vh; 1920 struct vlan_ethhdr *vh;
1633 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1921 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1634 u32 producer = tx_ring->producer; 1922 u32 producer = tx_ring->producer;
1635 1923 int vlan_oob = first_desc->flags_opcode & cpu_to_le16(FLAGS_VLAN_OOB);
1636 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1637
1638 vh = (struct vlan_ethhdr *)skb->data;
1639 protocol = vh->h_vlan_encapsulated_proto;
1640 flags = FLAGS_VLAN_TAGGED;
1641
1642 } else if (vlan_tx_tag_present(skb)) {
1643
1644 flags = FLAGS_VLAN_OOB;
1645 vid = vlan_tx_tag_get(skb);
1646 qlcnic_set_tx_vlan_tci(first_desc, vid);
1647 vlan_oob = 1;
1648 }
1649 1924
1650 if (*(skb->data) & BIT_0) { 1925 if (*(skb->data) & BIT_0) {
1651 flags |= BIT_0; 1926 flags |= BIT_0;
@@ -1716,7 +1991,7 @@ qlcnic_tso_check(struct net_device *netdev,
1716 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); 1991 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1717 skb_copy_from_linear_data(skb, vh, 12); 1992 skb_copy_from_linear_data(skb, vh, 12);
1718 vh->h_vlan_proto = htons(ETH_P_8021Q); 1993 vh->h_vlan_proto = htons(ETH_P_8021Q);
1719 vh->h_vlan_TCI = htons(vid); 1994 vh->h_vlan_TCI = htons(first_desc->vlan_TCI);
1720 skb_copy_from_linear_data_offset(skb, 12, 1995 skb_copy_from_linear_data_offset(skb, 12,
1721 (char *)vh + 16, copy_len - 16); 1996 (char *)vh + 16, copy_len - 16);
1722 1997
@@ -1796,11 +2071,47 @@ out_err:
1796 return -ENOMEM; 2071 return -ENOMEM;
1797} 2072}
1798 2073
2074static int
2075qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
2076 struct sk_buff *skb,
2077 struct cmd_desc_type0 *first_desc)
2078{
2079 u8 opcode = 0;
2080 u16 flags = 0;
2081 __be16 protocol = skb->protocol;
2082 struct vlan_ethhdr *vh;
2083
2084 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
2085 vh = (struct vlan_ethhdr *)skb->data;
2086 protocol = vh->h_vlan_encapsulated_proto;
2087 flags = FLAGS_VLAN_TAGGED;
2088 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2089 } else if (vlan_tx_tag_present(skb)) {
2090 flags = FLAGS_VLAN_OOB;
2091 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2092 }
2093 if (unlikely(adapter->pvid)) {
2094 if (first_desc->vlan_TCI &&
2095 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2096 return -EIO;
2097 if (first_desc->vlan_TCI &&
2098 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2099 goto set_flags;
2100
2101 flags = FLAGS_VLAN_OOB;
2102 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
2103 }
2104set_flags:
2105 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2106 return 0;
2107}
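
Condensed, the tagging policy qlcnic_check_tx_tagging() implements reduces to the decision function below. This is a sketch: it ignores the TCI==0 corner case, since the driver tests the descriptor's TCI value rather than the flags.

enum tag_action { TAG_NONE, TAG_INLINE, TAG_OOB, TAG_DROP };

static enum tag_action tx_tag_policy(int frame_tagged, int oob_tag,
				     int pvid_set, int tagging_enabled)
{
	int already_tagged = frame_tagged || oob_tag;

	if (pvid_set) {
		if (already_tagged && !tagging_enabled)
			return TAG_DROP;	/* the -EIO path */
		if (!already_tagged)
			return TAG_OOB;		/* stamp the port VLAN id */
	}
	if (frame_tagged)
		return TAG_INLINE;		/* FLAGS_VLAN_TAGGED */
	return oob_tag ? TAG_OOB : TAG_NONE;	/* FLAGS_VLAN_OOB / none */
}
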
2108
1799static inline void 2109static inline void
1800qlcnic_clear_cmddesc(u64 *desc) 2110qlcnic_clear_cmddesc(u64 *desc)
1801{ 2111{
1802 desc[0] = 0ULL; 2112 desc[0] = 0ULL;
1803 desc[2] = 0ULL; 2113 desc[2] = 0ULL;
2114 desc[7] = 0ULL;
1804} 2115}
1805 2116
1806netdev_tx_t 2117netdev_tx_t
@@ -1812,6 +2123,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1812 struct qlcnic_skb_frag *buffrag; 2123 struct qlcnic_skb_frag *buffrag;
1813 struct cmd_desc_type0 *hwdesc, *first_desc; 2124 struct cmd_desc_type0 *hwdesc, *first_desc;
1814 struct pci_dev *pdev; 2125 struct pci_dev *pdev;
2126 struct ethhdr *phdr;
1815 int i, k; 2127 int i, k;
1816 2128
1817 u32 producer; 2129 u32 producer;
@@ -1823,6 +2135,13 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1823 return NETDEV_TX_BUSY; 2135 return NETDEV_TX_BUSY;
1824 } 2136 }
1825 2137
2138 if (adapter->flags & QLCNIC_MACSPOOF) {
2139 phdr = (struct ethhdr *)skb->data;
2140 if (compare_ether_addr(phdr->h_source,
2141 adapter->mac_addr))
2142 goto drop_packet;
2143 }
2144
1826 frag_count = skb_shinfo(skb)->nr_frags + 1; 2145 frag_count = skb_shinfo(skb)->nr_frags + 1;
1827 2146
 1828 /* 4 fragments per cmd desc */ 2147 /* 4 fragments per cmd desc */
@@ -1844,6 +2163,12 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1844 2163
1845 pdev = adapter->pdev; 2164 pdev = adapter->pdev;
1846 2165
2166 first_desc = hwdesc = &tx_ring->desc_head[producer];
2167 qlcnic_clear_cmddesc((u64 *)hwdesc);
2168
2169 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2170 goto drop_packet;
2171
1847 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { 2172 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1848 adapter->stats.tx_dma_map_error++; 2173 adapter->stats.tx_dma_map_error++;
1849 goto drop_packet; 2174 goto drop_packet;
@@ -1852,9 +2177,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1852 pbuf->skb = skb; 2177 pbuf->skb = skb;
1853 pbuf->frag_count = frag_count; 2178 pbuf->frag_count = frag_count;
1854 2179
1855 first_desc = hwdesc = &tx_ring->desc_head[producer];
1856 qlcnic_clear_cmddesc((u64 *)hwdesc);
1857
1858 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len); 2180 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1859 qlcnic_set_tx_port(first_desc, adapter->portnum); 2181 qlcnic_set_tx_port(first_desc, adapter->portnum);
1860 2182
@@ -1893,6 +2215,9 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1893 2215
1894 qlcnic_tso_check(netdev, tx_ring, first_desc, skb); 2216 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1895 2217
2218 if (qlcnic_mac_learn)
2219 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2220
1896 qlcnic_update_cmd_producer(adapter, tx_ring); 2221 qlcnic_update_cmd_producer(adapter, tx_ring);
1897 2222
1898 adapter->stats.txbytes += skb->len; 2223 adapter->stats.txbytes += skb->len;
@@ -1947,14 +2272,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1947 struct net_device *netdev = adapter->netdev; 2272 struct net_device *netdev = adapter->netdev;
1948 2273
1949 if (adapter->ahw.linkup && !linkup) { 2274 if (adapter->ahw.linkup && !linkup) {
1950 dev_info(&netdev->dev, "NIC Link is down\n"); 2275 netdev_info(netdev, "NIC Link is down\n");
1951 adapter->ahw.linkup = 0; 2276 adapter->ahw.linkup = 0;
1952 if (netif_running(netdev)) { 2277 if (netif_running(netdev)) {
1953 netif_carrier_off(netdev); 2278 netif_carrier_off(netdev);
1954 netif_stop_queue(netdev); 2279 netif_stop_queue(netdev);
1955 } 2280 }
1956 } else if (!adapter->ahw.linkup && linkup) { 2281 } else if (!adapter->ahw.linkup && linkup) {
1957 dev_info(&netdev->dev, "NIC Link is up\n"); 2282 netdev_info(netdev, "NIC Link is up\n");
1958 adapter->ahw.linkup = 1; 2283 adapter->ahw.linkup = 1;
1959 if (netif_running(netdev)) { 2284 if (netif_running(netdev)) {
1960 netif_carrier_on(netdev); 2285 netif_carrier_on(netdev);
@@ -2258,18 +2583,22 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2258} 2583}
2259 2584
2260static void 2585static void
2261qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter) 2586qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2262{ 2587{
2263 u32 val; 2588 u32 val;
2264 2589
2265 if (qlcnic_api_lock(adapter)) 2590 if (qlcnic_api_lock(adapter))
2266 goto err; 2591 goto err;
2267 2592
2268 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2593 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2269 QLC_DEV_CLR_REF_CNT(val, adapter->portnum); 2594 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2270 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val); 2595 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2271 2596
2272 if (!(val & 0x11111111)) 2597 if (failed) {
2598 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2599 dev_info(&adapter->pdev->dev,
 2600 "Device state set to failed; please reboot\n");
2601 } else if (!(val & 0x11111111))
2273 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); 2602 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2274 2603
2275 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2604 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
@@ -2290,7 +2619,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2290 int act, state; 2619 int act, state;
2291 2620
2292 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2621 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2293 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2622 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2294 2623
2295 if (((state & 0x11111111) == (act & 0x11111111)) || 2624 if (((state & 0x11111111) == (act & 0x11111111)) ||
2296 ((act & 0x11111111) == ((state >> 1) & 0x11111111))) 2625 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
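
The 0x11111111 masks above imply one nibble of QLCNIC_CRB_DRV_ACTIVE per PCI function, with bit 0 of each nibble marking an active driver instance. The macro bodies below are reconstructed from that usage, not quoted from qlcnic.h:

#define QLC_DEV_SET_REF_CNT(VAL, FN)	((VAL) |=  (1 << ((FN) * 4)))
#define QLC_DEV_CLR_REF_CNT(VAL, FN)	((VAL) &= ~(1 << ((FN) * 4)))

/* so (val & 0x11111111) == 0 reads as "no driver instance is active",
 * which is why qlcnic_clr_all_drv_state() may then move the device to
 * QLCNIC_DEV_COLD. */
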
@@ -2325,10 +2654,10 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2325 if (qlcnic_api_lock(adapter)) 2654 if (qlcnic_api_lock(adapter))
2326 return -1; 2655 return -1;
2327 2656
2328 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2657 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2329 if (!(val & (1 << (portnum * 4)))) { 2658 if (!(val & (1 << (portnum * 4)))) {
2330 QLC_DEV_SET_REF_CNT(val, portnum); 2659 QLC_DEV_SET_REF_CNT(val, portnum);
2331 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val); 2660 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2332 } 2661 }
2333 2662
2334 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2663 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
@@ -2403,7 +2732,7 @@ qlcnic_fwinit_work(struct work_struct *work)
2403{ 2732{
2404 struct qlcnic_adapter *adapter = container_of(work, 2733 struct qlcnic_adapter *adapter = container_of(work,
2405 struct qlcnic_adapter, fw_work.work); 2734 struct qlcnic_adapter, fw_work.work);
2406 u32 dev_state = 0xf, npar_state; 2735 u32 dev_state = 0xf;
2407 2736
2408 if (qlcnic_api_lock(adapter)) 2737 if (qlcnic_api_lock(adapter))
2409 goto err_ret; 2738 goto err_ret;
@@ -2417,16 +2746,8 @@ qlcnic_fwinit_work(struct work_struct *work)
2417 } 2746 }
2418 2747
2419 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { 2748 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2420 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); 2749 qlcnic_api_unlock(adapter);
2421 if (npar_state == QLCNIC_DEV_NPAR_RDY) { 2750 goto wait_npar;
2422 qlcnic_api_unlock(adapter);
2423 goto wait_npar;
2424 } else {
2425 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2426 FW_POLL_DELAY);
2427 qlcnic_api_unlock(adapter);
2428 return;
2429 }
2430 } 2751 }
2431 2752
2432 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { 2753 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
@@ -2463,6 +2784,7 @@ skip_ack_check:
2463 2784
2464 if (!adapter->nic_ops->start_firmware(adapter)) { 2785 if (!adapter->nic_ops->start_firmware(adapter)) {
2465 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2786 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2787 adapter->fw_wait_cnt = 0;
2466 return; 2788 return;
2467 } 2789 }
2468 goto err_ret; 2790 goto err_ret;
@@ -2475,27 +2797,25 @@ wait_npar:
2475 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); 2797 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2476 2798
2477 switch (dev_state) { 2799 switch (dev_state) {
2478 case QLCNIC_DEV_QUISCENT: 2800 case QLCNIC_DEV_READY:
2479 case QLCNIC_DEV_NEED_QUISCENT:
2480 case QLCNIC_DEV_NEED_RESET:
2481 qlcnic_schedule_work(adapter,
2482 qlcnic_fwinit_work, FW_POLL_DELAY);
2483 return;
2484 case QLCNIC_DEV_FAILED:
2485 break;
2486
2487 default:
2488 if (!adapter->nic_ops->start_firmware(adapter)) { 2801 if (!adapter->nic_ops->start_firmware(adapter)) {
2489 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2802 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2803 adapter->fw_wait_cnt = 0;
2490 return; 2804 return;
2491 } 2805 }
2806 case QLCNIC_DEV_FAILED:
2807 break;
2808 default:
2809 qlcnic_schedule_work(adapter,
2810 qlcnic_fwinit_work, FW_POLL_DELAY);
2811 return;
2492 } 2812 }
2493 2813
2494err_ret: 2814err_ret:
2495 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u " 2815 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2496 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt); 2816 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2497 netif_device_attach(adapter->netdev); 2817 netif_device_attach(adapter->netdev);
2498 qlcnic_clr_all_drv_state(adapter); 2818 qlcnic_clr_all_drv_state(adapter, 0);
2499} 2819}
2500 2820
2501static void 2821static void
@@ -2531,8 +2851,23 @@ err_ret:
2531 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n", 2851 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2532 status, adapter->temp); 2852 status, adapter->temp);
2533 netif_device_attach(netdev); 2853 netif_device_attach(netdev);
2534 qlcnic_clr_all_drv_state(adapter); 2854 qlcnic_clr_all_drv_state(adapter, 1);
2855}
2856
 2857/* Transition NPAR state to non-operational */
2858static void
2859qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2860{
2861 u32 state;
2862
2863 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2864 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2865 return;
2535 2866
2867 if (qlcnic_api_lock(adapter))
2868 return;
2869 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2870 qlcnic_api_unlock(adapter);
2536} 2871}
2537 2872
 2538/* Transition to RESET state from READY state only */ 2873/* Transition to RESET state from READY state only */
@@ -2553,6 +2888,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2553 qlcnic_idc_debug_info(adapter, 0); 2888 qlcnic_idc_debug_info(adapter, 0);
2554 } 2889 }
2555 2890
2891 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2556 qlcnic_api_unlock(adapter); 2892 qlcnic_api_unlock(adapter);
2557} 2893}
2558 2894
@@ -2560,21 +2896,11 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2560static void 2896static void
2561qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) 2897qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2562{ 2898{
2563 u32 state;
2564
2565 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
2566 adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
2567 return;
2568 if (qlcnic_api_lock(adapter)) 2899 if (qlcnic_api_lock(adapter))
2569 return; 2900 return;
2570 2901
2571 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); 2902 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2572 2903 QLCDB(adapter, DRV, "NPAR operational state set\n");
2573 if (state != QLCNIC_DEV_NPAR_RDY) {
2574 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2575 QLCNIC_DEV_NPAR_RDY);
2576 QLCDB(adapter, DRV, "NPAR READY state set\n");
2577 }
2578 2904
2579 qlcnic_api_unlock(adapter); 2905 qlcnic_api_unlock(adapter);
2580} 2906}
@@ -2605,7 +2931,21 @@ qlcnic_attach_work(struct work_struct *work)
2605 struct qlcnic_adapter *adapter = container_of(work, 2931 struct qlcnic_adapter *adapter = container_of(work,
2606 struct qlcnic_adapter, fw_work.work); 2932 struct qlcnic_adapter, fw_work.work);
2607 struct net_device *netdev = adapter->netdev; 2933 struct net_device *netdev = adapter->netdev;
2934 u32 npar_state;
2608 2935
2936 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2937 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2938 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2939 qlcnic_clr_all_drv_state(adapter, 0);
2940 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2941 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2942 FW_POLL_DELAY);
2943 else
2944 goto attach;
 2945 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2946 return;
2947 }
2948attach:
2609 if (netif_running(netdev)) { 2949 if (netif_running(netdev)) {
2610 if (qlcnic_up(adapter, netdev)) 2950 if (qlcnic_up(adapter, netdev))
2611 goto done; 2951 goto done;
@@ -2626,7 +2966,7 @@ done:
2626static int 2966static int
2627qlcnic_check_health(struct qlcnic_adapter *adapter) 2967qlcnic_check_health(struct qlcnic_adapter *adapter)
2628{ 2968{
2629 u32 state = 0, heartbit; 2969 u32 state = 0, heartbeat;
2630 struct net_device *netdev = adapter->netdev; 2970 struct net_device *netdev = adapter->netdev;
2631 2971
2632 if (qlcnic_check_temp(adapter)) 2972 if (qlcnic_check_temp(adapter))
@@ -2636,12 +2976,15 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2636 qlcnic_dev_request_reset(adapter); 2976 qlcnic_dev_request_reset(adapter);
2637 2977
2638 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2978 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2639 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT) 2979 if (state == QLCNIC_DEV_NEED_RESET ||
2980 state == QLCNIC_DEV_NEED_QUISCENT) {
2981 qlcnic_set_npar_non_operational(adapter);
2640 adapter->need_fw_reset = 1; 2982 adapter->need_fw_reset = 1;
2983 }
2641 2984
2642 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); 2985 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2643 if (heartbit != adapter->heartbit) { 2986 if (heartbeat != adapter->heartbeat) {
2644 adapter->heartbit = heartbit; 2987 adapter->heartbeat = heartbeat;
2645 adapter->fw_fail_cnt = 0; 2988 adapter->fw_fail_cnt = 0;
2646 if (adapter->need_fw_reset) 2989 if (adapter->need_fw_reset)
2647 goto detach; 2990 goto detach;
@@ -2692,6 +3035,9 @@ qlcnic_fw_poll_work(struct work_struct *work)
2692 if (qlcnic_check_health(adapter)) 3035 if (qlcnic_check_health(adapter))
2693 return; 3036 return;
2694 3037
3038 if (adapter->fhash.fnum)
3039 qlcnic_prune_lb_filters(adapter);
3040
2695reschedule: 3041reschedule:
2696 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); 3042 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2697} 3043}
@@ -2738,7 +3084,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
2738 if (qlcnic_api_lock(adapter)) 3084 if (qlcnic_api_lock(adapter))
2739 return -EINVAL; 3085 return -EINVAL;
2740 3086
2741 if (first_func) { 3087 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
2742 adapter->need_fw_reset = 1; 3088 adapter->need_fw_reset = 1;
2743 set_bit(__QLCNIC_START_FW, &adapter->state); 3089 set_bit(__QLCNIC_START_FW, &adapter->state);
2744 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); 3090 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
@@ -2756,7 +3102,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
2756 if (netif_running(netdev)) { 3102 if (netif_running(netdev)) {
2757 err = qlcnic_attach(adapter); 3103 err = qlcnic_attach(adapter);
2758 if (err) { 3104 if (err) {
2759 qlcnic_clr_all_drv_state(adapter); 3105 qlcnic_clr_all_drv_state(adapter, 1);
2760 clear_bit(__QLCNIC_AER, &adapter->state); 3106 clear_bit(__QLCNIC_AER, &adapter->state);
2761 netif_device_attach(netdev); 3107 netif_device_attach(netdev);
2762 return err; 3108 return err;
@@ -2822,7 +3168,6 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
2822 FW_POLL_DELAY); 3168 FW_POLL_DELAY);
2823} 3169}
2824 3170
2825
2826static int 3171static int
2827qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) 3172qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2828{ 3173{
@@ -2832,8 +3177,20 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2832 if (err) 3177 if (err)
2833 return err; 3178 return err;
2834 3179
 3180 err = qlcnic_check_npar_operational(adapter);
3181 if (err)
3182 return err;
3183
3184 err = qlcnic_initialize_nic(adapter);
3185 if (err)
3186 return err;
3187
2835 qlcnic_check_options(adapter); 3188 qlcnic_check_options(adapter);
2836 3189
3190 err = qlcnic_set_eswitch_port_config(adapter);
3191 if (err)
3192 return err;
3193
2837 adapter->need_fw_reset = 0; 3194 adapter->need_fw_reset = 0;
2838 3195
2839 return err; 3196 return err;
@@ -3093,9 +3450,6 @@ validate_pm_config(struct qlcnic_adapter *adapter,
3093 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC) 3450 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3094 return QL_STATUS_INVALID_PARAM; 3451 return QL_STATUS_INVALID_PARAM;
3095 3452
3096 if (!IS_VALID_MODE(pm_cfg[i].action))
3097 return QL_STATUS_INVALID_PARAM;
3098
3099 s_esw_id = adapter->npars[src_pci_func].phy_port; 3453 s_esw_id = adapter->npars[src_pci_func].phy_port;
3100 d_esw_id = adapter->npars[dest_pci_func].phy_port; 3454 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3101 3455
@@ -3129,7 +3483,7 @@ qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3129 return ret; 3483 return ret;
3130 for (i = 0; i < count; i++) { 3484 for (i = 0; i < count; i++) {
3131 pci_func = pm_cfg[i].pci_func; 3485 pci_func = pm_cfg[i].pci_func;
3132 action = pm_cfg[i].action; 3486 action = !!pm_cfg[i].action;
3133 id = adapter->npars[pci_func].phy_port; 3487 id = adapter->npars[pci_func].phy_port;
3134 ret = qlcnic_config_port_mirroring(adapter, id, 3488 ret = qlcnic_config_port_mirroring(adapter, id,
3135 action, pci_func); 3489 action, pci_func);
@@ -3140,7 +3494,7 @@ qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3140 for (i = 0; i < count; i++) { 3494 for (i = 0; i < count; i++) {
3141 pci_func = pm_cfg[i].pci_func; 3495 pci_func = pm_cfg[i].pci_func;
3142 id = adapter->npars[pci_func].phy_port; 3496 id = adapter->npars[pci_func].phy_port;
3143 adapter->npars[pci_func].enable_pm = pm_cfg[i].action; 3497 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
3144 adapter->npars[pci_func].dest_npar = id; 3498 adapter->npars[pci_func].dest_npar = id;
3145 } 3499 }
3146 return size; 3500 return size;
@@ -3172,30 +3526,45 @@ qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3172 3526
3173static int 3527static int
3174validate_esw_config(struct qlcnic_adapter *adapter, 3528validate_esw_config(struct qlcnic_adapter *adapter,
3175 struct qlcnic_esw_func_cfg *esw_cfg, int count) 3529 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3176{ 3530{
3531 u32 op_mode;
3177 u8 pci_func; 3532 u8 pci_func;
3178 int i; 3533 int i;
3179 3534
3535 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3536
3180 for (i = 0; i < count; i++) { 3537 for (i = 0; i < count; i++) {
3181 pci_func = esw_cfg[i].pci_func; 3538 pci_func = esw_cfg[i].pci_func;
3182 if (pci_func >= QLCNIC_MAX_PCI_FUNC) 3539 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3183 return QL_STATUS_INVALID_PARAM; 3540 return QL_STATUS_INVALID_PARAM;
3184 3541
3185 if (adapter->npars[i].type != QLCNIC_TYPE_NIC) 3542 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3186 return QL_STATUS_INVALID_PARAM; 3543 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3544 return QL_STATUS_INVALID_PARAM;
3187 3545
3188 if (esw_cfg->host_vlan_tag == 1) 3546 switch (esw_cfg[i].op_mode) {
3547 case QLCNIC_PORT_DEFAULTS:
3548 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
3549 QLCNIC_NON_PRIV_FUNC) {
3550 esw_cfg[i].mac_anti_spoof = 0;
3551 esw_cfg[i].mac_override = 1;
3552 }
3553 break;
3554 case QLCNIC_ADD_VLAN:
3189 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) 3555 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3190 return QL_STATUS_INVALID_PARAM; 3556 return QL_STATUS_INVALID_PARAM;
3191 3557 if (!esw_cfg[i].op_type)
3192 if (!IS_VALID_MODE(esw_cfg[i].promisc_mode) 3558 return QL_STATUS_INVALID_PARAM;
3193 || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag) 3559 break;
3194 || !IS_VALID_MODE(esw_cfg[i].mac_learning) 3560 case QLCNIC_DEL_VLAN:
3195 || !IS_VALID_MODE(esw_cfg[i].discard_tagged)) 3561 if (!esw_cfg[i].op_type)
3562 return QL_STATUS_INVALID_PARAM;
3563 break;
3564 default:
3196 return QL_STATUS_INVALID_PARAM; 3565 return QL_STATUS_INVALID_PARAM;
3566 }
3197 } 3567 }
3198
3199 return 0; 3568 return 0;
3200} 3569}
3201 3570
@@ -3206,8 +3575,9 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3206 struct device *dev = container_of(kobj, struct device, kobj); 3575 struct device *dev = container_of(kobj, struct device, kobj);
3207 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 3576 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3208 struct qlcnic_esw_func_cfg *esw_cfg; 3577 struct qlcnic_esw_func_cfg *esw_cfg;
3578 struct qlcnic_npar_info *npar;
3209 int count, rem, i, ret; 3579 int count, rem, i, ret;
3210 u8 id, pci_func; 3580 u8 pci_func, op_mode = 0;
3211 3581
3212 count = size / sizeof(struct qlcnic_esw_func_cfg); 3582 count = size / sizeof(struct qlcnic_esw_func_cfg);
3213 rem = size % sizeof(struct qlcnic_esw_func_cfg); 3583 rem = size % sizeof(struct qlcnic_esw_func_cfg);
@@ -3220,30 +3590,55 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3220 return ret; 3590 return ret;
3221 3591
3222 for (i = 0; i < count; i++) { 3592 for (i = 0; i < count; i++) {
3223 pci_func = esw_cfg[i].pci_func; 3593 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3224 id = adapter->npars[pci_func].phy_port; 3594 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3225 ret = qlcnic_config_switch_port(adapter, id, 3595 return QL_STATUS_INVALID_PARAM;
3226 esw_cfg[i].host_vlan_tag, 3596
3227 esw_cfg[i].discard_tagged, 3597 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3228 esw_cfg[i].promisc_mode, 3598 continue;
3229 esw_cfg[i].mac_learning, 3599
3230 esw_cfg[i].pci_func, 3600 op_mode = esw_cfg[i].op_mode;
3231 esw_cfg[i].vlan_id); 3601 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3232 if (ret) 3602 esw_cfg[i].op_mode = op_mode;
3233 return ret; 3603 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3604
3605 switch (esw_cfg[i].op_mode) {
3606 case QLCNIC_PORT_DEFAULTS:
3607 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3608 break;
3609 case QLCNIC_ADD_VLAN:
3610 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3611 break;
3612 case QLCNIC_DEL_VLAN:
3613 esw_cfg[i].vlan_id = 0;
3614 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3615 break;
3616 }
3234 } 3617 }
3235 3618
3619 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3620 goto out;
3621
3236 for (i = 0; i < count; i++) { 3622 for (i = 0; i < count; i++) {
3237 pci_func = esw_cfg[i].pci_func; 3623 pci_func = esw_cfg[i].pci_func;
3238 adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode; 3624 npar = &adapter->npars[pci_func];
3239 adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning; 3625 switch (esw_cfg[i].op_mode) {
3240 adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id; 3626 case QLCNIC_PORT_DEFAULTS:
3241 adapter->npars[pci_func].discard_tagged = 3627 npar->promisc_mode = esw_cfg[i].promisc_mode;
3242 esw_cfg[i].discard_tagged; 3628 npar->mac_override = esw_cfg[i].mac_override;
3243 adapter->npars[pci_func].host_vlan_tag = 3629 npar->offload_flags = esw_cfg[i].offload_flags;
3244 esw_cfg[i].host_vlan_tag; 3630 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3631 npar->discard_tagged = esw_cfg[i].discard_tagged;
3632 break;
3633 case QLCNIC_ADD_VLAN:
3634 npar->pvid = esw_cfg[i].vlan_id;
3635 break;
3636 case QLCNIC_DEL_VLAN:
3637 npar->pvid = 0;
3638 break;
3639 }
3245 } 3640 }
3246 3641out:
3247 return size; 3642 return size;
3248} 3643}
3249 3644
@@ -3254,7 +3649,7 @@ qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3254 struct device *dev = container_of(kobj, struct device, kobj); 3649 struct device *dev = container_of(kobj, struct device, kobj);
3255 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 3650 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3256 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC]; 3651 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3257 int i; 3652 u8 i;
3258 3653
3259 if (size != sizeof(esw_cfg)) 3654 if (size != sizeof(esw_cfg))
3260 return QL_STATUS_INVALID_PARAM; 3655 return QL_STATUS_INVALID_PARAM;
@@ -3262,12 +3657,9 @@ qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3262 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 3657 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3263 if (adapter->npars[i].type != QLCNIC_TYPE_NIC) 3658 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3264 continue; 3659 continue;
3265 3660 esw_cfg[i].pci_func = i;
3266 esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag; 3661 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3267 esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode; 3662 return QL_STATUS_INVALID_PARAM;
3268 esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
3269 esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
3270 esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
3271 } 3663 }
3272 memcpy(buf, &esw_cfg, size); 3664 memcpy(buf, &esw_cfg, size);
3273 3665
@@ -3370,6 +3762,115 @@ qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3370} 3762}
3371 3763
3372static ssize_t 3764static ssize_t
3765qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3766 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3767{
3768 struct device *dev = container_of(kobj, struct device, kobj);
3769 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3770 struct qlcnic_esw_statistics port_stats;
3771 int ret;
3772
3773 if (size != sizeof(struct qlcnic_esw_statistics))
3774 return QL_STATUS_INVALID_PARAM;
3775
3776 if (offset >= QLCNIC_MAX_PCI_FUNC)
3777 return QL_STATUS_INVALID_PARAM;
3778
3779 memset(&port_stats, 0, size);
3780 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3781 &port_stats.rx);
3782 if (ret)
3783 return ret;
3784
3785 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3786 &port_stats.tx);
3787 if (ret)
3788 return ret;
3789
3790 memcpy(buf, &port_stats, size);
3791 return size;
3792}
3793
3794static ssize_t
3795qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3796 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3797{
3798 struct device *dev = container_of(kobj, struct device, kobj);
3799 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3800 struct qlcnic_esw_statistics esw_stats;
3801 int ret;
3802
3803 if (size != sizeof(struct qlcnic_esw_statistics))
3804 return QL_STATUS_INVALID_PARAM;
3805
3806 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3807 return QL_STATUS_INVALID_PARAM;
3808
3809 memset(&esw_stats, 0, size);
3810 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3811 &esw_stats.rx);
3812 if (ret)
3813 return ret;
3814
3815 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3816 &esw_stats.tx);
3817 if (ret)
3818 return ret;
3819
3820 memcpy(buf, &esw_stats, size);
3821 return size;
3822}
3823
3824static ssize_t
3825qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3826 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3827{
3828 struct device *dev = container_of(kobj, struct device, kobj);
3829 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3830 int ret;
3831
3832 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3833 return QL_STATUS_INVALID_PARAM;
3834
3835 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3836 QLCNIC_QUERY_RX_COUNTER);
3837 if (ret)
3838 return ret;
3839
3840 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3841 QLCNIC_QUERY_TX_COUNTER);
3842 if (ret)
3843 return ret;
3844
3845 return size;
3846}
3847
3848static ssize_t
3849qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3850 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3851{
3852
3853 struct device *dev = container_of(kobj, struct device, kobj);
3854 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3855 int ret;
3856
3857 if (offset >= QLCNIC_MAX_PCI_FUNC)
3858 return QL_STATUS_INVALID_PARAM;
3859
3860 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3861 QLCNIC_QUERY_RX_COUNTER);
3862 if (ret)
3863 return ret;
3864
3865 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3866 QLCNIC_QUERY_TX_COUNTER);
3867 if (ret)
3868 return ret;
3869
3870 return size;
3871}
3872
3873static ssize_t
3373qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, 3874qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3374 struct bin_attribute *attr, char *buf, loff_t offset, size_t size) 3875 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3375{ 3876{
@@ -3418,6 +3919,20 @@ static struct bin_attribute bin_attr_pci_config = {
3418 .write = NULL, 3919 .write = NULL,
3419}; 3920};
3420 3921
3922static struct bin_attribute bin_attr_port_stats = {
3923 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3924 .size = 0,
3925 .read = qlcnic_sysfs_get_port_stats,
3926 .write = qlcnic_sysfs_clear_port_stats,
3927};
3928
3929static struct bin_attribute bin_attr_esw_stats = {
3930 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3931 .size = 0,
3932 .read = qlcnic_sysfs_get_esw_stats,
3933 .write = qlcnic_sysfs_clear_esw_stats,
3934};
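
Both stats nodes take their selector (PCI function for port_stats, eswitch id for esw_stats) as the file offset and insist on a whole-struct read. A hedged userspace sketch follows; the sysfs path and STATS_SZ are placeholders, and a real reader must pass exactly sizeof(struct qlcnic_esw_statistics) from the driver headers:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

enum { STATS_SZ = 80 };	/* placeholder, not the authoritative size */

int main(void)
{
	unsigned char buf[STATS_SZ];
	int fd = open("/sys/bus/pci/devices/0000:03:00.0/port_stats",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* offset selects the PCI function; a short or odd size is
	 * rejected with QL_STATUS_INVALID_PARAM by the handler above */
	if (pread(fd, buf, sizeof(buf), 2) != sizeof(buf))
		perror("pread");
	close(fd);
	return 0;
}
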
3935
3421static struct bin_attribute bin_attr_esw_config = { 3936static struct bin_attribute bin_attr_esw_config = {
3422 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, 3937 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3423 .size = 0, 3938 .size = 0,
@@ -3457,6 +3972,9 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3457{ 3972{
3458 struct device *dev = &adapter->pdev->dev; 3973 struct device *dev = &adapter->pdev->dev;
3459 3974
3975 if (device_create_bin_file(dev, &bin_attr_port_stats))
 3976 dev_info(dev, "failed to create port stats sysfs entry\n");
3977
3460 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) 3978 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3461 return; 3979 return;
3462 if (device_create_file(dev, &dev_attr_diag_mode)) 3980 if (device_create_file(dev, &dev_attr_diag_mode))
@@ -3465,18 +3983,20 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3465 dev_info(dev, "failed to create crb sysfs entry\n"); 3983 dev_info(dev, "failed to create crb sysfs entry\n");
3466 if (device_create_bin_file(dev, &bin_attr_mem)) 3984 if (device_create_bin_file(dev, &bin_attr_mem))
3467 dev_info(dev, "failed to create mem sysfs entry\n"); 3985 dev_info(dev, "failed to create mem sysfs entry\n");
3468 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || 3986 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3469 adapter->op_mode != QLCNIC_MGMT_FUNC) 3987 return;
3988 if (device_create_bin_file(dev, &bin_attr_esw_config))
 3989 dev_info(dev, "failed to create esw config sysfs entry\n");
3990 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3470 return; 3991 return;
3471 if (device_create_bin_file(dev, &bin_attr_pci_config)) 3992 if (device_create_bin_file(dev, &bin_attr_pci_config))
3472 dev_info(dev, "failed to create pci config sysfs entry"); 3993 dev_info(dev, "failed to create pci config sysfs entry");
3473 if (device_create_bin_file(dev, &bin_attr_npar_config)) 3994 if (device_create_bin_file(dev, &bin_attr_npar_config))
3474 dev_info(dev, "failed to create npar config sysfs entry"); 3995 dev_info(dev, "failed to create npar config sysfs entry");
3475 if (device_create_bin_file(dev, &bin_attr_esw_config))
3476 dev_info(dev, "failed to create esw config sysfs entry");
3477 if (device_create_bin_file(dev, &bin_attr_pm_config)) 3996 if (device_create_bin_file(dev, &bin_attr_pm_config))
3478 dev_info(dev, "failed to create pm config sysfs entry"); 3997 dev_info(dev, "failed to create pm config sysfs entry");
3479 3998 if (device_create_bin_file(dev, &bin_attr_esw_stats))
 3999 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
3480} 4000}
3481 4001
3482static void 4002static void
@@ -3484,18 +4004,22 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3484{ 4004{
3485 struct device *dev = &adapter->pdev->dev; 4005 struct device *dev = &adapter->pdev->dev;
3486 4006
4007 device_remove_bin_file(dev, &bin_attr_port_stats);
4008
3487 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) 4009 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3488 return; 4010 return;
3489 device_remove_file(dev, &dev_attr_diag_mode); 4011 device_remove_file(dev, &dev_attr_diag_mode);
3490 device_remove_bin_file(dev, &bin_attr_crb); 4012 device_remove_bin_file(dev, &bin_attr_crb);
3491 device_remove_bin_file(dev, &bin_attr_mem); 4013 device_remove_bin_file(dev, &bin_attr_mem);
3492 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || 4014 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3493 adapter->op_mode != QLCNIC_MGMT_FUNC) 4015 return;
4016 device_remove_bin_file(dev, &bin_attr_esw_config);
4017 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3494 return; 4018 return;
3495 device_remove_bin_file(dev, &bin_attr_pci_config); 4019 device_remove_bin_file(dev, &bin_attr_pci_config);
3496 device_remove_bin_file(dev, &bin_attr_npar_config); 4020 device_remove_bin_file(dev, &bin_attr_npar_config);
3497 device_remove_bin_file(dev, &bin_attr_esw_config);
3498 device_remove_bin_file(dev, &bin_attr_pm_config); 4021 device_remove_bin_file(dev, &bin_attr_pm_config);
4022 device_remove_bin_file(dev, &bin_attr_esw_stats);
3499} 4023}
3500 4024
3501#ifdef CONFIG_INET 4025#ifdef CONFIG_INET
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 5f89e83501f4..4ffebe83d883 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1566,7 +1566,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1566 rx_ring->rx_packets++; 1566 rx_ring->rx_packets++;
1567 rx_ring->rx_bytes += skb->len; 1567 rx_ring->rx_bytes += skb->len;
1568 skb->protocol = eth_type_trans(skb, ndev); 1568 skb->protocol = eth_type_trans(skb, ndev);
1569 skb->ip_summed = CHECKSUM_NONE; 1569 skb_checksum_none_assert(skb);
1570 1570
1571 if (qdev->rx_csum && 1571 if (qdev->rx_csum &&
1572 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 1572 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
@@ -1676,7 +1676,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1676 rx_ring->rx_packets++; 1676 rx_ring->rx_packets++;
1677 rx_ring->rx_bytes += skb->len; 1677 rx_ring->rx_bytes += skb->len;
1678 skb->protocol = eth_type_trans(skb, ndev); 1678 skb->protocol = eth_type_trans(skb, ndev);
1679 skb->ip_summed = CHECKSUM_NONE; 1679 skb_checksum_none_assert(skb);
1680 1680
1681 /* If rx checksum is on, and there are no 1681 /* If rx checksum is on, and there are no
1682 * csum or frame errors. 1682 * csum or frame errors.
@@ -1996,7 +1996,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1996 } 1996 }
1997 1997
1998 skb->protocol = eth_type_trans(skb, ndev); 1998 skb->protocol = eth_type_trans(skb, ndev);
1999 skb->ip_summed = CHECKSUM_NONE; 1999 skb_checksum_none_assert(skb);
2000 2000
2001 /* If rx checksum is on, and there are no 2001 /* If rx checksum is on, and there are no
2002 * csum or frame errors. 2002 * csum or frame errors.
@@ -2222,10 +2222,11 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2222 ql_update_cq(rx_ring); 2222 ql_update_cq(rx_ring);
2223 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2223 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224 } 2224 }
2225 if (!net_rsp)
2226 return 0;
2225 ql_write_cq_idx(rx_ring); 2227 ql_write_cq_idx(rx_ring);
2226 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; 2228 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2227 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) && 2229 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2228 net_rsp != NULL) {
2229 if (atomic_read(&tx_ring->queue_stopped) && 2230 if (atomic_read(&tx_ring->queue_stopped) &&
2230 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) 2231 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2231 /* 2232 /*
@@ -3888,11 +3889,8 @@ int ql_wol(struct ql_adapter *qdev)
3888 return status; 3889 return status;
3889} 3890}
3890 3891
3891static int ql_adapter_down(struct ql_adapter *qdev) 3892static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3892{ 3893{
3893 int i, status = 0;
3894
3895 ql_link_off(qdev);
3896 3894
3897 /* Don't kill the reset worker thread if we 3895 /* Don't kill the reset worker thread if we
3898 * are in the process of recovery. 3896 * are in the process of recovery.
@@ -3904,6 +3902,15 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3904 cancel_delayed_work_sync(&qdev->mpi_idc_work); 3902 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3905 cancel_delayed_work_sync(&qdev->mpi_core_to_log); 3903 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3906 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 3904 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3905}
3906
3907static int ql_adapter_down(struct ql_adapter *qdev)
3908{
3909 int i, status = 0;
3910
3911 ql_link_off(qdev);
3912
3913 ql_cancel_all_work_sync(qdev);
3907 3914
3908 for (i = 0; i < qdev->rss_ring_count; i++) 3915 for (i = 0; i < qdev->rss_ring_count; i++)
3909 napi_disable(&qdev->rx_ring[i].napi); 3916 napi_disable(&qdev->rx_ring[i].napi);
@@ -4726,6 +4733,7 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
4726 struct net_device *ndev = pci_get_drvdata(pdev); 4733 struct net_device *ndev = pci_get_drvdata(pdev);
4727 struct ql_adapter *qdev = netdev_priv(ndev); 4734 struct ql_adapter *qdev = netdev_priv(ndev);
4728 del_timer_sync(&qdev->timer); 4735 del_timer_sync(&qdev->timer);
4736 ql_cancel_all_work_sync(qdev);
4729 unregister_netdev(ndev); 4737 unregister_netdev(ndev);
4730 ql_release_all(pdev); 4738 ql_release_all(pdev);
4731 pci_disable_device(pdev); 4739 pci_disable_device(pdev);
@@ -4745,13 +4753,7 @@ static void ql_eeh_close(struct net_device *ndev)
4745 4753
4746 /* Disabling the timer */ 4754 /* Disabling the timer */
4747 del_timer_sync(&qdev->timer); 4755 del_timer_sync(&qdev->timer);
4748 if (test_bit(QL_ADAPTER_UP, &qdev->flags)) 4756 ql_cancel_all_work_sync(qdev);
4749 cancel_delayed_work_sync(&qdev->asic_reset_work);
4750 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4751 cancel_delayed_work_sync(&qdev->mpi_work);
4752 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4753 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4754 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4755 4757
4756 for (i = 0; i < qdev->rss_ring_count; i++) 4758 for (i = 0; i < qdev->rss_ring_count; i++)
4757 netif_napi_del(&qdev->rx_ring[i].napi); 4759 netif_napi_del(&qdev->rx_ring[i].napi);
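[Editor's note] The qlge change above is a straightforward factoring: the six cancel_delayed_work_sync() calls move into ql_cancel_all_work_sync(), now shared by ql_adapter_down(), qlge_remove() and ql_eeh_close(), so every teardown path quiesces the same set of workers (qlge_remove() previously cancelled none of them). A minimal sketch of the pattern, with hypothetical foo_* names:

	#include <linux/workqueue.h>

	/* Hypothetical adapter with several delayed works, mirroring
	 * the qlge layout.  Every teardown path must stop all of them. */
	struct foo_adapter {
		struct delayed_work asic_reset_work;
		struct delayed_work mpi_work;
		struct delayed_work mpi_port_cfg_work;
	};

	/* One helper shared by ->remove(), EEH error handling and the
	 * normal down path: no caller can forget one of the workers. */
	static void foo_cancel_all_work_sync(struct foo_adapter *adap)
	{
		cancel_delayed_work_sync(&adap->asic_reset_work);
		cancel_delayed_work_sync(&adap->mpi_work);
		cancel_delayed_work_sync(&adap->mpi_port_cfg_work);
	}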
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 142c381e1d73..63db065508f4 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -200,7 +200,7 @@ struct r6040_private {
200 int old_duplex; 200 int old_duplex;
201}; 201};
202 202
203static char version[] __devinitdata = KERN_INFO DRV_NAME 203static char version[] __devinitdata = DRV_NAME
204 ": RDC R6040 NAPI net driver," 204 ": RDC R6040 NAPI net driver,"
205 "version "DRV_VERSION " (" DRV_RELDATE ")"; 205 "version "DRV_VERSION " (" DRV_RELDATE ")";
206 206
@@ -224,7 +224,8 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
224} 224}
225 225
226/* Write a word data from PHY Chip */ 226/* Write a word data from PHY Chip */
227static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val) 227static void r6040_phy_write(void __iomem *ioaddr,
228 int phy_addr, int reg, u16 val)
228{ 229{
229 int limit = 2048; 230 int limit = 2048;
230 u16 cmd; 231 u16 cmd;
@@ -348,8 +349,8 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
348 } 349 }
349 desc->skb_ptr = skb; 350 desc->skb_ptr = skb;
350 desc->buf = cpu_to_le32(pci_map_single(lp->pdev, 351 desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
351 desc->skb_ptr->data, 352 desc->skb_ptr->data,
352 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); 353 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
353 desc->status = DSC_OWNER_MAC; 354 desc->status = DSC_OWNER_MAC;
354 desc = desc->vndescp; 355 desc = desc->vndescp;
355 } while (desc != lp->rx_ring); 356 } while (desc != lp->rx_ring);
@@ -491,12 +492,14 @@ static int r6040_close(struct net_device *dev)
491 492
492 /* Free Descriptor memory */ 493 /* Free Descriptor memory */
493 if (lp->rx_ring) { 494 if (lp->rx_ring) {
494 pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); 495 pci_free_consistent(pdev,
496 RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
495 lp->rx_ring = NULL; 497 lp->rx_ring = NULL;
496 } 498 }
497 499
498 if (lp->tx_ring) { 500 if (lp->tx_ring) {
499 pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma); 501 pci_free_consistent(pdev,
502 TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
500 lp->tx_ring = NULL; 503 lp->tx_ring = NULL;
501 } 504 }
502 505
@@ -547,7 +550,7 @@ static int r6040_rx(struct net_device *dev, int limit)
547 } 550 }
548 goto next_descr; 551 goto next_descr;
549 } 552 }
550 553
551 /* Packet successfully received */ 554 /* Packet successfully received */
552 new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); 555 new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
553 if (!new_skb) { 556 if (!new_skb) {
@@ -556,13 +559,13 @@ static int r6040_rx(struct net_device *dev, int limit)
556 } 559 }
557 skb_ptr = descptr->skb_ptr; 560 skb_ptr = descptr->skb_ptr;
558 skb_ptr->dev = priv->dev; 561 skb_ptr->dev = priv->dev;
559 562
560 /* Do not count the CRC */ 563 /* Do not count the CRC */
561 skb_put(skb_ptr, descptr->len - 4); 564 skb_put(skb_ptr, descptr->len - 4);
562 pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), 565 pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
563 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); 566 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
564 skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev); 567 skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
565 568
566 /* Send to upper layer */ 569 /* Send to upper layer */
567 netif_receive_skb(skb_ptr); 570 netif_receive_skb(skb_ptr);
568 dev->stats.rx_packets++; 571 dev->stats.rx_packets++;
@@ -710,8 +713,10 @@ static int r6040_up(struct net_device *dev)
710 return ret; 713 return ret;
711 714
712 /* improve performance (by RDC guys) */ 715 /* improve performance (by RDC guys) */
713 r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); 716 r6040_phy_write(ioaddr, 30, 17,
714 r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); 717 (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
718 r6040_phy_write(ioaddr, 30, 17,
719 ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
715 r6040_phy_write(ioaddr, 0, 19, 0x0000); 720 r6040_phy_write(ioaddr, 0, 19, 0x0000);
716 r6040_phy_write(ioaddr, 0, 30, 0x01F0); 721 r6040_phy_write(ioaddr, 0, 30, 0x01F0);
717 722
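[Editor's note] The reflowed PHY tweak above keeps the driver's clear-bit idiom unchanged: by De Morgan, ~((~x) | 0x2000) is simply x & ~0x2000, so the pair of writes is a set-bit followed by a clear-bit read-modify-write. Equivalent spelling:

	/* Equivalent form of the two PHY pokes above:
	 *   ~((~x) | BIT)  ==  x & ~BIT        (De Morgan)
	 */
	u16 x;

	x = r6040_phy_read(ioaddr, 30, 17);
	r6040_phy_write(ioaddr, 30, 17, x | 0x4000);	/* set bit 14 */
	x = r6040_phy_read(ioaddr, 30, 17);
	r6040_phy_write(ioaddr, 30, 17, x & ~0x2000);	/* clear bit 13 */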
@@ -751,7 +756,7 @@ static int r6040_open(struct net_device *dev)
751 ret = request_irq(dev->irq, r6040_interrupt, 756 ret = request_irq(dev->irq, r6040_interrupt,
752 IRQF_SHARED, dev->name, dev); 757 IRQF_SHARED, dev->name, dev);
753 if (ret) 758 if (ret)
754 return ret; 759 goto out;
755 760
756 /* Set MAC address */ 761 /* Set MAC address */
757 r6040_mac_address(dev); 762 r6040_mac_address(dev);
@@ -759,30 +764,37 @@ static int r6040_open(struct net_device *dev)
759 /* Allocate Descriptor memory */ 764 /* Allocate Descriptor memory */
760 lp->rx_ring = 765 lp->rx_ring =
761 pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma); 766 pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
762 if (!lp->rx_ring) 767 if (!lp->rx_ring) {
763 return -ENOMEM; 768 ret = -ENOMEM;
769 goto err_free_irq;
770 }
764 771
765 lp->tx_ring = 772 lp->tx_ring =
766 pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma); 773 pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
767 if (!lp->tx_ring) { 774 if (!lp->tx_ring) {
768 pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring, 775 ret = -ENOMEM;
769 lp->rx_ring_dma); 776 goto err_free_rx_ring;
770 return -ENOMEM;
771 } 777 }
772 778
773 ret = r6040_up(dev); 779 ret = r6040_up(dev);
774 if (ret) { 780 if (ret)
775 pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring, 781 goto err_free_tx_ring;
776 lp->tx_ring_dma);
777 pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
778 lp->rx_ring_dma);
779 return ret;
780 }
781 782
782 napi_enable(&lp->napi); 783 napi_enable(&lp->napi);
783 netif_start_queue(dev); 784 netif_start_queue(dev);
784 785
785 return 0; 786 return 0;
787
788err_free_tx_ring:
789 pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
790 lp->tx_ring_dma);
791err_free_rx_ring:
792 pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
793 lp->rx_ring_dma);
794err_free_irq:
795 free_irq(dev->irq, dev);
796out:
797 return ret;
786} 798}
787 799
788static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, 800static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
@@ -946,7 +958,7 @@ static const struct net_device_ops r6040_netdev_ops = {
946 .ndo_set_multicast_list = r6040_multicast_list, 958 .ndo_set_multicast_list = r6040_multicast_list,
947 .ndo_change_mtu = eth_change_mtu, 959 .ndo_change_mtu = eth_change_mtu,
948 .ndo_validate_addr = eth_validate_addr, 960 .ndo_validate_addr = eth_validate_addr,
949 .ndo_set_mac_address = eth_mac_addr, 961 .ndo_set_mac_address = eth_mac_addr,
950 .ndo_do_ioctl = r6040_ioctl, 962 .ndo_do_ioctl = r6040_ioctl,
951 .ndo_tx_timeout = r6040_tx_timeout, 963 .ndo_tx_timeout = r6040_tx_timeout,
952#ifdef CONFIG_NET_POLL_CONTROLLER 964#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1039,7 +1051,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1039 u16 *adrp; 1051 u16 *adrp;
1040 int i; 1052 int i;
1041 1053
1042 printk("%s\n", version); 1054 pr_info("%s\n", version);
1043 1055
1044 err = pci_enable_device(pdev); 1056 err = pci_enable_device(pdev);
1045 if (err) 1057 if (err)
@@ -1113,7 +1125,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1113 /* Some bootloader/BIOSes do not initialize 1125 /* Some bootloader/BIOSes do not initialize
1114 * MAC address, warn about that */ 1126 * MAC address, warn about that */
1115 if (!(adrp[0] || adrp[1] || adrp[2])) { 1127 if (!(adrp[0] || adrp[1] || adrp[2])) {
1116 netdev_warn(dev, "MAC address not initialized, generating random\n"); 1128 netdev_warn(dev, "MAC address not initialized, "
1129 "generating random\n");
1117 random_ether_addr(dev->dev_addr); 1130 random_ether_addr(dev->dev_addr);
1118 } 1131 }
1119 1132
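[Editor's note] r6040_open() is reworked from repeated inline cleanup into the conventional goto-unwind ladder, which also fixes a real bug: previously a failed rx_ring allocation returned -ENOMEM without releasing the IRQ. A sketch of the shape, with hypothetical foo_* names; each label undoes exactly the steps taken before the failure, in reverse order:

	static int foo_open(struct net_device *dev)
	{
		struct foo_priv *lp = netdev_priv(dev);
		int ret;

		ret = request_irq(dev->irq, foo_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (ret)
			goto out;

		lp->rx_ring = foo_alloc_ring(lp, RX_DESC_SIZE, &lp->rx_ring_dma);
		if (!lp->rx_ring) {
			ret = -ENOMEM;
			goto err_free_irq;
		}

		lp->tx_ring = foo_alloc_ring(lp, TX_DESC_SIZE, &lp->tx_ring_dma);
		if (!lp->tx_ring) {
			ret = -ENOMEM;
			goto err_free_rx_ring;
		}

		ret = foo_up(dev);
		if (ret)
			goto err_free_tx_ring;

		netif_start_queue(dev);
		return 0;

	err_free_tx_ring:
		foo_free_ring(lp, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
	err_free_rx_ring:
		foo_free_ring(lp, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
	err_free_irq:
		free_irq(dev->irq, dev);
	out:
		return ret;
	}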
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 078bbf4e6f19..54900332f12d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1076,7 +1076,12 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1076 int ret; 1076 int ret;
1077 1077
1078 if (vlgrp && (opts2 & RxVlanTag)) { 1078 if (vlgrp && (opts2 & RxVlanTag)) {
1079 __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling); 1079 u16 vtag = swab16(opts2 & 0xffff);
1080
1081 if (likely(polling))
1082 vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
1083 else
1084 __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
1080 ret = 0; 1085 ret = 0;
1081 } else 1086 } else
1082 ret = -1; 1087 ret = -1;
@@ -3186,6 +3191,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3186#ifdef CONFIG_R8169_VLAN 3191#ifdef CONFIG_R8169_VLAN
3187 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3192 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3188#endif 3193#endif
3194 dev->features |= NETIF_F_GRO;
3189 3195
3190 tp->intr_mask = 0xffff; 3196 tp->intr_mask = 0xffff;
3191 tp->align = cfg->align; 3197 tp->align = cfg->align;
@@ -4450,9 +4456,8 @@ static inline int rtl8169_fragmented_frame(u32 status)
4450 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); 4456 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
4451} 4457}
4452 4458
4453static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc) 4459static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4454{ 4460{
4455 u32 opts1 = le32_to_cpu(desc->opts1);
4456 u32 status = opts1 & RxProtoMask; 4461 u32 status = opts1 & RxProtoMask;
4457 4462
4458 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || 4463 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
@@ -4460,7 +4465,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
4460 ((status == RxProtoIP) && !(opts1 & IPFail))) 4465 ((status == RxProtoIP) && !(opts1 & IPFail)))
4461 skb->ip_summed = CHECKSUM_UNNECESSARY; 4466 skb->ip_summed = CHECKSUM_UNNECESSARY;
4462 else 4467 else
4463 skb->ip_summed = CHECKSUM_NONE; 4468 skb_checksum_none_assert(skb);
4464} 4469}
4465 4470
4466static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, 4471static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
@@ -4546,8 +4551,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4546 continue; 4551 continue;
4547 } 4552 }
4548 4553
4549 rtl8169_rx_csum(skb, desc);
4550
4551 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { 4554 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
4552 pci_dma_sync_single_for_device(pdev, addr, 4555 pci_dma_sync_single_for_device(pdev, addr,
4553 pkt_size, PCI_DMA_FROMDEVICE); 4556 pkt_size, PCI_DMA_FROMDEVICE);
@@ -4558,12 +4561,13 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4558 tp->Rx_skbuff[entry] = NULL; 4561 tp->Rx_skbuff[entry] = NULL;
4559 } 4562 }
4560 4563
4564 rtl8169_rx_csum(skb, status);
4561 skb_put(skb, pkt_size); 4565 skb_put(skb, pkt_size);
4562 skb->protocol = eth_type_trans(skb, dev); 4566 skb->protocol = eth_type_trans(skb, dev);
4563 4567
4564 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) { 4568 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
4565 if (likely(polling)) 4569 if (likely(polling))
4566 netif_receive_skb(skb); 4570 napi_gro_receive(&tp->napi, skb);
4567 else 4571 else
4568 netif_rx(skb); 4572 netif_rx(skb);
4569 } 4573 }
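[Editor's note] The r8169 conversion advertises NETIF_F_GRO and, on the NAPI path, hands packets to napi_gro_receive()/vlan_gro_receive() instead of netif_receive_skb()/__vlan_hwaccel_rx(); the netif_rx() leg remains for non-polling (e.g. netpoll) context, where GRO is not allowed. rtl8169_rx_csum() also moves below the copy-break and takes the already-read opts1, so the checksum verdict lands on whichever skb actually survives and is passed up. Sketch of the polling-aware dispatch, with a hypothetical name:

	#include <linux/netdevice.h>

	/* 'polling' is true when called from the NAPI poll loop, the
	 * only context where GRO aggregation is legal. */
	static void foo_rx_deliver(struct napi_struct *napi,
				   struct sk_buff *skb, bool polling)
	{
		if (likely(polling))
			napi_gro_receive(napi, skb);	/* may aggregate */
		else
			netif_rx(skb);			/* plain backlog path */
	}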
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index e26e107f93e0..e68c941926f1 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1245,7 +1245,7 @@ static int rr_open(struct net_device *dev)
1245 init_timer(&rrpriv->timer); 1245 init_timer(&rrpriv->timer);
1246 rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */ 1246 rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
1247 rrpriv->timer.data = (unsigned long)dev; 1247 rrpriv->timer.data = (unsigned long)dev;
1248 rrpriv->timer.function = &rr_timer; /* timer handler */ 1248 rrpriv->timer.function = rr_timer; /* timer handler */
1249 add_timer(&rrpriv->timer); 1249 add_timer(&rrpriv->timer);
1250 1250
1251 netif_start_queue(dev); 1251 netif_start_queue(dev);
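[Editor's note] rrunner (and sis900 further down) drop the redundant '&' when assigning the timer callback: a function designator already decays to a function pointer, so rr_timer and &rr_timer denote the same value. For reference, a sketch of the legacy init_timer() idiom these drivers use (the pre-timer_setup() API of this era), with hypothetical names:

	#include <linux/timer.h>

	static void foo_watchdog(unsigned long data)	/* hypothetical */
	{
		struct net_device *dev = (struct net_device *)data;

		printk(KERN_DEBUG "%s: watchdog tick\n", dev->name);
	}

	static void foo_arm_watchdog(struct net_device *dev,
				     struct timer_list *t)
	{
		init_timer(t);
		t->expires  = jiffies + 5 * HZ;		/* 5 s watchdog */
		t->data     = (unsigned long)dev;
		t->function = foo_watchdog;		/* no '&' needed */
		add_timer(t);
	}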
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 18bc5b718bbb..c70ad515383a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -38,8 +38,6 @@
38 * Tx descriptors that can be associated with each corresponding FIFO. 38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA), 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)' 40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be 41 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet 42 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx) 43 * napi: This parameter used to enable/disable NAPI (polling Rx)
@@ -90,7 +88,7 @@
90#include "s2io.h" 88#include "s2io.h"
91#include "s2io-regs.h" 89#include "s2io-regs.h"
92 90
93#define DRV_VERSION "2.0.26.26" 91#define DRV_VERSION "2.0.26.27"
94 92
95/* S2io Driver name & version. */ 93/* S2io Driver name & version. */
96static char s2io_driver_name[] = "Neterion"; 94static char s2io_driver_name[] = "Neterion";
@@ -496,8 +494,6 @@ S2IO_PARM_INT(rxsync_frequency, 3);
496/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ 494/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
497S2IO_PARM_INT(intr_type, 2); 495S2IO_PARM_INT(intr_type, 2);
498/* Large receive offload feature */ 496/* Large receive offload feature */
499static unsigned int lro_enable = 1;
500module_param_named(lro, lro_enable, uint, 0);
501 497
502/* Max pkts to be aggregated by LRO at one time. If not specified, 498/* Max pkts to be aggregated by LRO at one time. If not specified,
503 * aggregation happens until we hit max IP pkt size(64K) 499 * aggregation happens until we hit max IP pkt size(64K)
@@ -5124,8 +5120,6 @@ static void s2io_set_multicast(struct net_device *dev)
5124 /* Create the new Rx filter list and update the same in H/W. */ 5120 /* Create the new Rx filter list and update the same in H/W. */
5125 i = 0; 5121 i = 0;
5126 netdev_for_each_mc_addr(ha, dev) { 5122 netdev_for_each_mc_addr(ha, dev) {
5127 memcpy(sp->usr_addrs[i].addr, ha->addr,
5128 ETH_ALEN);
5129 mac_addr = 0; 5123 mac_addr = 0;
5130 for (j = 0; j < ETH_ALEN; j++) { 5124 for (j = 0; j < ETH_ALEN; j++) {
5131 mac_addr |= ha->addr[j]; 5125 mac_addr |= ha->addr[j];
@@ -6735,13 +6729,10 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6735 return -EINVAL; 6729 return -EINVAL;
6736 6730
6737 if (data & ETH_FLAG_LRO) { 6731 if (data & ETH_FLAG_LRO) {
6738 if (lro_enable) { 6732 if (!(dev->features & NETIF_F_LRO)) {
6739 if (!(dev->features & NETIF_F_LRO)) { 6733 dev->features |= NETIF_F_LRO;
6740 dev->features |= NETIF_F_LRO; 6734 changed = 1;
6741 changed = 1; 6735 }
6742 }
6743 } else
6744 rc = -EINVAL;
6745 } else if (dev->features & NETIF_F_LRO) { 6736 } else if (dev->features & NETIF_F_LRO) {
6746 dev->features &= ~NETIF_F_LRO; 6737 dev->features &= ~NETIF_F_LRO;
6747 changed = 1; 6738 changed = 1;
@@ -6750,7 +6741,6 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6750 if (changed && netif_running(dev)) { 6741 if (changed && netif_running(dev)) {
6751 s2io_stop_all_tx_queue(sp); 6742 s2io_stop_all_tx_queue(sp);
6752 s2io_card_down(sp); 6743 s2io_card_down(sp);
6753 sp->lro = !!(dev->features & NETIF_F_LRO);
6754 rc = s2io_card_up(sp); 6744 rc = s2io_card_up(sp);
6755 if (rc) 6745 if (rc)
6756 s2io_reset(sp); 6746 s2io_reset(sp);
@@ -7307,7 +7297,7 @@ static int s2io_card_up(struct s2io_nic *sp)
7307 struct ring_info *ring = &mac_control->rings[i]; 7297 struct ring_info *ring = &mac_control->rings[i];
7308 7298
7309 ring->mtu = dev->mtu; 7299 ring->mtu = dev->mtu;
7310 ring->lro = sp->lro; 7300 ring->lro = !!(dev->features & NETIF_F_LRO);
7311 ret = fill_rx_buffers(sp, ring, 1); 7301 ret = fill_rx_buffers(sp, ring, 1);
7312 if (ret) { 7302 if (ret) {
7313 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", 7303 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
@@ -7341,7 +7331,7 @@ static int s2io_card_up(struct s2io_nic *sp)
7341 /* Setting its receive mode */ 7331 /* Setting its receive mode */
7342 s2io_set_multicast(dev); 7332 s2io_set_multicast(dev);
7343 7333
7344 if (sp->lro) { 7334 if (dev->features & NETIF_F_LRO) {
7345 /* Initialize max aggregatable pkts per session based on MTU */ 7335 /* Initialize max aggregatable pkts per session based on MTU */
7346 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 7336 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7347 /* Check if we can use (if specified) user provided value */ 7337 /* Check if we can use (if specified) user provided value */
@@ -7613,10 +7603,10 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7613 * Packet with erroneous checksum, let the 7603 * Packet with erroneous checksum, let the
7614 * upper layers deal with it. 7604 * upper layers deal with it.
7615 */ 7605 */
7616 skb->ip_summed = CHECKSUM_NONE; 7606 skb_checksum_none_assert(skb);
7617 } 7607 }
7618 } else 7608 } else
7619 skb->ip_summed = CHECKSUM_NONE; 7609 skb_checksum_none_assert(skb);
7620 7610
7621 swstats->mem_freed += skb->truesize; 7611 swstats->mem_freed += skb->truesize;
7622send_up: 7612send_up:
@@ -7911,7 +7901,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7911 else 7901 else
7912 sp->device_type = XFRAME_I_DEVICE; 7902 sp->device_type = XFRAME_I_DEVICE;
7913 7903
7914 sp->lro = lro_enable;
7915 7904
7916 /* Initialize some PCI/PCI-X fields of the NIC. */ 7905 /* Initialize some PCI/PCI-X fields of the NIC. */
7917 s2io_init_pci(sp); 7906 s2io_init_pci(sp);
@@ -8047,8 +8036,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8047 dev->netdev_ops = &s2io_netdev_ops; 8036 dev->netdev_ops = &s2io_netdev_ops;
8048 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 8037 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
8049 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 8038 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8050 if (lro_enable) 8039 dev->features |= NETIF_F_LRO;
8051 dev->features |= NETIF_F_LRO;
8052 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 8040 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8053 if (sp->high_dma_flag == true) 8041 if (sp->high_dma_flag == true)
8054 dev->features |= NETIF_F_HIGHDMA; 8042 dev->features |= NETIF_F_HIGHDMA;
@@ -8283,9 +8271,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8283 dev->name); 8271 dev->name);
8284 } 8272 }
8285 8273
8286 if (sp->lro) 8274 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8287 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", 8275 dev->name);
8288 dev->name);
8289 if (ufo) 8276 if (ufo)
8290 DBG_PRINT(ERR_DBG, 8277 DBG_PRINT(ERR_DBG,
8291 "%s: UDP Fragmentation Offload(UFO) enabled\n", 8278 "%s: UDP Fragmentation Offload(UFO) enabled\n",
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0af033533905..00b8614efe48 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -816,12 +816,6 @@ struct mac_info {
816 struct stat_block *stats_info; /* Logical address of the stat block */ 816 struct stat_block *stats_info; /* Logical address of the stat block */
817}; 817};
818 818
819/* structure representing the user defined MAC addresses */
820struct usr_addr {
821 char addr[ETH_ALEN];
822 int usage_cnt;
823};
824
825/* Default Tunable parameters of the NIC. */ 819/* Default Tunable parameters of the NIC. */
826#define DEFAULT_FIFO_0_LEN 4096 820#define DEFAULT_FIFO_0_LEN 4096
827#define DEFAULT_FIFO_1_7_LEN 512 821#define DEFAULT_FIFO_1_7_LEN 512
@@ -894,9 +888,7 @@ struct s2io_nic {
894#define ALL_MULTI 2 888#define ALL_MULTI 2
895 889
896#define MAX_ADDRS_SUPPORTED 64 890#define MAX_ADDRS_SUPPORTED 64
897 u16 usr_addr_count;
898 u16 mc_addr_count; 891 u16 mc_addr_count;
899 struct usr_addr usr_addrs[256];
900 892
901 u16 m_cast_flg; 893 u16 m_cast_flg;
902 u16 all_multi_pos; 894 u16 all_multi_pos;
@@ -971,7 +963,6 @@ struct s2io_nic {
971 963
972 unsigned long clubbed_frms_cnt; 964 unsigned long clubbed_frms_cnt;
973 unsigned long sending_both; 965 unsigned long sending_both;
974 u8 lro;
975 u16 lro_max_aggr_per_sess; 966 u16 lro_max_aggr_per_sess;
976 volatile unsigned long state; 967 volatile unsigned long state;
977 u64 general_int_mask; 968 u64 general_int_mask;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 8e6bd45b9f31..d8249d7653c6 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1170,7 +1170,7 @@ again:
1170 sb->ip_summed = CHECKSUM_UNNECESSARY; 1170 sb->ip_summed = CHECKSUM_UNNECESSARY;
1171 /* don't need to set sb->csum */ 1171 /* don't need to set sb->csum */
1172 } else { 1172 } else {
1173 sb->ip_summed = CHECKSUM_NONE; 1173 skb_checksum_none_assert(sb);
1174 } 1174 }
1175 } 1175 }
1176 prefetch(sb->data); 1176 prefetch(sb->data);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8c4067af32b0..31b92f5f32cb 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1251,16 +1251,6 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
1251 return 0; 1251 return 0;
1252} 1252}
1253 1253
1254static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
1255 struct ethtool_drvinfo *drvinfo)
1256{
1257 struct sc92031_priv *priv = netdev_priv(dev);
1258 struct pci_dev *pdev = priv->pdev;
1259
1260 strcpy(drvinfo->driver, SC92031_NAME);
1261 strcpy(drvinfo->bus_info, pci_name(pdev));
1262}
1263
1264static void sc92031_ethtool_get_wol(struct net_device *dev, 1254static void sc92031_ethtool_get_wol(struct net_device *dev,
1265 struct ethtool_wolinfo *wolinfo) 1255 struct ethtool_wolinfo *wolinfo)
1266{ 1256{
@@ -1382,7 +1372,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
1382static const struct ethtool_ops sc92031_ethtool_ops = { 1372static const struct ethtool_ops sc92031_ethtool_ops = {
1383 .get_settings = sc92031_ethtool_get_settings, 1373 .get_settings = sc92031_ethtool_get_settings,
1384 .set_settings = sc92031_ethtool_set_settings, 1374 .set_settings = sc92031_ethtool_set_settings,
1385 .get_drvinfo = sc92031_ethtool_get_drvinfo,
1386 .get_wol = sc92031_ethtool_get_wol, 1375 .get_wol = sc92031_ethtool_get_wol,
1387 .set_wol = sc92031_ethtool_set_wol, 1376 .set_wol = sc92031_ethtool_set_wol,
1388 .nway_reset = sc92031_ethtool_nway_reset, 1377 .nway_reset = sc92031_ethtool_nway_reset,
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 799c461ce7b8..acb372e841b2 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -615,7 +615,7 @@ void __efx_rx_packet(struct efx_channel *channel,
615 EFX_BUG_ON_PARANOID(!skb); 615 EFX_BUG_ON_PARANOID(!skb);
616 616
617 /* Set the SKB flags */ 617 /* Set the SKB flags */
618 skb->ip_summed = CHECKSUM_NONE; 618 skb_checksum_none_assert(skb);
619 619
620 /* Pass the packet up */ 620 /* Pass the packet up */
621 netif_receive_skb(skb); 621 netif_receive_skb(skb);
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 79fd02bc69fd..50259dfec583 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -798,7 +798,7 @@ static int sh_eth_rx(struct net_device *ndev)
798 skb->dev = ndev; 798 skb->dev = ndev;
799 sh_eth_set_receive_align(skb); 799 sh_eth_set_receive_align(skb);
800 800
801 skb->ip_summed = CHECKSUM_NONE; 801 skb_checksum_none_assert(skb);
802 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 802 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
803 } 803 }
804 if (entry >= RX_RING_SIZE - 1) 804 if (entry >= RX_RING_SIZE - 1)
@@ -1031,7 +1031,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
1031 mdp->duplex = -1; 1031 mdp->duplex = -1;
1032 1032
1033 /* Try connect to PHY */ 1033 /* Try connect to PHY */
1034 phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link, 1034 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1035 0, PHY_INTERFACE_MODE_MII); 1035 0, PHY_INTERFACE_MODE_MII);
1036 if (IS_ERR(phydev)) { 1036 if (IS_ERR(phydev)) {
1037 dev_err(&ndev->dev, "phy_connect failed\n"); 1037 dev_err(&ndev->dev, "phy_connect failed\n");
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index bbbded76ff14..ffdd8591d4bc 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1042,7 +1042,7 @@ sis900_open(struct net_device *net_dev)
1042 init_timer(&sis_priv->timer); 1042 init_timer(&sis_priv->timer);
1043 sis_priv->timer.expires = jiffies + HZ; 1043 sis_priv->timer.expires = jiffies + HZ;
1044 sis_priv->timer.data = (unsigned long)net_dev; 1044 sis_priv->timer.data = (unsigned long)net_dev;
1045 sis_priv->timer.function = &sis900_timer; 1045 sis_priv->timer.function = sis900_timer;
1046 add_timer(&sis_priv->timer); 1046 add_timer(&sis_priv->timer);
1047 1047
1048 return 0; 1048 return 0;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 40e5c46e7571..a8a63581d63d 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3178,8 +3178,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3178 3178
3179 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); 3179 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
3180 if (likely(skb)) { 3180 if (likely(skb)) {
3181 netif_receive_skb(skb); 3181 napi_gro_receive(napi, skb);
3182
3183 ++work_done; 3182 ++work_done;
3184 } 3183 }
3185 } 3184 }
@@ -3192,6 +3191,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3192 if (work_done < to_do) { 3191 if (work_done < to_do) {
3193 unsigned long flags; 3192 unsigned long flags;
3194 3193
3194 napi_gro_flush(napi);
3195 spin_lock_irqsave(&hw->hw_lock, flags); 3195 spin_lock_irqsave(&hw->hw_lock, flags);
3196 __napi_complete(napi); 3196 __napi_complete(napi);
3197 hw->intr_mask |= napimask[skge->port]; 3197 hw->intr_mask |= napimask[skge->port];
@@ -3849,6 +3849,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3849 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3849 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3850 skge->rx_csum = 1; 3850 skge->rx_csum = 1;
3851 } 3851 }
3852 dev->features |= NETIF_F_GRO;
3852 3853
3853 /* read the mac address */ 3854 /* read the mac address */
3854 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3855 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
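[Editor's note] skge gets the same GRO treatment, with one subtlety: because skge_poll() completes NAPI with the open-coded __napi_complete() under its own hardware lock, it must call napi_gro_flush() itself before completing, whereas plain napi_complete() performs that flush internally. Paraphrase of the completion ordering from the hunk above, with comments added:

	if (work_done < to_do) {
		unsigned long flags;

		napi_gro_flush(napi);		/* push held GRO packets up
						 * before the instance idles */
		spin_lock_irqsave(&hw->hw_lock, flags);
		__napi_complete(napi);		/* open-coded completion,
						 * hence the manual flush */
		hw->intr_mask |= napimask[skge->port];	/* re-arm port IRQ */
		...
	}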
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index fa434fb8fb7c..38547a8938fe 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -271,7 +271,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
271 memcpy(sl->xbuff, sl->xhead, sl->xleft); 271 memcpy(sl->xbuff, sl->xhead, sl->xleft);
272 } else { 272 } else {
273 sl->xleft = 0; 273 sl->xleft = 0;
274 sl->tx_dropped++; 274 dev->stats.tx_dropped++;
275 } 275 }
276 } 276 }
277 sl->xhead = sl->xbuff; 277 sl->xhead = sl->xbuff;
@@ -281,7 +281,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
281 memcpy(sl->rbuff, rbuff, sl->rcount); 281 memcpy(sl->rbuff, rbuff, sl->rcount);
282 } else { 282 } else {
283 sl->rcount = 0; 283 sl->rcount = 0;
284 sl->rx_over_errors++; 284 dev->stats.rx_over_errors++;
285 set_bit(SLF_ERROR, &sl->flags); 285 set_bit(SLF_ERROR, &sl->flags);
286 } 286 }
287 } 287 }
@@ -319,6 +319,7 @@ static inline void sl_unlock(struct slip *sl)
319/* Send one completely decapsulated IP datagram to the IP layer. */ 319/* Send one completely decapsulated IP datagram to the IP layer. */
320static void sl_bump(struct slip *sl) 320static void sl_bump(struct slip *sl)
321{ 321{
322 struct net_device *dev = sl->dev;
322 struct sk_buff *skb; 323 struct sk_buff *skb;
323 int count; 324 int count;
324 325
@@ -329,13 +330,13 @@ static void sl_bump(struct slip *sl)
329 if (c & SL_TYPE_COMPRESSED_TCP) { 330 if (c & SL_TYPE_COMPRESSED_TCP) {
330 /* ignore compressed packets when CSLIP is off */ 331 /* ignore compressed packets when CSLIP is off */
331 if (!(sl->mode & SL_MODE_CSLIP)) { 332 if (!(sl->mode & SL_MODE_CSLIP)) {
332 printk(KERN_WARNING "%s: compressed packet ignored\n", sl->dev->name); 333 printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
333 return; 334 return;
334 } 335 }
335 /* make sure we've reserved enough space for uncompress 336 /* make sure we've reserved enough space for uncompress
336 to use */ 337 to use */
337 if (count + 80 > sl->buffsize) { 338 if (count + 80 > sl->buffsize) {
338 sl->rx_over_errors++; 339 dev->stats.rx_over_errors++;
339 return; 340 return;
340 } 341 }
341 count = slhc_uncompress(sl->slcomp, sl->rbuff, count); 342 count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
@@ -346,7 +347,7 @@ static void sl_bump(struct slip *sl)
346 /* turn on header compression */ 347 /* turn on header compression */
347 sl->mode |= SL_MODE_CSLIP; 348 sl->mode |= SL_MODE_CSLIP;
348 sl->mode &= ~SL_MODE_ADAPTIVE; 349 sl->mode &= ~SL_MODE_ADAPTIVE;
349 printk(KERN_INFO "%s: header compression turned on\n", sl->dev->name); 350 printk(KERN_INFO "%s: header compression turned on\n", dev->name);
350 } 351 }
351 sl->rbuff[0] &= 0x4f; 352 sl->rbuff[0] &= 0x4f;
352 if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0) 353 if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
@@ -355,20 +356,20 @@ static void sl_bump(struct slip *sl)
355 } 356 }
356#endif /* SL_INCLUDE_CSLIP */ 357#endif /* SL_INCLUDE_CSLIP */
357 358
358 sl->rx_bytes += count; 359 dev->stats.rx_bytes += count;
359 360
360 skb = dev_alloc_skb(count); 361 skb = dev_alloc_skb(count);
361 if (skb == NULL) { 362 if (skb == NULL) {
362 printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", sl->dev->name); 363 printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
363 sl->rx_dropped++; 364 dev->stats.rx_dropped++;
364 return; 365 return;
365 } 366 }
366 skb->dev = sl->dev; 367 skb->dev = dev;
367 memcpy(skb_put(skb, count), sl->rbuff, count); 368 memcpy(skb_put(skb, count), sl->rbuff, count);
368 skb_reset_mac_header(skb); 369 skb_reset_mac_header(skb);
369 skb->protocol = htons(ETH_P_IP); 370 skb->protocol = htons(ETH_P_IP);
370 netif_rx(skb); 371 netif_rx(skb);
371 sl->rx_packets++; 372 dev->stats.rx_packets++;
372} 373}
373 374
374/* Encapsulate one IP datagram and stuff into a TTY queue. */ 375/* Encapsulate one IP datagram and stuff into a TTY queue. */
@@ -379,7 +380,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
379 380
380 if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */ 381 if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */
381 printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name); 382 printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
382 sl->tx_dropped++; 383 sl->dev->stats.tx_dropped++;
383 sl_unlock(sl); 384 sl_unlock(sl);
384 return; 385 return;
385 } 386 }
@@ -433,7 +434,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
433 if (sl->xleft <= 0) { 434 if (sl->xleft <= 0) {
434 /* Now serial buffer is almost free & we can start 435 /* Now serial buffer is almost free & we can start
435 * transmission of another packet */ 436 * transmission of another packet */
436 sl->tx_packets++; 437 sl->dev->stats.tx_packets++;
437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 438 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 sl_unlock(sl); 439 sl_unlock(sl);
439 return; 440 return;
@@ -496,7 +497,7 @@ sl_xmit(struct sk_buff *skb, struct net_device *dev)
496 } 497 }
497 498
498 sl_lock(sl); 499 sl_lock(sl);
499 sl->tx_bytes += skb->len; 500 dev->stats.tx_bytes += skb->len;
500 sl_encaps(sl, skb->data, skb->len); 501 sl_encaps(sl, skb->data, skb->len);
501 spin_unlock(&sl->lock); 502 spin_unlock(&sl->lock);
502 503
@@ -558,39 +559,39 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu)
558 559
559/* Netdevice get statistics request */ 560/* Netdevice get statistics request */
560 561
561static struct net_device_stats * 562static struct rtnl_link_stats64 *
562sl_get_stats(struct net_device *dev) 563sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
563{ 564{
564 static struct net_device_stats stats; 565 struct net_device_stats *devstats = &dev->stats;
565 struct slip *sl = netdev_priv(dev); 566 unsigned long c_rx_dropped = 0;
566#ifdef SL_INCLUDE_CSLIP 567#ifdef SL_INCLUDE_CSLIP
567 struct slcompress *comp; 568 unsigned long c_rx_fifo_errors = 0;
568#endif 569 unsigned long c_tx_fifo_errors = 0;
570 unsigned long c_collisions = 0;
571 struct slip *sl = netdev_priv(dev);
572 struct slcompress *comp = sl->slcomp;
569 573
570 memset(&stats, 0, sizeof(struct net_device_stats));
571
572 stats.rx_packets = sl->rx_packets;
573 stats.tx_packets = sl->tx_packets;
574 stats.rx_bytes = sl->rx_bytes;
575 stats.tx_bytes = sl->tx_bytes;
576 stats.rx_dropped = sl->rx_dropped;
577 stats.tx_dropped = sl->tx_dropped;
578 stats.tx_errors = sl->tx_errors;
579 stats.rx_errors = sl->rx_errors;
580 stats.rx_over_errors = sl->rx_over_errors;
581#ifdef SL_INCLUDE_CSLIP
582 stats.rx_fifo_errors = sl->rx_compressed;
583 stats.tx_fifo_errors = sl->tx_compressed;
584 stats.collisions = sl->tx_misses;
585 comp = sl->slcomp;
586 if (comp) { 574 if (comp) {
587 stats.rx_fifo_errors += comp->sls_i_compressed; 575 c_rx_fifo_errors = comp->sls_i_compressed;
588 stats.rx_dropped += comp->sls_i_tossed; 576 c_rx_dropped = comp->sls_i_tossed;
589 stats.tx_fifo_errors += comp->sls_o_compressed; 577 c_tx_fifo_errors = comp->sls_o_compressed;
590 stats.collisions += comp->sls_o_misses; 578 c_collisions = comp->sls_o_misses;
591 } 579 }
592#endif /* CONFIG_INET */ 580 stats->rx_fifo_errors = sl->rx_compressed + c_rx_fifo_errors;
593 return (&stats); 581 stats->tx_fifo_errors = sl->tx_compressed + c_tx_fifo_errors;
582 stats->collisions = sl->tx_misses + c_collisions;
583#endif
584 stats->rx_packets = devstats->rx_packets;
585 stats->tx_packets = devstats->tx_packets;
586 stats->rx_bytes = devstats->rx_bytes;
587 stats->tx_bytes = devstats->tx_bytes;
588 stats->rx_dropped = devstats->rx_dropped + c_rx_dropped;
589 stats->tx_dropped = devstats->tx_dropped;
590 stats->tx_errors = devstats->tx_errors;
591 stats->rx_errors = devstats->rx_errors;
592 stats->rx_over_errors = devstats->rx_over_errors;
593
594 return stats;
594} 595}
595 596
596/* Netdevice register callback */ 597/* Netdevice register callback */
@@ -633,7 +634,7 @@ static const struct net_device_ops sl_netdev_ops = {
633 .ndo_open = sl_open, 634 .ndo_open = sl_open,
634 .ndo_stop = sl_close, 635 .ndo_stop = sl_close,
635 .ndo_start_xmit = sl_xmit, 636 .ndo_start_xmit = sl_xmit,
636 .ndo_get_stats = sl_get_stats, 637 .ndo_get_stats64 = sl_get_stats64,
637 .ndo_change_mtu = sl_change_mtu, 638 .ndo_change_mtu = sl_change_mtu,
638 .ndo_tx_timeout = sl_tx_timeout, 639 .ndo_tx_timeout = sl_tx_timeout,
639#ifdef CONFIG_SLIP_SMART 640#ifdef CONFIG_SLIP_SMART
@@ -681,7 +682,7 @@ static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
681 while (count--) { 682 while (count--) {
682 if (fp && *fp++) { 683 if (fp && *fp++) {
683 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) 684 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
684 sl->rx_errors++; 685 sl->dev->stats.rx_errors++;
685 cp++; 686 cp++;
686 continue; 687 continue;
687 } 688 }
@@ -981,7 +982,7 @@ static void slip_unesc(struct slip *sl, unsigned char s)
981 sl->rbuff[sl->rcount++] = s; 982 sl->rbuff[sl->rcount++] = s;
982 return; 983 return;
983 } 984 }
984 sl->rx_over_errors++; 985 sl->dev->stats.rx_over_errors++;
985 set_bit(SLF_ERROR, &sl->flags); 986 set_bit(SLF_ERROR, &sl->flags);
986 } 987 }
987} 988}
@@ -1057,7 +1058,7 @@ static void slip_unesc6(struct slip *sl, unsigned char s)
1057 sl->rbuff[sl->rcount++] = c; 1058 sl->rbuff[sl->rcount++] = c;
1058 return; 1059 return;
1059 } 1060 }
1060 sl->rx_over_errors++; 1061 sl->dev->stats.rx_over_errors++;
1061 set_bit(SLF_ERROR, &sl->flags); 1062 set_bit(SLF_ERROR, &sl->flags);
1062 } 1063 }
1063 } 1064 }
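[Editor's note] The slip rework deletes the driver-private counters, counts directly into dev->stats on the hot paths, and swaps .ndo_get_stats for .ndo_get_stats64. The old callback returned a function-local static buffer, which is racy when two readers overlap and caps counters at unsigned long; the 64-bit variant fills a buffer the caller supplies. Minimal sketch of the new shape (CSLIP-specific fields omitted), with a hypothetical name:

	#include <linux/netdevice.h>

	/* Read from dev->stats (updated in the hot paths) into the
	 * caller's buffer: no static storage, so concurrent readers
	 * cannot trample each other. */
	static struct rtnl_link_stats64 *
	foo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
	{
		const struct net_device_stats *s = &dev->stats;

		stats->rx_packets = s->rx_packets;
		stats->tx_packets = s->tx_packets;
		stats->rx_bytes   = s->rx_bytes;
		stats->tx_bytes   = s->tx_bytes;
		stats->rx_dropped = s->rx_dropped;
		stats->tx_dropped = s->tx_dropped;
		return stats;
	}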
diff --git a/drivers/net/slip.h b/drivers/net/slip.h
index 9ea5c11287d2..914e958abbfc 100644
--- a/drivers/net/slip.h
+++ b/drivers/net/slip.h
@@ -67,15 +67,6 @@ struct slip {
67 int xleft; /* bytes left in XMIT queue */ 67 int xleft; /* bytes left in XMIT queue */
68 68
69 /* SLIP interface statistics. */ 69 /* SLIP interface statistics. */
70 unsigned long rx_packets; /* inbound frames counter */
71 unsigned long tx_packets; /* outbound frames counter */
72 unsigned long rx_bytes; /* inbound byte counte */
73 unsigned long tx_bytes; /* outbound byte counter */
74 unsigned long rx_errors; /* Parity, etc. errors */
75 unsigned long tx_errors; /* Planned stuff */
76 unsigned long rx_dropped; /* No memory for skb */
77 unsigned long tx_dropped; /* When MTU change */
78 unsigned long rx_over_errors; /* Frame bigger than SLIP buf. */
79#ifdef SL_INCLUDE_CSLIP 70#ifdef SL_INCLUDE_CSLIP
80 unsigned long tx_compressed; 71 unsigned long tx_compressed;
81 unsigned long rx_compressed; 72 unsigned long rx_compressed;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 0909ae934ad0..13ddcd487200 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1048,7 +1048,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1048 smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head, 1048 smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head,
1049 pktwords); 1049 pktwords);
1050 skb->protocol = eth_type_trans(skb, dev); 1050 skb->protocol = eth_type_trans(skb, dev);
1051 skb->ip_summed = CHECKSUM_NONE; 1051 skb_checksum_none_assert(skb);
1052 netif_receive_skb(skb); 1052 netif_receive_skb(skb);
1053 1053
1054 /* Update counters */ 1054 /* Update counters */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 1636a34d95dd..cb6bcca9d541 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1000,9 +1000,9 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1000 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK)) 1000 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1001 skb->ip_summed = CHECKSUM_UNNECESSARY; 1001 skb->ip_summed = CHECKSUM_UNNECESSARY;
1002 else 1002 else
1003 skb->ip_summed = CHECKSUM_NONE; 1003 skb_checksum_none_assert(skb);
1004 } else 1004 } else
1005 skb->ip_summed = CHECKSUM_NONE; 1005 skb_checksum_none_assert(skb);
1006 1006
1007 if (data_status & SPIDER_NET_VLAN_PACKET) { 1007 if (data_status & SPIDER_NET_VLAN_PACKET) {
1008 /* further enhancements: HW-accel VLAN 1008 /* further enhancements: HW-accel VLAN
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index a42b6873370b..4adf12422787 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -148,7 +148,7 @@ static int full_duplex[MAX_UNITS] = {0, };
148 * This SUCKS. 148 * This SUCKS.
149 * We need a much better method to determine if dma_addr_t is 64-bit. 149 * We need a much better method to determine if dma_addr_t is 64-bit.
150 */ 150 */
151#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) 151#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT))
152/* 64-bit dma_addr_t */ 152/* 64-bit dma_addr_t */
153#define ADDR_64BITS /* This chip uses 64 bit addresses. */ 153#define ADDR_64BITS /* This chip uses 64 bit addresses. */
154#define netdrv_addr_t __le64 154#define netdrv_addr_t __le64
@@ -302,7 +302,7 @@ enum chipset {
302}; 302};
303 303
304static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = { 304static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
305 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 }, 305 { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
306 { 0, } 306 { 0, }
307}; 307};
308MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); 308MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
@@ -2078,11 +2078,7 @@ static int __init starfire_init (void)
2078 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n"); 2078 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2079#endif 2079#endif
2080 2080
2081 /* we can do this test only at run-time... sigh */ 2081 BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2082 if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
2083 printk("This driver has dma_addr_t issues, please send email to maintainer\n");
2084 return -ENODEV;
2085 }
2086 2082
2087 return pci_register_driver(&starfire_driver); 2083 return pci_register_driver(&starfire_driver);
2088} 2084}
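[Editor's note] starfire turns its "we can do this test only at run-time... sigh" check into BUILD_BUG_ON(): given a compile-time-constant condition, the macro breaks the build when the condition is true, so a dma_addr_t size mismatch is caught at compile time instead of failing modprobe with -ENODEV. Sketch (netdrv_addr_t and foo_driver stand in for the driver's own definitions):

	#include <linux/kernel.h>	/* BUILD_BUG_ON() */

	static int __init foo_init(void)
	{
		/* Both sizeofs are compile-time constants, so any
		 * mismatch is a build failure, not a run-time error. */
		BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));

		return pci_register_driver(&foo_driver);
	}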
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index eb63d44748a7..3c2af7c6a39b 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -3,10 +3,10 @@ config STMMAC_ETH
3 select MII 3 select MII
4 select PHYLIB 4 select PHYLIB
5 select CRC32 5 select CRC32
6 depends on NETDEVICES && CPU_SUBTYPE_ST40 6 depends on NETDEVICES
7 help 7 help
8 This is the driver for the Ethernet IPs are built around a 8 This is the driver for the Ethernet IPs are built around a
9 Synopsys IP Core and fully tested on the STMicroelectronics 9 Synopsys IP Core and only tested on the STMicroelectronics
10 platforms. 10 platforms.
11 11
12if STMMAC_ETH 12if STMMAC_ETH
@@ -32,6 +32,7 @@ config STMMAC_DUAL_MAC
32config STMMAC_TIMER 32config STMMAC_TIMER
33 bool "STMMAC Timer optimisation" 33 bool "STMMAC Timer optimisation"
34 default n 34 default n
35 depends on RTC_HCTOSYS_DEVICE
35 help 36 help
36 Use an external timer for mitigating the number of network 37 Use an external timer for mitigating the number of network
37 interrupts. Currently, for SH architectures, it is possible 38 interrupts. Currently, for SH architectures, it is possible
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 66b9da0260fe..e8cbcb5c206e 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -167,7 +167,7 @@ struct stmmac_desc_ops {
167 int (*get_tx_ls) (struct dma_desc *p); 167 int (*get_tx_ls) (struct dma_desc *p);
168 /* Return the transmit status looking at the TDES1 */ 168 /* Return the transmit status looking at the TDES1 */
169 int (*tx_status) (void *data, struct stmmac_extra_stats *x, 169 int (*tx_status) (void *data, struct stmmac_extra_stats *x,
170 struct dma_desc *p, unsigned long ioaddr); 170 struct dma_desc *p, void __iomem *ioaddr);
171 /* Get the buffer size from the descriptor */ 171 /* Get the buffer size from the descriptor */
172 int (*get_tx_len) (struct dma_desc *p); 172 int (*get_tx_len) (struct dma_desc *p);
173 /* Handle extra events on specific interrupts hw dependent */ 173 /* Handle extra events on specific interrupts hw dependent */
@@ -182,44 +182,44 @@ struct stmmac_desc_ops {
182 182
183struct stmmac_dma_ops { 183struct stmmac_dma_ops {
184 /* DMA core initialization */ 184 /* DMA core initialization */
185 int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx); 185 int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
186 /* Dump DMA registers */ 186 /* Dump DMA registers */
187 void (*dump_regs) (unsigned long ioaddr); 187 void (*dump_regs) (void __iomem *ioaddr);
188 /* Set tx/rx threshold in the csr6 register 188 /* Set tx/rx threshold in the csr6 register
189 * An invalid value enables the store-and-forward mode */ 189 * An invalid value enables the store-and-forward mode */
190 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode); 190 void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
191 /* To track extra statistic (if supported) */ 191 /* To track extra statistic (if supported) */
192 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, 192 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
193 unsigned long ioaddr); 193 void __iomem *ioaddr);
194 void (*enable_dma_transmission) (unsigned long ioaddr); 194 void (*enable_dma_transmission) (void __iomem *ioaddr);
195 void (*enable_dma_irq) (unsigned long ioaddr); 195 void (*enable_dma_irq) (void __iomem *ioaddr);
196 void (*disable_dma_irq) (unsigned long ioaddr); 196 void (*disable_dma_irq) (void __iomem *ioaddr);
197 void (*start_tx) (unsigned long ioaddr); 197 void (*start_tx) (void __iomem *ioaddr);
198 void (*stop_tx) (unsigned long ioaddr); 198 void (*stop_tx) (void __iomem *ioaddr);
199 void (*start_rx) (unsigned long ioaddr); 199 void (*start_rx) (void __iomem *ioaddr);
200 void (*stop_rx) (unsigned long ioaddr); 200 void (*stop_rx) (void __iomem *ioaddr);
201 int (*dma_interrupt) (unsigned long ioaddr, 201 int (*dma_interrupt) (void __iomem *ioaddr,
202 struct stmmac_extra_stats *x); 202 struct stmmac_extra_stats *x);
203}; 203};
204 204
205struct stmmac_ops { 205struct stmmac_ops {
206 /* MAC core initialization */ 206 /* MAC core initialization */
207 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned; 207 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
208 /* Dump MAC registers */ 208 /* Dump MAC registers */
209 void (*dump_regs) (unsigned long ioaddr); 209 void (*dump_regs) (void __iomem *ioaddr);
210 /* Handle extra events on specific interrupts hw dependent */ 210 /* Handle extra events on specific interrupts hw dependent */
211 void (*host_irq_status) (unsigned long ioaddr); 211 void (*host_irq_status) (void __iomem *ioaddr);
212 /* Multicast filter setting */ 212 /* Multicast filter setting */
213 void (*set_filter) (struct net_device *dev); 213 void (*set_filter) (struct net_device *dev);
214 /* Flow control setting */ 214 /* Flow control setting */
215 void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex, 215 void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
216 unsigned int fc, unsigned int pause_time); 216 unsigned int fc, unsigned int pause_time);
217 /* Set power management mode (e.g. magic frame) */ 217 /* Set power management mode (e.g. magic frame) */
218 void (*pmt) (unsigned long ioaddr, unsigned long mode); 218 void (*pmt) (void __iomem *ioaddr, unsigned long mode);
219 /* Set/Get Unicast MAC addresses */ 219 /* Set/Get Unicast MAC addresses */
220 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr, 220 void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
221 unsigned int reg_n); 221 unsigned int reg_n);
222 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr, 222 void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
223 unsigned int reg_n); 223 unsigned int reg_n);
224}; 224};
225 225
@@ -243,11 +243,11 @@ struct mac_device_info {
243 struct mac_link link; 243 struct mac_link link;
244}; 244};
245 245
246struct mac_device_info *dwmac1000_setup(unsigned long addr); 246struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
247struct mac_device_info *dwmac100_setup(unsigned long addr); 247struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
248 248
249extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], 249extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
250 unsigned int high, unsigned int low); 250 unsigned int high, unsigned int low);
251extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr, 251extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
252 unsigned int high, unsigned int low); 252 unsigned int high, unsigned int low);
253extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr); 253extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
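[Editor's note] The bulk of the stmmac change is a type correction: MMIO base addresses move from unsigned long to void __iomem * across every ops table. __iomem is a zero-cost sparse annotation; readl()/writel() expect such a pointer, and sparse will now warn when an MMIO cookie is mixed with ordinary memory pointers (note the matching switch from an (unsigned int) cast to %p in the dump_regs printks below). Sketch of the annotated accessor style, with made-up register names:

	#include <linux/bitops.h>
	#include <linux/io.h>

	#define FOO_CONTROL	0x00		/* made-up register offset */
	#define FOO_CTRL_EN	BIT(0)		/* made-up enable bit */

	/* The __iomem cookie comes from ioremap() and is only ever
	 * dereferenced through the MMIO accessors, never directly. */
	static void foo_core_init(void __iomem *ioaddr)
	{
		u32 value = readl(ioaddr + FOO_CONTROL);

		value |= FOO_CTRL_EN;
		writel(value, ioaddr + FOO_CONTROL);
	}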
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 2b2f5c8caf1c..f1f426146f40 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -30,7 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include "dwmac1000.h" 31#include "dwmac1000.h"
32 32
33static void dwmac1000_core_init(unsigned long ioaddr) 33static void dwmac1000_core_init(void __iomem *ioaddr)
34{ 34{
35 u32 value = readl(ioaddr + GMAC_CONTROL); 35 u32 value = readl(ioaddr + GMAC_CONTROL);
36 value |= GMAC_CORE_INIT; 36 value |= GMAC_CORE_INIT;
@@ -50,10 +50,10 @@ static void dwmac1000_core_init(unsigned long ioaddr)
50#endif 50#endif
51} 51}
52 52
53static void dwmac1000_dump_regs(unsigned long ioaddr) 53static void dwmac1000_dump_regs(void __iomem *ioaddr)
54{ 54{
55 int i; 55 int i;
56 pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr); 56 pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
57 57
58 for (i = 0; i < 55; i++) { 58 for (i = 0; i < 55; i++) {
59 int offset = i * 4; 59 int offset = i * 4;
@@ -62,14 +62,14 @@ static void dwmac1000_dump_regs(unsigned long ioaddr)
62 } 62 }
63} 63}
64 64
65static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr, 65static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
66 unsigned int reg_n) 66 unsigned int reg_n)
67{ 67{
68 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), 68 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
69 GMAC_ADDR_LOW(reg_n)); 69 GMAC_ADDR_LOW(reg_n));
70} 70}
71 71
72static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr, 72static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
73 unsigned int reg_n) 73 unsigned int reg_n)
74{ 74{
75 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), 75 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
@@ -78,7 +78,7 @@ static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
78 78
79static void dwmac1000_set_filter(struct net_device *dev) 79static void dwmac1000_set_filter(struct net_device *dev)
80{ 80{
81 unsigned long ioaddr = dev->base_addr; 81 void __iomem *ioaddr = (void __iomem *) dev->base_addr;
82 unsigned int value = 0; 82 unsigned int value = 0;
83 83
84 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", 84 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
@@ -139,7 +139,7 @@ static void dwmac1000_set_filter(struct net_device *dev)
139 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); 139 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
140} 140}
141 141
142static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex, 142static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
143 unsigned int fc, unsigned int pause_time) 143 unsigned int fc, unsigned int pause_time)
144{ 144{
145 unsigned int flow = 0; 145 unsigned int flow = 0;
@@ -162,7 +162,7 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
162 writel(flow, ioaddr + GMAC_FLOW_CTRL); 162 writel(flow, ioaddr + GMAC_FLOW_CTRL);
163} 163}
164 164
165static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode) 165static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
166{ 166{
167 unsigned int pmt = 0; 167 unsigned int pmt = 0;
168 168
@@ -178,7 +178,7 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
178} 178}
179 179
180 180
181static void dwmac1000_irq_status(unsigned long ioaddr) 181static void dwmac1000_irq_status(void __iomem *ioaddr)
182{ 182{
183 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 183 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
184 184
@@ -211,7 +211,7 @@ struct stmmac_ops dwmac1000_ops = {
211 .get_umac_addr = dwmac1000_get_umac_addr, 211 .get_umac_addr = dwmac1000_get_umac_addr,
212}; 212};
213 213
214struct mac_device_info *dwmac1000_setup(unsigned long ioaddr) 214struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
215{ 215{
216 struct mac_device_info *mac; 216 struct mac_device_info *mac;
217 u32 uid = readl(ioaddr + GMAC_VERSION); 217 u32 uid = readl(ioaddr + GMAC_VERSION);
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 415805057cb0..2ef5a56370e9 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -29,7 +29,7 @@
29#include "dwmac1000.h" 29#include "dwmac1000.h"
30#include "dwmac_dma.h" 30#include "dwmac_dma.h"
31 31
32static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, 32static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
33 u32 dma_rx) 33 u32 dma_rx)
34{ 34{
35 u32 value = readl(ioaddr + DMA_BUS_MODE); 35 u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -58,7 +58,7 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
58 return 0; 58 return 0;
59} 59}
60 60
61static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode, 61static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
62 int rxmode) 62 int rxmode)
63{ 63{
64 u32 csr6 = readl(ioaddr + DMA_CONTROL); 64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -111,12 +111,12 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
111 111
112/* Not yet implemented --- no RMON module */ 112/* Not yet implemented --- no RMON module */
113static void dwmac1000_dma_diagnostic_fr(void *data, 113static void dwmac1000_dma_diagnostic_fr(void *data,
114 struct stmmac_extra_stats *x, unsigned long ioaddr) 114 struct stmmac_extra_stats *x, void __iomem *ioaddr)
115{ 115{
116 return; 116 return;
117} 117}
118 118
119static void dwmac1000_dump_dma_regs(unsigned long ioaddr) 119static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
120{ 120{
121 int i; 121 int i;
122 pr_info(" DMA registers\n"); 122 pr_info(" DMA registers\n");
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
index 2fb165fa2ba0..db06c04ce480 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -31,7 +31,7 @@
 #include <linux/crc32.h>
 #include "dwmac100.h"
 
-static void dwmac100_core_init(unsigned long ioaddr)
+static void dwmac100_core_init(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CONTROL);
 
@@ -42,12 +42,12 @@ static void dwmac100_core_init(unsigned long ioaddr)
 #endif
 }
 
-static void dwmac100_dump_mac_regs(unsigned long ioaddr)
+static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
 {
 	pr_info("\t----------------------------------------------\n"
-		"\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+		"\t DWMAC 100 CSR (base addr = 0x%p)\n"
 		"\t----------------------------------------------\n",
-		(unsigned int)ioaddr);
+		ioaddr);
 	pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
 		readl(ioaddr + MAC_CONTROL));
 	pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
@@ -77,18 +77,18 @@ static void dwmac100_dump_mac_regs(unsigned long ioaddr)
 		MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
 }
 
-static void dwmac100_irq_status(unsigned long ioaddr)
+static void dwmac100_irq_status(void __iomem *ioaddr)
 {
 	return;
 }
 
-static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
 				   unsigned int reg_n)
 {
 	stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
 }
 
-static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
 				   unsigned int reg_n)
 {
 	stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
@@ -96,7 +96,7 @@ static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
 
 static void dwmac100_set_filter(struct net_device *dev)
 {
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = (void __iomem *) dev->base_addr;
 	u32 value = readl(ioaddr + MAC_CONTROL);
 
 	if (dev->flags & IFF_PROMISC) {
@@ -145,7 +145,7 @@ static void dwmac100_set_filter(struct net_device *dev)
 		readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
 }
 
-static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
 			       unsigned int fc, unsigned int pause_time)
 {
 	unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -158,7 +158,7 @@ static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
 /* No PMT module supported for this Ethernet Controller.
  * Tested on ST platforms only.
  */
-static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
+static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
 {
 	return;
 }
@@ -174,7 +174,7 @@ struct stmmac_ops dwmac100_ops = {
 	.get_umac_addr = dwmac100_get_umac_addr,
 };
 
-struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
 {
 	struct mac_device_info *mac;
 
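Note the companion fix in dwmac100_dump_mac_regs(): the cookie is now printed with %p instead of being cast to unsigned int, which silently truncates the value on 64-bit machines. A reduced sketch of the two forms (the old one only compiles quietly because of the double cast):

static void foo_show_base(void __iomem *ioaddr)
{
	/* old form: truncates the cookie above 4 GB */
	pr_info("base addr = 0x%8x\n", (unsigned int)(unsigned long)ioaddr);
	/* new form: %p prints the full pointer value on any arch */
	pr_info("base addr = 0x%p\n", ioaddr);
}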
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
index 2fece7b72727..c7279d2b946b 100644
--- a/drivers/net/stmmac/dwmac100_dma.c
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -31,7 +31,7 @@
31#include "dwmac100.h" 31#include "dwmac100.h"
32#include "dwmac_dma.h" 32#include "dwmac_dma.h"
33 33
34static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, 34static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
35 u32 dma_rx) 35 u32 dma_rx)
36{ 36{
37 u32 value = readl(ioaddr + DMA_BUS_MODE); 37 u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -58,7 +58,7 @@ static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
58/* Store and Forward capability is not used at all.. 58/* Store and Forward capability is not used at all..
59 * The transmit threshold can be programmed by 59 * The transmit threshold can be programmed by
60 * setting the TTC bits in the DMA control register.*/ 60 * setting the TTC bits in the DMA control register.*/
61static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode, 61static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
62 int rxmode) 62 int rxmode)
63{ 63{
64 u32 csr6 = readl(ioaddr + DMA_CONTROL); 64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -73,7 +73,7 @@ static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
73 writel(csr6, ioaddr + DMA_CONTROL); 73 writel(csr6, ioaddr + DMA_CONTROL);
74} 74}
75 75
76static void dwmac100_dump_dma_regs(unsigned long ioaddr) 76static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
77{ 77{
78 int i; 78 int i;
79 79
@@ -91,7 +91,7 @@ static void dwmac100_dump_dma_regs(unsigned long ioaddr)
91/* DMA controller has two counters to track the number of 91/* DMA controller has two counters to track the number of
92 * the receive missed frames. */ 92 * the receive missed frames. */
93static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 93static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
94 unsigned long ioaddr) 94 void __iomem *ioaddr)
95{ 95{
96 struct net_device_stats *stats = (struct net_device_stats *)data; 96 struct net_device_stats *stats = (struct net_device_stats *)data;
97 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); 97 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index 7b815a1b7b8c..da3f5ccf83d3 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -97,12 +97,12 @@
 #define DMA_STATUS_TI	0x00000001	/* Transmit Interrupt */
 #define DMA_CONTROL_FTF	0x00100000	/* Flush transmit FIFO */
 
-extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
-extern void dwmac_enable_dma_irq(unsigned long ioaddr);
-extern void dwmac_disable_dma_irq(unsigned long ioaddr);
-extern void dwmac_dma_start_tx(unsigned long ioaddr);
-extern void dwmac_dma_stop_tx(unsigned long ioaddr);
-extern void dwmac_dma_start_rx(unsigned long ioaddr);
-extern void dwmac_dma_stop_rx(unsigned long ioaddr);
-extern int dwmac_dma_interrupt(unsigned long ioaddr,
+extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_dma_start_tx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
+extern void dwmac_dma_start_rx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
+extern int dwmac_dma_interrupt(void __iomem *ioaddr,
 			       struct stmmac_extra_stats *x);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index a85415216ef4..d65fab1ba790 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -32,43 +32,43 @@
 #endif
 
 /* CSR1 enables the transmit DMA to check for new descriptor */
-void dwmac_enable_dma_transmission(unsigned long ioaddr)
+void dwmac_enable_dma_transmission(void __iomem *ioaddr)
 {
 	writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
 }
 
-void dwmac_enable_dma_irq(unsigned long ioaddr)
+void dwmac_enable_dma_irq(void __iomem *ioaddr)
 {
 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
 }
 
-void dwmac_disable_dma_irq(unsigned long ioaddr)
+void dwmac_disable_dma_irq(void __iomem *ioaddr)
 {
 	writel(0, ioaddr + DMA_INTR_ENA);
 }
 
-void dwmac_dma_start_tx(unsigned long ioaddr)
+void dwmac_dma_start_tx(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + DMA_CONTROL);
 	value |= DMA_CONTROL_ST;
 	writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_stop_tx(unsigned long ioaddr)
+void dwmac_dma_stop_tx(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + DMA_CONTROL);
 	value &= ~DMA_CONTROL_ST;
 	writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_start_rx(unsigned long ioaddr)
+void dwmac_dma_start_rx(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + DMA_CONTROL);
 	value |= DMA_CONTROL_SR;
 	writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_stop_rx(unsigned long ioaddr)
+void dwmac_dma_stop_rx(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + DMA_CONTROL);
 	value &= ~DMA_CONTROL_SR;
@@ -145,7 +145,7 @@ static void show_rx_process_state(unsigned int status)
 }
 #endif
 
-int dwmac_dma_interrupt(unsigned long ioaddr,
+int dwmac_dma_interrupt(void __iomem *ioaddr,
 			struct stmmac_extra_stats *x)
 {
 	int ret = 0;
@@ -219,7 +219,7 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
 	return ret;
 }
 
-void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
 {
 	u32 csr6 = readl(ioaddr + DMA_CONTROL);
 	writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -227,7 +227,7 @@ void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
 	do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
 }
 
-void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
 			 unsigned int high, unsigned int low)
 {
 	unsigned long data;
@@ -238,7 +238,7 @@ void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
 	writel(data, ioaddr + low);
 }
 
-void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 			 unsigned int high, unsigned int low)
 {
 	unsigned int hi_addr, lo_addr;
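stmmac_set_mac_addr()/stmmac_get_mac_addr() pack the six address bytes into one high and one low 32-bit register. The diff only touches their signatures, but the packing they perform looks roughly like this (a sketch from the register layout, not a verbatim copy of the driver):

/* addr[5]..addr[4] go to the high register, addr[3]..addr[0] to the
 * low one, least significant byte first */
static void sketch_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
				unsigned int high, unsigned int low)
{
	writel((addr[5] << 8) | addr[4], ioaddr + high);
	writel((addr[3] << 24) | (addr[2] << 16) |
	       (addr[1] << 8) | addr[0], ioaddr + low);
}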
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
index f612f986a7e1..77ff88c3958b 100644
--- a/drivers/net/stmmac/enh_desc.c
+++ b/drivers/net/stmmac/enh_desc.c
@@ -25,7 +25,7 @@
25#include "common.h" 25#include "common.h"
26 26
27static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, 27static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
28 struct dma_desc *p, unsigned long ioaddr) 28 struct dma_desc *p, void __iomem *ioaddr)
29{ 29{
30 int ret = 0; 30 int ret = 0;
31 struct net_device_stats *stats = (struct net_device_stats *)data; 31 struct net_device_stats *stats = (struct net_device_stats *)data;
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
index 31ad53643792..51f4440ab98b 100644
--- a/drivers/net/stmmac/norm_desc.c
+++ b/drivers/net/stmmac/norm_desc.c
@@ -25,7 +25,7 @@
25#include "common.h" 25#include "common.h"
26 26
27static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, 27static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
28 struct dma_desc *p, unsigned long ioaddr) 28 struct dma_desc *p, void __iomem *ioaddr)
29{ 29{
30 int ret = 0; 30 int ret = 0;
31 struct net_device_stats *stats = (struct net_device_stats *)data; 31 struct net_device_stats *stats = (struct net_device_stats *)data;
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ebebc644b1b8..d0ddab0d21c2 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -21,6 +21,7 @@
 *******************************************************************************/
 
 #define DRV_MODULE_VERSION	"Apr_2010"
+#include <linux/platform_device.h>
 #include <linux/stmmac.h>
 
 #include "common.h"
@@ -54,6 +55,7 @@ struct stmmac_priv {
 	unsigned int dma_buf_sz;
 	struct device *device;
 	struct mac_device_info *hw;
+	void __iomem *ioaddr;
 
 	struct stmmac_extra_stats xstats;
 	struct napi_struct napi;
@@ -65,7 +67,7 @@ struct stmmac_priv {
 	int phy_mask;
 	int (*phy_reset) (void *priv);
 	void (*fix_mac_speed) (void *priv, unsigned int speed);
-	void (*bus_setup)(unsigned long ioaddr);
+	void (*bus_setup)(void __iomem *ioaddr);
 	void *bsp_priv;
 
 	int phy_irq;
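The new stmmac_priv::ioaddr field is what makes the rest of the patch possible: the remapped cookie is saved once at probe time and every later user reads it from priv instead of round-tripping through dev->base_addr, an unsigned long and therefore exactly the kind of integer/pointer laundering the series removes. A sketch of the probe-time flow, with foo_* names standing in for the driver specifics:

#include <linux/platform_device.h>
#include <linux/io.h>

struct foo_priv {
	void __iomem *ioaddr;	/* typed MMIO cookie, set once at probe */
};

static int foo_probe(struct platform_device *pdev, struct foo_priv *priv)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	priv->ioaddr = ioremap(res->start, resource_size(res));
	if (!priv->ioaddr)
		return -ENOMEM;

	/* every later readl()/writel() goes through priv->ioaddr instead
	 * of casting dev->base_addr back and forth */
	return 0;
}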
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index f080509923f0..63b68e61afce 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -177,21 +177,21 @@ void stmmac_ethtool_gregs(struct net_device *dev,
 	if (!priv->is_gmac) {
 		/* MAC registers */
 		for (i = 0; i < 12; i++)
-			reg_space[i] = readl(dev->base_addr + (i * 4));
+			reg_space[i] = readl(priv->ioaddr + (i * 4));
 		/* DMA registers */
 		for (i = 0; i < 9; i++)
 			reg_space[i + 12] =
-			    readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
-		reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
-		reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
+			    readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+		reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+		reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
 	} else {
 		/* MAC registers */
 		for (i = 0; i < 55; i++)
-			reg_space[i] = readl(dev->base_addr + (i * 4));
+			reg_space[i] = readl(priv->ioaddr + (i * 4));
 		/* DMA registers */
 		for (i = 0; i < 22; i++)
 			reg_space[i + 55] =
-			    readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+			    readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
 	}
 }
 
@@ -263,11 +263,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
 			cmd.phy_address = phy->addr;
 			ret = phy_ethtool_sset(phy, &cmd);
 		}
-	} else {
-		unsigned long ioaddr = netdev->base_addr;
-		priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
+	} else
+		priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
 					 priv->flow_ctrl, priv->pause);
-	}
 	spin_unlock(&priv->lock);
 	return ret;
 }
@@ -276,12 +274,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *dummy, u64 *data)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
 	int i;
 
 	/* Update HW stats if supported */
 	priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
-					 ioaddr);
+					 priv->ioaddr);
 
 	for (i = 0; i < STMMAC_STATS_LEN; i++) {
 		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index ea0461eb2dbe..03c160c6d75c 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -202,7 +202,6 @@ static void stmmac_adjust_link(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
-	unsigned long ioaddr = dev->base_addr;
 	unsigned long flags;
 	int new_state = 0;
 	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
@@ -215,7 +214,7 @@ static void stmmac_adjust_link(struct net_device *dev)
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (phydev->link) {
-		u32 ctrl = readl(ioaddr + MAC_CTRL_REG);
+		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
 
 		/* Now we make sure that we can be in full duplex mode.
 		 * If not, we operate in half-duplex mode. */
@@ -229,7 +228,7 @@ static void stmmac_adjust_link(struct net_device *dev)
 		}
 		/* Flow Control operation */
 		if (phydev->pause)
-			priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+			priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
 						 fc, pause_time);
 
 		if (phydev->speed != priv->speed) {
@@ -238,6 +237,9 @@ static void stmmac_adjust_link(struct net_device *dev)
 			case 1000:
 				if (likely(priv->is_gmac))
 					ctrl &= ~priv->hw->link.port;
+				if (likely(priv->fix_mac_speed))
+					priv->fix_mac_speed(priv->bsp_priv,
+							    phydev->speed);
 				break;
 			case 100:
 			case 10:
@@ -265,7 +267,7 @@ static void stmmac_adjust_link(struct net_device *dev)
 			priv->speed = phydev->speed;
 		}
 
-		writel(ctrl, ioaddr + MAC_CTRL_REG);
+		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
 
 		if (!priv->oldlink) {
 			new_state = 1;
@@ -342,7 +344,7 @@ static int stmmac_init_phy(struct net_device *dev)
 	return 0;
 }
 
-static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
+static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
 	value |= MAC_RNABLE_RX;
@@ -350,7 +352,7 @@ static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
-static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
+static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
 	value |= MAC_ENABLE_TX;
@@ -358,14 +360,14 @@ static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
-static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
+static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
 	value &= ~MAC_RNABLE_RX;
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
-static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
+static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
 	value &= ~MAC_ENABLE_TX;
@@ -574,17 +576,17 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
 	if (!priv->is_gmac) {
 		/* MAC 10/100 */
-		priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
+		priv->hw->dma->dma_mode(priv->ioaddr, tc, 0);
 		priv->tx_coe = NO_HW_CSUM;
 	} else {
 		if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
-			priv->hw->dma->dma_mode(priv->dev->base_addr,
+			priv->hw->dma->dma_mode(priv->ioaddr,
 						SF_DMA_MODE, SF_DMA_MODE);
 			tc = SF_DMA_MODE;
 			priv->tx_coe = HW_CSUM;
 		} else {
 			/* Checksum computation is performed in software. */
-			priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
+			priv->hw->dma->dma_mode(priv->ioaddr, tc,
 						SF_DMA_MODE);
 			priv->tx_coe = NO_HW_CSUM;
 		}
@@ -600,7 +602,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 static void stmmac_tx(struct stmmac_priv *priv)
 {
 	unsigned int txsize = priv->dma_tx_size;
-	unsigned long ioaddr = priv->dev->base_addr;
 
 	while (priv->dirty_tx != priv->cur_tx) {
 		int last;
@@ -618,7 +619,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
 			int tx_error =
 				priv->hw->desc->tx_status(&priv->dev->stats,
 							  &priv->xstats, p,
-							  ioaddr);
+							  priv->ioaddr);
 			if (likely(tx_error == 0)) {
 				priv->dev->stats.tx_packets++;
 				priv->xstats.tx_pkt_n++;
@@ -674,7 +675,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
 		priv->tm->timer_start(tmrate);
 	else
 #endif
-		priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
+		priv->hw->dma->enable_dma_irq(priv->ioaddr);
 }
 
 static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -684,7 +685,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
 		priv->tm->timer_stop();
 	else
 #endif
-		priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
+		priv->hw->dma->disable_dma_irq(priv->ioaddr);
 }
 
 static int stmmac_has_work(struct stmmac_priv *priv)
@@ -739,14 +740,15 @@ static void stmmac_no_timer_stopped(void)
  */
 static void stmmac_tx_err(struct stmmac_priv *priv)
 {
+
 	netif_stop_queue(priv->dev);
 
-	priv->hw->dma->stop_tx(priv->dev->base_addr);
+	priv->hw->dma->stop_tx(priv->ioaddr);
 	dma_free_tx_skbufs(priv);
 	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
-	priv->hw->dma->start_tx(priv->dev->base_addr);
+	priv->hw->dma->start_tx(priv->ioaddr);
 
 	priv->dev->stats.tx_errors++;
 	netif_wake_queue(priv->dev);
@@ -755,11 +757,9 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
 
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
-	unsigned long ioaddr = priv->dev->base_addr;
 	int status;
 
-	status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
-					      &priv->xstats);
+	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
 	if (likely(status == handle_tx_rx))
 		_stmmac_schedule(priv);
 
@@ -767,7 +767,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 	/* Try to bump up the dma threshold on this failure */
 	if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
 		tc += 64;
-		priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
 		priv->xstats.threshold = tc;
 	}
 	stmmac_tx_err(priv);
@@ -787,7 +787,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 static int stmmac_open(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
 	int ret;
 
 	/* Check that the MAC address is valid.  If its not, refuse
@@ -843,7 +842,8 @@ static int stmmac_open(struct net_device *dev)
 	init_dma_desc_rings(dev);
 
 	/* DMA initialization and SW reset */
-	if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
+	if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
+					 priv->dma_tx_phy,
 					 priv->dma_rx_phy) < 0)) {
 
 		pr_err("%s: DMA initialization failed\n", __func__);
@@ -851,22 +851,22 @@ static int stmmac_open(struct net_device *dev)
 	}
 
 	/* Copy the MAC addr into the HW  */
-	priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
 	/* If required, perform hw setup of the bus. */
 	if (priv->bus_setup)
-		priv->bus_setup(ioaddr);
+		priv->bus_setup(priv->ioaddr);
 	/* Initialize the MAC Core */
-	priv->hw->mac->core_init(ioaddr);
+	priv->hw->mac->core_init(priv->ioaddr);
 
 	priv->shutdown = 0;
 
 	/* Initialise the MMC (if present) to disable all interrupts. */
-	writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
-	writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
+	writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
+	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
 
 	/* Enable the MAC Rx/Tx */
-	stmmac_mac_enable_rx(ioaddr);
-	stmmac_mac_enable_tx(ioaddr);
+	stmmac_mac_enable_rx(priv->ioaddr);
+	stmmac_mac_enable_tx(priv->ioaddr);
 
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
@@ -877,16 +877,16 @@ static int stmmac_open(struct net_device *dev)
 
 	/* Start the ball rolling... */
 	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
-	priv->hw->dma->start_tx(ioaddr);
-	priv->hw->dma->start_rx(ioaddr);
+	priv->hw->dma->start_tx(priv->ioaddr);
+	priv->hw->dma->start_rx(priv->ioaddr);
 
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm->timer_start(tmrate);
 #endif
 	/* Dump DMA/MAC registers */
 	if (netif_msg_hw(priv)) {
-		priv->hw->mac->dump_regs(ioaddr);
-		priv->hw->dma->dump_regs(ioaddr);
+		priv->hw->mac->dump_regs(priv->ioaddr);
+		priv->hw->dma->dump_regs(priv->ioaddr);
 	}
 
 	if (priv->phydev)
@@ -930,15 +930,15 @@ static int stmmac_release(struct net_device *dev)
 	free_irq(dev->irq, dev);
 
 	/* Stop TX/RX DMA and clear the descriptors */
-	priv->hw->dma->stop_tx(dev->base_addr);
-	priv->hw->dma->stop_rx(dev->base_addr);
+	priv->hw->dma->stop_tx(priv->ioaddr);
+	priv->hw->dma->stop_rx(priv->ioaddr);
 
 	/* Release and free the Rx/Tx resources */
 	free_dma_desc_resources(priv);
 
 	/* Disable the MAC core */
-	stmmac_mac_disable_tx(dev->base_addr);
-	stmmac_mac_disable_rx(dev->base_addr);
+	stmmac_mac_disable_tx(priv->ioaddr);
+	stmmac_mac_disable_rx(priv->ioaddr);
 
 	netif_carrier_off(dev);
 
@@ -1140,7 +1140,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->stats.tx_bytes += skb->len;
 
-	priv->hw->dma->enable_dma_transmission(dev->base_addr);
+	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
 	return NETDEV_TX_OK;
 }
@@ -1256,7 +1256,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 
 			if (unlikely(status == csum_none)) {
 				/* always for the old mac 10/100 */
-				skb->ip_summed = CHECKSUM_NONE;
+				skb_checksum_none_assert(skb);
 				netif_receive_skb(skb);
 			} else {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1405,11 +1405,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 
-	if (priv->is_gmac) {
-		unsigned long ioaddr = dev->base_addr;
+	if (priv->is_gmac)
 		/* To handle GMAC own interrupts */
-		priv->hw->mac->host_irq_status(ioaddr);
-	}
+		priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
 
 	stmmac_dma_interrupt(priv);
 
@@ -1522,7 +1520,8 @@ static int stmmac_probe(struct net_device *dev)
 	netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
 
 	/* Get the MAC address */
-	priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+	priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
+				     dev->dev_addr, 0);
 
 	if (!is_valid_ether_addr(dev->dev_addr))
 		pr_warning("\tno valid MAC address;"
@@ -1552,14 +1551,13 @@ static int stmmac_probe(struct net_device *dev)
 static int stmmac_mac_device_setup(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
 
 	struct mac_device_info *device;
 
 	if (priv->is_gmac)
-		device = dwmac1000_setup(ioaddr);
+		device = dwmac1000_setup(priv->ioaddr);
 	else
-		device = dwmac100_setup(ioaddr);
+		device = dwmac100_setup(priv->ioaddr);
 
 	if (!device)
 		return -ENOMEM;
@@ -1653,7 +1651,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 {
 	int ret = 0;
 	struct resource *res;
-	unsigned int *addr = NULL;
+	void __iomem *addr = NULL;
 	struct net_device *ndev = NULL;
 	struct stmmac_priv *priv;
 	struct plat_stmmacenet_data *plat_dat;
@@ -1708,6 +1706,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 	priv->pbl = plat_dat->pbl;	/* TLI */
 	priv->is_gmac = plat_dat->has_gmac;	/* GMAC is on board */
 	priv->enh_desc = plat_dat->enh_desc;
+	priv->ioaddr = addr;
 
 	platform_set_drvdata(pdev, ndev);
 
@@ -1743,8 +1742,8 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 	priv->bsp_priv = plat_dat->bsp_priv;
 
 	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
-	       "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
-	       pdev->id, ndev->irq, (unsigned int)addr);
+	       "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
+	       pdev->id, ndev->irq, addr);
 
 	/* MDIO bus Registration */
 	pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
@@ -1779,11 +1778,11 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 
 	pr_info("%s:\n\tremoving driver", __func__);
 
-	priv->hw->dma->stop_rx(ndev->base_addr);
-	priv->hw->dma->stop_tx(ndev->base_addr);
+	priv->hw->dma->stop_rx(priv->ioaddr);
+	priv->hw->dma->stop_tx(priv->ioaddr);
 
-	stmmac_mac_disable_rx(ndev->base_addr);
-	stmmac_mac_disable_tx(ndev->base_addr);
+	stmmac_mac_disable_rx(priv->ioaddr);
+	stmmac_mac_disable_tx(priv->ioaddr);
 
 	netif_carrier_off(ndev);
 
@@ -1792,7 +1791,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 	unregister_netdev(ndev);
 
-	iounmap((void *)ndev->base_addr);
+	iounmap((void *)priv->ioaddr);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, resource_size(res));
 
@@ -1827,22 +1826,21 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
 		napi_disable(&priv->napi);
 
 		/* Stop TX/RX DMA */
-		priv->hw->dma->stop_tx(dev->base_addr);
-		priv->hw->dma->stop_rx(dev->base_addr);
+		priv->hw->dma->stop_tx(priv->ioaddr);
+		priv->hw->dma->stop_rx(priv->ioaddr);
 		/* Clear the Rx/Tx descriptors */
 		priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
 					     dis_ic);
 		priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 
-		stmmac_mac_disable_tx(dev->base_addr);
+		stmmac_mac_disable_tx(priv->ioaddr);
 
 		if (device_may_wakeup(&(pdev->dev))) {
 			/* Enable Power down mode by programming the PMT regs */
 			if (priv->wolenabled == PMT_SUPPORTED)
-				priv->hw->mac->pmt(dev->base_addr,
-						   priv->wolopts);
+				priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
 		} else {
-			stmmac_mac_disable_rx(dev->base_addr);
+			stmmac_mac_disable_rx(priv->ioaddr);
 		}
 	} else {
 		priv->shutdown = 1;
@@ -1860,7 +1858,6 @@ static int stmmac_resume(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct stmmac_priv *priv = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
 
 	if (!netif_running(dev))
 		return 0;
@@ -1881,15 +1878,15 @@ static int stmmac_resume(struct platform_device *pdev)
 	 * from another devices (e.g. serial console). */
 	if (device_may_wakeup(&(pdev->dev)))
 		if (priv->wolenabled == PMT_SUPPORTED)
-			priv->hw->mac->pmt(dev->base_addr, 0);
+			priv->hw->mac->pmt(priv->ioaddr, 0);
 
 	netif_device_attach(dev);
 
 	/* Enable the MAC and DMA */
-	stmmac_mac_enable_rx(ioaddr);
-	stmmac_mac_enable_tx(ioaddr);
-	priv->hw->dma->start_tx(ioaddr);
-	priv->hw->dma->start_rx(ioaddr);
+	stmmac_mac_enable_rx(priv->ioaddr);
+	stmmac_mac_enable_tx(priv->ioaddr);
+	priv->hw->dma->start_tx(priv->ioaddr);
+	priv->hw->dma->start_rx(priv->ioaddr);
 
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm->timer_start(tmrate);
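One hunk above is unrelated to the ioaddr work: in the RX path, skb->ip_summed = CHECKSUM_NONE becomes skb_checksum_none_assert(skb). Freshly allocated skbs already start with ip_summed == CHECKSUM_NONE, so the helper merely documents (and, on debug builds, asserts) that assumption instead of re-storing the value. The surrounding logic stays the usual RX-checksum pattern; hw_says_checksum_ok below is a placeholder for the driver's own status test:

	/* sketch of RX completion checksum handling */
	if (hw_says_checksum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* stack skips verification */
	else
		skb_checksum_none_assert(skb);	/* was: skb->ip_summed = CHECKSUM_NONE */
	netif_receive_skb(skb);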
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 40b2c7929719..03dea1401571 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -47,7 +47,6 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
 {
 	struct net_device *ndev = bus->priv;
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	unsigned long ioaddr = ndev->base_addr;
 	unsigned int mii_address = priv->hw->mii.addr;
 	unsigned int mii_data = priv->hw->mii.data;
 
@@ -56,12 +55,12 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
 			((phyreg << 6) & (0x000007C0)));
 	regValue |= MII_BUSY;	/* in case of GMAC */
 
-	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
-	writel(regValue, ioaddr + mii_address);
-	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+	writel(regValue, priv->ioaddr + mii_address);
+	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
 
 	/* Read the data from the MII data register */
-	data = (int)readl(ioaddr + mii_data);
+	data = (int)readl(priv->ioaddr + mii_data);
 
 	return data;
 }
@@ -79,7 +78,6 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
 {
 	struct net_device *ndev = bus->priv;
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	unsigned long ioaddr = ndev->base_addr;
 	unsigned int mii_address = priv->hw->mii.addr;
 	unsigned int mii_data = priv->hw->mii.data;
 
@@ -90,14 +88,14 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
 	value |= MII_BUSY;
 
 	/* Wait until any existing MII operation is complete */
-	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
 
 	/* Set the MII address register to write */
-	writel(phydata, ioaddr + mii_data);
-	writel(value, ioaddr + mii_address);
+	writel(phydata, priv->ioaddr + mii_data);
+	writel(value, priv->ioaddr + mii_address);
 
 	/* Wait until any existing MII operation is complete */
-	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
 
 	return 0;
 }
@@ -111,7 +109,6 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
 {
 	struct net_device *ndev = bus->priv;
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	unsigned long ioaddr = ndev->base_addr;
 	unsigned int mii_address = priv->hw->mii.addr;
 
 	if (priv->phy_reset) {
@@ -123,7 +120,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
 	 * It doesn't complete its reset until at least one clock cycle
 	 * on MDC, so perform a dummy mdio read.
 	 */
-	writel(0, ioaddr + mii_address);
+	writel(0, priv->ioaddr + mii_address);
 
 	return 0;
 }
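The MDIO accessors above all follow the same handshake on the MII address/data register pair: spin until the busy flag clears, start the transaction, spin again, then move the data. Stripped of the driver plumbing, the read side is roughly the sketch below (mii_address/mii_data are register offsets and MII_BUSY the hardware busy bit, as in the driver):

static int sketch_mdio_read(void __iomem *ioaddr, u32 cmd,
			    unsigned int mii_address, unsigned int mii_data)
{
	while (readl(ioaddr + mii_address) & MII_BUSY)
		;	/* wait out any previous transaction */
	writel(cmd | MII_BUSY, ioaddr + mii_address);
	while (readl(ioaddr + mii_address) & MII_BUSY)
		;	/* wait for this one to complete */
	return (int)readl(ioaddr + mii_data);
}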
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 618643e3ca3e..0a6a5ced3c1c 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -617,7 +617,7 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
 	bp->timer_ticks = 0;
 	bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
 	bp->bigmac_timer.data = (unsigned long) bp;
-	bp->bigmac_timer.function = &bigmac_timer;
+	bp->bigmac_timer.function = bigmac_timer;
 	add_timer(&bp->bigmac_timer);
 }
 
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 2678588ea4b2..3fa949789b42 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -874,7 +874,7 @@ static int netdev_open(struct net_device *dev)
 	init_timer(&np->timer);
 	np->timer.expires = jiffies + 3*HZ;
 	np->timer.data = (unsigned long)dev;
-	np->timer.function = &netdev_timer;	/* timer handler */
+	np->timer.function = netdev_timer;	/* timer handler */
 	add_timer(&np->timer);
 
 	/* Enable interrupts by setting the interrupt mask. */
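The sunbmac and sundance hunks are pure style: a function name already decays to a function pointer, so the & in the assignment is redundant. Both forms produce the same pointer under the old struct timer_list API these drivers use:

	np->timer.function = &netdev_timer;	/* explicit address-of */
	np->timer.function = netdev_timer;	/* same pointer, preferred style */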
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 434f9d735333..4ceb3cf6a9a9 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -31,6 +31,8 @@
  * about when we can start taking interrupts or get xmit() called...
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -105,7 +107,6 @@ MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
 MODULE_LICENSE("GPL");
 
 #define GEM_MODULE_NAME	"gem"
-#define PFX GEM_MODULE_NAME ": "
 
 static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
 	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
@@ -262,8 +263,7 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
 			gp->dev->name, pcs_istat);
 
 	if (!(pcs_istat & PCS_ISTAT_LSC)) {
-		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
-		       dev->name);
+		netdev_err(dev, "PCS irq but no link status change???\n");
 		return 0;
 	}
 
@@ -282,20 +282,16 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
 		 * when autoneg has completed.
 		 */
 		if (pcs_miistat & PCS_MIISTAT_RF)
-			printk(KERN_INFO "%s: PCS AutoNEG complete, "
-			       "RemoteFault\n", dev->name);
+			netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
 		else
-			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
-			       dev->name);
+			netdev_info(dev, "PCS AutoNEG complete\n");
 	}
 
 	if (pcs_miistat & PCS_MIISTAT_LS) {
-		printk(KERN_INFO "%s: PCS link is now up.\n",
-		       dev->name);
+		netdev_info(dev, "PCS link is now up\n");
 		netif_carrier_on(gp->dev);
 	} else {
-		printk(KERN_INFO "%s: PCS link is now down.\n",
-		       dev->name);
+		netdev_info(dev, "PCS link is now down\n");
 		netif_carrier_off(gp->dev);
 		/* If this happens and the link timer is not running,
 		 * reset so we re-negotiate.
@@ -323,14 +319,12 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
 		return 0;
 
 	if (txmac_stat & MAC_TXSTAT_URUN) {
-		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
-		       dev->name);
+		netdev_err(dev, "TX MAC xmit underrun\n");
 		gp->net_stats.tx_fifo_errors++;
 	}
 
 	if (txmac_stat & MAC_TXSTAT_MPE) {
-		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
-		       dev->name);
+		netdev_err(dev, "TX MAC max packet size error\n");
 		gp->net_stats.tx_errors++;
 	}
 
@@ -377,8 +371,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		udelay(10);
 	}
 	if (limit == 5000) {
-		printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
-		       "chip.\n", dev->name);
+		netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
 		return 1;
 	}
 
@@ -390,8 +383,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		udelay(10);
 	}
 	if (limit == 5000) {
-		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
-		       "chip.\n", dev->name);
+		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
 		return 1;
 	}
 
@@ -403,8 +395,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		udelay(10);
 	}
 	if (limit == 5000) {
-		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
-		       "chip.\n", dev->name);
+		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
 		return 1;
 	}
 
@@ -419,8 +410,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		udelay(10);
 	}
 	if (limit == 5000) {
-		printk(KERN_ERR "%s: RX reset command will not execute, resetting "
-		       "whole chip.\n", dev->name);
+		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
 		return 1;
 	}
 
@@ -429,8 +419,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		struct gem_rxd *rxd = &gp->init_block->rxd[i];
 
 		if (gp->rx_skbs[i] == NULL) {
-			printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
-			       "whole chip.\n", dev->name);
+			netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
 			return 1;
 		}
 
@@ -479,8 +468,7 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
 	if (rxmac_stat & MAC_RXSTAT_OFLW) {
 		u32 smac = readl(gp->regs + MAC_SMACHINE);
 
-		printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
-		       dev->name, smac);
+		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
 		gp->net_stats.rx_over_errors++;
 		gp->net_stats.rx_fifo_errors++;
 
@@ -542,19 +530,18 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
 
 	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
 	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
-		printk(KERN_ERR "%s: PCI error [%04x] ",
-		       dev->name, pci_estat);
+		netdev_err(dev, "PCI error [%04x]", pci_estat);
 
 		if (pci_estat & GREG_PCIESTAT_BADACK)
-			printk("<No ACK64# during ABS64 cycle> ");
+			pr_cont(" <No ACK64# during ABS64 cycle>");
 		if (pci_estat & GREG_PCIESTAT_DTRTO)
-			printk("<Delayed transaction timeout> ");
+			pr_cont(" <Delayed transaction timeout>");
 		if (pci_estat & GREG_PCIESTAT_OTHER)
-			printk("<other>");
-		printk("\n");
+			pr_cont(" <other>");
+		pr_cont("\n");
 	} else {
 		pci_estat |= GREG_PCIESTAT_OTHER;
-		printk(KERN_ERR "%s: PCI error\n", dev->name);
+		netdev_err(dev, "PCI error\n");
 	}
 
 	if (pci_estat & GREG_PCIESTAT_OTHER) {
@@ -565,26 +552,20 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
 		 */
 		pci_read_config_word(gp->pdev, PCI_STATUS,
 				     &pci_cfg_stat);
-		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
-		       dev->name, pci_cfg_stat);
+		netdev_err(dev, "Read PCI cfg space status [%04x]\n",
+			   pci_cfg_stat);
 		if (pci_cfg_stat & PCI_STATUS_PARITY)
-			printk(KERN_ERR "%s: PCI parity error detected.\n",
-			       dev->name);
+			netdev_err(dev, "PCI parity error detected\n");
 		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
-			printk(KERN_ERR "%s: PCI target abort.\n",
-			       dev->name);
+			netdev_err(dev, "PCI target abort\n");
 		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
-			printk(KERN_ERR "%s: PCI master acks target abort.\n",
-			       dev->name);
+			netdev_err(dev, "PCI master acks target abort\n");
 		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
-			printk(KERN_ERR "%s: PCI master abort.\n",
-			       dev->name);
+			netdev_err(dev, "PCI master abort\n");
 		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
-			printk(KERN_ERR "%s: PCI system error SERR#.\n",
-			       dev->name);
+			netdev_err(dev, "PCI system error SERR#\n");
 		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
-			printk(KERN_ERR "%s: PCI parity error.\n",
-			       dev->name);
+			netdev_err(dev, "PCI parity error\n");
 
 		/* Write the error bits back to clear them. */
 		pci_cfg_stat &= (PCI_STATUS_PARITY |
@@ -874,8 +855,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
 	gp->rx_new = entry;
 
 	if (drops)
-		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
-		       gp->dev->name);
+		netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
 
 	return work_done;
 }
@@ -981,21 +961,19 @@ static void gem_tx_timeout(struct net_device *dev)
 {
 	struct gem *gp = netdev_priv(dev);
 
-	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+	netdev_err(dev, "transmit timed out, resetting\n");
 	if (!gp->running) {
-		printk("%s: hrm.. hw not running !\n", dev->name);
+		netdev_err(dev, "hrm.. hw not running !\n");
 		return;
 	}
-	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
-	       dev->name,
-	       readl(gp->regs + TXDMA_CFG),
-	       readl(gp->regs + MAC_TXSTAT),
-	       readl(gp->regs + MAC_TXCFG));
-	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
-	       dev->name,
-	       readl(gp->regs + RXDMA_CFG),
-	       readl(gp->regs + MAC_RXSTAT),
-	       readl(gp->regs + MAC_RXCFG));
+	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
+		   readl(gp->regs + TXDMA_CFG),
+		   readl(gp->regs + MAC_TXSTAT),
+		   readl(gp->regs + MAC_TXCFG));
+	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+		   readl(gp->regs + RXDMA_CFG),
+		   readl(gp->regs + MAC_RXSTAT),
+		   readl(gp->regs + MAC_RXCFG));
 
 	spin_lock_irq(&gp->lock);
 	spin_lock(&gp->tx_lock);
@@ -1048,8 +1026,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&gp->tx_lock, flags);
-		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
-		       dev->name);
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1158,8 +1135,7 @@ static void gem_pcs_reset(struct gem *gp)
 			break;
 	}
 	if (limit < 0)
-		printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
-		       gp->dev->name);
+		netdev_warn(gp->dev, "PCS reset bit would not clear\n");
 }
 
 static void gem_pcs_reinit_adv(struct gem *gp)
@@ -1230,7 +1206,7 @@ static void gem_reset(struct gem *gp)
 	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
 
 	if (limit < 0)
-		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+		netdev_err(gp->dev, "SW reset is ghetto\n");
 
 	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
 		gem_pcs_reinit_adv(gp);
@@ -1395,9 +1371,8 @@ static int gem_set_link_modes(struct gem *gp)
 		speed = SPEED_1000;
 	}
 
-	if (netif_msg_link(gp))
-		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
-		       gp->dev->name, speed, (full_duplex ? "full" : "half"));
+	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
+		   speed, (full_duplex ? "full" : "half"));
 
 	if (!gp->running)
 		return 0;
@@ -1451,15 +1426,13 @@ static int gem_set_link_modes(struct gem *gp)
 
 	if (netif_msg_link(gp)) {
 		if (pause) {
-			printk(KERN_INFO "%s: Pause is enabled "
-			       "(rxfifo: %d off: %d on: %d)\n",
-			       gp->dev->name,
-			       gp->rx_fifo_sz,
-			       gp->rx_pause_off,
-			       gp->rx_pause_on);
+			netdev_info(gp->dev,
+				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+				    gp->rx_fifo_sz,
+				    gp->rx_pause_off,
+				    gp->rx_pause_on);
 		} else {
-			printk(KERN_INFO "%s: Pause is disabled\n",
-			       gp->dev->name);
+			netdev_info(gp->dev, "Pause is disabled\n");
 		}
 	}
 
@@ -1484,9 +1457,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
 {
 	switch (gp->lstate) {
 	case link_force_ret:
-		if (netif_msg_link(gp))
-			printk(KERN_INFO "%s: Autoneg failed again, keeping"
-			       " forced mode\n", gp->dev->name);
+		netif_info(gp, link, gp->dev,
+			   "Autoneg failed again, keeping forced mode\n");
 		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
 						   gp->last_forced_speed, DUPLEX_HALF);
 		gp->timer_ticks = 5;
@@ -1499,9 +1471,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
 		 */
 		if (gp->phy_mii.def->magic_aneg)
 			return 1;
-		if (netif_msg_link(gp))
-			printk(KERN_INFO "%s: switching to forced 100bt\n",
-			       gp->dev->name);
+		netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
 		/* Try forced modes. */
 		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
 						   DUPLEX_HALF);
@@ -1517,9 +1487,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
 			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
 							   DUPLEX_HALF);
 			gp->timer_ticks = 5;
-			if (netif_msg_link(gp))
-				printk(KERN_INFO "%s: switching to forced 10bt\n",
-				       gp->dev->name);
+			netif_info(gp, link, gp->dev,
+				   "switching to forced 10bt\n");
 			return 0;
 		} else
 			return 1;
@@ -1574,8 +1543,8 @@ static void gem_link_timer(unsigned long data)
 			gp->last_forced_speed = gp->phy_mii.speed;
 			gp->timer_ticks = 5;
 			if (netif_msg_link(gp))
-				printk(KERN_INFO "%s: Got link after fallback, retrying"
-				       " autoneg once...\n", gp->dev->name);
+				netdev_info(gp->dev,
+					    "Got link after fallback, retrying autoneg once...\n");
 			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
 		} else if (gp->lstate != link_up) {
 			gp->lstate = link_up;
@@ -1589,9 +1558,7 @@ static void gem_link_timer(unsigned long data)
 	 */
 	if (gp->lstate == link_up) {
 		gp->lstate = link_down;
-		if (netif_msg_link(gp))
-			printk(KERN_INFO "%s: Link down\n",
-			       gp->dev->name);
+		netif_info(gp, link, gp->dev, "Link down\n");
 		netif_carrier_off(gp->dev);
 		gp->reset_task_pending = 1;
 		schedule_work(&gp->reset_task);
@@ -1746,8 +1713,7 @@ static void gem_init_phy(struct gem *gp)
 		if (phy_read(gp, MII_BMCR) != 0xffff)
 			break;
 		if (i == 2)
-			printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
-			       gp->dev->name);
+			netdev_warn(gp->dev, "GMAC PHY not responding !\n");
 	}
 }
 
@@ -2038,7 +2004,7 @@ static int gem_check_invariants(struct gem *gp)
2038 * as this chip has no gigabit PHY. 2004 * as this chip has no gigabit PHY.
2039 */ 2005 */
2040 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { 2006 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
2041 printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n", 2007 pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
2042 mif_cfg); 2008 mif_cfg);
2043 return -1; 2009 return -1;
2044 } 2010 }
@@ -2078,7 +2044,7 @@ static int gem_check_invariants(struct gem *gp)
2078 } 2044 }
2079 if (i == 32) { 2045 if (i == 32) {
2080 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { 2046 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
2081 printk(KERN_ERR PFX "RIO MII phy will not respond.\n"); 2047 pr_err("RIO MII phy will not respond\n");
2082 return -1; 2048 return -1;
2083 } 2049 }
2084 gp->phy_type = phy_serdes; 2050 gp->phy_type = phy_serdes;
@@ -2093,7 +2059,7 @@ static int gem_check_invariants(struct gem *gp)
2093 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { 2059 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
2094 if (gp->tx_fifo_sz != (9 * 1024) || 2060 if (gp->tx_fifo_sz != (9 * 1024) ||
2095 gp->rx_fifo_sz != (20 * 1024)) { 2061 gp->rx_fifo_sz != (20 * 1024)) {
2096 printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2062 pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2097 gp->tx_fifo_sz, gp->rx_fifo_sz); 2063 gp->tx_fifo_sz, gp->rx_fifo_sz);
2098 return -1; 2064 return -1;
2099 } 2065 }
@@ -2101,7 +2067,7 @@ static int gem_check_invariants(struct gem *gp)
2101 } else { 2067 } else {
2102 if (gp->tx_fifo_sz != (2 * 1024) || 2068 if (gp->tx_fifo_sz != (2 * 1024) ||
2103 gp->rx_fifo_sz != (2 * 1024)) { 2069 gp->rx_fifo_sz != (2 * 1024)) {
2104 printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2070 pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2105 gp->tx_fifo_sz, gp->rx_fifo_sz); 2071 gp->tx_fifo_sz, gp->rx_fifo_sz);
2106 return -1; 2072 return -1;
2107 } 2073 }
@@ -2239,7 +2205,7 @@ static int gem_do_start(struct net_device *dev)
2239 2205
2240 if (request_irq(gp->pdev->irq, gem_interrupt, 2206 if (request_irq(gp->pdev->irq, gem_interrupt,
2241 IRQF_SHARED, dev->name, (void *)dev)) { 2207 IRQF_SHARED, dev->name, (void *)dev)) {
2242 printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name); 2208 netdev_err(dev, "failed to request irq !\n");
2243 2209
2244 spin_lock_irqsave(&gp->lock, flags); 2210 spin_lock_irqsave(&gp->lock, flags);
2245 spin_lock(&gp->tx_lock); 2211 spin_lock(&gp->tx_lock);
@@ -2378,9 +2344,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2378 2344
2379 mutex_lock(&gp->pm_mutex); 2345 mutex_lock(&gp->pm_mutex);
2380 2346
2381 printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", 2347 netdev_info(dev, "suspending, WakeOnLan %s\n",
2382 dev->name, 2348 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
2383 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
2384 2349
2385 /* Keep the cell enabled during the entire operation */ 2350 /* Keep the cell enabled during the entire operation */
2386 spin_lock_irqsave(&gp->lock, flags); 2351 spin_lock_irqsave(&gp->lock, flags);
@@ -2440,7 +2405,7 @@ static int gem_resume(struct pci_dev *pdev)
2440 struct gem *gp = netdev_priv(dev); 2405 struct gem *gp = netdev_priv(dev);
2441 unsigned long flags; 2406 unsigned long flags;
2442 2407
2443 printk(KERN_INFO "%s: resuming\n", dev->name); 2408 netdev_info(dev, "resuming\n");
2444 2409
2445 mutex_lock(&gp->pm_mutex); 2410 mutex_lock(&gp->pm_mutex);
2446 2411
@@ -2452,8 +2417,7 @@ static int gem_resume(struct pci_dev *pdev)
2452 2417
2453 /* Make sure PCI access and bus master are enabled */ 2418 /* Make sure PCI access and bus master are enabled */
2454 if (pci_enable_device(gp->pdev)) { 2419 if (pci_enable_device(gp->pdev)) {
2455 printk(KERN_ERR "%s: Can't re-enable chip !\n", 2420 netdev_err(dev, "Can't re-enable chip !\n");
2456 dev->name);
2457 /* Put cell and forget it for now, it will be considered as 2421 /* Put cell and forget it for now, it will be considered as
2458 * still asleep, a new sleep cycle may bring it back 2422 * still asleep, a new sleep cycle may bring it back
2459 */ 2423 */
@@ -2938,7 +2902,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
2938 addr = idprom->id_ethaddr; 2902 addr = idprom->id_ethaddr;
2939#else 2903#else
2940 printk("\n"); 2904 printk("\n");
2941 printk(KERN_ERR "%s: can't get mac-address\n", dev->name); 2905 pr_err("%s: can't get mac-address\n", dev->name);
2942 return -1; 2906 return -1;
2943#endif 2907#endif
2944 } 2908 }
@@ -3009,14 +2973,12 @@ static const struct net_device_ops gem_netdev_ops = {
3009static int __devinit gem_init_one(struct pci_dev *pdev, 2973static int __devinit gem_init_one(struct pci_dev *pdev,
3010 const struct pci_device_id *ent) 2974 const struct pci_device_id *ent)
3011{ 2975{
3012 static int gem_version_printed = 0;
3013 unsigned long gemreg_base, gemreg_len; 2976 unsigned long gemreg_base, gemreg_len;
3014 struct net_device *dev; 2977 struct net_device *dev;
3015 struct gem *gp; 2978 struct gem *gp;
3016 int err, pci_using_dac; 2979 int err, pci_using_dac;
3017 2980
3018 if (gem_version_printed++ == 0) 2981 printk_once(KERN_INFO "%s", version);
3019 printk(KERN_INFO "%s", version);
3020 2982
3021 /* Apple gmac note: during probe, the chip is powered up by 2983 /* Apple gmac note: during probe, the chip is powered up by
3022 * the arch code to allow the code below to work (and to let 2984 * the arch code to allow the code below to work (and to let
@@ -3026,8 +2988,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3026 */ 2988 */
3027 err = pci_enable_device(pdev); 2989 err = pci_enable_device(pdev);
3028 if (err) { 2990 if (err) {
3029 printk(KERN_ERR PFX "Cannot enable MMIO operation, " 2991 pr_err("Cannot enable MMIO operation, aborting\n");
3030 "aborting.\n");
3031 return err; 2992 return err;
3032 } 2993 }
3033 pci_set_master(pdev); 2994 pci_set_master(pdev);
@@ -3048,8 +3009,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3048 } else { 3009 } else {
3049 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3010 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3050 if (err) { 3011 if (err) {
3051 printk(KERN_ERR PFX "No usable DMA configuration, " 3012 pr_err("No usable DMA configuration, aborting\n");
3052 "aborting.\n");
3053 goto err_disable_device; 3013 goto err_disable_device;
3054 } 3014 }
3055 pci_using_dac = 0; 3015 pci_using_dac = 0;
@@ -3059,15 +3019,14 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3059 gemreg_len = pci_resource_len(pdev, 0); 3019 gemreg_len = pci_resource_len(pdev, 0);
3060 3020
3061 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { 3021 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3062 printk(KERN_ERR PFX "Cannot find proper PCI device " 3022 pr_err("Cannot find proper PCI device base address, aborting\n");
3063 "base address, aborting.\n");
3064 err = -ENODEV; 3023 err = -ENODEV;
3065 goto err_disable_device; 3024 goto err_disable_device;
3066 } 3025 }
3067 3026
3068 dev = alloc_etherdev(sizeof(*gp)); 3027 dev = alloc_etherdev(sizeof(*gp));
3069 if (!dev) { 3028 if (!dev) {
3070 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 3029 pr_err("Etherdev alloc failed, aborting\n");
3071 err = -ENOMEM; 3030 err = -ENOMEM;
3072 goto err_disable_device; 3031 goto err_disable_device;
3073 } 3032 }
@@ -3077,8 +3036,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3077 3036
3078 err = pci_request_regions(pdev, DRV_NAME); 3037 err = pci_request_regions(pdev, DRV_NAME);
3079 if (err) { 3038 if (err) {
3080 printk(KERN_ERR PFX "Cannot obtain PCI resources, " 3039 pr_err("Cannot obtain PCI resources, aborting\n");
3081 "aborting.\n");
3082 goto err_out_free_netdev; 3040 goto err_out_free_netdev;
3083 } 3041 }
3084 3042
@@ -3104,8 +3062,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3104 3062
3105 gp->regs = ioremap(gemreg_base, gemreg_len); 3063 gp->regs = ioremap(gemreg_base, gemreg_len);
3106 if (!gp->regs) { 3064 if (!gp->regs) {
3107 printk(KERN_ERR PFX "Cannot map device registers, " 3065 pr_err("Cannot map device registers, aborting\n");
3108 "aborting.\n");
3109 err = -EIO; 3066 err = -EIO;
3110 goto err_out_free_res; 3067 goto err_out_free_res;
3111 } 3068 }
@@ -3150,8 +3107,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3150 pci_alloc_consistent(pdev, sizeof(struct gem_init_block), 3107 pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
3151 &gp->gblock_dvma); 3108 &gp->gblock_dvma);
3152 if (!gp->init_block) { 3109 if (!gp->init_block) {
3153 printk(KERN_ERR PFX "Cannot allocate init block, " 3110 pr_err("Cannot allocate init block, aborting\n");
3154 "aborting.\n");
3155 err = -ENOMEM; 3111 err = -ENOMEM;
3156 goto err_out_iounmap; 3112 goto err_out_iounmap;
3157 } 3113 }
@@ -3180,19 +3136,18 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3180 3136
3181 /* Register with kernel */ 3137 /* Register with kernel */
3182 if (register_netdev(dev)) { 3138 if (register_netdev(dev)) {
3183 printk(KERN_ERR PFX "Cannot register net device, " 3139 pr_err("Cannot register net device, aborting\n");
3184 "aborting.\n");
3185 err = -ENOMEM; 3140 err = -ENOMEM;
3186 goto err_out_free_consistent; 3141 goto err_out_free_consistent;
3187 } 3142 }
3188 3143
3189 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", 3144 netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
3190 dev->name, dev->dev_addr); 3145 dev->dev_addr);
3191 3146
3192 if (gp->phy_type == phy_mii_mdio0 || 3147 if (gp->phy_type == phy_mii_mdio0 ||
3193 gp->phy_type == phy_mii_mdio1) 3148 gp->phy_type == phy_mii_mdio1)
3194 printk(KERN_INFO "%s: Found %s PHY\n", dev->name, 3149 netdev_info(dev, "Found %s PHY\n",
3195 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3150 gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3196 3151
3197 /* GEM can do it all... */ 3152 /* GEM can do it all... */
3198 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3153 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
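For reference: the netdev_err()/netdev_info()/netif_info() helpers used throughout the sungem conversion (from linux/netdevice.h) prefix each message with the driver and interface name themselves, which is why every explicit dev->name argument disappears. A minimal sketch of the before/after pattern; the message text here is illustrative only:

	/* before: the caller formats the device name by hand */
	printk(KERN_ERR "%s: reset failed\n", dev->name);

	/* after: netdev_err() derives the prefix from *dev */
	netdev_err(dev, "reset failed\n");

	/* netif_info() also folds in the netif_msg_* bitmask test,
	 * replacing the open-coded "if (netif_msg_link(gp)) printk(...)" */
	netif_info(gp, link, gp->dev, "Link is up\n");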
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 78f8cee5fd74..4a4fac630337 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -1175,7 +1175,8 @@ int mii_phy_probe(struct mii_phy *phy, int mii_id)
 
 	/* Read ID and find matching entry */
 	id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
-	printk(KERN_DEBUG "PHY ID: %x, addr: %x\n", id, mii_id);
+	printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
+	       id, mii_id);
 	for (i=0; (def = mii_phy_table[i]) != NULL; i++)
 		if ((id & def->phy_id_mask) == def->phy_id)
 			break;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index bd0df1c14955..45f315ed1868 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1409,7 +1409,7 @@ force_link:
 	hp->timer_ticks = 0;
 	hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
 	hp->happy_timer.data = (unsigned long) hp;
-	hp->happy_timer.function = &happy_meal_timer;
+	hp->happy_timer.function = happy_meal_timer;
 	add_timer(&hp->happy_timer);
 }
 
@@ -2808,7 +2808,8 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
 	happy_meal_set_initial_advertisement(hp);
 	spin_unlock_irq(&hp->happy_lock);
 
-	if (register_netdev(hp->dev)) {
+	err = register_netdev(hp->dev);
+	if (err) {
 		printk(KERN_ERR "happymeal: Cannot register net device, "
 		       "aborting.\n");
 		goto err_out_free_coherent;
@@ -3130,7 +3131,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
 	happy_meal_set_initial_advertisement(hp);
 	spin_unlock_irq(&hp->happy_lock);
 
-	if (register_netdev(hp->dev)) {
+	err = register_netdev(hp->dev);
+	if (err) {
 		printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
 		       "aborting.\n");
 		goto err_out_iounmap;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 8dcb858f2168..2cf84e5968b2 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1483,7 +1483,7 @@ no_link_test:
 	 */
 	init_timer(&lp->multicast_timer);
 	lp->multicast_timer.data = (unsigned long) dev;
-	lp->multicast_timer.function = &lance_set_multicast_retry;
+	lp->multicast_timer.function = lance_set_multicast_retry;
 
 	if (register_netdev(dev)) {
 		printk(KERN_ERR "SunLance: Cannot register device.\n");
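The `&` dropped from these timer assignments (here and in tlan, dmfe, uli526x and winbond-840 below) is purely cosmetic: a C function designator decays to a pointer to the function, so `&handler` and `handler` yield the same value. A standalone illustration:

	#include <stdio.h>

	static void handler(unsigned long data)
	{
		printf("timer fired: %lu\n", data);
	}

	int main(void)
	{
		void (*fn)(unsigned long);

		fn = &handler;	/* explicit address-of */
		fn = handler;	/* same pointer: the name decays */
		fn(42);
		return 0;
	}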
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index d281a7b34701..bf3c762de620 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -3,6 +3,8 @@
  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -20,7 +22,6 @@
 #include "sunvnet.h"
 
 #define DRV_MODULE_NAME		"sunvnet"
-#define PFX DRV_MODULE_NAME	": "
 #define DRV_MODULE_VERSION	"1.0"
 #define DRV_MODULE_RELDATE	"June 25, 2007"
 
@@ -45,9 +46,9 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg)
 {
 	struct vio_msg_tag *pkt = arg;
 
-	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
+	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
 	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
-	printk(KERN_ERR PFX "Resetting connection.\n");
+	pr_err("Resetting connection\n");
 
 	ldc_disconnect(port->vio.lp);
 
@@ -400,8 +401,8 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf)
 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 		return 0;
 	if (unlikely(pkt->seq != dr->rcv_nxt)) {
-		printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] "
-		       "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt);
+		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
+		       pkt->seq, dr->rcv_nxt);
 		return 0;
 	}
 
@@ -464,8 +465,7 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
 	struct vio_net_mcast_info *pkt = msgbuf;
 
 	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
-		printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
-		       "[%02x:%02x:%04x:%08x]\n",
+		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
 		       port->vp->dev->name,
 		       pkt->tag.type,
 		       pkt->tag.stype,
@@ -520,7 +520,7 @@ static void vnet_event(void *arg, int event)
 	}
 
 	if (unlikely(event != LDC_EVENT_DATA_READY)) {
-		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
+		pr_warning("Unexpected LDC event %d\n", event);
 		spin_unlock_irqrestore(&vio->lock, flags);
 		return;
 	}
@@ -662,8 +662,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 
 		/* This is a hard error, log it. */
-		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
-		       "queue awake!\n", dev->name);
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 		dev->stats.tx_errors++;
 	}
 	spin_unlock_irqrestore(&port->vio.lock, flags);
@@ -696,8 +695,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	err = __vnet_tx_trigger(port);
 	if (unlikely(err < 0)) {
-		printk(KERN_INFO PFX "%s: TX trigger error %d\n",
-		       dev->name, err);
+		netdev_info(dev, "TX trigger error %d\n", err);
 		d->hdr.state = VIO_DESC_FREE;
 		dev->stats.tx_carrier_errors++;
 		goto out_dropped_unlock;
@@ -952,12 +950,12 @@ static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
 
 		err = -ENOMEM;
 		if (!buf) {
-			printk(KERN_ERR "TX buffer allocation failure\n");
+			pr_err("TX buffer allocation failure\n");
 			goto err_out;
 		}
 		err = -EFAULT;
 		if ((unsigned long)buf & (8UL - 1)) {
-			printk(KERN_ERR "TX buffer misaligned\n");
+			pr_err("TX buffer misaligned\n");
 			kfree(buf);
 			goto err_out;
 		}
@@ -1030,7 +1028,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
 
 	dev = alloc_etherdev(sizeof(*vp));
 	if (!dev) {
-		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		pr_err("Etherdev alloc failed, aborting\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1056,12 +1054,11 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
 
 	err = register_netdev(dev);
 	if (err) {
-		printk(KERN_ERR PFX "Cannot register net device, "
-		       "aborting.\n");
+		pr_err("Cannot register net device, aborting\n");
 		goto err_out_free_dev;
 	}
 
-	printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
+	netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
 
 	list_add(&vp->list, &vnet_list);
 
@@ -1133,10 +1130,7 @@ static struct vio_driver_ops vnet_vio_ops = {
 
 static void __devinit print_version(void)
 {
-	static int version_printed;
-
-	if (version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
+	printk_once(KERN_INFO "%s", version);
 }
 
 const char *remote_macaddr_prop = "remote-mac-address";
@@ -1157,7 +1151,7 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
 
 	vp = vnet_find_parent(hp, vdev->mp);
 	if (IS_ERR(vp)) {
-		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
+		pr_err("Cannot find port parent vnet\n");
 		err = PTR_ERR(vp);
 		goto err_out_put_mdesc;
 	}
@@ -1165,15 +1159,14 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
 	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
 	err = -ENODEV;
 	if (!rmac) {
-		printk(KERN_ERR PFX "Port lacks %s property.\n",
-		       remote_macaddr_prop);
+		pr_err("Port lacks %s property\n", remote_macaddr_prop);
 		goto err_out_put_mdesc;
 	}
 
 	port = kzalloc(sizeof(*port), GFP_KERNEL);
 	err = -ENOMEM;
 	if (!port) {
-		printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
+		pr_err("Cannot allocate vnet_port\n");
 		goto err_out_put_mdesc;
 	}
 
@@ -1214,9 +1207,8 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
 
 	dev_set_drvdata(&vdev->dev, port);
 
-	printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n",
-	       vp->dev->name, port->raddr,
-	       switch_port ? " switch-port" : "");
+	pr_info("%s: PORT ( remote-mac %pM%s )\n",
+		vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
 
 	vio_port_up(&port->vio);
 
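Background on the sunvnet conversion: pr_err() and friends expand to printk(KERN_ERR pr_fmt(fmt), ...), and pr_fmt defaults to the bare format string, so defining it before the first include gives every message a module prefix without a hand-rolled PFX macro. A minimal sketch, with the prefix assumed to come from KBUILD_MODNAME ("sunvnet" here):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void report_reset(void)
	{
		/* logs "sunvnet: Resetting connection" at KERN_ERR level */
		pr_err("Resetting connection\n");
	}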
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 737df6032bbc..8b3dc1eb4015 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -92,7 +92,7 @@ static void bdx_rx_free(struct bdx_priv *priv);
 static void bdx_tx_free(struct bdx_priv *priv);
 
 /* Definitions needed by bdx_probe */
-static void bdx_ethtool_ops(struct net_device *netdev);
+static void bdx_set_ethtool_ops(struct net_device *netdev);
 
 /*************************************************************************
  *    Print Info                                                         *
@@ -927,13 +927,6 @@ static void bdx_update_stats(struct bdx_priv *priv)
 	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
 }
 
-static struct net_device_stats *bdx_get_stats(struct net_device *ndev)
-{
-	struct bdx_priv *priv = netdev_priv(ndev);
-	struct net_device_stats *net_stat = &priv->net_stats;
-	return net_stat;
-}
-
 static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
 		       u16 rxd_vlan);
 static void print_rxfd(struct rxf_desc *rxfd);
@@ -1220,6 +1213,7 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
 
 static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
 {
+	struct net_device *ndev = priv->ndev;
 	struct sk_buff *skb, *skb2;
 	struct rxd_desc *rxdd;
 	struct rx_map *dm;
@@ -1273,7 +1267,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
 
 		if (unlikely(GET_RXD_ERR(rxd_val1))) {
 			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
-			priv->net_stats.rx_errors++;
+			ndev->stats.rx_errors++;
 			bdx_recycle_skb(priv, rxdd);
 			continue;
 		}
@@ -1300,15 +1294,16 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
 			bdx_rxdb_free_elem(db, rxdd->va_lo);
 		}
 
-		priv->net_stats.rx_bytes += len;
+		ndev->stats.rx_bytes += len;
 
 		skb_put(skb, len);
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		skb->protocol = eth_type_trans(skb, priv->ndev);
+		skb->protocol = eth_type_trans(skb, ndev);
 
 		/* Non-IP packets aren't checksum-offloaded */
 		if (GET_RXD_PKT_ID(rxd_val1) == 0)
-			skb->ip_summed = CHECKSUM_NONE;
+			skb_checksum_none_assert(skb);
+		else
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
 
@@ -1316,7 +1311,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
 			break;
 	}
 
-	priv->net_stats.rx_packets += done;
+	ndev->stats.rx_packets += done;
 
 	/* FIXME: do smth to minimize pci accesses */
 	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
@@ -1712,8 +1707,8 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 #ifdef BDX_LLTX
 	ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 #endif
-	priv->net_stats.tx_packets++;
-	priv->net_stats.tx_bytes += skb->len;
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += skb->len;
 
 	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
 		DBG("%s: %s: TX Q STOP level %d\n",
@@ -1888,7 +1883,6 @@ static const struct net_device_ops bdx_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= bdx_ioctl,
 	.ndo_set_multicast_list = bdx_setmulti,
-	.ndo_get_stats		= bdx_get_stats,
 	.ndo_change_mtu		= bdx_change_mtu,
 	.ndo_set_mac_address	= bdx_set_mac,
 	.ndo_vlan_rx_register	= bdx_vlan_rx_register,
@@ -2012,7 +2006,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ndev->netdev_ops = &bdx_netdev_ops;
 	ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
 
-	bdx_ethtool_ops(ndev);	/* ethtool interface */
+	bdx_set_ethtool_ops(ndev);	/* ethtool interface */
 
 	/* these fields are used for info purposes only
 	 * so we can have them same for all ports of the board */
@@ -2417,10 +2411,10 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
 }
 
 /*
- * bdx_ethtool_ops - ethtool interface implementation
+ * bdx_set_ethtool_ops - ethtool interface implementation
  * @netdev
 */
-static void bdx_ethtool_ops(struct net_device *netdev)
+static void bdx_set_ethtool_ops(struct net_device *netdev)
 {
	static const struct ethtool_ops bdx_ethtool_ops = {
		.get_settings = bdx_get_settings,
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 67e3b71bf705..b6ba8601e2b5 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -269,7 +269,6 @@ struct bdx_priv {
 	u32 msg_enable;
 	int stats_flag;
 	struct bdx_stats hw_stats;
-	struct net_device_stats net_stats;
 	struct pci_dev *pdev;
 
 	struct pci_nic *nic;
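Context for the tehuti changes: struct net_device already embeds a struct net_device_stats as dev->stats, and dev_get_stats() falls back to it when a driver registers no .ndo_get_stats hook, so a driver that merely mirrored counters into a private copy can delete both the field and the hook. A hedged sketch of the resulting pattern:

	/* RX path: count straight into the embedded stats block */
	static void count_rx(struct net_device *ndev, unsigned int len)
	{
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	/* no .ndo_get_stats needed: the core returns &dev->stats itself */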
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index bc3af78a869f..9f6ffffc8376 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4719,7 +4719,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
-			skb->ip_summed = CHECKSUM_NONE;
+			skb_checksum_none_assert(skb);
 
 		skb->protocol = eth_type_trans(skb, tp->dev);
 
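skb_checksum_none_assert() documents rather than performs the reset: a freshly allocated skb already carries ip_summed == CHECKSUM_NONE, so the helper only asserts that invariant on debug builds. Its shape at the time was roughly the following (a sketch simplified from linux/skbuff.h of this era, not a verbatim copy):

	static inline void skb_checksum_none_assert(struct sk_buff *skb)
	{
	#ifdef DEBUG
		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
	#endif
	}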
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index ccee3eddc5f4..0564ca05963d 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -393,7 +393,7 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
 			spin_unlock_irqrestore(&priv->lock, flags);
 			return;
 		}
-		priv->timer.function = &TLan_Timer;
+		priv->timer.function = TLan_Timer;
 		if (!in_irq())
 			spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1453,7 +1453,7 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
 		TLan_DioWrite8( dev->base_addr,
 				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
 		if ( priv->timer.function == NULL ) {
-			priv->timer.function = &TLan_Timer;
+			priv->timer.function = TLan_Timer;
 			priv->timer.data = (unsigned long) dev;
 			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
 			priv->timerSetAt = jiffies;
@@ -1601,7 +1601,7 @@ drop_and_reuse:
 		TLan_DioWrite8( dev->base_addr,
 				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
 		if ( priv->timer.function == NULL ) {
-			priv->timer.function = &TLan_Timer;
+			priv->timer.function = TLan_Timer;
 			priv->timer.data = (unsigned long) dev;
 			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
 			priv->timerSetAt = jiffies;
@@ -1897,7 +1897,7 @@ static void TLan_Timer( unsigned long data )
 				TLan_DioWrite8( dev->base_addr,
 						TLAN_LED_REG, TLAN_LED_LINK );
 			} else {
-				priv->timer.function = &TLan_Timer;
+				priv->timer.function = TLan_Timer;
 				priv->timer.expires = priv->timerSetAt
 					+ TLAN_TIMER_ACT_DELAY;
 				spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 435ef7d5470f..08182fde3dcd 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1321,14 +1321,12 @@ static int tms380tr_reset_adapter(struct net_device *dev)
 
 			/* Clear CPHALT and start BUD */
 			SIFWRITEW(c, SIFACL);
-			if (fw_entry)
-				release_firmware(fw_entry);
+			release_firmware(fw_entry);
 			return (1);
 		}
 	} while(count == 0);
 
-	if (fw_entry)
-		release_firmware(fw_entry);
+	release_firmware(fw_entry);
 	printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
 	return (-1);
 }
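The dropped guards rely on release_firmware() accepting a NULL pointer, the same convention kfree() follows, so callers can release unconditionally:

	/* safe even when request_firmware() failed and fw_entry is NULL */
	release_firmware(fw_entry);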
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 0bc4f3030a80..a9f7d5d1a269 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -599,7 +599,7 @@ static int dmfe_open(struct DEVICE *dev)
 	init_timer(&db->timer);
 	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
 	db->timer.data = (unsigned long)dev;
-	db->timer.function = &dmfe_timer;
+	db->timer.function = dmfe_timer;
 	add_timer(&db->timer);
 
 	return 0;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 1faf7a4d7202..0013642903ee 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -180,21 +180,24 @@ int tulip_poll(struct napi_struct *napi, int budget)
 					dev_warn(&dev->dev,
 						 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
 						 status);
-					tp->stats.rx_length_errors++;
+					dev->stats.rx_length_errors++;
 				}
 			} else {
 				/* There was a fatal error. */
 				if (tulip_debug > 2)
 					printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
 					       dev->name, status);
-				tp->stats.rx_errors++; /* end of a packet.*/
+				dev->stats.rx_errors++; /* end of a packet.*/
 				if (pkt_len > 1518 ||
 				    (status & RxDescRunt))
-					tp->stats.rx_length_errors++;
+					dev->stats.rx_length_errors++;
 
-				if (status & 0x0004) tp->stats.rx_frame_errors++;
-				if (status & 0x0002) tp->stats.rx_crc_errors++;
-				if (status & 0x0001) tp->stats.rx_fifo_errors++;
+				if (status & 0x0004)
+					dev->stats.rx_frame_errors++;
+				if (status & 0x0002)
+					dev->stats.rx_crc_errors++;
+				if (status & 0x0001)
+					dev->stats.rx_fifo_errors++;
 			}
 		} else {
@@ -244,8 +247,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
 
 				netif_receive_skb(skb);
 
-				tp->stats.rx_packets++;
-				tp->stats.rx_bytes += pkt_len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += pkt_len;
 			}
 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
 			received++;
@@ -404,20 +407,23 @@ static int tulip_rx(struct net_device *dev)
 				dev_warn(&dev->dev,
 					 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
 					 status);
-				tp->stats.rx_length_errors++;
+				dev->stats.rx_length_errors++;
 			}
 		} else {
 			/* There was a fatal error. */
 			if (tulip_debug > 2)
 				printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
 				       dev->name, status);
-			tp->stats.rx_errors++; /* end of a packet.*/
+			dev->stats.rx_errors++; /* end of a packet.*/
 			if (pkt_len > 1518 ||
 			    (status & RxDescRunt))
-				tp->stats.rx_length_errors++;
-			if (status & 0x0004) tp->stats.rx_frame_errors++;
-			if (status & 0x0002) tp->stats.rx_crc_errors++;
-			if (status & 0x0001) tp->stats.rx_fifo_errors++;
+				dev->stats.rx_length_errors++;
+			if (status & 0x0004)
+				dev->stats.rx_frame_errors++;
+			if (status & 0x0002)
+				dev->stats.rx_crc_errors++;
+			if (status & 0x0001)
+				dev->stats.rx_fifo_errors++;
 		}
 	} else {
@@ -467,8 +473,8 @@ static int tulip_rx(struct net_device *dev)
 
 			netif_rx(skb);
 
-			tp->stats.rx_packets++;
-			tp->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 		received++;
 		entry = (++tp->cur_rx) % RX_RING_SIZE;
@@ -602,18 +608,22 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
 					printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
 					       dev->name, status);
 #endif
-				tp->stats.tx_errors++;
-				if (status & 0x4104) tp->stats.tx_aborted_errors++;
-				if (status & 0x0C00) tp->stats.tx_carrier_errors++;
-				if (status & 0x0200) tp->stats.tx_window_errors++;
-				if (status & 0x0002) tp->stats.tx_fifo_errors++;
+				dev->stats.tx_errors++;
+				if (status & 0x4104)
+					dev->stats.tx_aborted_errors++;
+				if (status & 0x0C00)
+					dev->stats.tx_carrier_errors++;
+				if (status & 0x0200)
+					dev->stats.tx_window_errors++;
+				if (status & 0x0002)
+					dev->stats.tx_fifo_errors++;
 				if ((status & 0x0080) && tp->full_duplex == 0)
-					tp->stats.tx_heartbeat_errors++;
+					dev->stats.tx_heartbeat_errors++;
 			} else {
-				tp->stats.tx_bytes +=
+				dev->stats.tx_bytes +=
 					tp->tx_buffers[entry].skb->len;
-				tp->stats.collisions += (status >> 3) & 15;
-				tp->stats.tx_packets++;
+				dev->stats.collisions += (status >> 3) & 15;
+				dev->stats.tx_packets++;
 			}
 
 			pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
@@ -655,7 +665,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
 		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
 			if (csr5 == 0xffffffff)
 				break;
-			if (csr5 & TxJabber) tp->stats.tx_errors++;
+			if (csr5 & TxJabber)
+				dev->stats.tx_errors++;
 			if (csr5 & TxFIFOUnderflow) {
 				if ((tp->csr6 & 0xC000) != 0xC000)
 					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
@@ -672,8 +683,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
 				}
 			}
 			if (csr5 & RxDied) {	/* Missed a Rx frame. */
-				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
-				tp->stats.rx_errors++;
+				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+				dev->stats.rx_errors++;
 				tulip_start_rxtx(tp);
 			}
 			/*
@@ -789,7 +800,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
 #endif /* CONFIG_TULIP_NAPI */
 
 	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
-		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
 	}
 
 	if (tulip_debug > 4)
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index e525875ed67d..ed66a16711dc 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -417,7 +417,6 @@ struct tulip_private {
 	int revision;
 	int flags;
 	struct napi_struct napi;
-	struct net_device_stats stats;
 	struct timer_list timer;	/* Media selection timer. */
 	struct timer_list oom_timer;	/* Out of memory timer. */
 	u32 mc_filter[2];
@@ -570,7 +569,7 @@ static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __io
 	/* Trigger an immediate transmit demand. */
 	iowrite32(0, ioaddr + CSR1);
 
-	tp->stats.tx_errors++;
+	tp->dev->stats.tx_errors++;
 }
 
 #endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3a8d7efa2acf..2c39f2591216 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -725,7 +725,7 @@ static void tulip_clean_tx_ring(struct tulip_private *tp)
 		int status = le32_to_cpu(tp->tx_ring[entry].status);
 
 		if (status < 0) {
-			tp->stats.tx_errors++;	/* It wasn't Txed */
+			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
 			tp->tx_ring[entry].status = 0;
 		}
 
@@ -781,8 +781,8 @@ static void tulip_down (struct net_device *dev)
 	/* release any unconsumed transmit buffers */
 	tulip_clean_tx_ring(tp);
 
-	if (ioread32 (ioaddr + CSR6) != 0xffffffff)
-		tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
+	if (ioread32(ioaddr + CSR6) != 0xffffffff)
+		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
 
 	spin_unlock_irqrestore (&tp->lock, flags);
 
@@ -864,12 +864,12 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
 
 		spin_lock_irqsave (&tp->lock, flags);
 
-		tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
 
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
 
-	return &tp->stats;
+	return &dev->stats;
 }
 
 
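Unlike tehuti above, tulip keeps its tulip_get_stats() hook because it must fold the hardware missed-frame counter (CSR8) into the totals on every read; only the storage moves from tp->stats to dev->stats. Schematically, with read_missed_counter() as a hypothetical stand-in for the CSR8 read:

	static struct net_device_stats *example_get_stats(struct net_device *dev)
	{
		dev->stats.rx_missed_errors += read_missed_counter(dev);
		return &dev->stats;
	}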
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 96de5829b940..1dc27a557275 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -480,7 +480,7 @@ static int uli526x_open(struct net_device *dev)
 	init_timer(&db->timer);
 	db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
 	db->timer.data = (unsigned long)dev;
-	db->timer.function = &uli526x_timer;
+	db->timer.function = uli526x_timer;
 	add_timer(&db->timer);
 
 	return 0;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 66d41cf8da29..f0b231035dee 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -662,7 +662,7 @@ static int netdev_open(struct net_device *dev)
 	init_timer(&np->timer);
 	np->timer.expires = jiffies + 1*HZ;
 	np->timer.data = (unsigned long)dev;
-	np->timer.function = &netdev_timer;	/* timer handler */
+	np->timer.function = netdev_timer;	/* timer handler */
 	add_timer(&np->timer);
 	return 0;
 out_err:
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index a439e93be22d..5a73752be2ca 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -29,7 +29,6 @@
 #include <linux/skbuff.h>
 #include <linux/delay.h>
 #include <linux/init.h>
-#include <linux/ethtool.h>
 #include <linux/bitops.h>
 
 #include <asm/uaccess.h>
@@ -181,19 +180,6 @@ static void print_binary(unsigned int number)
 }
 #endif
 
-static void netdev_get_drvinfo(struct net_device *dev,
-			       struct ethtool_drvinfo *info)
-{
-	struct xircom_private *private = netdev_priv(dev);
-
-	strcpy(info->driver, "xircom_cb");
-	strcpy(info->bus_info, pci_name(private->pdev));
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
-	.get_drvinfo = netdev_get_drvinfo,
-};
-
 static const struct net_device_ops netdev_ops = {
 	.ndo_open		= xircom_open,
 	.ndo_stop		= xircom_close,
@@ -279,7 +265,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
 	setup_descriptors(private);
 
 	dev->netdev_ops = &netdev_ops;
-	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 	pci_set_drvdata(pdev, dev);
 
 	if (register_netdev(dev)) {
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 2e50077ff450..5dfb39539b3e 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -962,36 +962,34 @@ typhoon_do_get_stats(struct typhoon *tp)
 	 * The extra status reported would be a good candidate for
 	 * ethtool_ops->get_{strings,stats}()
 	 */
-	stats->tx_packets = le32_to_cpu(s->txPackets);
-	stats->tx_bytes = le64_to_cpu(s->txBytes);
-	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
-	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
-	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
-	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
-	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
-	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
+	stats->tx_packets = le32_to_cpu(s->txPackets) +
+			saved->tx_packets;
+	stats->tx_bytes = le64_to_cpu(s->txBytes) +
+			saved->tx_bytes;
+	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
+			saved->tx_errors;
+	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
+			saved->tx_carrier_errors;
+	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
+			saved->collisions;
+	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
+			saved->rx_packets;
+	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
+			saved->rx_bytes;
+	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
+			saved->rx_fifo_errors;
 	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
-			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
-	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
-	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
+			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
+			saved->rx_errors;
+	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
+			saved->rx_crc_errors;
+	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
+			saved->rx_length_errors;
 	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
 			SPEED_100 : SPEED_10;
 	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
 			DUPLEX_FULL : DUPLEX_HALF;
 
-	/* add in the saved statistics
-	 */
-	stats->tx_packets += saved->tx_packets;
-	stats->tx_bytes += saved->tx_bytes;
-	stats->tx_errors += saved->tx_errors;
-	stats->collisions += saved->collisions;
-	stats->rx_packets += saved->rx_packets;
-	stats->rx_bytes += saved->rx_bytes;
-	stats->rx_fifo_errors += saved->rx_fifo_errors;
-	stats->rx_errors += saved->rx_errors;
-	stats->rx_crc_errors += saved->rx_crc_errors;
-	stats->rx_length_errors += saved->rx_length_errors;
-
 	return 0;
 }
 
@@ -1762,7 +1760,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
 		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
 			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
 		} else
-			new_skb->ip_summed = CHECKSUM_NONE;
+			skb_checksum_none_assert(new_skb);
 
 		spin_lock(&tp->state_lock);
 		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index d7b7018a1de1..52ffabe6db0e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -358,6 +358,14 @@ config USB_NET_ZAURUS
 	  really need this non-conformant variant of CDC Ethernet (or in
 	  some cases CDC MDLM) protocol, not "g_ether".
 
+config USB_NET_CX82310_ETH
+	tristate "Conexant CX82310 USB ethernet port"
+	depends on USB_USBNET
+	help
+	  Choose this option if you're using a Conexant CX82310-based ADSL
+	  router with USB ethernet port. This driver is for routers only,
+	  it will not work with ADSL modems (use cxacru driver instead).
+
 config USB_HSO
 	tristate "Option USB High Speed Mobile Devices"
 	depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b13a279663ba..a19b0259ae16 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_USB_NET_INT51X1)	+= int51x1.o
 obj-$(CONFIG_USB_CDC_PHONET)	+= cdc-phonet.o
 obj-$(CONFIG_USB_IPHETH)	+= ipheth.o
 obj-$(CONFIG_USB_SIERRA_NET)	+= sierra_net.o
+obj-$(CONFIG_USB_NET_CX82310_ETH)	+= cx82310_eth.o
 
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
new file mode 100644
index 000000000000..6fbe03276b27
--- /dev/null
+++ b/drivers/net/usb/cx82310_eth.c
@@ -0,0 +1,354 @@
+/*
+ * Driver for USB ethernet port of Conexant CX82310-based ADSL routers
+ * Copyright (C) 2010 by Ondrej Zary
+ * some parts inspired by the cxacru driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+
+enum cx82310_cmd {
+	CMD_START		= 0x84,	/* no effect? */
+	CMD_STOP		= 0x85,	/* no effect? */
+	CMD_GET_STATUS		= 0x90,	/* returns nothing? */
+	CMD_GET_MAC_ADDR	= 0x91,	/* read MAC address */
+	CMD_GET_LINK_STATUS	= 0x92,	/* not useful, link is always up */
+	CMD_ETHERNET_MODE	= 0x99,	/* unknown, needed during init */
+};
+
+enum cx82310_status {
+	STATUS_UNDEFINED,
+	STATUS_SUCCESS,
+	STATUS_ERROR,
+	STATUS_UNSUPPORTED,
+	STATUS_UNIMPLEMENTED,
+	STATUS_PARAMETER_ERROR,
+	STATUS_DBG_LOOPBACK,
+};
+
+#define CMD_PACKET_SIZE	64
+/* first command after power on can take around 8 seconds */
+#define CMD_TIMEOUT	15000
+#define CMD_REPLY_RETRY	5
+
+#define CX82310_MTU	1514
+#define CMD_EP	0x01
+
+/*
+ * execute control command
+ *  - optionally send some data (command parameters)
+ *  - optionally wait for the reply
+ *  - optionally read some data from the reply
+ */
+static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
+		       u8 *wdata, int wlen, u8 *rdata, int rlen)
+{
+	int actual_len, retries, ret;
+	struct usb_device *udev = dev->udev;
+	u8 *buf = kzalloc(CMD_PACKET_SIZE, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	/* create command packet */
+	buf[0] = cmd;
+	if (wdata)
+		memcpy(buf + 4, wdata, min_t(int, wlen, CMD_PACKET_SIZE - 4));
+
+	/* send command packet */
+	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
+			   CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
+	if (ret < 0) {
+		dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+			cmd, ret);
+		goto end;
+	}
+
+	if (reply) {
+		/* wait for reply, retry if it's empty */
+		for (retries = 0; retries < CMD_REPLY_RETRY; retries++) {
+			ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, CMD_EP),
+					   buf, CMD_PACKET_SIZE, &actual_len,
+					   CMD_TIMEOUT);
+			if (ret < 0) {
+				dev_err(&dev->udev->dev,
+					"reply receive error %d\n", ret);
+				goto end;
+			}
+			if (actual_len > 0)
+				break;
+		}
+		if (actual_len == 0) {
+			dev_err(&dev->udev->dev, "no reply to command %#x\n",
+				cmd);
+			ret = -EIO;
+			goto end;
+		}
+		if (buf[0] != cmd) {
+			dev_err(&dev->udev->dev,
+				"got reply to command %#x, expected: %#x\n",
+				buf[0], cmd);
+			ret = -EIO;
+			goto end;
+		}
+		if (buf[1] != STATUS_SUCCESS) {
+			dev_err(&dev->udev->dev, "command %#x failed: %#x\n",
+				cmd, buf[1]);
+			ret = -EIO;
+			goto end;
+		}
+		if (rdata)
+			memcpy(rdata, buf + 4,
+			       min_t(int, rlen, CMD_PACKET_SIZE - 4));
+	}
+end:
+	kfree(buf);
+	return ret;
+}
+
+#define partial_len	data[0]		/* length of partial packet data */
+#define partial_rem	data[1]		/* remaining (missing) data length */
+#define partial_data	data[2]		/* partial packet data */
+
+static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+	int ret;
+	char buf[15];
+	struct usb_device *udev = dev->udev;
+
+	/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
+	if (udev->descriptor.iProduct &&
+	    usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) &&
+	    strcmp(buf, "USB NET CARD")) {
+		dev_err(&udev->dev,
+			"probably an ADSL modem, use cxacru driver instead\n");
+		return -ENODEV;
+	}
+
+	ret = usbnet_get_endpoints(dev, intf);
+	if (ret)
+		return ret;
+
+	/*
+	 * this must not include ethernet header as the device can send partial
+	 * packets with no header (URB is at least 2 bytes long, so 2 is OK)
+	 */
+	dev->net->hard_header_len = 2;
+	/* we can send at most 1514 bytes of data (+ 2-byte header) per URB */
+	dev->hard_mtu = CX82310_MTU + dev->net->hard_header_len;
+	/* we can receive URBs up to 4KB from the device */
+	dev->rx_urb_size = 4096;
+
+	dev->partial_data = (unsigned long) kmalloc(dev->hard_mtu, GFP_KERNEL);
+	if (!dev->partial_data)
+		return -ENOMEM;
+
+	/* enable ethernet mode (?) */
+	ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
+	if (ret) {
+		dev_err(&udev->dev, "unable to enable ethernet mode: %d\n",
+			ret);
+		goto err;
+	}
+
+	/* get the MAC address */
+	ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
+			  dev->net->dev_addr, ETH_ALEN);
+	if (ret) {
+		dev_err(&udev->dev, "unable to read MAC address: %d\n", ret);
+		goto err;
+	}
+
+	/* start (does not seem to have any effect?) */
+	ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	kfree((void *)dev->partial_data);
+	return ret;
+}
+
+static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+	kfree((void *)dev->partial_data);
+}
+
+/*
+ * RX is NOT easy - we can receive multiple packets per skb, each having 2-byte
+ * packet length at the beginning.
+ * The last packet might be incomplete (when it crosses the 4KB URB size),
+ * continuing in the next skb (without any headers).
204 * If a packet has odd length, there is one extra byte at the end (before next
205 * packet or at the end of the URB).
206 */
207static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
208{
209 int len;
210 struct sk_buff *skb2;
211
212 /*
213 * If the last skb ended with an incomplete packet, this skb contains
214 * end of that packet at the beginning.
215 */
216 if (dev->partial_rem) {
217 len = dev->partial_len + dev->partial_rem;
218 skb2 = alloc_skb(len, GFP_ATOMIC);
219 if (!skb2)
220 return 0;
221 skb_put(skb2, len);
222 memcpy(skb2->data, (void *)dev->partial_data,
223 dev->partial_len);
224 memcpy(skb2->data + dev->partial_len, skb->data,
225 dev->partial_rem);
226 usbnet_skb_return(dev, skb2);
227 skb_pull(skb, (dev->partial_rem + 1) & ~1);
228 dev->partial_rem = 0;
229 if (skb->len < 2)
230 return 1;
231 }
232
233 if (skb->len < 2) {
234 dev_err(&dev->udev->dev, "RX frame too short: %d B\n",
235 skb->len);
236 return 0;
237 }
238
239 /* a skb can contain multiple packets */
240 while (skb->len > 1) {
241 /* first two bytes are packet length */
242 len = skb->data[0] | (skb->data[1] << 8);
243 skb_pull(skb, 2);
244
245	/* if last packet in the skb, let usbnet process it */
246 if (len == skb->len || len + 1 == skb->len) {
247 skb_trim(skb, len);
248 break;
249 }
250
251 if (len > CX82310_MTU) {
252 dev_err(&dev->udev->dev, "RX packet too long: %d B\n",
253 len);
254 return 0;
255 }
256
257 /* incomplete packet, save it for the next skb */
258 if (len > skb->len) {
259 dev->partial_len = skb->len;
260 dev->partial_rem = len - skb->len;
261 memcpy((void *)dev->partial_data, skb->data,
262 dev->partial_len);
263 skb_pull(skb, skb->len);
264 break;
265 }
266
267 skb2 = alloc_skb(len, GFP_ATOMIC);
268 if (!skb2)
269 return 0;
270 skb_put(skb2, len);
271 memcpy(skb2->data, skb->data, len);
272 /* process the packet */
273 usbnet_skb_return(dev, skb2);
274
275 skb_pull(skb, (len + 1) & ~1);
276 }
277
278 /* let usbnet process the last packet */
279 return 1;
280}
281
282/* TX is easy: just add 2 bytes of length at the beginning */
283static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
284 gfp_t flags)
285{
286 int len = skb->len;
287
288 if (skb_headroom(skb) < 2) {
289 struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
290 dev_kfree_skb_any(skb);
291 skb = skb2;
292 if (!skb)
293 return NULL;
294 }
295 skb_push(skb, 2);
296
297 skb->data[0] = len;
298 skb->data[1] = len >> 8;
299
300 return skb;
301}
302
303
304static const struct driver_info cx82310_info = {
305 .description = "Conexant CX82310 USB ethernet",
306 .flags = FLAG_ETHER,
307 .bind = cx82310_bind,
308 .unbind = cx82310_unbind,
309 .rx_fixup = cx82310_rx_fixup,
310 .tx_fixup = cx82310_tx_fixup,
311};
312
313#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
314 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
315 USB_DEVICE_ID_MATCH_DEV_INFO, \
316 .idVendor = (vend), \
317 .idProduct = (prod), \
318 .bDeviceClass = (cl), \
319 .bDeviceSubClass = (sc), \
320 .bDeviceProtocol = (pr)
321
322static const struct usb_device_id products[] = {
323 {
324 USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
325 .driver_info = (unsigned long) &cx82310_info
326 },
327 { },
328};
329MODULE_DEVICE_TABLE(usb, products);
330
331static struct usb_driver cx82310_driver = {
332 .name = "cx82310_eth",
333 .id_table = products,
334 .probe = usbnet_probe,
335 .disconnect = usbnet_disconnect,
336 .suspend = usbnet_suspend,
337 .resume = usbnet_resume,
338};
339
340static int __init cx82310_init(void)
341{
342 return usb_register(&cx82310_driver);
343}
344module_init(cx82310_init);
345
346static void __exit cx82310_exit(void)
347{
348 usb_deregister(&cx82310_driver);
349}
350module_exit(cx82310_exit);
351
352MODULE_AUTHOR("Ondrej Zary");
353MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver");
354MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6efca66b8766..4f123f869bdc 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -843,16 +843,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
843 return NETDEV_TX_OK; 843 return NETDEV_TX_OK;
844} 844}
845 845
846static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
847{
848 struct hso_net *odev = netdev_priv(net);
849
850 strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
851 usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info);
852}
853
854static const struct ethtool_ops ops = { 846static const struct ethtool_ops ops = {
855 .get_drvinfo = hso_get_drvinfo,
856 .get_link = ethtool_op_get_link 847 .get_link = ethtool_op_get_link
857}; 848};
858 849
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 2b7b39cad1ce..5e98643a4a21 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -759,14 +759,6 @@ static int kaweth_close(struct net_device *net)
759 return 0; 759 return 0;
760} 760}
761 761
762static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
763{
764 struct kaweth_device *kaweth = netdev_priv(dev);
765
766 strlcpy(info->driver, driver_name, sizeof(info->driver));
767 usb_make_path(kaweth->dev, info->bus_info, sizeof (info->bus_info));
768}
769
770static u32 kaweth_get_link(struct net_device *dev) 762static u32 kaweth_get_link(struct net_device *dev)
771{ 763{
772 struct kaweth_device *kaweth = netdev_priv(dev); 764 struct kaweth_device *kaweth = netdev_priv(dev);
@@ -775,7 +767,6 @@ static u32 kaweth_get_link(struct net_device *dev)
775} 767}
776 768
777static const struct ethtool_ops ops = { 769static const struct ethtool_ops ops = {
778 .get_drvinfo = kaweth_get_drvinfo,
779 .get_link = kaweth_get_link 770 .get_link = kaweth_get_link
780}; 771};
781 772
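
This hunk and the hso one above (plus virtio_net further down) delete ethtool get_drvinfo callbacks that only restated the driver name and bus path -- information that can presumably be derived generically, which is what makes the per-driver copies redundant. As an aside, the removed hso version bounded info->driver with ETHTOOL_BUSINFO_LEN, a copy-paste mismatch that is only harmless because the two fields happen to be the same size. A hedged sketch of the boilerplate pattern being dropped, with a made-up driver name:

/* Hedged sketch of the removed pattern; "example" is a made-up
 * driver name, not any of the drivers in this series. */
static void example_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net->dev.parent),
		sizeof(info->bus_info));
}
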
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f53412368ce1..6884813b809c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1954,7 +1954,7 @@ static int velocity_tx_srv(struct velocity_info *vptr)
1954 */ 1954 */
1955static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) 1955static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1956{ 1956{
1957 skb->ip_summed = CHECKSUM_NONE; 1957 skb_checksum_none_assert(skb);
1958 1958
1959 if (rd->rdesc1.CSM & CSM_IPKT) { 1959 if (rd->rdesc1.CSM & CSM_IPKT) {
1960 if (rd->rdesc1.CSM & CSM_IPOK) { 1960 if (rd->rdesc1.CSM & CSM_IPOK) {
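
skb_checksum_none_assert() here (and in vmxnet3 and vxge below) is a drop-in replacement for the open-coded skb->ip_summed = CHECKSUM_NONE stores on the RX path: a freshly received skb already carries CHECKSUM_NONE, so the store is redundant and the helper can reduce it to a debug-time sanity check. A sketch of what the helper amounts to -- the authoritative definition lives in include/linux/skbuff.h:

/* Illustrative only; the real helper is defined in
 * include/linux/skbuff.h. The idea: the store was a no-op on a
 * fresh RX skb, so keep only a debug-time assertion. */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}
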
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4598e9d2608f..bb6b67f6b0cc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -705,19 +705,6 @@ static int virtnet_close(struct net_device *dev)
705 return 0; 705 return 0;
706} 706}
707 707
708static void virtnet_get_drvinfo(struct net_device *dev,
709 struct ethtool_drvinfo *drvinfo)
710{
711 struct virtnet_info *vi = netdev_priv(dev);
712 struct virtio_device *vdev = vi->vdev;
713
714 strncpy(drvinfo->driver, KBUILD_MODNAME, ARRAY_SIZE(drvinfo->driver));
715 strncpy(drvinfo->version, "N/A", ARRAY_SIZE(drvinfo->version));
716 strncpy(drvinfo->fw_version, "N/A", ARRAY_SIZE(drvinfo->fw_version));
717 strncpy(drvinfo->bus_info, dev_name(&vdev->dev),
718 ARRAY_SIZE(drvinfo->bus_info));
719}
720
721static int virtnet_set_tx_csum(struct net_device *dev, u32 data) 708static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
722{ 709{
723 struct virtnet_info *vi = netdev_priv(dev); 710 struct virtnet_info *vi = netdev_priv(dev);
@@ -830,7 +817,6 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
830} 817}
831 818
832static const struct ethtool_ops virtnet_ethtool_ops = { 819static const struct ethtool_ops virtnet_ethtool_ops = {
833 .get_drvinfo = virtnet_get_drvinfo,
834 .set_tx_csum = virtnet_set_tx_csum, 820 .set_tx_csum = virtnet_set_tx_csum,
835 .set_sg = ethtool_op_set_sg, 821 .set_sg = ethtool_op_set_sg,
836 .set_tso = ethtool_op_set_tso, 822 .set_tso = ethtool_op_set_tso,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index abe0ff53daf3..198ce92af0c3 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1042,11 +1042,11 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1042 skb->csum = htons(gdesc->rcd.csum); 1042 skb->csum = htons(gdesc->rcd.csum);
1043 skb->ip_summed = CHECKSUM_PARTIAL; 1043 skb->ip_summed = CHECKSUM_PARTIAL;
1044 } else { 1044 } else {
1045 skb->ip_summed = CHECKSUM_NONE; 1045 skb_checksum_none_assert(skb);
1046 } 1046 }
1047 } 1047 }
1048 } else { 1048 } else {
1049 skb->ip_summed = CHECKSUM_NONE; 1049 skb_checksum_none_assert(skb);
1050 } 1050 }
1051} 1051}
1052 1052
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c7c5605b3728..5378b849f54f 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -501,7 +501,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
501 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) 501 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
502 skb->ip_summed = CHECKSUM_UNNECESSARY; 502 skb->ip_summed = CHECKSUM_UNNECESSARY;
503 else 503 else
504 skb->ip_summed = CHECKSUM_NONE; 504 skb_checksum_none_assert(skb);
505 505
506 vxge_rx_complete(ring, skb, ext_info.vlan, 506 vxge_rx_complete(ring, skb, ext_info.vlan,
507 pkt_length, &ext_info); 507 pkt_length, &ext_info);
@@ -2159,8 +2159,8 @@ start:
2159 /* Alarm MSIX Vectors count */ 2159 /* Alarm MSIX Vectors count */
2160 vdev->intr_cnt++; 2160 vdev->intr_cnt++;
2161 2161
2162 vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry), 2162 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2163 GFP_KERNEL); 2163 GFP_KERNEL);
2164 if (!vdev->entries) { 2164 if (!vdev->entries) {
2165 vxge_debug_init(VXGE_ERR, 2165 vxge_debug_init(VXGE_ERR,
2166 "%s: memory allocation failed", 2166 "%s: memory allocation failed",
@@ -2169,9 +2169,9 @@ start:
2169 goto alloc_entries_failed; 2169 goto alloc_entries_failed;
2170 } 2170 }
2171 2171
2172 vdev->vxge_entries = 2172 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2173 kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry), 2173 sizeof(struct vxge_msix_entry),
2174 GFP_KERNEL); 2174 GFP_KERNEL);
2175 if (!vdev->vxge_entries) { 2175 if (!vdev->vxge_entries) {
2176 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", 2176 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2177 VXGE_DRIVER_NAME); 2177 VXGE_DRIVER_NAME);
@@ -2914,26 +2914,18 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
2914} 2914}
2915 2915
2916/** 2916/**
2917 * vxge_get_stats 2917 * vxge_get_stats64
2918 * @dev: pointer to the device structure 2918 * @dev: pointer to the device structure
2919 * @stats: pointer to struct rtnl_link_stats64
2919 * 2920 *
2920 * Updates the device statistics structure. This function updates the device
2921 * statistics structure in the net_device structure and returns a pointer
2922 * to the same.
2923 */ 2921 */
2924static struct net_device_stats * 2922static struct rtnl_link_stats64 *
2925vxge_get_stats(struct net_device *dev) 2923vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2926{ 2924{
2927 struct vxgedev *vdev; 2925 struct vxgedev *vdev = netdev_priv(dev);
2928 struct net_device_stats *net_stats;
2929 int k; 2926 int k;
2930 2927
2931 vdev = netdev_priv(dev); 2928 /* net_stats already zeroed by caller */
2932
2933 net_stats = &vdev->stats.net_stats;
2934
2935 memset(net_stats, 0, sizeof(struct net_device_stats));
2936
2937 for (k = 0; k < vdev->no_of_vpath; k++) { 2929 for (k = 0; k < vdev->no_of_vpath; k++) {
2938 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms; 2930 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
2939 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; 2931 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
@@ -3102,7 +3094,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3102static const struct net_device_ops vxge_netdev_ops = { 3094static const struct net_device_ops vxge_netdev_ops = {
3103 .ndo_open = vxge_open, 3095 .ndo_open = vxge_open,
3104 .ndo_stop = vxge_close, 3096 .ndo_stop = vxge_close,
3105 .ndo_get_stats = vxge_get_stats, 3097 .ndo_get_stats64 = vxge_get_stats64,
3106 .ndo_start_xmit = vxge_xmit, 3098 .ndo_start_xmit = vxge_xmit,
3107 .ndo_validate_addr = eth_validate_addr, 3099 .ndo_validate_addr = eth_validate_addr,
3108 .ndo_set_multicast_list = vxge_set_multicast, 3100 .ndo_set_multicast_list = vxge_set_multicast,
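
The stats conversion above shows the ndo_get_stats64 contract: the core passes in an rtnl_link_stats64 it has already zeroed, the driver only accumulates into it and returns the same pointer, and the cached net_device_stats copy (removed from vxge_sw_stats just below) becomes unnecessary, as do the 32-bit counter wraparounds. A stripped-down sketch with a hypothetical private struct:

/* Hypothetical per-queue counters, for illustration only. */
struct example_queue_stats {
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
};

struct example_priv {
	int nqueues;
	struct example_queue_stats queues[8];
};

static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(dev);
	int i;

	/* stats arrives zeroed from the core; accumulate only */
	for (i = 0; i < priv->nqueues; i++) {
		stats->rx_packets += priv->queues[i].rx_packets;
		stats->rx_bytes += priv->queues[i].rx_bytes;
		stats->tx_packets += priv->queues[i].tx_packets;
		stats->tx_bytes += priv->queues[i].tx_bytes;
	}
	return stats;
}
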
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 2e3b064b8e4b..d4be07eaacd7 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -172,7 +172,6 @@ struct vxge_msix_entry {
172 172
173struct vxge_sw_stats { 173struct vxge_sw_stats {
174 /* Network Stats (interface stats) */ 174 /* Network Stats (interface stats) */
175 struct net_device_stats net_stats;
176 175
177 /* Tx */ 176 /* Tx */
178 u64 tx_frms; 177 u64 tx_frms;
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 0bd898c94759..4ac85a09c5a6 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -264,7 +264,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
264 new_line.clock_type != CLOCK_TXFROMRX && 264 new_line.clock_type != CLOCK_TXFROMRX &&
265 new_line.clock_type != CLOCK_INT && 265 new_line.clock_type != CLOCK_INT &&
266 new_line.clock_type != CLOCK_TXINT) 266 new_line.clock_type != CLOCK_TXINT)
267 return -EINVAL; /* No such clock setting */ 267 return -EINVAL; /* No such clock setting */
268 268
269 if (new_line.loopback != 0 && new_line.loopback != 1) 269 if (new_line.loopback != 0 && new_line.loopback != 1)
270 return -EINVAL; 270 return -EINVAL;
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index a5ddc6c8963e..164c3624ba89 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -73,7 +73,7 @@ static int reset_cyc2x(void __iomem *addr);
73static int detect_cyc2x(void __iomem *addr); 73static int detect_cyc2x(void __iomem *addr);
74 74
75/* Miscellaneous functions */ 75/* Miscellaneous functions */
76static int get_option_index(long *optlist, long optval); 76static int get_option_index(const long *optlist, long optval);
77static u16 checksum(u8 *buf, u32 len); 77static u16 checksum(u8 *buf, u32 len);
78 78
79#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET) 79#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
@@ -81,23 +81,23 @@ static u16 checksum(u8 *buf, u32 len);
81/* Global Data */ 81/* Global Data */
82 82
83/* private data */ 83/* private data */
84static char modname[] = "cycx_drv"; 84static const char modname[] = "cycx_drv";
85static char fullname[] = "Cyclom 2X Support Module"; 85static const char fullname[] = "Cyclom 2X Support Module";
86static char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo " 86static const char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
87 "<acme@conectiva.com.br>"; 87 "<acme@conectiva.com.br>";
88 88
89/* Hardware configuration options. 89/* Hardware configuration options.
90 * These are arrays of configuration options used by verification routines. 90 * These are arrays of configuration options used by verification routines.
91 * The first element of each array is its size (i.e. number of options). 91 * The first element of each array is its size (i.e. number of options).
92 */ 92 */
93static long cyc2x_dpmbase_options[] = { 93static const long cyc2x_dpmbase_options[] = {
94 20, 94 20,
95 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000, 95 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
96 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000, 96 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
97 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000 97 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
98}; 98};
99 99
100static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 }; 100static const long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
101 101
102/* Kernel Loadable Module Entry Points */ 102/* Kernel Loadable Module Entry Points */
103/* Module 'insert' entry point. 103/* Module 'insert' entry point.
@@ -529,7 +529,7 @@ static int detect_cyc2x(void __iomem *addr)
529/* Miscellaneous */ 529/* Miscellaneous */
530/* Get option's index into the options list. 530/* Get option's index into the options list.
531 * Return option's index (1 .. N) or zero if option is invalid. */ 531 * Return option's index (1 .. N) or zero if option is invalid. */
532static int get_option_index(long *optlist, long optval) 532static int get_option_index(const long *optlist, long optval)
533{ 533{
534 int i = 1; 534 int i = 1;
535 535
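
The cycx hunks here, like the pc300 and z85230 ones further down, are plain const-ification: static strings and option tables that are never written can live in .rodata, and the lookup helpers take const pointers so the qualifier propagates. A self-contained illustration mirroring get_option_index(), whose option arrays store their element count at index 0:

#include <stdio.h>

/* Read-only tables belong in .rodata; const also propagates to
 * every user through the accessor's prototype. */
static const char modname[] = "const_demo";
static const long irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };

/* Same contract as cycx's get_option_index(): element 0 is the
 * option count; return the 1-based index or 0 if not found. */
static int get_option_index(const long *optlist, long optval)
{
	int i;

	for (i = 1; i <= optlist[0]; i++)
		if (optlist[i] == optval)
			return i;
	return 0;
}

int main(void)
{
	printf("%s: IRQ 10 -> index %d\n", modname,
	       get_option_index(irq_options, 10));
	return 0;
}
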
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index a0e8611ad8e8..859dba9b972e 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -81,9 +81,9 @@ static irqreturn_t cycx_isr(int irq, void *dev_id);
81 */ 81 */
82 82
83/* private data */ 83/* private data */
84static char cycx_drvname[] = "cyclomx"; 84static const char cycx_drvname[] = "cyclomx";
85static char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver"; 85static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
86static char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo " 86static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
87 "<acme@conectiva.com.br>"; 87 "<acme@conectiva.com.br>";
88static int cycx_ncards = CONFIG_CYCX_CARDS; 88static int cycx_ncards = CONFIG_CYCX_CARDS;
89static struct cycx_device *cycx_card_array; /* adapter data space */ 89static struct cycx_device *cycx_card_array; /* adapter data space */
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 4d4dc38c7290..7f5bb913c8b9 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -46,7 +46,7 @@
46 46
47#include <net/x25device.h> 47#include <net/x25device.h>
48 48
49static char bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 49static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
50 50
51/* If this number is made larger, check that the temporary string buffer 51/* If this number is made larger, check that the temporary string buffer
52 * in lapbeth_new_device is large enough to store the probe device name.*/ 52 * in lapbeth_new_device is large enough to store the probe device name.*/
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index e2c6f7f4f51c..43af85b8e45e 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1105,7 +1105,7 @@ static int lmc_open(struct net_device *dev)
1105 init_timer (&sc->timer); 1105 init_timer (&sc->timer);
1106 sc->timer.expires = jiffies + HZ; 1106 sc->timer.expires = jiffies + HZ;
1107 sc->timer.data = (unsigned long) dev; 1107 sc->timer.data = (unsigned long) dev;
1108 sc->timer.function = &lmc_watchdog; 1108 sc->timer.function = lmc_watchdog;
1109 add_timer (&sc->timer); 1109 add_timer (&sc->timer);
1110 1110
1111 lmc_trace(dev, "lmc_open out"); 1111 lmc_trace(dev, "lmc_open out");
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5394b51bdb2f..7a3720f09ce3 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -282,7 +282,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
282 new_line.clock_type != CLOCK_TXFROMRX && 282 new_line.clock_type != CLOCK_TXFROMRX &&
283 new_line.clock_type != CLOCK_INT && 283 new_line.clock_type != CLOCK_INT &&
284 new_line.clock_type != CLOCK_TXINT) 284 new_line.clock_type != CLOCK_TXINT)
285 return -EINVAL; /* No such clock setting */ 285 return -EINVAL; /* No such clock setting */
286 286
287 if (new_line.loopback != 0 && new_line.loopback != 1) 287 if (new_line.loopback != 0 && new_line.loopback != 1)
288 return -EINVAL; 288 return -EINVAL;
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index c6aa66e5b52f..fbf1175a07f1 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -1,5 +1,5 @@
1#define USE_PCI_CLOCK 1#define USE_PCI_CLOCK
2static char rcsid[] = 2static const char rcsid[] =
3"Revision: 3.4.5 Date: 2002/03/07 "; 3"Revision: 3.4.5 Date: 2002/03/07 ";
4 4
5/* 5/*
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index e2cff64a446a..fd7375955e41 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -220,7 +220,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
220 new_line.clock_type != CLOCK_TXFROMRX && 220 new_line.clock_type != CLOCK_TXFROMRX &&
221 new_line.clock_type != CLOCK_INT && 221 new_line.clock_type != CLOCK_INT &&
222 new_line.clock_type != CLOCK_TXINT) 222 new_line.clock_type != CLOCK_TXINT)
223 return -EINVAL; /* No such clock setting */ 223 return -EINVAL; /* No such clock setting */
224 224
225 if (new_line.loopback != 0 && new_line.loopback != 1) 225 if (new_line.loopback != 0 && new_line.loopback != 1)
226 return -EINVAL; 226 return -EINVAL;
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index fbf5e843d48c..93956861ea21 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -766,7 +766,7 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)
766 766
767EXPORT_SYMBOL(z8530_interrupt); 767EXPORT_SYMBOL(z8530_interrupt);
768 768
769static char reg_init[16]= 769static const u8 reg_init[16]=
770{ 770{
771 0,0,0,0, 771 0,0,0,0,
772 0,0,0,0, 772 0,0,0,0,
@@ -1206,7 +1206,7 @@ EXPORT_SYMBOL(z8530_sync_txdma_close);
1206 * it exists... 1206 * it exists...
1207 */ 1207 */
1208 1208
1209static char *z8530_type_name[]={ 1209static const char *z8530_type_name[]={
1210 "Z8530", 1210 "Z8530",
1211 "Z85C30", 1211 "Z85C30",
1212 "Z85230" 1212 "Z85230"
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index eb72c67699ab..f1549fff0edc 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -342,10 +342,10 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
342 printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n", 342 printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
343 model_name, dev->irq, dev->mem_start, dev->mem_end-1); 343 model_name, dev->irq, dev->mem_start, dev->mem_end-1);
344 344
345 ei_status.reset_8390 = &wd_reset_8390; 345 ei_status.reset_8390 = wd_reset_8390;
346 ei_status.block_input = &wd_block_input; 346 ei_status.block_input = wd_block_input;
347 ei_status.block_output = &wd_block_output; 347 ei_status.block_output = wd_block_output;
348 ei_status.get_8390_hdr = &wd_get_8390_hdr; 348 ei_status.get_8390_hdr = wd_get_8390_hdr;
349 349
350 dev->netdev_ops = &wd_netdev_ops; 350 dev->netdev_ops = &wd_netdev_ops;
351 NS8390_init(dev, 0); 351 NS8390_init(dev, 0);
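
This hunk, like the lmc timer assignment above, only drops a redundant '&': a C function designator in this position already decays to a pointer, so f and &f yield the identical value. A trivial stand-alone demonstration:

#include <stdio.h>

static void handler(void)
{
	printf("called\n");
}

int main(void)
{
	/* A function designator decays to a pointer by itself, so
	 * both initializers below store the same address. */
	void (*with_amp)(void) = &handler;
	void (*without)(void) = handler;

	with_amp();
	without();
	return with_amp == without ? 0 : 1;
}
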
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 1d05445d4ba3..7d26506957d7 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2723,9 +2723,8 @@ static int airo_networks_allocate(struct airo_info *ai)
2723 if (ai->networks) 2723 if (ai->networks)
2724 return 0; 2724 return 0;
2725 2725
2726 ai->networks = 2726 ai->networks = kcalloc(AIRO_MAX_NETWORK_COUNT, sizeof(BSSListElement),
2727 kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement), 2727 GFP_KERNEL);
2728 GFP_KERNEL);
2729 if (!ai->networks) { 2728 if (!ai->networks) {
2730 airo_print_warn("", "Out of memory allocating beacons"); 2729 airo_print_warn("", "Out of memory allocating beacons");
2731 return -ENOMEM; 2730 return -ENOMEM;
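
This allocation, like the two vxge ones above, moves from kzalloc(n * size) to kcalloc(n, size). The difference is not cosmetic: kcalloc rejects multiplications that would overflow, so an oversized count fails cleanly instead of silently producing an undersized zeroed buffer. Roughly what the inline does -- a sketch, with the real definition in include/linux/slab.h:

/* Sketch only; the real inline is in include/linux/slab.h. */
static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > ULONG_MAX / size)
		return NULL;		/* n * size would overflow */
	return kzalloc(n * size, flags);
}
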
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 1128fa8c9ed5..91c5f73b5ba3 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2061,11 +2061,12 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2061 2061
2062 int i; 2062 int i;
2063 2063
2064 at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d " 2064 at76_dbg(DBG_MAC80211, "%s(): cmd %d key->cipher %d key->keyidx %d "
2065 "key->keylen %d", 2065 "key->keylen %d",
2066 __func__, cmd, key->alg, key->keyidx, key->keylen); 2066 __func__, cmd, key->cipher, key->keyidx, key->keylen);
2067 2067
2068 if (key->alg != ALG_WEP) 2068 if ((key->cipher != WLAN_CIPHER_SUITE_WEP40) &&
2069 (key->cipher != WLAN_CIPHER_SUITE_WEP104))
2069 return -EOPNOTSUPP; 2070 return -EOPNOTSUPP;
2070 2071
2071 key->hw_key_idx = key->keyidx; 2072 key->hw_key_idx = key->keyidx;
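
This and the wireless hunks that follow all track a single mac80211 API change: ieee80211_key_conf no longer carries an enum alg (ALG_WEP/ALG_TKIP/ALG_CCMP) but a 32-bit cipher field holding IEEE cipher-suite selectors, which also distinguishes WEP-40 from WEP-104 directly instead of via key->keylen. Driver dispatch after the conversion takes this shape -- a sketch mirroring the ar9170 hunk below:

/* Sketch of post-conversion dispatch; the AR9170_* values and the
 * RC4/AES split mirror the ar9170 hunk below. */
static int example_cipher_to_keytype(const struct ieee80211_key_conf *key)
{
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_TKIP:
		return AR9170_TX_MAC_ENCR_RC4;
	case WLAN_CIPHER_SUITE_CCMP:
		return AR9170_TX_MAC_ENCR_AES;
	default:
		return -EOPNOTSUPP;
	}
}
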
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index debfb0fbc7c5..32bf79e6a320 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1190,14 +1190,13 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1190 if (info->control.hw_key) { 1190 if (info->control.hw_key) {
1191 icv = info->control.hw_key->icv_len; 1191 icv = info->control.hw_key->icv_len;
1192 1192
1193 switch (info->control.hw_key->alg) { 1193 switch (info->control.hw_key->cipher) {
1194 case ALG_WEP: 1194 case WLAN_CIPHER_SUITE_WEP40:
1195 case WLAN_CIPHER_SUITE_WEP104:
1196 case WLAN_CIPHER_SUITE_TKIP:
1195 keytype = AR9170_TX_MAC_ENCR_RC4; 1197 keytype = AR9170_TX_MAC_ENCR_RC4;
1196 break; 1198 break;
1197 case ALG_TKIP: 1199 case WLAN_CIPHER_SUITE_CCMP:
1198 keytype = AR9170_TX_MAC_ENCR_RC4;
1199 break;
1200 case ALG_CCMP:
1201 keytype = AR9170_TX_MAC_ENCR_AES; 1200 keytype = AR9170_TX_MAC_ENCR_AES;
1202 break; 1201 break;
1203 default: 1202 default:
@@ -1778,17 +1777,17 @@ static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1778 if ((!ar->vif) || (ar->disable_offload)) 1777 if ((!ar->vif) || (ar->disable_offload))
1779 return -EOPNOTSUPP; 1778 return -EOPNOTSUPP;
1780 1779
1781 switch (key->alg) { 1780 switch (key->cipher) {
1782 case ALG_WEP: 1781 case WLAN_CIPHER_SUITE_WEP40:
1783 if (key->keylen == WLAN_KEY_LEN_WEP40) 1782 ktype = AR9170_ENC_ALG_WEP64;
1784 ktype = AR9170_ENC_ALG_WEP64; 1783 break;
1785 else 1784 case WLAN_CIPHER_SUITE_WEP104:
1786 ktype = AR9170_ENC_ALG_WEP128; 1785 ktype = AR9170_ENC_ALG_WEP128;
1787 break; 1786 break;
1788 case ALG_TKIP: 1787 case WLAN_CIPHER_SUITE_TKIP:
1789 ktype = AR9170_ENC_ALG_TKIP; 1788 ktype = AR9170_ENC_ALG_TKIP;
1790 break; 1789 break;
1791 case ALG_CCMP: 1790 case WLAN_CIPHER_SUITE_CCMP:
1792 ktype = AR9170_ENC_ALG_AESCCMP; 1791 ktype = AR9170_ENC_ALG_AESCCMP;
1793 break; 1792 break;
1794 default: 1793 default:
@@ -1827,7 +1826,7 @@ static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1827 if (err) 1826 if (err)
1828 goto out; 1827 goto out;
1829 1828
1830 if (key->alg == ALG_TKIP) { 1829 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1831 err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, 1830 err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
1832 ktype, 1, key->key + 16, 16); 1831 ktype, 1, key->key + 16, 16);
1833 if (err) 1832 if (err)
@@ -1864,7 +1863,7 @@ static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1864 if (err) 1863 if (err)
1865 goto out; 1864 goto out;
1866 1865
1867 if (key->alg == ALG_TKIP) { 1866 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1868 err = ar9170_upload_key(ar, key->hw_key_idx, 1867 err = ar9170_upload_key(ar, key->hw_key_idx,
1869 NULL, 1868 NULL,
1870 AR9170_ENC_ALG_NONE, 1, 1869 AR9170_ENC_ALG_NONE, 1,
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index d32f2828b098..a706202fa67c 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -119,6 +119,7 @@ struct ath_common {
119 119
120 u32 keymax; 120 u32 keymax;
121 DECLARE_BITMAP(keymap, ATH_KEYMAX); 121 DECLARE_BITMAP(keymap, ATH_KEYMAX);
122 DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
122 u8 splitmic; 123 u8 splitmic;
123 124
124 struct ath_regulatory regulatory; 125 struct ath_regulatory regulatory;
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 26dbe65fedb0..e4a5f046bba4 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -552,9 +552,9 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
552 if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO) 552 if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
553 return; 553 return;
554 554
555 /* if one of the errors triggered, we can get a superfluous second 555 /* If one of the errors triggered, we can get a superfluous second
556 * interrupt, even though we have already reset the register. the 556 * interrupt, even though we have already reset the register. The
557 * function detects that so we can return early */ 557 * function detects that so we can return early. */
558 if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0) 558 if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
559 return; 559 return;
560 560
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ea6362a8988d..f399c4dd8e69 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -175,7 +175,7 @@
175#define AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF 0 175#define AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF 0
176#define AR5K_TUNE_RADAR_ALERT false 176#define AR5K_TUNE_RADAR_ALERT false
177#define AR5K_TUNE_MIN_TX_FIFO_THRES 1 177#define AR5K_TUNE_MIN_TX_FIFO_THRES 1
178#define AR5K_TUNE_MAX_TX_FIFO_THRES ((IEEE80211_MAX_LEN / 64) + 1) 178#define AR5K_TUNE_MAX_TX_FIFO_THRES ((IEEE80211_MAX_FRAME_LEN / 64) + 1)
179#define AR5K_TUNE_REGISTER_TIMEOUT 20000 179#define AR5K_TUNE_REGISTER_TIMEOUT 20000
180/* Register for RSSI threshold has a mask of 0xff, so 255 seems to 180/* Register for RSSI threshold has a mask of 0xff, so 255 seems to
181 * be the max value. */ 181 * be the max value. */
@@ -343,9 +343,6 @@ struct ath5k_srev_name {
343#define AR5K_SREV_PHY_5413 0x61 343#define AR5K_SREV_PHY_5413 0x61
344#define AR5K_SREV_PHY_2425 0x70 344#define AR5K_SREV_PHY_2425 0x70
345 345
346/* IEEE defs */
347#define IEEE80211_MAX_LEN 2500
348
349/* TODO add support to mac80211 for vendor-specific rates and modes */ 346/* TODO add support to mac80211 for vendor-specific rates and modes */
350 347
351/* 348/*
@@ -1190,7 +1187,7 @@ extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
1190void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class); 1187void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
1191/* BSSID Functions */ 1188/* BSSID Functions */
1192int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1189int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1193void ath5k_hw_set_associd(struct ath5k_hw *ah); 1190void ath5k_hw_set_bssid(struct ath5k_hw *ah);
1194void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1191void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1195/* Receive start/stop functions */ 1192/* Receive start/stop functions */
1196void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); 1193void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index b32e28caeee2..aabad4f13e2a 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -139,12 +139,12 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
139 else 139 else
140 ah->ah_version = AR5K_AR5212; 140 ah->ah_version = AR5K_AR5212;
141 141
142 /*Fill the ath5k_hw struct with the needed functions*/ 142 /* Fill the ath5k_hw struct with the needed functions */
143 ret = ath5k_hw_init_desc_functions(ah); 143 ret = ath5k_hw_init_desc_functions(ah);
144 if (ret) 144 if (ret)
145 goto err_free; 145 goto err_free;
146 146
147 /* Bring device out of sleep and reset it's units */ 147 /* Bring device out of sleep and reset its units */
148 ret = ath5k_hw_nic_wakeup(ah, 0, true); 148 ret = ath5k_hw_nic_wakeup(ah, 0, true);
149 if (ret) 149 if (ret)
150 goto err_free; 150 goto err_free;
@@ -158,7 +158,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
158 CHANNEL_5GHZ); 158 CHANNEL_5GHZ);
159 ah->ah_phy = AR5K_PHY(0); 159 ah->ah_phy = AR5K_PHY(0);
160 160
161 /* Try to identify radio chip based on it's srev */ 161 /* Try to identify radio chip based on its srev */
162 switch (ah->ah_radio_5ghz_revision & 0xf0) { 162 switch (ah->ah_radio_5ghz_revision & 0xf0) {
163 case AR5K_SREV_RAD_5111: 163 case AR5K_SREV_RAD_5111:
164 ah->ah_radio = AR5K_RF5111; 164 ah->ah_radio = AR5K_RF5111;
@@ -329,7 +329,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
329 329
330 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ 330 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
331 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN); 331 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
332 ath5k_hw_set_associd(ah); 332 ath5k_hw_set_bssid(ah);
333 ath5k_hw_set_opmode(ah, sc->opmode); 333 ath5k_hw_set_opmode(ah, sc->opmode);
334 334
335 ath5k_hw_rfgain_opt_init(ah); 335 ath5k_hw_rfgain_opt_init(ah);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index d77ce9906b6c..116ac66c6e3e 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -612,7 +612,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
612 goto err_free; 612 goto err_free;
613 } 613 }
614 614
615 /*If we passed the test malloc a ath5k_hw struct*/ 615 /* If we passed the test, malloc an ath5k_hw struct */
616 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL); 616 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
617 if (!sc->ah) { 617 if (!sc->ah) {
618 ret = -ENOMEM; 618 ret = -ENOMEM;
@@ -700,10 +700,10 @@ ath5k_pci_probe(struct pci_dev *pdev,
700 return 0; 700 return 0;
701err_ah: 701err_ah:
702 ath5k_hw_detach(sc->ah); 702 ath5k_hw_detach(sc->ah);
703err_irq:
704 free_irq(pdev->irq, sc);
705err_free_ah: 703err_free_ah:
706 kfree(sc->ah); 704 kfree(sc->ah);
705err_irq:
706 free_irq(pdev->irq, sc);
707err_free: 707err_free:
708 ieee80211_free_hw(hw); 708 ieee80211_free_hw(hw);
709err_map: 709err_map:
@@ -786,8 +786,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
786 /* 786 /*
787 * Check if the MAC has multi-rate retry support. 787 * Check if the MAC has multi-rate retry support.
788 * We do this by trying to setup a fake extended 788 * We do this by trying to setup a fake extended
789 * descriptor. MAC's that don't have support will 789 * descriptor. MACs that don't have support will
790 * return false w/o doing anything. MAC's that do 790 * return false w/o doing anything. MACs that do
791 * support it will return true w/o doing anything. 791 * support it will return true w/o doing anything.
792 */ 792 */
793 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); 793 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
@@ -827,7 +827,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
827 /* 827 /*
828 * Allocate hardware transmit queues: one queue for 828 * Allocate hardware transmit queues: one queue for
829 * beacon frames and one data queue for each QoS 829 * beacon frames and one data queue for each QoS
830 * priority. Note that hw functions handle reseting 830 * priority. Note that hw functions handle resetting
831 * these queues at the needed time. 831 * these queues at the needed time.
832 */ 832 */
833 ret = ath5k_beaconq_setup(ah); 833 ret = ath5k_beaconq_setup(ah);
@@ -909,7 +909,7 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
909 /* 909 /*
910 * NB: the order of these is important: 910 * NB: the order of these is important:
911 * o call the 802.11 layer before detaching ath5k_hw to 911 * o call the 802.11 layer before detaching ath5k_hw to
912 * insure callbacks into the driver to delete global 912 * ensure callbacks into the driver to delete global
913 * key cache entries can be handled 913 * key cache entries can be handled
914 * o reclaim the tx queue data structures after calling 914 * o reclaim the tx queue data structures after calling
915 * the 802.11 layer as we'll get called back to reclaim 915 * the 802.11 layer as we'll get called back to reclaim
@@ -1518,7 +1518,7 @@ ath5k_txq_setup(struct ath5k_softc *sc,
1518 /* 1518 /*
1519 * Enable interrupts only for EOL and DESC conditions. 1519 * Enable interrupts only for EOL and DESC conditions.
1520 * We mark tx descriptors to receive a DESC interrupt 1520 * We mark tx descriptors to receive a DESC interrupt
1521 * when a tx queue gets deep; otherwise waiting for the 1521 * when a tx queue gets deep; otherwise we wait for the
1522 * EOL to reap descriptors. Note that this is done to 1522 * EOL to reap descriptors. Note that this is done to
1523 * reduce interrupt load and this only defers reaping 1523 * reduce interrupt load and this only defers reaping
1524 * descriptors, never transmitting frames. Aside from 1524 * descriptors, never transmitting frames. Aside from
@@ -1713,7 +1713,7 @@ ath5k_rx_start(struct ath5k_softc *sc)
1713 struct ath5k_buf *bf; 1713 struct ath5k_buf *bf;
1714 int ret; 1714 int ret;
1715 1715
1716 common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz); 1716 common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
1717 1717
1718 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n", 1718 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
1719 common->cachelsz, common->rx_bufsize); 1719 common->cachelsz, common->rx_bufsize);
@@ -1863,7 +1863,7 @@ ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
1863} 1863}
1864 1864
1865/* 1865/*
1866 * Compute padding position. skb must contains an IEEE 802.11 frame 1866 * Compute padding position. skb must contain an IEEE 802.11 frame
1867 */ 1867 */
1868static int ath5k_common_padpos(struct sk_buff *skb) 1868static int ath5k_common_padpos(struct sk_buff *skb)
1869{ 1869{
@@ -1882,10 +1882,9 @@ static int ath5k_common_padpos(struct sk_buff *skb)
1882} 1882}
1883 1883
1884/* 1884/*
1885 * This function expects a 802.11 frame and returns the number of 1885 * This function expects an 802.11 frame and returns the number of
1886 * bytes added, or -1 if we don't have enought header room. 1886 * bytes added, or -1 if we don't have enough header room.
1887 */ 1887 */
1888
1889static int ath5k_add_padding(struct sk_buff *skb) 1888static int ath5k_add_padding(struct sk_buff *skb)
1890{ 1889{
1891 int padpos = ath5k_common_padpos(skb); 1890 int padpos = ath5k_common_padpos(skb);
@@ -1905,10 +1904,18 @@ static int ath5k_add_padding(struct sk_buff *skb)
1905} 1904}
1906 1905
1907/* 1906/*
1908 * This function expects a 802.11 frame and returns the number of 1907 * The MAC header is padded to have 32-bit boundary if the
1909 * bytes removed 1908 * packet payload is non-zero. The general calculation for
1909 * padsize would take into account odd header lengths:
1910 * padsize = 4 - (hdrlen & 3); however, since only
1911 * even-length headers are used, padding can only be 0 or 2
1912 * bytes and we can optimize this a bit. We must not try to
1913 * remove padding from short control frames that do not have a
1914 * payload.
1915 *
1916 * This function expects an 802.11 frame and returns the number of
1917 * bytes removed.
1910 */ 1918 */
1911
1912static int ath5k_remove_padding(struct sk_buff *skb) 1919static int ath5k_remove_padding(struct sk_buff *skb)
1913{ 1920{
1914 int padpos = ath5k_common_padpos(skb); 1921 int padpos = ath5k_common_padpos(skb);
@@ -1929,14 +1936,6 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1929{ 1936{
1930 struct ieee80211_rx_status *rxs; 1937 struct ieee80211_rx_status *rxs;
1931 1938
1932 /* The MAC header is padded to have 32-bit boundary if the
1933 * packet payload is non-zero. The general calculation for
1934 * padsize would take into account odd header lengths:
1935 * padsize = (4 - hdrlen % 4) % 4; However, since only
1936 * even-length headers are used, padding can only be 0 or 2
1937 * bytes and we can optimize this a bit. In addition, we must
1938 * not try to remove padding from short control frames that do
1939 * not have payload. */
1940 ath5k_remove_padding(skb); 1939 ath5k_remove_padding(skb);
1941 1940
1942 rxs = IEEE80211_SKB_RXCB(skb); 1941 rxs = IEEE80211_SKB_RXCB(skb);
@@ -2040,9 +2039,8 @@ ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
2040 return true; 2039 return true;
2041 } 2040 }
2042 2041
2043 /* let crypto-error packets fall through in MNTR */ 2042 /* reject any frames with non-crypto errors */
2044 if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || 2043 if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
2045 sc->opmode != NL80211_IFTYPE_MONITOR)
2046 return false; 2044 return false;
2047 } 2045 }
2048 2046
@@ -2285,10 +2283,11 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2285 * default antenna which is supposed to be an omni. 2283 * default antenna which is supposed to be an omni.
2286 * 2284 *
2287 * Note2: On sectored scenarios it's possible to have 2285 * Note2: On sectored scenarios it's possible to have
2288 * multiple antennas (1omni -the default- and 14 sectors) 2286 * multiple antennas (1 omni -- the default -- and 14
2289 * so if we choose to actually support this mode we need 2287 * sectors), so if we choose to actually support this
2290 * to allow user to set how many antennas we have and tweak 2288 * mode, we need to allow the user to set how many antennas
2291 * the code below to send beacons on all of them. 2289 * we have and tweak the code below to send beacons
2290 * on all of them.
2292 */ 2291 */
2293 if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP) 2292 if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
2294 antenna = sc->bsent & 4 ? 2 : 1; 2293 antenna = sc->bsent & 4 ? 2 : 1;
@@ -2330,14 +2329,13 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2330 2329
2331 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 2330 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
2332 2331
2333 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION || 2332 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION)) {
2334 sc->opmode == NL80211_IFTYPE_MONITOR)) {
2335 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); 2333 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
2336 return; 2334 return;
2337 } 2335 }
2338 /* 2336 /*
2339 * Check if the previous beacon has gone out. If 2337 * Check if the previous beacon has gone out. If
2340 * not don't don't try to post another, skip this 2338 * not, don't try to post another: skip this
2341 * period and wait for the next. Missed beacons 2339 * period and wait for the next. Missed beacons
2342 * indicate a problem and should not occur. If we 2340 * indicate a problem and should not occur. If we
2343 * miss too many consecutive beacons reset the device. 2341 * miss too many consecutive beacons reset the device.
@@ -2905,12 +2903,9 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
2905 2903
2906 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2904 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
2907 2905
2908 if (sc->opmode == NL80211_IFTYPE_MONITOR)
2909 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n");
2910
2911 /* 2906 /*
2912 * the hardware expects the header padded to 4 byte boundaries 2907 * The hardware expects the header padded to 4 byte boundaries.
2913 * if this is not the case we add the padding after the header 2908 * If this is not the case, we add the padding after the header.
2914 */ 2909 */
2915 padsize = ath5k_add_padding(skb); 2910 padsize = ath5k_add_padding(skb);
2916 if (padsize < 0) { 2911 if (padsize < 0) {
@@ -3053,7 +3048,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
3053 case NL80211_IFTYPE_STATION: 3048 case NL80211_IFTYPE_STATION:
3054 case NL80211_IFTYPE_ADHOC: 3049 case NL80211_IFTYPE_ADHOC:
3055 case NL80211_IFTYPE_MESH_POINT: 3050 case NL80211_IFTYPE_MESH_POINT:
3056 case NL80211_IFTYPE_MONITOR:
3057 sc->opmode = vif->type; 3051 sc->opmode = vif->type;
3058 break; 3052 break;
3059 default: 3053 default:
@@ -3237,9 +3231,9 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
3237 rfilt |= AR5K_RX_FILTER_PHYERR; 3231 rfilt |= AR5K_RX_FILTER_PHYERR;
3238 3232
3239 /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons 3233 /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
3240 * and probes for any BSSID, this needs testing */ 3234 * and probes for any BSSID */
3241 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) 3235 if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
3242 rfilt |= AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ; 3236 rfilt |= AR5K_RX_FILTER_BEACON;
3243 3237
3244 /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not 3238 /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
3245 * set we should only pass on control frames for this 3239 * set we should only pass on control frames for this
@@ -3255,7 +3249,6 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
3255 3249
3256 switch (sc->opmode) { 3250 switch (sc->opmode) {
3257 case NL80211_IFTYPE_MESH_POINT: 3251 case NL80211_IFTYPE_MESH_POINT:
3258 case NL80211_IFTYPE_MONITOR:
3259 rfilt |= AR5K_RX_FILTER_CONTROL | 3252 rfilt |= AR5K_RX_FILTER_CONTROL |
3260 AR5K_RX_FILTER_BEACON | 3253 AR5K_RX_FILTER_BEACON |
3261 AR5K_RX_FILTER_PROBEREQ | 3254 AR5K_RX_FILTER_PROBEREQ |
@@ -3278,7 +3271,7 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
3278 3271
3279 /* Set multicast bits */ 3272 /* Set multicast bits */
3280 ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]); 3273 ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
3281 /* Set the cached hw filter flags, this will alter actually 3274 /* Set the cached hw filter flags, this will later actually
3282 * be set in HW */ 3275 * be set in HW */
3283 sc->filter_flags = rfilt; 3276 sc->filter_flags = rfilt;
3284 3277
@@ -3301,11 +3294,12 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3301 if (sc->opmode == NL80211_IFTYPE_AP) 3294 if (sc->opmode == NL80211_IFTYPE_AP)
3302 return -EOPNOTSUPP; 3295 return -EOPNOTSUPP;
3303 3296
3304 switch (key->alg) { 3297 switch (key->cipher) {
3305 case ALG_WEP: 3298 case WLAN_CIPHER_SUITE_WEP40:
3306 case ALG_TKIP: 3299 case WLAN_CIPHER_SUITE_WEP104:
3300 case WLAN_CIPHER_SUITE_TKIP:
3307 break; 3301 break;
3308 case ALG_CCMP: 3302 case WLAN_CIPHER_SUITE_CCMP:
3309 if (sc->ah->ah_aes_support) 3303 if (sc->ah->ah_aes_support)
3310 break; 3304 break;
3311 3305
@@ -3479,7 +3473,7 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3479 /* Cache for later use during resets */ 3473 /* Cache for later use during resets */
3480 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 3474 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
3481 common->curaid = 0; 3475 common->curaid = 0;
3482 ath5k_hw_set_associd(ah); 3476 ath5k_hw_set_bssid(ah);
3483 mmiowb(); 3477 mmiowb();
3484 } 3478 }
3485 3479
@@ -3497,7 +3491,7 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3497 "Bss Info ASSOC %d, bssid: %pM\n", 3491 "Bss Info ASSOC %d, bssid: %pM\n",
3498 bss_conf->aid, common->curbssid); 3492 bss_conf->aid, common->curbssid);
3499 common->curaid = bss_conf->aid; 3493 common->curaid = bss_conf->aid;
3500 ath5k_hw_set_associd(ah); 3494 ath5k_hw_set_bssid(ah);
3501 /* Once ANI is available you would start it here */ 3495 /* Once ANI is available you would start it here */
3502 } 3496 }
3503 } 3497 }
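
The padding comment relocated above ath5k_remove_padding() is easy to sanity-check. ath5k_common_padpos() returns the 802.11 header length -- 24 bytes, +6 with a fourth address, +2 with a QoS control field -- and the pad is just that offset modulo 4, so with even header lengths it is always 0 or 2. A stand-alone check of that claim:

#include <assert.h>
#include <stdio.h>

/* padsize as ath5k computes it: header offset modulo 4. */
static int padsize(int padpos)
{
	return padpos & 3;
}

int main(void)
{
	assert(padsize(24) == 0);	/* basic data frame header */
	assert(padsize(26) == 2);	/* + QoS control field */
	assert(padsize(30) == 2);	/* 4-address header */
	assert(padsize(32) == 0);	/* 4-address + QoS */
	printf("padding is always 0 or 2 for even header lengths\n");
	return 0;
}
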
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 4cccc29964f6..1b7c6d7fde93 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -312,6 +312,7 @@ static const struct {
312 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, 312 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
313 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 313 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
314 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, 314 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
315 { ATH5K_DEBUG_DESC, "desc", "descriptor chains" },
315 { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, 316 { ATH5K_DEBUG_ANY, "all", "show all debug levels" },
316}; 317};
317 318
@@ -955,7 +956,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
955 struct ath5k_rx_status rs = {}; 956 struct ath5k_rx_status rs = {};
956 int status; 957 int status;
957 958
958 if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET))) 959 if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
959 return; 960 return;
960 961
961 printk(KERN_DEBUG "rxdp %x, rxlink %p\n", 962 printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
@@ -997,7 +998,7 @@ ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
997 struct ath5k_tx_status ts = {}; 998 struct ath5k_tx_status ts = {};
998 int done; 999 int done;
999 1000
1000 if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET))) 1001 if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
1001 return; 1002 return;
1002 1003
1003 done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts); 1004 done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts);
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 606ae94a9157..9b22722a95f0 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -95,6 +95,7 @@ struct ath5k_dbg_info {
95 * @ATH5K_DEBUG_DUMP_TX: print transmit skb content 95 * @ATH5K_DEBUG_DUMP_TX: print transmit skb content
96 * @ATH5K_DEBUG_DUMPBANDS: dump bands 96 * @ATH5K_DEBUG_DUMPBANDS: dump bands
97 * @ATH5K_DEBUG_TRACE: trace function calls 97 * @ATH5K_DEBUG_TRACE: trace function calls
98 * @ATH5K_DEBUG_DESC: descriptor setup
98 * @ATH5K_DEBUG_ANY: show at any debug level 99 * @ATH5K_DEBUG_ANY: show at any debug level
99 * 100 *
100 * The debug level is used to control the amount and type of debugging output 101 * The debug level is used to control the amount and type of debugging output
@@ -117,6 +118,7 @@ enum ath5k_debug_level {
117 ATH5K_DEBUG_DUMP_TX = 0x00000200, 118 ATH5K_DEBUG_DUMP_TX = 0x00000200,
118 ATH5K_DEBUG_DUMPBANDS = 0x00000400, 119 ATH5K_DEBUG_DUMPBANDS = 0x00000400,
119 ATH5K_DEBUG_ANI = 0x00002000, 120 ATH5K_DEBUG_ANI = 0x00002000,
121 ATH5K_DEBUG_DESC = 0x00004000,
120 ATH5K_DEBUG_ANY = 0xffffffff 122 ATH5K_DEBUG_ANY = 0xffffffff
121}; 123};
122 124
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 484f31870ba8..58bb6c5dda7b 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -377,11 +377,11 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
377 * 377 *
378 * This function increases/decreases the tx trigger level for the tx fifo 378 * This function increases/decreases the tx trigger level for the tx fifo
379 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes 379 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
380 * the buffer and transmits it's data. Lowering this results sending small 380 * the buffer and transmits its data. Lowering this results in sending small
381 * frames more quickly but can lead to tx underruns; raising it a lot can 381 * frames more quickly but can lead to tx underruns; raising it a lot can
382 * result in other problems (I think bmiss is related). Right now we start with 382 * result in other problems (I think bmiss is related). Right now we start with
383 * the lowest possible (64Bytes) and if we get tx underrun we increase it using 383 * the lowest possible (64Bytes) and if we get tx underrun we increase it using
384 * the increase flag. Returns -EIO if we have have reached maximum/minimum. 384 * the increase flag. Returns -EIO if we have reached maximum/minimum.
385 * 385 *
386 * XXX: Link this with tx DMA size ? 386 * XXX: Link this with tx DMA size ?
387 * XXX: Use it to save interrupts ? 387 * XXX: Use it to save interrupts ?
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index ae316fec4a6a..39722dd73e43 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -661,7 +661,7 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
661 * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC 661 * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
662 * steps that match with the power values we read from eeprom. On 662 * steps that match with the power values we read from eeprom. On
663 * older eeprom versions (< 3.2) these steps are equally spaced at 663 * older eeprom versions (< 3.2) these steps are equally spaced at
664 * 10% of the pcdac curve -until the curve reaches it's maximum- 664 * 10% of the pcdac curve -until the curve reaches its maximum-
665 * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2) 665 * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
666 * these 11 steps are spaced in a different way. This function returns 666 * these 11 steps are spaced in a different way. This function returns
667 * the pcdac steps based on eeprom version and curve min/max so that we 667 * the pcdac steps based on eeprom version and curve min/max so that we
@@ -1113,7 +1113,7 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
1113 */ 1113 */
1114 1114
1115/* For RF2413 power calibration data doesn't start on a fixed location and 1115/* For RF2413 power calibration data doesn't start on a fixed location and
1116 * if a mode is not supported, it's section is missing -not zeroed-. 1116 * if a mode is not supported, its section is missing -not zeroed-.
1117 * So we need to calculate the starting offset for each section by using 1117 * So we need to calculate the starting offset for each section by using
1118 * these two functions */ 1118 * these two functions */
1119 1119
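
The offset calculation boils down to summing the sizes of the supported sections that precede the requested mode; a sketch under that reading (section sizes and support flags are placeholders):

	/* A mode's calibration data starts at the EEPROM base plus the
	 * sizes of all supported sections before it; unsupported modes
	 * add nothing because their sections are absent, not zeroed. */
	static unsigned int cal_section_offset(unsigned int base,
					       const unsigned int sizes[],
					       const int supported[],
					       int mode)
	{
		unsigned int off = base;
		int i;

		for (i = 0; i < mode; i++)
			if (supported[i])
				off += sizes[i];
		return off;
	}
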
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 86fdb6ddfaaa..bb2e21553d1b 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -137,11 +137,11 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
137 * ath5k_hw_set_ack_bitrate - set bitrate for ACKs 137 * ath5k_hw_set_ack_bitrate - set bitrate for ACKs
138 * 138 *
139 * @ah: The &struct ath5k_hw 139 * @ah: The &struct ath5k_hw
140 * @high: Flag to determine if we want to use high transmition rate 140 * @high: Flag to determine if we want to use high transmission rate
141 * for ACKs or not 141 * for ACKs or not
142 * 142 *
143 * If high flag is set, we tell hw to use a set of control rates based on 143 * If high flag is set, we tell hw to use a set of control rates based on
144 * the current transmition rate (check out control_rates array inside reset.c). 144 * the current transmission rate (check out control_rates array inside reset.c).
145	 * If not, hw just uses the lowest rate available for the current modulation	145	 * If not, hw just uses the lowest rate available for the current modulation
146 * scheme being used (1Mbit for CCK and 6Mbits for OFDM). 146 * scheme being used (1Mbit for CCK and 6Mbits for OFDM).
147 */ 147 */
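
The selection is a two-way choice; a sketch where the control-rate lookup stands in for the control_rates array in reset.c (the mapping and the kbps constants are assumptions):

	#include <stdbool.h>

	/* Assumed stand-in for the control_rates lookup in reset.c. */
	static int control_rate_kbps(int tx_rate_kbps)
	{
		return tx_rate_kbps > 24000 ? 24000 : tx_rate_kbps;
	}

	static int ack_rate_kbps(bool high, bool is_cck, int tx_rate_kbps)
	{
		if (high)
			return control_rate_kbps(tx_rate_kbps);
		return is_cck ? 1000 : 6000;	/* 1 Mbit CCK, 6 Mbit OFDM */
	}
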
@@ -308,27 +308,26 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
308} 308}
309 309
310/** 310/**
311 * ath5k_hw_set_associd - Set BSSID for association 311 * ath5k_hw_set_bssid - Set current BSSID on hw
312 * 312 *
313 * @ah: The &struct ath5k_hw 313 * @ah: The &struct ath5k_hw
314 * @bssid: BSSID
315 * @assoc_id: Assoc id
316 * 314 *
317	 * Sets the BSSID which trigers the "SME Join" operation	315	 * Writes the current BSSID and BSSID mask from the
316 * common struct into the hardware
318 */ 317 */
319void ath5k_hw_set_associd(struct ath5k_hw *ah) 318void ath5k_hw_set_bssid(struct ath5k_hw *ah)
320{ 319{
321 struct ath_common *common = ath5k_hw_common(ah); 320 struct ath_common *common = ath5k_hw_common(ah);
322 u16 tim_offset = 0; 321 u16 tim_offset = 0;
323 322
324 /* 323 /*
325 * Set simple BSSID mask on 5212 324 * Set BSSID mask on 5212
326 */ 325 */
327 if (ah->ah_version == AR5K_AR5212) 326 if (ah->ah_version == AR5K_AR5212)
328 ath_hw_setbssidmask(common); 327 ath_hw_setbssidmask(common);
329 328
330 /* 329 /*
331 * Set BSSID which triggers the "SME Join" operation 330 * Set BSSID
332 */ 331 */
333 ath5k_hw_reg_write(ah, 332 ath5k_hw_reg_write(ah,
334 get_unaligned_le32(common->curbssid), 333 get_unaligned_le32(common->curbssid),
@@ -695,21 +694,18 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
695static 694static
696int ath5k_keycache_type(const struct ieee80211_key_conf *key) 695int ath5k_keycache_type(const struct ieee80211_key_conf *key)
697{ 696{
698 switch (key->alg) { 697 switch (key->cipher) {
699 case ALG_TKIP: 698 case WLAN_CIPHER_SUITE_TKIP:
700 return AR5K_KEYTABLE_TYPE_TKIP; 699 return AR5K_KEYTABLE_TYPE_TKIP;
701 case ALG_CCMP: 700 case WLAN_CIPHER_SUITE_CCMP:
702 return AR5K_KEYTABLE_TYPE_CCM; 701 return AR5K_KEYTABLE_TYPE_CCM;
703 case ALG_WEP: 702 case WLAN_CIPHER_SUITE_WEP40:
704 if (key->keylen == WLAN_KEY_LEN_WEP40) 703 return AR5K_KEYTABLE_TYPE_40;
705 return AR5K_KEYTABLE_TYPE_40; 704 case WLAN_CIPHER_SUITE_WEP104:
706 else if (key->keylen == WLAN_KEY_LEN_WEP104) 705 return AR5K_KEYTABLE_TYPE_104;
707 return AR5K_KEYTABLE_TYPE_104;
708 return -EINVAL;
709 default: 706 default:
710 return -EINVAL; 707 return -EINVAL;
711 } 708 }
712 return -EINVAL;
713} 709}
714 710
715/* 711/*
@@ -728,7 +724,7 @@ int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
728 bool is_tkip; 724 bool is_tkip;
729 const u8 *key_ptr; 725 const u8 *key_ptr;
730 726
731 is_tkip = (key->alg == ALG_TKIP); 727 is_tkip = (key->cipher == WLAN_CIPHER_SUITE_TKIP);
732 728
733 /* 729 /*
734 * key->keylen comes in from mac80211 in bytes. 730 * key->keylen comes in from mac80211 in bytes.
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 6284c389ba18..984ba92c7df3 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -115,7 +115,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
115\**********************/ 115\**********************/
116 116
117/* 117/*
118 * This code is used to optimize rf gain on different environments 118 * This code is used to optimize RF gain on different environments
119 * (temperature mostly) based on feedback from a power detector. 119 * (temperature mostly) based on feedback from a power detector.
120 * 120 *
121	 * It's only used on RF5111 and RF5112; later RF chips seem to have	121	 * It's only used on RF5111 and RF5112; later RF chips seem to have
@@ -302,7 +302,7 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
302} 302}
303 303
304/* Perform gain_F adjustment by choosing the right set 304/* Perform gain_F adjustment by choosing the right set
305 * of parameters from rf gain optimization ladder */ 305 * of parameters from RF gain optimization ladder */
306static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah) 306static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
307{ 307{
308 const struct ath5k_gain_opt *go; 308 const struct ath5k_gain_opt *go;
@@ -367,7 +367,7 @@ done:
367 return ret; 367 return ret;
368} 368}
369 369
370/* Main callback for thermal rf gain calibration engine 370/* Main callback for thermal RF gain calibration engine
371 * Check for a new gain reading and schedule an adjustment 371 * Check for a new gain reading and schedule an adjustment
372 * if needed. 372 * if needed.
373 * 373 *
@@ -433,7 +433,7 @@ done:
433 return ah->ah_gain.g_state; 433 return ah->ah_gain.g_state;
434} 434}
435 435
436/* Write initial rf gain table to set the RF sensitivity 436/* Write initial RF gain table to set the RF sensitivity
437 * this one works on all RF chips and has nothing to do 437 * this one works on all RF chips and has nothing to do
438 * with gain_F calibration */ 438 * with gain_F calibration */
439int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq) 439int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
@@ -496,7 +496,7 @@ int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
496 496
497 497
498/* 498/*
499 * Setup RF registers by writing rf buffer on hw 499 * Setup RF registers by writing RF buffer on hw
500 */ 500 */
501int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, 501int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
502 unsigned int mode) 502 unsigned int mode)
@@ -571,7 +571,7 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
571 return -EINVAL; 571 return -EINVAL;
572 } 572 }
573 573
574 /* If it's the first time we set rf buffer, allocate 574 /* If it's the first time we set RF buffer, allocate
575 * ah->ah_rf_banks based on ah->ah_rf_banks_size 575 * ah->ah_rf_banks based on ah->ah_rf_banks_size
576 * we set above */ 576 * we set above */
577 if (ah->ah_rf_banks == NULL) { 577 if (ah->ah_rf_banks == NULL) {
@@ -1582,7 +1582,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1582 else if (curr_sym_off >= 31 && curr_sym_off <= 46) 1582 else if (curr_sym_off >= 31 && curr_sym_off <= 46)
1583 mag_mask[2] |= 1583 mag_mask[2] |=
1584 plt_mag_map << (curr_sym_off - 31) * 2; 1584 plt_mag_map << (curr_sym_off - 31) * 2;
1585 else if (curr_sym_off >= 46 && curr_sym_off <= 53) 1585 else if (curr_sym_off >= 47 && curr_sym_off <= 53)
1586 mag_mask[3] |= 1586 mag_mask[3] |=
1587 plt_mag_map << (curr_sym_off - 47) * 2; 1587 plt_mag_map << (curr_sym_off - 47) * 2;
1588 1588
@@ -2987,7 +2987,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
2987 2987
2988 2988
2989/* 2989/*
2990 * Set transmition power 2990 * Set transmission power
2991 */ 2991 */
2992int 2992int
2993ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, 2993ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3035,9 +3035,6 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3035 /* Limit max power if we have a CTL available */ 3035 /* Limit max power if we have a CTL available */
3036 ath5k_get_max_ctl_power(ah, channel); 3036 ath5k_get_max_ctl_power(ah, channel);
3037 3037
3038 /* FIXME: Tx power limit for this regdomain
3039 * XXX: Mac80211/CRDA will do that anyway ? */
3040
3041 /* FIXME: Antenna reduction stuff */ 3038 /* FIXME: Antenna reduction stuff */
3042 3039
3043 /* FIXME: Limit power on turbo modes */ 3040 /* FIXME: Limit power on turbo modes */
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 55b4ac6d236f..05ef587ad2b4 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -1911,7 +1911,7 @@
1911#define AR5K_PHY_TURBO 0x9804 /* Register Address */ 1911#define AR5K_PHY_TURBO 0x9804 /* Register Address */
1912#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */ 1912#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */
1913#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Set short symbols to turbo mode */ 1913#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Set short symbols to turbo mode */
1914#define AR5K_PHY_TURBO_MIMO 0x00000004 /* Set turbo for mimo mimo */ 1914#define AR5K_PHY_TURBO_MIMO 0x00000004 /* Set turbo for mimo */
1915 1915
1916/* 1916/*
1917 * PHY agility command register 1917 * PHY agility command register
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 498aa28ea9e6..58912cd762d9 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -326,7 +326,7 @@ commit:
326 * register). After this MAC and Baseband are 326 * register). After this MAC and Baseband are
327 * disabled and a full reset is needed to come 327 * disabled and a full reset is needed to come
328 * back. This way we save as much power as possible 328 * back. This way we save as much power as possible
329 * without puting the card on full sleep. 329 * without putting the card on full sleep.
330 */ 330 */
331int ath5k_hw_on_hold(struct ath5k_hw *ah) 331int ath5k_hw_on_hold(struct ath5k_hw *ah)
332{ 332{
@@ -344,7 +344,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
344 /* 344 /*
345 * Put chipset on warm reset... 345 * Put chipset on warm reset...
346 * 346 *
347 * Note: puting PCI core on warm reset on PCI-E cards 347 * Note: putting PCI core on warm reset on PCI-E cards
348	 * causes the card to hang and always return 0xffff... so	348	 * causes the card to hang and always return 0xffff... so
349	 * we ignore that flag for PCI-E cards. On PCI cards	349	 * we ignore that flag for PCI-E cards. On PCI cards
350 * this flag gets cleared after 64 PCI clocks. 350 * this flag gets cleared after 64 PCI clocks.
@@ -400,7 +400,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
400 /* 400 /*
401 * Put chipset on warm reset... 401 * Put chipset on warm reset...
402 * 402 *
403 * Note: puting PCI core on warm reset on PCI-E cards 403 * Note: putting PCI core on warm reset on PCI-E cards
404	 * causes the card to hang and always return 0xffff... so	404	 * causes the card to hang and always return 0xffff... so
405	 * we ignore that flag for PCI-E cards. On PCI cards	405	 * we ignore that flag for PCI-E cards. On PCI cards
406 * this flag gets cleared after 64 PCI clocks. 406 * this flag gets cleared after 64 PCI clocks.
@@ -959,7 +959,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
959 AR5K_QUEUE_DCU_SEQNUM(0)); 959 AR5K_QUEUE_DCU_SEQNUM(0));
960 } 960 }
961 961
962 /* TSF accelerates on AR5211 durring reset 962 /* TSF accelerates on AR5211 during reset
963 * As a workaround save it here and restore 963 * As a workaround save it here and restore
964 * it later so that it's back in time after 964 * it later so that it's back in time after
965 * reset. This way it'll get re-synced on the 965 * reset. This way it'll get re-synced on the
@@ -1080,7 +1080,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1080 return ret; 1080 return ret;
1081 1081
1082 /* Spur info is available only from EEPROM versions 1082 /* Spur info is available only from EEPROM versions
1083 * bigger than 5.3 but but the EEPOM routines will use 1083 * greater than 5.3, but the EEPROM routines will use
1084 * static values for older versions */ 1084 * static values for older versions */
1085 if (ah->ah_mac_srev >= AR5K_SREV_AR5424) 1085 if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
1086 ath5k_hw_set_spur_mitigation_filter(ah, 1086 ath5k_hw_set_spur_mitigation_filter(ah,
@@ -1160,7 +1160,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1160 */ 1160 */
1161 1161
1162 /* Restore bssid and bssid mask */ 1162 /* Restore bssid and bssid mask */
1163 ath5k_hw_set_associd(ah); 1163 ath5k_hw_set_bssid(ah);
1164 1164
1165 /* Set PCU config */ 1165 /* Set PCU config */
1166 ath5k_hw_set_opmode(ah, op_mode); 1166 ath5k_hw_set_opmode(ah, op_mode);
@@ -1173,11 +1173,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1173 /* Set RSSI/BRSSI thresholds 1173 /* Set RSSI/BRSSI thresholds
1174 * 1174 *
1175 * Note: If we decide to set this value 1175 * Note: If we decide to set this value
1176 * dynamicaly, have in mind that when AR5K_RSSI_THR 1176 * dynamically, keep in mind that when AR5K_RSSI_THR
1177 * register is read it might return 0x40 if we haven't 1177 * register is read, it might return 0x40 if we haven't
1178 * wrote anything to it plus BMISS RSSI threshold is zeroed. 1178 * written anything to it. Also, BMISS RSSI threshold is zeroed.
1179 * So doing a save/restore procedure here isn't the right 1179 * So doing a save/restore procedure here isn't the right
1180 * choice. Instead store it on ath5k_hw */ 1180 * choice. Instead, store it in ath5k_hw */
1181 ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES | 1181 ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
1182 AR5K_TUNE_BMISS_THRES << 1182 AR5K_TUNE_BMISS_THRES <<
1183 AR5K_RSSI_THR_BMISS_S), 1183 AR5K_RSSI_THR_BMISS_S),
@@ -1235,7 +1235,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1235 1235
1236 /* 1236 /*
1237 * Perform ADC test to see if baseband is ready 1237 * Perform ADC test to see if baseband is ready
1238 * Set tx hold and check adc test register 1238 * Set TX hold and check ADC test register
1239 */ 1239 */
1240 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); 1240 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
1241 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); 1241 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
@@ -1254,15 +1254,15 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1254 * 1254 *
1255 * This method is used to calibrate some static offsets 1255 * This method is used to calibrate some static offsets
1256	 * used together with on-the-fly I/Q calibration (the	1256	 * used together with on-the-fly I/Q calibration (the
1257 * one performed via ath5k_hw_phy_calibrate), that doesn't 1257 * one performed via ath5k_hw_phy_calibrate), which doesn't
1258 * interrupt rx path. 1258 * interrupt rx path.
1259 * 1259 *
1260 * While rx path is re-routed to the power detector we also 1260 * While rx path is re-routed to the power detector we also
1261 * start a noise floor calibration, to measure the 1261 * start a noise floor calibration to measure the
1262 * card's noise floor (the noise we measure when we are not 1262 * card's noise floor (the noise we measure when we are not
1263 * transmiting or receiving anything). 1263 * transmitting or receiving anything).
1264 * 1264 *
1265 * If we are in a noisy environment AGC calibration may time 1265 * If we are in a noisy environment, AGC calibration may time
1266	 * out and/or noise floor calibration might time out.	1266	 * out and/or noise floor calibration might time out.
1267 */ 1267 */
1268 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1268 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index e50baff66175..3ac4cff4239d 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -25,10 +25,10 @@
25 * 25 *
26	 * We don't write to those registers directly; instead	26	 * We don't write to those registers directly; instead
27	 * we send a data packet to the chip, using a special register,	27	 * we send a data packet to the chip, using a special register,
28 * that holds all the settings we need. After we 've sent the 28 * that holds all the settings we need. After we've sent the
29 * data packet, we write on another special register to notify hw 29 * data packet, we write on another special register to notify hw
30 * to apply the settings. This is done so that control registers 30 * to apply the settings. This is done so that control registers
31 * can be dynamicaly programmed during operation and the settings 31 * can be dynamically programmed during operation and the settings
32 * are applied faster on the hw. 32 * are applied faster on the hw.
33 * 33 *
34 * We call each data packet an "RF Bank" and all the data we write 34 * We call each data packet an "RF Bank" and all the data we write
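
The two-step "stage, then apply" sequence looks roughly like this driver-context sketch; both register macros are placeholders, not the real ath5k buffer/control register definitions:

	#define RF_BUFFER_REG		0x989c	/* placeholder address */
	#define RF_BUFFER_CONTROL_REG	0x98d4	/* placeholder address */

	/* Stage a whole RF bank through the buffer register, then poke
	 * the control register so the hardware latches all the new
	 * settings at once. */
	static void rf_bank_write(struct ath5k_hw *ah, const u32 *bank,
				  unsigned int len)
	{
		unsigned int i;

		for (i = 0; i < len; i++)
			ath5k_hw_reg_write(ah, bank[i], RF_BUFFER_REG);
		ath5k_hw_reg_write(ah, 1, RF_BUFFER_CONTROL_REG);
	}
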
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 973ae4f49f35..4555e9983903 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -46,6 +46,7 @@ ath9k_htc-y += htc_hst.o \
46 htc_drv_txrx.o \ 46 htc_drv_txrx.o \
47 htc_drv_main.o \ 47 htc_drv_main.o \
48 htc_drv_beacon.o \ 48 htc_drv_beacon.o \
49 htc_drv_init.o 49 htc_drv_init.o \
50 htc_drv_gpio.o
50 51
51obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o 52obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 5b995bee70ae..a462da23e87e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -185,7 +185,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
185 ath_print(common, ATH_DBG_INTERRUPT, 185 ath_print(common, ATH_DBG_INTERRUPT,
186 "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); 186 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
187 187
188 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); 188 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
189 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); 189 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
190 190
191 } 191 }
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 07f26ee7a723..f0197a6046ab 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -423,6 +423,7 @@ int ath_beaconq_config(struct ath_softc *sc);
423#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ 423#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
424#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */ 424#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */
425#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */ 425#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */
426#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
426#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 427#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
427#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 428#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
428 429
@@ -436,14 +437,6 @@ void ath_ani_calibrate(unsigned long data);
436/* BTCOEX */ 437/* BTCOEX */
437/**********/ 438/**********/
438 439
439/* Defines the BT AR_BT_COEX_WGHT used */
440enum ath_stomp_type {
441 ATH_BTCOEX_NO_STOMP,
442 ATH_BTCOEX_STOMP_ALL,
443 ATH_BTCOEX_STOMP_LOW,
444 ATH_BTCOEX_STOMP_NONE
445};
446
447struct ath_btcoex { 440struct ath_btcoex {
448 bool hw_timer_enabled; 441 bool hw_timer_enabled;
449 spinlock_t btcoex_lock; 442 spinlock_t btcoex_lock;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 4d4b22d52dfd..081192e78a46 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -359,11 +359,12 @@ void ath_beacon_tasklet(unsigned long data)
359 sc->beacon.bmisscnt++; 359 sc->beacon.bmisscnt++;
360 360
361 if (sc->beacon.bmisscnt < BSTUCK_THRESH) { 361 if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
362 ath_print(common, ATH_DBG_BEACON, 362 ath_print(common, ATH_DBG_BSTUCK,
363 "missed %u consecutive beacons\n", 363 "missed %u consecutive beacons\n",
364 sc->beacon.bmisscnt); 364 sc->beacon.bmisscnt);
365 ath9k_hw_bstuck_nfcal(ah);
365 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 366 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
366 ath_print(common, ATH_DBG_BEACON, 367 ath_print(common, ATH_DBG_BSTUCK,
367 "beacon is officially stuck\n"); 368 "beacon is officially stuck\n");
368 sc->sc_flags |= SC_OP_TSF_RESET; 369 sc->sc_flags |= SC_OP_TSF_RESET;
369 ath_reset(sc, false); 370 ath_reset(sc, false);
@@ -373,7 +374,7 @@ void ath_beacon_tasklet(unsigned long data)
373 } 374 }
374 375
375 if (sc->beacon.bmisscnt != 0) { 376 if (sc->beacon.bmisscnt != 0) {
376 ath_print(common, ATH_DBG_BEACON, 377 ath_print(common, ATH_DBG_BSTUCK,
377 "resume beacon xmit after %u misses\n", 378 "resume beacon xmit after %u misses\n",
378 sc->beacon.bmisscnt); 379 sc->beacon.bmisscnt);
379 sc->beacon.bmisscnt = 0; 380 sc->beacon.bmisscnt = 0;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index fb4ac15f3b93..6a92e57fddf0 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -168,6 +168,7 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
168static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) 168static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
169{ 169{
170 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 170 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
171 u32 val;
171 172
172 /* 173 /*
173 * Program coex mode and weight registers to 174 * Program coex mode and weight registers to
@@ -177,6 +178,12 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
177 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights); 178 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
178 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2); 179 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
179 180
181 if (AR_SREV_9271(ah)) {
182 val = REG_READ(ah, 0x50040);
183 val &= 0xFFFFFEFF;
184 REG_WRITE(ah, 0x50040, val);
185 }
186
180 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); 187 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
181 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); 188 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
182 189
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 45208690c0ec..67ee5d735cc1 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -19,8 +19,7 @@
19 19
20/* Common calibration code */ 20/* Common calibration code */
21 21
22/* We can tune this as we go by monitoring really low values */ 22#define ATH9K_NF_TOO_HIGH -60
23#define ATH9K_NF_TOO_LOW -60
24 23
25static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) 24static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
26{ 25{
@@ -45,11 +44,39 @@ static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
45 return nfval; 44 return nfval;
46} 45}
47 46
48static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h, 47static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
48 struct ath9k_channel *chan)
49{
50 struct ath_nf_limits *limit;
51
52 if (!chan || IS_CHAN_2GHZ(chan))
53 limit = &ah->nf_2g;
54 else
55 limit = &ah->nf_5g;
56
57 return limit;
58}
59
60static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
61 struct ath9k_channel *chan)
62{
63 return ath9k_hw_get_nf_limits(ah, chan)->nominal;
64}
65
66
67static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
68 struct ath9k_hw_cal_data *cal,
49 int16_t *nfarray) 69 int16_t *nfarray)
50{ 70{
71 struct ath_common *common = ath9k_hw_common(ah);
72 struct ath_nf_limits *limit;
73 struct ath9k_nfcal_hist *h;
74 bool high_nf_mid = false;
51 int i; 75 int i;
52 76
77 h = cal->nfCalHist;
78 limit = ath9k_hw_get_nf_limits(ah, ah->curchan);
79
53 for (i = 0; i < NUM_NF_READINGS; i++) { 80 for (i = 0; i < NUM_NF_READINGS; i++) {
54 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i]; 81 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
55 82
@@ -63,7 +90,39 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
63 h[i].privNF = 90 h[i].privNF =
64 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer); 91 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
65 } 92 }
93
94 if (!h[i].privNF)
95 continue;
96
97 if (h[i].privNF > limit->max) {
98 high_nf_mid = true;
99
100 ath_print(common, ATH_DBG_CALIBRATE,
101 "NFmid[%d] (%d) > MAX (%d), %s\n",
102 i, h[i].privNF, limit->max,
103 (cal->nfcal_interference ?
104 "not corrected (due to interference)" :
105 "correcting to MAX"));
106
107 /*
108 * Normally we limit the average noise floor by the
109 * hardware specific maximum here. However if we have
110 * encountered stuck beacons because of interference,
111 * we bypass this limit here in order to better deal
112 * with our environment.
113 */
114 if (!cal->nfcal_interference)
115 h[i].privNF = limit->max;
116 }
66 } 117 }
118
119 /*
120 * If the noise floor seems normal for all chains, assume that
121 * there is no significant interference in the environment anymore.
122 * Re-enable the enforcement of the NF maximum again.
123 */
124 if (!high_nf_mid)
125 cal->nfcal_interference = false;
67} 126}
68 127
69static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah, 128static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
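
The clamping rule this hunk implements can be stated compactly; a condensed, illustrative helper (the per-band limits come from ah->nf_2g/ah->nf_5g as above):

	#include <stdbool.h>
	#include <stdint.h>

	/* Enforce the hardware-specific NF maximum on the history median,
	 * except while the interference flag is set (stuck-beacon
	 * recovery), when the high reading is kept so the baseband can
	 * adapt to the noisy environment. */
	static int16_t nf_mid_clamp(int16_t nf_mid, int16_t nf_max,
				    bool interference)
	{
		if (nf_mid > nf_max && !interference)
			return nf_max;
		return nf_mid;
	}
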
@@ -104,19 +163,6 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
104 ah->cal_samples = 0; 163 ah->cal_samples = 0;
105} 164}
106 165
107static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
108 struct ath9k_channel *chan)
109{
110 struct ath_nf_limits *limit;
111
112 if (!chan || IS_CHAN_2GHZ(chan))
113 limit = &ah->nf_2g;
114 else
115 limit = &ah->nf_5g;
116
117 return limit->nominal;
118}
119
120/* This is done for the currently configured channel */ 166/* This is done for the currently configured channel */
121bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 167bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
122{ 168{
@@ -277,10 +323,10 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
277 "NF calibrated [%s] [chain %d] is %d\n", 323 "NF calibrated [%s] [chain %d] is %d\n",
278 (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]); 324 (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
279 325
280 if (nf[i] > limit->max) { 326 if (nf[i] > ATH9K_NF_TOO_HIGH) {
281 ath_print(common, ATH_DBG_CALIBRATE, 327 ath_print(common, ATH_DBG_CALIBRATE,
282 "NF[%d] (%d) > MAX (%d), correcting to MAX", 328 "NF[%d] (%d) > MAX (%d), correcting to MAX",
283 i, nf[i], limit->max); 329 i, nf[i], ATH9K_NF_TOO_HIGH);
284 nf[i] = limit->max; 330 nf[i] = limit->max;
285 } else if (nf[i] < limit->min) { 331 } else if (nf[i] < limit->min) {
286 ath_print(common, ATH_DBG_CALIBRATE, 332 ath_print(common, ATH_DBG_CALIBRATE,
@@ -326,7 +372,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
326 372
327 h = caldata->nfCalHist; 373 h = caldata->nfCalHist;
328 caldata->nfcal_pending = false; 374 caldata->nfcal_pending = false;
329 ath9k_hw_update_nfcal_hist_buffer(h, nfarray); 375 ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
330 caldata->rawNoiseFloor = h[0].privNF; 376 caldata->rawNoiseFloor = h[0].privNF;
331 return true; 377 return true;
332} 378}
@@ -361,3 +407,28 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
361 return ah->caldata->rawNoiseFloor; 407 return ah->caldata->rawNoiseFloor;
362} 408}
363EXPORT_SYMBOL(ath9k_hw_getchan_noise); 409EXPORT_SYMBOL(ath9k_hw_getchan_noise);
410
411void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
412{
413 struct ath9k_hw_cal_data *caldata = ah->caldata;
414
415 if (unlikely(!caldata))
416 return;
417
418 /*
419 * If beacons are stuck, the most likely cause is interference.
420 * Triggering a noise floor calibration at this point helps the
421 * hardware adapt to a noisy environment much faster.
422 * To ensure that we recover from stuck beacons quickly, let
423 * the baseband update the internal NF value itself, similar to
424 * what is being done after a full reset.
425 */
426 if (!caldata->nfcal_pending)
427 ath9k_hw_start_nfcal(ah, true);
428 else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
429 ath9k_hw_getnf(ah, ah->curchan);
430
431 caldata->nfcal_interference = true;
432}
433EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
434
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 0a304b3eeeb6..5b053a6260b2 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -113,6 +113,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
113bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan); 113bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan);
114void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, 114void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
115 struct ath9k_channel *chan); 115 struct ath9k_channel *chan);
116void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
116s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); 117s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
117void ath9k_hw_reset_calibration(struct ath_hw *ah, 118void ath9k_hw_reset_calibration(struct ath_hw *ah,
118 struct ath9k_cal_list *currCal); 119 struct ath9k_cal_list *currCal);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index c86f7d3593ab..2dab64bb23a8 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -46,12 +46,17 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
46 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 46 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
47 47
48 if (tx_info->control.hw_key) { 48 if (tx_info->control.hw_key) {
49 if (tx_info->control.hw_key->alg == ALG_WEP) 49 switch (tx_info->control.hw_key->cipher) {
50 case WLAN_CIPHER_SUITE_WEP40:
51 case WLAN_CIPHER_SUITE_WEP104:
50 return ATH9K_KEY_TYPE_WEP; 52 return ATH9K_KEY_TYPE_WEP;
51 else if (tx_info->control.hw_key->alg == ALG_TKIP) 53 case WLAN_CIPHER_SUITE_TKIP:
52 return ATH9K_KEY_TYPE_TKIP; 54 return ATH9K_KEY_TYPE_TKIP;
53 else if (tx_info->control.hw_key->alg == ALG_CCMP) 55 case WLAN_CIPHER_SUITE_CCMP:
54 return ATH9K_KEY_TYPE_AES; 56 return ATH9K_KEY_TYPE_AES;
57 default:
58 break;
59 }
55 } 60 }
56 61
57 return ATH9K_KEY_TYPE_CLEAR; 62 return ATH9K_KEY_TYPE_CLEAR;
@@ -212,11 +217,11 @@ static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
212} 217}
213 218
214static int ath_reserve_key_cache_slot(struct ath_common *common, 219static int ath_reserve_key_cache_slot(struct ath_common *common,
215 enum ieee80211_key_alg alg) 220 u32 cipher)
216{ 221{
217 int i; 222 int i;
218 223
219 if (alg == ALG_TKIP) 224 if (cipher == WLAN_CIPHER_SUITE_TKIP)
220 return ath_reserve_key_cache_slot_tkip(common); 225 return ath_reserve_key_cache_slot_tkip(common);
221 226
222 /* First, try to find slots that would not be available for TKIP. */ 227 /* First, try to find slots that would not be available for TKIP. */
@@ -293,14 +298,15 @@ int ath9k_cmn_key_config(struct ath_common *common,
293 298
294 memset(&hk, 0, sizeof(hk)); 299 memset(&hk, 0, sizeof(hk));
295 300
296 switch (key->alg) { 301 switch (key->cipher) {
297 case ALG_WEP: 302 case WLAN_CIPHER_SUITE_WEP40:
303 case WLAN_CIPHER_SUITE_WEP104:
298 hk.kv_type = ATH9K_CIPHER_WEP; 304 hk.kv_type = ATH9K_CIPHER_WEP;
299 break; 305 break;
300 case ALG_TKIP: 306 case WLAN_CIPHER_SUITE_TKIP:
301 hk.kv_type = ATH9K_CIPHER_TKIP; 307 hk.kv_type = ATH9K_CIPHER_TKIP;
302 break; 308 break;
303 case ALG_CCMP: 309 case WLAN_CIPHER_SUITE_CCMP:
304 hk.kv_type = ATH9K_CIPHER_AES_CCM; 310 hk.kv_type = ATH9K_CIPHER_AES_CCM;
305 break; 311 break;
306 default: 312 default:
@@ -316,7 +322,7 @@ int ath9k_cmn_key_config(struct ath_common *common,
316 memcpy(gmac, vif->addr, ETH_ALEN); 322 memcpy(gmac, vif->addr, ETH_ALEN);
317 gmac[0] |= 0x01; 323 gmac[0] |= 0x01;
318 mac = gmac; 324 mac = gmac;
319 idx = ath_reserve_key_cache_slot(common, key->alg); 325 idx = ath_reserve_key_cache_slot(common, key->cipher);
320 break; 326 break;
321 case NL80211_IFTYPE_ADHOC: 327 case NL80211_IFTYPE_ADHOC:
322 if (!sta) { 328 if (!sta) {
@@ -326,7 +332,7 @@ int ath9k_cmn_key_config(struct ath_common *common,
326 memcpy(gmac, sta->addr, ETH_ALEN); 332 memcpy(gmac, sta->addr, ETH_ALEN);
327 gmac[0] |= 0x01; 333 gmac[0] |= 0x01;
328 mac = gmac; 334 mac = gmac;
329 idx = ath_reserve_key_cache_slot(common, key->alg); 335 idx = ath_reserve_key_cache_slot(common, key->cipher);
330 break; 336 break;
331 default: 337 default:
332 idx = key->keyidx; 338 idx = key->keyidx;
@@ -348,13 +354,13 @@ int ath9k_cmn_key_config(struct ath_common *common,
348 return -EOPNOTSUPP; 354 return -EOPNOTSUPP;
349 mac = sta->addr; 355 mac = sta->addr;
350 356
351 idx = ath_reserve_key_cache_slot(common, key->alg); 357 idx = ath_reserve_key_cache_slot(common, key->cipher);
352 } 358 }
353 359
354 if (idx < 0) 360 if (idx < 0)
355 return -ENOSPC; /* no free key cache entries */ 361 return -ENOSPC; /* no free key cache entries */
356 362
357 if (key->alg == ALG_TKIP) 363 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
358 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac, 364 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
359 vif->type == NL80211_IFTYPE_AP); 365 vif->type == NL80211_IFTYPE_AP);
360 else 366 else
@@ -364,11 +370,15 @@ int ath9k_cmn_key_config(struct ath_common *common,
364 return -EIO; 370 return -EIO;
365 371
366 set_bit(idx, common->keymap); 372 set_bit(idx, common->keymap);
367 if (key->alg == ALG_TKIP) { 373 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
368 set_bit(idx + 64, common->keymap); 374 set_bit(idx + 64, common->keymap);
375 set_bit(idx, common->tkip_keymap);
376 set_bit(idx + 64, common->tkip_keymap);
369 if (common->splitmic) { 377 if (common->splitmic) {
370 set_bit(idx + 32, common->keymap); 378 set_bit(idx + 32, common->keymap);
371 set_bit(idx + 64 + 32, common->keymap); 379 set_bit(idx + 64 + 32, common->keymap);
380 set_bit(idx + 32, common->tkip_keymap);
381 set_bit(idx + 64 + 32, common->tkip_keymap);
372 } 382 }
373 } 383 }
374 384
@@ -389,14 +399,21 @@ void ath9k_cmn_key_delete(struct ath_common *common,
389 return; 399 return;
390 400
391 clear_bit(key->hw_key_idx, common->keymap); 401 clear_bit(key->hw_key_idx, common->keymap);
392 if (key->alg != ALG_TKIP) 402 if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
393 return; 403 return;
394 404
395 clear_bit(key->hw_key_idx + 64, common->keymap); 405 clear_bit(key->hw_key_idx + 64, common->keymap);
406
407 clear_bit(key->hw_key_idx, common->tkip_keymap);
408 clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
409
396 if (common->splitmic) { 410 if (common->splitmic) {
397 ath9k_hw_keyreset(ah, key->hw_key_idx + 32); 411 ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
398 clear_bit(key->hw_key_idx + 32, common->keymap); 412 clear_bit(key->hw_key_idx + 32, common->keymap);
399 clear_bit(key->hw_key_idx + 64 + 32, common->keymap); 413 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
414
415 clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
416 clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
400 } 417 }
401} 418}
402EXPORT_SYMBOL(ath9k_cmn_key_delete); 419EXPORT_SYMBOL(ath9k_cmn_key_delete);
@@ -414,6 +431,37 @@ int ath9k_cmn_count_streams(unsigned int chainmask, int max)
414} 431}
415EXPORT_SYMBOL(ath9k_cmn_count_streams); 432EXPORT_SYMBOL(ath9k_cmn_count_streams);
416 433
434/*
435 * Configures appropriate weight based on stomp type.
436 */
437void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
438 enum ath_stomp_type stomp_type)
439{
440 struct ath_hw *ah = common->ah;
441
442 switch (stomp_type) {
443 case ATH_BTCOEX_STOMP_ALL:
444 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
445 AR_STOMP_ALL_WLAN_WGHT);
446 break;
447 case ATH_BTCOEX_STOMP_LOW:
448 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
449 AR_STOMP_LOW_WLAN_WGHT);
450 break;
451 case ATH_BTCOEX_STOMP_NONE:
452 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
453 AR_STOMP_NONE_WLAN_WGHT);
454 break;
455 default:
456 ath_print(common, ATH_DBG_BTCOEX,
457 "Invalid Stomptype\n");
458 break;
459 }
460
461 ath9k_hw_btcoex_enable(ah);
462}
463EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
464
417static int __init ath9k_cmn_init(void) 465static int __init ath9k_cmn_init(void)
418{ 466{
419 return 0; 467 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 97809d39c73f..4aa4e7dbe4d2 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -52,6 +52,14 @@
52#define ATH_EP_RND(x, mul) \ 52#define ATH_EP_RND(x, mul) \
53 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 53 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
54 54
55/* Defines the BT AR_BT_COEX_WGHT used */
56enum ath_stomp_type {
57 ATH_BTCOEX_NO_STOMP,
58 ATH_BTCOEX_STOMP_ALL,
59 ATH_BTCOEX_STOMP_LOW,
60 ATH_BTCOEX_STOMP_NONE
61};
62
55int ath9k_cmn_padpos(__le16 frame_control); 63int ath9k_cmn_padpos(__le16 frame_control);
56int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 64int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
57void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw, 65void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
@@ -65,3 +73,5 @@ int ath9k_cmn_key_config(struct ath_common *common,
65void ath9k_cmn_key_delete(struct ath_common *common, 73void ath9k_cmn_key_delete(struct ath_common *common,
66 struct ieee80211_key_conf *key); 74 struct ieee80211_key_conf *key);
67int ath9k_cmn_count_streams(unsigned int chainmask, int max); 75int ath9k_cmn_count_streams(unsigned int chainmask, int max);
76void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
77 enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 3a8ee999da5d..4a9a68bba324 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -251,36 +251,6 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
251 } 251 }
252} 252}
253 253
254/*
255 * Configures appropriate weight based on stomp type.
256 */
257static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
258 enum ath_stomp_type stomp_type)
259{
260 struct ath_hw *ah = sc->sc_ah;
261
262 switch (stomp_type) {
263 case ATH_BTCOEX_STOMP_ALL:
264 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
265 AR_STOMP_ALL_WLAN_WGHT);
266 break;
267 case ATH_BTCOEX_STOMP_LOW:
268 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
269 AR_STOMP_LOW_WLAN_WGHT);
270 break;
271 case ATH_BTCOEX_STOMP_NONE:
272 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
273 AR_STOMP_NONE_WLAN_WGHT);
274 break;
275 default:
276 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
277 "Invalid Stomptype\n");
278 break;
279 }
280
281 ath9k_hw_btcoex_enable(ah);
282}
283
284static void ath9k_gen_timer_start(struct ath_hw *ah, 254static void ath9k_gen_timer_start(struct ath_hw *ah,
285 struct ath_gen_timer *timer, 255 struct ath_gen_timer *timer,
286 u32 timer_next, 256 u32 timer_next,
@@ -319,6 +289,7 @@ static void ath_btcoex_period_timer(unsigned long data)
319 struct ath_softc *sc = (struct ath_softc *) data; 289 struct ath_softc *sc = (struct ath_softc *) data;
320 struct ath_hw *ah = sc->sc_ah; 290 struct ath_hw *ah = sc->sc_ah;
321 struct ath_btcoex *btcoex = &sc->btcoex; 291 struct ath_btcoex *btcoex = &sc->btcoex;
292 struct ath_common *common = ath9k_hw_common(ah);
322 u32 timer_period; 293 u32 timer_period;
323 bool is_btscan; 294 bool is_btscan;
324 295
@@ -328,7 +299,7 @@ static void ath_btcoex_period_timer(unsigned long data)
328 299
329 spin_lock_bh(&btcoex->btcoex_lock); 300 spin_lock_bh(&btcoex->btcoex_lock);
330 301
331 ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL : 302 ath9k_cmn_btcoex_bt_stomp(common, is_btscan ? ATH_BTCOEX_STOMP_ALL :
332 btcoex->bt_stomp_type); 303 btcoex->bt_stomp_type);
333 304
334 spin_unlock_bh(&btcoex->btcoex_lock); 305 spin_unlock_bh(&btcoex->btcoex_lock);
@@ -359,17 +330,18 @@ static void ath_btcoex_no_stomp_timer(void *arg)
359 struct ath_softc *sc = (struct ath_softc *)arg; 330 struct ath_softc *sc = (struct ath_softc *)arg;
360 struct ath_hw *ah = sc->sc_ah; 331 struct ath_hw *ah = sc->sc_ah;
361 struct ath_btcoex *btcoex = &sc->btcoex; 332 struct ath_btcoex *btcoex = &sc->btcoex;
333 struct ath_common *common = ath9k_hw_common(ah);
362 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN; 334 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
363 335
364 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX, 336 ath_print(common, ATH_DBG_BTCOEX,
365 "no stomp timer running\n"); 337 "no stomp timer running\n");
366 338
367 spin_lock_bh(&btcoex->btcoex_lock); 339 spin_lock_bh(&btcoex->btcoex_lock);
368 340
369 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) 341 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
370 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE); 342 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_NONE);
371 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 343 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
372 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW); 344 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_LOW);
373 345
374 spin_unlock_bh(&btcoex->btcoex_lock); 346 spin_unlock_bh(&btcoex->btcoex_lock);
375} 347}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 17e7a9a367e7..495f18950ac9 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -920,7 +920,8 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
920 } 920 }
921 921
922 ret = ath9k_htc_hw_init(hif_dev->htc_handle, 922 ret = ath9k_htc_hw_init(hif_dev->htc_handle,
923 &hif_dev->udev->dev, hif_dev->device_id); 923 &hif_dev->udev->dev, hif_dev->device_id,
924 hif_dev->udev->product);
924 if (ret) { 925 if (ret) {
925 ret = -EINVAL; 926 ret = -EINVAL;
926 goto err_htc_hw_init; 927 goto err_htc_hw_init;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 43b9e21bc562..75ecf6a30d25 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -316,17 +316,32 @@ struct htc_beacon_config {
316 u8 dtim_count; 316 u8 dtim_count;
317}; 317};
318 318
319#define OP_INVALID BIT(0) 319struct ath_btcoex {
320#define OP_SCANNING BIT(1) 320 u32 bt_priority_cnt;
321#define OP_FULL_RESET BIT(2) 321 unsigned long bt_priority_time;
322#define OP_LED_ASSOCIATED BIT(3) 322 int bt_stomp_type; /* Types of BT stomping */
323#define OP_LED_ON BIT(4) 323 u32 btcoex_no_stomp;
324#define OP_PREAMBLE_SHORT BIT(5) 324 u32 btcoex_period;
325#define OP_PROTECT_ENABLE BIT(6) 325 u32 btscan_no_stomp;
326#define OP_ASSOCIATED BIT(7) 326};
327#define OP_ENABLE_BEACON BIT(8) 327
328#define OP_LED_DEINIT BIT(9) 328void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv);
329#define OP_UNPLUGGED BIT(10) 329void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv);
330void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
331
332#define OP_INVALID BIT(0)
333#define OP_SCANNING BIT(1)
334#define OP_FULL_RESET BIT(2)
335#define OP_LED_ASSOCIATED BIT(3)
336#define OP_LED_ON BIT(4)
337#define OP_PREAMBLE_SHORT BIT(5)
338#define OP_PROTECT_ENABLE BIT(6)
339#define OP_ASSOCIATED BIT(7)
340#define OP_ENABLE_BEACON BIT(8)
341#define OP_LED_DEINIT BIT(9)
342#define OP_UNPLUGGED BIT(10)
343#define OP_BT_PRIORITY_DETECTED BIT(11)
344#define OP_BT_SCAN BIT(12)
330 345
331struct ath9k_htc_priv { 346struct ath9k_htc_priv {
332 struct device *dev; 347 struct device *dev;
@@ -391,6 +406,9 @@ struct ath9k_htc_priv {
391 int cabq; 406 int cabq;
392 int hwq_map[WME_NUM_AC]; 407 int hwq_map[WME_NUM_AC];
393 408
409 struct ath_btcoex btcoex;
410 struct delayed_work coex_period_work;
411 struct delayed_work duty_cycle_work;
394#ifdef CONFIG_ATH9K_HTC_DEBUGFS 412#ifdef CONFIG_ATH9K_HTC_DEBUGFS
395 struct ath9k_debug debug; 413 struct ath9k_debug debug;
396#endif 414#endif
@@ -443,7 +461,7 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv);
443void ath9k_deinit_leds(struct ath9k_htc_priv *priv); 461void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
444 462
445int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, 463int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
446 u16 devid); 464 u16 devid, char *product);
447void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug); 465void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
448#ifdef CONFIG_PM 466#ifdef CONFIG_PM
449int ath9k_htc_resume(struct htc_target *htc_handle); 467int ath9k_htc_resume(struct htc_target *htc_handle);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
new file mode 100644
index 000000000000..50eec9a3b88c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -0,0 +1,134 @@
1#include "htc.h"
2
3/******************/
4/* BTCOEX */
5/******************/
6
7/*
8 * Detects if there is any priority bt traffic
9 */
10static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
11{
12 struct ath_btcoex *btcoex = &priv->btcoex;
13 struct ath_hw *ah = priv->ah;
14
15 if (ath9k_hw_gpio_get(ah, ah->btcoex_hw.btpriority_gpio))
16 btcoex->bt_priority_cnt++;
17
18 if (time_after(jiffies, btcoex->bt_priority_time +
19 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
20 priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
21 /* Detect if colocated bt started scanning */
22 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
23 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
24 "BT scan detected");
25 priv->op_flags |= (OP_BT_SCAN |
26 OP_BT_PRIORITY_DETECTED);
27 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
28 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
29 "BT priority traffic detected");
30 priv->op_flags |= OP_BT_PRIORITY_DETECTED;
31 }
32
33 btcoex->bt_priority_cnt = 0;
34 btcoex->bt_priority_time = jiffies;
35 }
36}
37
38/*
39 * This is the master bt coex work which runs for every
40 * 45ms, bt traffic will be given priority during 55% of this
41 * period while wlan gets remaining 45%
42 */
43static void ath_btcoex_period_work(struct work_struct *work)
44{
45 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
46 coex_period_work.work);
47 struct ath_btcoex *btcoex = &priv->btcoex;
48 struct ath_common *common = ath9k_hw_common(priv->ah);
49 u32 timer_period;
50 bool is_btscan;
51 int ret;
52 u8 cmd_rsp, aggr;
53
54 ath_detect_bt_priority(priv);
55
56 is_btscan = !!(priv->op_flags & OP_BT_SCAN);
57
58 aggr = priv->op_flags & OP_BT_PRIORITY_DETECTED;
59
60 WMI_CMD_BUF(WMI_AGGR_LIMIT_CMD, &aggr);
61
62 ath9k_cmn_btcoex_bt_stomp(common, is_btscan ? ATH_BTCOEX_STOMP_ALL :
63 btcoex->bt_stomp_type);
64
65 timer_period = is_btscan ? btcoex->btscan_no_stomp :
66 btcoex->btcoex_no_stomp;
67 ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
68 msecs_to_jiffies(timer_period));
69 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work,
70 msecs_to_jiffies(btcoex->btcoex_period));
71}
72
73/*
74 * Work to time slice between wlan and bt traffic and
75 * configure weight registers
76 */
77static void ath_btcoex_duty_cycle_work(struct work_struct *work)
78{
79 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
80 duty_cycle_work.work);
81 struct ath_hw *ah = priv->ah;
82 struct ath_btcoex *btcoex = &priv->btcoex;
83 struct ath_common *common = ath9k_hw_common(ah);
84 bool is_btscan = priv->op_flags & OP_BT_SCAN;
85
86 ath_print(common, ATH_DBG_BTCOEX,
87 "time slice work for bt and wlan\n");
88
89 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
90 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_NONE);
91 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
92 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_LOW);
93}
94
95void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
96{
97 struct ath_btcoex *btcoex = &priv->btcoex;
98
99 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
100 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
101 btcoex->btcoex_period / 100;
102 btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
103 btcoex->btcoex_period / 100;
104 INIT_DELAYED_WORK(&priv->coex_period_work, ath_btcoex_period_work);
105 INIT_DELAYED_WORK(&priv->duty_cycle_work, ath_btcoex_duty_cycle_work);
106}
107
108/*
109 * (Re)start btcoex work
110 */
111
112void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
113{
114 struct ath_btcoex *btcoex = &priv->btcoex;
115 struct ath_hw *ah = priv->ah;
116
117 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
118 "Starting btcoex work");
119
120 btcoex->bt_priority_cnt = 0;
121 btcoex->bt_priority_time = jiffies;
122 priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
123 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0);
124}
125
126
127/*
128 * Cancel btcoex and bt duty cycle work.
129 */
130void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
131{
132 cancel_delayed_work_sync(&priv->coex_period_work);
133 cancel_delayed_work_sync(&priv->duty_cycle_work);
134}
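
Plugging in the usual defaults makes the split computed in ath_htc_init_btcoex_work() above concrete; the values 45/55/90 for ATH_BTCOEX_DEF_BT_PERIOD, ATH_BTCOEX_DEF_DUTY_CYCLE and ATH_BTCOEX_BTSCAN_DUTY_CYCLE are assumptions here, not taken from this diff:

	#include <stdio.h>

	int main(void)
	{
		unsigned int period = 45, duty = 55, btscan_duty = 90;

		/* ms into each period after which the duty-cycle work
		 * drops to STOMP_NONE, handing the rest to BT: */
		printf("btcoex_no_stomp: %u ms\n",
		       (100 - duty) * period / 100);		/* 20 ms */
		printf("btscan_no_stomp: %u ms\n",
		       (100 - btscan_duty) * period / 100);	/* 4 ms */
		return 0;
	}
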
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 2d4279191d7a..695e2b088d10 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -41,6 +41,8 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
41 .max_power = 20, \ 41 .max_power = 20, \
42} 42}
43 43
44#define ATH_HTC_BTCOEX_PRODUCT_ID "wb193"
45
44static struct ieee80211_channel ath9k_2ghz_channels[] = { 46static struct ieee80211_channel ath9k_2ghz_channels[] = {
45 CHAN2G(2412, 0), /* Channel 1 */ 47 CHAN2G(2412, 0), /* Channel 1 */
46 CHAN2G(2417, 1), /* Channel 2 */ 48 CHAN2G(2417, 1), /* Channel 2 */
@@ -605,7 +607,31 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
605 priv->ah->opmode = NL80211_IFTYPE_STATION; 607 priv->ah->opmode = NL80211_IFTYPE_STATION;
606} 608}
607 609
608static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid) 610static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
611{
612 int qnum;
613
614 switch (priv->ah->btcoex_hw.scheme) {
615 case ATH_BTCOEX_CFG_NONE:
616 break;
617 case ATH_BTCOEX_CFG_3WIRE:
618 priv->ah->btcoex_hw.btactive_gpio = 7;
619 priv->ah->btcoex_hw.btpriority_gpio = 6;
620 priv->ah->btcoex_hw.wlanactive_gpio = 8;
621 priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
622 ath9k_hw_btcoex_init_3wire(priv->ah);
623 ath_htc_init_btcoex_work(priv);
624 qnum = priv->hwq_map[WME_AC_BE];
625 ath9k_hw_init_btcoex_hw(priv->ah, qnum);
626 break;
627 default:
628 WARN_ON(1);
629 break;
630 }
631}
632
633static int ath9k_init_priv(struct ath9k_htc_priv *priv,
634 u16 devid, char *product)
609{ 635{
610 struct ath_hw *ah = NULL; 636 struct ath_hw *ah = NULL;
611 struct ath_common *common; 637 struct ath_common *common;
@@ -672,6 +698,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
672 ath9k_init_channels_rates(priv); 698 ath9k_init_channels_rates(priv);
673 ath9k_init_misc(priv); 699 ath9k_init_misc(priv);
674 700
701 if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
702 ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
703 ath9k_init_btcoex(priv);
704 }
705
675 return 0; 706 return 0;
676 707
677err_queues: 708err_queues:
@@ -734,7 +765,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
734 SET_IEEE80211_PERM_ADDR(hw, common->macaddr); 765 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
735} 766}
736 767
737static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid) 768static int ath9k_init_device(struct ath9k_htc_priv *priv,
769 u16 devid, char *product)
738{ 770{
739 struct ieee80211_hw *hw = priv->hw; 771 struct ieee80211_hw *hw = priv->hw;
740 struct ath_common *common; 772 struct ath_common *common;
@@ -743,7 +775,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid)
743 struct ath_regulatory *reg; 775 struct ath_regulatory *reg;
744 776
745 /* Bring up device */ 777 /* Bring up device */
746 error = ath9k_init_priv(priv, devid); 778 error = ath9k_init_priv(priv, devid, product);
747 if (error != 0) 779 if (error != 0)
748 goto err_init; 780 goto err_init;
749 781
@@ -801,7 +833,7 @@ err_init:
801} 833}
802 834
803int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, 835int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
804 u16 devid) 836 u16 devid, char *product)
805{ 837{
806 struct ieee80211_hw *hw; 838 struct ieee80211_hw *hw;
807 struct ath9k_htc_priv *priv; 839 struct ath9k_htc_priv *priv;
@@ -835,7 +867,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
835 /* The device may have been unplugged earlier. */ 867 /* The device may have been unplugged earlier. */
836 priv->op_flags &= ~OP_UNPLUGGED; 868 priv->op_flags &= ~OP_UNPLUGGED;
837 869
838 ret = ath9k_init_device(priv, devid); 870 ret = ath9k_init_device(priv, devid, product);
839 if (ret) 871 if (ret)
840 goto err_init; 872 goto err_init;
841 873
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 7d09b4b17bbd..f4672073ac0a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1210,6 +1210,12 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
1210 1210
1211 ieee80211_wake_queues(hw); 1211 ieee80211_wake_queues(hw);
1212 1212
1213 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
1214 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1215 AR_STOMP_LOW_WLAN_WGHT);
1216 ath9k_hw_btcoex_enable(ah);
1217 ath_htc_resume_btcoex_work(priv);
1218 }
1213 mutex_unlock(&priv->mutex); 1219 mutex_unlock(&priv->mutex);
1214 1220
1215 return ret; 1221 return ret;
@@ -1233,7 +1239,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1233 1239
1234 /* Cancel all the running timers/work .. */ 1240 /* Cancel all the running timers/work .. */
1235 cancel_work_sync(&priv->ps_work); 1241 cancel_work_sync(&priv->ps_work);
1236 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1237 cancel_delayed_work_sync(&priv->ath9k_led_blink_work); 1242 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1238 ath9k_led_stop_brightness(priv); 1243 ath9k_led_stop_brightness(priv);
1239 1244
@@ -1254,6 +1259,12 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1254 "Monitor interface removed\n"); 1259 "Monitor interface removed\n");
1255 } 1260 }
1256 1261
1262 if (ah->btcoex_hw.enabled) {
1263 ath9k_hw_btcoex_disable(ah);
1264 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1265 ath_htc_cancel_btcoex_work(priv);
1266 }
1267
1257 ath9k_hw_phy_disable(ah); 1268 ath9k_hw_phy_disable(ah);
1258 ath9k_hw_disable(ah); 1269 ath9k_hw_disable(ah);
1259 ath9k_hw_configpcipowersave(ah, 1, 1); 1270 ath9k_hw_configpcipowersave(ah, 1, 1);
@@ -1585,9 +1596,10 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1585 key->hw_key_idx = ret; 1596 key->hw_key_idx = ret;
1586 /* push IV and Michael MIC generation to stack */ 1597 /* push IV and Michael MIC generation to stack */
1587 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1598 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1588 if (key->alg == ALG_TKIP) 1599 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
1589 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1600 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1590 if (priv->ah->sw_mgmt_crypto && key->alg == ALG_CCMP) 1601 if (priv->ah->sw_mgmt_crypto &&
1602 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1591 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 1603 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
1592 ret = 0; 1604 ret = 0;
1593 } 1605 }
@@ -1774,7 +1786,8 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1774 priv->op_flags |= OP_SCANNING; 1786 priv->op_flags |= OP_SCANNING;
1775 spin_unlock_bh(&priv->beacon_lock); 1787 spin_unlock_bh(&priv->beacon_lock);
1776 cancel_work_sync(&priv->ps_work); 1788 cancel_work_sync(&priv->ps_work);
1777 cancel_delayed_work_sync(&priv->ath9k_ani_work); 1789 if (priv->op_flags & OP_ASSOCIATED)
1790 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1778 mutex_unlock(&priv->mutex); 1791 mutex_unlock(&priv->mutex);
1779} 1792}
1780 1793
@@ -1788,9 +1801,10 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1788 priv->op_flags &= ~OP_SCANNING; 1801 priv->op_flags &= ~OP_SCANNING;
1789 spin_unlock_bh(&priv->beacon_lock); 1802 spin_unlock_bh(&priv->beacon_lock);
1790 priv->op_flags |= OP_FULL_RESET; 1803 priv->op_flags |= OP_FULL_RESET;
1791 if (priv->op_flags & OP_ASSOCIATED) 1804 if (priv->op_flags & OP_ASSOCIATED) {
1792 ath9k_htc_beacon_config(priv, priv->vif); 1805 ath9k_htc_beacon_config(priv, priv->vif);
1793 ath_start_ani(priv); 1806 ath_start_ani(priv);
1807 }
1794 ath9k_htc_ps_restore(priv); 1808 ath9k_htc_ps_restore(priv);
1795 mutex_unlock(&priv->mutex); 1809 mutex_unlock(&priv->mutex);
1796} 1810}
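
The htc_drv_main.c hunks above tie the ANI work to association state: it is cancelled before a software scan only when it could actually be pending (OP_ASSOCIATED), and restarted through ath_start_ani() once a scan completes while associated. A minimal, self-contained sketch of that schedule/cancel pairing using the generic workqueue API; all names below are illustrative, not the driver's:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct delayed_work ani_work;
	bool associated;
};

static void demo_ani_worker(struct work_struct *work)
{
	struct demo_priv *priv =
		container_of(work, struct demo_priv, ani_work.work);

	/* ... periodic noise-immunity tuning would run here ... */

	if (priv->associated)	/* self-rearm only while associated */
		schedule_delayed_work(&priv->ani_work,
				      msecs_to_jiffies(1000));
}

static void demo_sw_scan_start(struct demo_priv *priv)
{
	/* Cancel only when the work can actually be pending. */
	if (priv->associated)
		cancel_delayed_work_sync(&priv->ani_work);
}

static void demo_sw_scan_complete(struct demo_priv *priv)
{
	if (priv->associated)	/* mirror of the cancel above */
		schedule_delayed_work(&priv->ani_work,
				      msecs_to_jiffies(1000));
}
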
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 705c0f342e1c..861ec9269309 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -462,9 +462,9 @@ void ath9k_htc_hw_free(struct htc_target *htc)
462} 462}
463 463
464int ath9k_htc_hw_init(struct htc_target *target, 464int ath9k_htc_hw_init(struct htc_target *target,
465 struct device *dev, u16 devid) 465 struct device *dev, u16 devid, char *product)
466{ 466{
467 if (ath9k_htc_probe_device(target, dev, devid)) { 467 if (ath9k_htc_probe_device(target, dev, devid, product)) {
468 printk(KERN_ERR "Failed to initialize the device\n"); 468 printk(KERN_ERR "Failed to initialize the device\n");
469 return -ENODEV; 469 return -ENODEV;
470 } 470 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index faba6790328b..07b6509d5896 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -239,7 +239,7 @@ struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
239 struct device *dev); 239 struct device *dev);
240void ath9k_htc_hw_free(struct htc_target *htc); 240void ath9k_htc_hw_free(struct htc_target *htc);
241int ath9k_htc_hw_init(struct htc_target *target, 241int ath9k_htc_hw_init(struct htc_target *target,
242 struct device *dev, u16 devid); 242 struct device *dev, u16 devid, char *product);
243void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug); 243void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
244 244
245#endif /* HTC_HST_H */ 245#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 399f7c1283cd..1601dd439890 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -355,6 +355,7 @@ struct ath9k_hw_cal_data {
355 int16_t rawNoiseFloor; 355 int16_t rawNoiseFloor;
356 bool paprd_done; 356 bool paprd_done;
357 bool nfcal_pending; 357 bool nfcal_pending;
358 bool nfcal_interference;
358 u16 small_signal_gain[AR9300_MAX_CHAINS]; 359 u16 small_signal_gain[AR9300_MAX_CHAINS];
359 u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; 360 u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
360 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; 361 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 243c1775f343..3dbff8d07766 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -33,7 +33,7 @@ int modparam_nohwcrypt;
33module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 33module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
34MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 34MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35 35
36int led_blink = 1; 36int led_blink;
37module_param_named(blink, led_blink, int, 0444); 37module_param_named(blink, led_blink, int, 0444);
38MODULE_PARM_DESC(blink, "Enable LED blink on activity"); 38MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39 39
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index e955bb9d98cb..0b7d1253f0c0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -711,7 +711,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
711 rs->rs_phyerr = phyerr; 711 rs->rs_phyerr = phyerr;
712 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 712 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
713 rs->rs_status |= ATH9K_RXERR_DECRYPT; 713 rs->rs_status |= ATH9K_RXERR_DECRYPT;
714 else if (ads.ds_rxstatus8 & AR_MichaelErr) 714 else if ((ads.ds_rxstatus8 & AR_MichaelErr) &&
715 rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
715 rs->rs_status |= ATH9K_RXERR_MIC; 716 rs->rs_status |= ATH9K_RXERR_MIC;
716 } 717 }
717 718
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3caa32316e7b..1165f909ef04 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -226,9 +226,10 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
226 caldata = &aphy->caldata; 226 caldata = &aphy->caldata;
227 227
228 ath_print(common, ATH_DBG_CONFIG, 228 ath_print(common, ATH_DBG_CONFIG,
229 "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n", 229 "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
230 sc->sc_ah->curchan->channel, 230 sc->sc_ah->curchan->channel,
231 channel->center_freq, conf_is_ht40(conf)); 231 channel->center_freq, conf_is_ht40(conf),
232 fastcc);
232 233
233 spin_lock_bh(&sc->sc_resetlock); 234 spin_lock_bh(&sc->sc_resetlock);
234 235
@@ -395,7 +396,12 @@ void ath_ani_calibrate(unsigned long data)
395 bool shortcal = false; 396 bool shortcal = false;
396 bool aniflag = false; 397 bool aniflag = false;
397 unsigned int timestamp = jiffies_to_msecs(jiffies); 398 unsigned int timestamp = jiffies_to_msecs(jiffies);
398 u32 cal_interval, short_cal_interval; 399 u32 cal_interval, short_cal_interval, long_cal_interval;
400
401 if (ah->caldata && ah->caldata->nfcal_interference)
402 long_cal_interval = ATH_LONG_CALINTERVAL_INT;
403 else
404 long_cal_interval = ATH_LONG_CALINTERVAL;
399 405
400 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ? 406 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
401 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; 407 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
@@ -407,7 +413,7 @@ void ath_ani_calibrate(unsigned long data)
407 ath9k_ps_wakeup(sc); 413 ath9k_ps_wakeup(sc);
408 414
409 /* Long calibration runs independently of short calibration. */ 415 /* Long calibration runs independently of short calibration. */
410 if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { 416 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
411 longcal = true; 417 longcal = true;
412 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); 418 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
413 common->ani.longcal_timer = timestamp; 419 common->ani.longcal_timer = timestamp;
@@ -1776,9 +1782,10 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1776 key->hw_key_idx = ret; 1782 key->hw_key_idx = ret;
1777 /* push IV and Michael MIC generation to stack */ 1783 /* push IV and Michael MIC generation to stack */
1778 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1784 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1779 if (key->alg == ALG_TKIP) 1785 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
1780 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1786 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1781 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP) 1787 if (sc->sc_ah->sw_mgmt_crypto &&
1788 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1782 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 1789 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
1783 ret = 0; 1790 ret = 0;
1784 } 1791 }
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a3fc987ebab0..534a91bcc1d9 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -870,15 +870,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
870 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { 870 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
871 *decrypt_error = true; 871 *decrypt_error = true;
872 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { 872 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
873 if (ieee80211_is_ctl(fc)) 873 /*
874 /* 874 * The MIC error bit is only valid if the frame
875 * Sometimes, we get invalid 875 * is not a control frame or fragment, and it was
876 * MIC failures on valid control frames. 876 * decrypted using a valid TKIP key.
877 * Remove these mic errors. 877 */
878 */ 878 if (!ieee80211_is_ctl(fc) &&
879 rx_stats->rs_status &= ~ATH9K_RXERR_MIC; 879 !ieee80211_has_morefrags(fc) &&
880 else 880 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
881 test_bit(rx_stats->rs_keyix, common->tkip_keymap))
881 rxs->flag |= RX_FLAG_MMIC_ERROR; 882 rxs->flag |= RX_FLAG_MMIC_ERROR;
883 else
884 rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
882 } 885 }
883 /* 886 /*
884 * Reject error frames with the exception of 887 * Reject error frames with the exception of
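
The rewritten block above replaces the old "clear MIC errors on control frames" special case with a positive test: the Michael MIC error is honoured only for non-control, non-fragmented frames that were decrypted with a key the driver knows to be TKIP, and is cleared otherwise. A compact restatement of that predicate, with tkip_keymap standing in for the driver's per-key bitmap:

#include <linux/bitops.h>
#include <linux/ieee80211.h>

static bool demo_michael_error_valid(struct ieee80211_hdr *hdr, u8 keyix,
				     const unsigned long *tkip_keymap)
{
	__le16 fc = hdr->frame_control;

	return !ieee80211_is_ctl(fc) &&
	       !ieee80211_has_morefrags(fc) &&
	       !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
	       test_bit(keyix, tkip_keymap);
}
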
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 6260faa658a2..45fe9cac7971 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -85,6 +85,8 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
85 return "WMI_TGT_DETACH_CMDID"; 85 return "WMI_TGT_DETACH_CMDID";
86 case WMI_TGT_TXQ_ENABLE_CMDID: 86 case WMI_TGT_TXQ_ENABLE_CMDID:
87 return "WMI_TGT_TXQ_ENABLE_CMDID"; 87 return "WMI_TGT_TXQ_ENABLE_CMDID";
88 case WMI_AGGR_LIMIT_CMD:
89 return "WMI_AGGR_LIMIT_CMD";
88 } 90 }
89 91
90 return "Bogus"; 92 return "Bogus";
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 765db5faa2d3..a0bf857625df 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -71,6 +71,7 @@ enum wmi_cmd_id {
71 WMI_TX_AGGR_ENABLE_CMDID, 71 WMI_TX_AGGR_ENABLE_CMDID,
72 WMI_TGT_DETACH_CMDID, 72 WMI_TGT_DETACH_CMDID,
73 WMI_TGT_TXQ_ENABLE_CMDID, 73 WMI_TGT_TXQ_ENABLE_CMDID,
74 WMI_AGGR_LIMIT_CMD = 0x0026,
74}; 75};
75 76
76enum wmi_event_id { 77enum wmi_event_id {
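
Note the explicit = 0x0026 on the new entry while the earlier ones rely on implicit numbering: when an enum mirrors a firmware command set, a new member's wire value must match what the target firmware expects rather than whatever the compiler would assign next. Schematically, with illustrative names and values:

enum demo_fw_cmd_id {
	DEMO_ECHO_CMDID = 0x0001,	/* implicit increments from here */
	DEMO_REG_READ_CMDID,		/* 0x0002 */
	/* ... */
	DEMO_AGGR_LIMIT_CMD = 0x0026,	/* pinned to the firmware's value */
};
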
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4dda14e36227..457f07692ac7 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1407,22 +1407,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1407 return htype; 1407 return htype;
1408} 1408}
1409 1409
1410static int get_hw_crypto_keytype(struct sk_buff *skb)
1411{
1412 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1413
1414 if (tx_info->control.hw_key) {
1415 if (tx_info->control.hw_key->alg == ALG_WEP)
1416 return ATH9K_KEY_TYPE_WEP;
1417 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1418 return ATH9K_KEY_TYPE_TKIP;
1419 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1420 return ATH9K_KEY_TYPE_AES;
1421 }
1422
1423 return ATH9K_KEY_TYPE_CLEAR;
1424}
1425
1426static void assign_aggr_tid_seqno(struct sk_buff *skb, 1410static void assign_aggr_tid_seqno(struct sk_buff *skb,
1427 struct ath_buf *bf) 1411 struct ath_buf *bf)
1428{ 1412{
@@ -1661,7 +1645,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1661 bf->bf_state.bfs_paprd_timestamp = jiffies; 1645 bf->bf_state.bfs_paprd_timestamp = jiffies;
1662 bf->bf_flags = setup_tx_flags(skb, use_ldpc); 1646 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1663 1647
1664 bf->bf_keytype = get_hw_crypto_keytype(skb); 1648 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1665 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { 1649 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1666 bf->bf_frmlen += tx_info->control.hw_key->icv_len; 1650 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1667 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx; 1651 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
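
Here the driver-local get_hw_crypto_keytype() is dropped in favour of a shared ath9k_cmn_get_hw_crypto_keytype(); the common helper itself is not part of this diff, but under the cipher-suite API the mapping it performs would look roughly like the sketch below, which assumes ath9k's internal ATH9K_KEY_TYPE_* enum:

#include <linux/ieee80211.h>

static enum ath9k_key_type demo_hw_crypto_keytype(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:	/* old ALG_WEP covered both lengths */
		return ATH9K_KEY_TYPE_WEP;
	case WLAN_CIPHER_SUITE_TKIP:
		return ATH9K_KEY_TYPE_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return ATH9K_KEY_TYPE_AES;
	default:
		return ATH9K_KEY_TYPE_CLEAR;	/* no hardware key */
	}
}
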
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index 873bf526e11f..fd3a020682dc 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -36,6 +36,7 @@
36 * @ATH_DBG_PS: power save processing 36 * @ATH_DBG_PS: power save processing
37 * @ATH_DBG_HWTIMER: hardware timer handling 37 * @ATH_DBG_HWTIMER: hardware timer handling
38 * @ATH_DBG_BTCOEX: bluetooth coexistence 38 * @ATH_DBG_BTCOEX: bluetooth coexistence
39 * @ATH_DBG_BSTUCK: stuck beacons
39 * @ATH_DBG_ANY: enable all debugging 40 * @ATH_DBG_ANY: enable all debugging
40 * 41 *
41 * The debug level is used to control the amount and type of debugging output 42 * The debug level is used to control the amount and type of debugging output
@@ -60,6 +61,7 @@ enum ATH_DEBUG {
60 ATH_DBG_HWTIMER = 0x00001000, 61 ATH_DBG_HWTIMER = 0x00001000,
61 ATH_DBG_BTCOEX = 0x00002000, 62 ATH_DBG_BTCOEX = 0x00002000,
62 ATH_DBG_WMI = 0x00004000, 63 ATH_DBG_WMI = 0x00004000,
64 ATH_DBG_BSTUCK = 0x00008000,
63 ATH_DBG_ANY = 0xffffffff 65 ATH_DBG_ANY = 0xffffffff
64}; 66};
65 67
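
Each ATH_DBG_* category is a distinct bit so that categories can be OR-ed into a single module-parameter mask; the new ATH_DBG_BSTUCK simply takes the next free bit after ATH_DBG_WMI. Usage reduces to a masked test before printing, roughly (the demo_* names are illustrative):

#include <linux/kernel.h>

static unsigned int demo_debug_mask = ATH_DBG_BSTUCK | ATH_DBG_BTCOEX;

#define demo_dbg(mask, fmt, ...)				\
	do {							\
		if (demo_debug_mask & (mask))			\
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
	} while (0)

/* demo_dbg(ATH_DBG_BSTUCK, "beacon stuck, resetting\n"); */
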
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 20631ae2ddd7..a1186525c70d 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2280,6 +2280,7 @@ out:
2280 2280
2281static int b43_upload_microcode(struct b43_wldev *dev) 2281static int b43_upload_microcode(struct b43_wldev *dev)
2282{ 2282{
2283 struct wiphy *wiphy = dev->wl->hw->wiphy;
2283 const size_t hdr_len = sizeof(struct b43_fw_header); 2284 const size_t hdr_len = sizeof(struct b43_fw_header);
2284 const __be32 *data; 2285 const __be32 *data;
2285 unsigned int i, len; 2286 unsigned int i, len;
@@ -2405,6 +2406,10 @@ static int b43_upload_microcode(struct b43_wldev *dev)
2405 } 2406 }
2406 } 2407 }
2407 2408
2409 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u",
2410 dev->fw.rev, dev->fw.patch);
2411 wiphy->hw_version = dev->dev->id.coreid;
2412
2408 if (b43_is_old_txhdr_format(dev)) { 2413 if (b43_is_old_txhdr_format(dev)) {
2409 /* We're over the deadline, but we keep support for old fw 2414 /* We're over the deadline, but we keep support for old fw
2410 * until it turns out to be in major conflict with something new. */ 2415 * until it turns out to be in major conflict with something new. */
@@ -3754,17 +3759,17 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3754 } 3759 }
3755 3760
3756 err = -EINVAL; 3761 err = -EINVAL;
3757 switch (key->alg) { 3762 switch (key->cipher) {
3758 case ALG_WEP: 3763 case WLAN_CIPHER_SUITE_WEP40:
3759 if (key->keylen == WLAN_KEY_LEN_WEP40) 3764 algorithm = B43_SEC_ALGO_WEP40;
3760 algorithm = B43_SEC_ALGO_WEP40; 3765 break;
3761 else 3766 case WLAN_CIPHER_SUITE_WEP104:
3762 algorithm = B43_SEC_ALGO_WEP104; 3767 algorithm = B43_SEC_ALGO_WEP104;
3763 break; 3768 break;
3764 case ALG_TKIP: 3769 case WLAN_CIPHER_SUITE_TKIP:
3765 algorithm = B43_SEC_ALGO_TKIP; 3770 algorithm = B43_SEC_ALGO_TKIP;
3766 break; 3771 break;
3767 case ALG_CCMP: 3772 case WLAN_CIPHER_SUITE_CCMP:
3768 algorithm = B43_SEC_ALGO_AES; 3773 algorithm = B43_SEC_ALGO_AES;
3769 break; 3774 break;
3770 default: 3775 default:
@@ -4250,6 +4255,10 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
4250 B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED); 4255 B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED);
4251 if (!dev || b43_status(dev) != B43_STAT_INITIALIZED) 4256 if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
4252 return; 4257 return;
4258
4259 /* Unregister HW RNG driver */
4260 b43_rng_exit(dev->wl);
4261
4253 b43_set_status(dev, B43_STAT_UNINIT); 4262 b43_set_status(dev, B43_STAT_UNINIT);
4254 4263
4255 /* Stop the microcode PSM. */ 4264 /* Stop the microcode PSM. */
@@ -4379,6 +4388,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4379 4388
4380 b43_set_status(dev, B43_STAT_INITIALIZED); 4389 b43_set_status(dev, B43_STAT_INITIALIZED);
4381 4390
4391 /* Register HW RNG driver */
4392 b43_rng_init(dev->wl);
4393
4382out: 4394out:
4383 return err; 4395 return err;
4384 4396
@@ -4984,7 +4996,6 @@ static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id)
4984 if (err) 4996 if (err)
4985 goto err_one_core_detach; 4997 goto err_one_core_detach;
4986 b43_leds_register(wl->current_dev); 4998 b43_leds_register(wl->current_dev);
4987 b43_rng_init(wl);
4988 } 4999 }
4989 5000
4990 out: 5001 out:
@@ -5020,7 +5031,6 @@ static void b43_remove(struct ssb_device *dev)
5020 b43_one_core_detach(dev); 5031 b43_one_core_detach(dev);
5021 5032
5022 if (list_empty(&wl->devlist)) { 5033 if (list_empty(&wl->devlist)) {
5023 b43_rng_exit(wl);
5024 b43_leds_unregister(wl); 5034 b43_leds_unregister(wl);
5025 /* Last core on the chip unregistered. 5035 /* Last core on the chip unregistered.
5026 * We can destroy common struct b43_wl. 5036 * We can destroy common struct b43_wl.
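
The last four b43 hunks move b43_rng_init()/b43_rng_exit() out of probe/remove and into the wireless-core init/exit path, so the RNG is only registered while the core that backs it is actually up. The underlying pattern, register last during bring-up and unregister first during teardown so that exit mirrors init, sketched with illustrative names:

struct demo_dev;			/* stand-in for driver state */

int demo_hw_bringup(struct demo_dev *dev);
void demo_hw_shutdown(struct demo_dev *dev);
void demo_rng_register(struct demo_dev *dev);
void demo_rng_unregister(struct demo_dev *dev);

static int demo_core_init(struct demo_dev *dev)
{
	int err = demo_hw_bringup(dev);
	if (err)
		return err;

	demo_rng_register(dev);		/* last: hardware is now usable */
	return 0;
}

static void demo_core_exit(struct demo_dev *dev)
{
	demo_rng_unregister(dev);	/* first: mirror of init order */
	demo_hw_shutdown(dev);
}
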
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 5a725703770c..2466c0a52e5d 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -893,7 +893,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
893} 893}
894 894
895/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ 895/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
896static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev) 896static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
897{ 897{
898 struct b43_phy_n *nphy = dev->phy.n; 898 struct b43_phy_n *nphy = dev->phy.n;
899 u8 i, j; 899 u8 i, j;
@@ -1094,11 +1094,12 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
1094 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); 1094 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
1095 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); 1095 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
1096 1096
1097 b43_nphy_gain_crtl_workarounds(dev); 1097 b43_nphy_gain_ctrl_workarounds(dev);
1098 1098
1099 if (dev->phy.rev < 2) { 1099 if (dev->phy.rev < 2) {
1100 if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2) 1100 if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
1101 ; /*TODO: b43_mhf(dev, 2, 0x0010, 0x0010, 3);*/ 1101 b43_hf_write(dev, b43_hf_read(dev) |
1102 B43_HF_MLADVW);
1102 } else if (dev->phy.rev == 2) { 1103 } else if (dev->phy.rev == 2) {
1103 b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0); 1104 b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
1104 b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0); 1105 b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
@@ -1182,7 +1183,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
1182 len = bw << 1; 1183 len = bw << 1;
1183 } 1184 }
1184 1185
1185 samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL); 1186 samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL);
1186 if (!samples) { 1187 if (!samples) {
1187 b43err(dev->wl, "allocation for samples generation failed\n"); 1188 b43err(dev->wl, "allocation for samples generation failed\n");
1188 return 0; 1189 return 0;
@@ -3073,6 +3074,57 @@ static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
3073 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug); 3074 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
3074} 3075}
3075 3076
3077/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
3078static void b43_nphy_mac_phy_clock_set(struct b43_wldev *dev, bool on)
3079{
3080 u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
3081 if (on)
3082 tmslow |= SSB_TMSLOW_PHYCLK;
3083 else
3084 tmslow &= ~SSB_TMSLOW_PHYCLK;
3085 ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
3086}
3087
3088/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */
3089static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
3090{
3091 struct b43_phy *phy = &dev->phy;
3092 struct b43_phy_n *nphy = phy->n;
3093 u16 buf[16];
3094
3095 nphy->phyrxchain = mask;
3096
3097 if (0 /* FIXME clk */)
3098 return;
3099
3100 b43_mac_suspend(dev);
3101
3102 if (nphy->hang_avoid)
3103 b43_nphy_stay_in_carrier_search(dev, true);
3104
3105 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
3106 (mask & 0x3) << B43_NPHY_RFSEQCA_RXEN_SHIFT);
3107
3108 if ((mask & 0x3) != 0x3) {
3109 b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 1);
3110 if (dev->phy.rev >= 3) {
3111 /* TODO */
3112 }
3113 } else {
3114 b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 0x1E);
3115 if (dev->phy.rev >= 3) {
3116 /* TODO */
3117 }
3118 }
3119
3120 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
3121
3122 if (nphy->hang_avoid)
3123 b43_nphy_stay_in_carrier_search(dev, false);
3124
3125 b43_mac_enable(dev);
3126}
3127
3076/* 3128/*
3077 * Init N-PHY 3129 * Init N-PHY
3078 * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N 3130 * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
@@ -3173,7 +3225,7 @@ int b43_phy_initn(struct b43_wldev *dev)
3173 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA); 3225 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
3174 b43_nphy_bmac_clock_fgc(dev, 0); 3226 b43_nphy_bmac_clock_fgc(dev, 0);
3175 3227
3176 /* TODO N PHY MAC PHY Clock Set with argument 1 */ 3228 b43_nphy_mac_phy_clock_set(dev, true);
3177 3229
3178 b43_nphy_pa_override(dev, false); 3230 b43_nphy_pa_override(dev, false);
3179 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); 3231 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
@@ -3199,7 +3251,7 @@ int b43_phy_initn(struct b43_wldev *dev)
3199 } 3251 }
3200 3252
3201 if (nphy->phyrxchain != 3) 3253 if (nphy->phyrxchain != 3)
3202 ;/* TODO N PHY RX Core Set State with phyrxchain as argument */ 3254 b43_nphy_set_rx_core_state(dev, nphy->phyrxchain);
3203 if (nphy->mphase_cal_phase_id > 0) 3255 if (nphy->mphase_cal_phase_id > 0)
3204 ;/* TODO PHY Periodic Calibration Multi-Phase Restart */ 3256 ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
3205 3257
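
Besides filling in two long-standing TODOs (the MAC/PHY clock gate and the RX core state), the phy_n.c changes swap an open-coded kzalloc(len * size) for kcalloc(len, size, ...). The distinction matters whenever the element count is not a small constant: kcalloc() fails cleanly on multiplication overflow instead of returning a too-short buffer that is overrun later. A minimal sketch:

#include <linux/slab.h>

/* Illustrative helper: allocate n zeroed elements, overflow-safely. */
static void *demo_alloc_elems(size_t n, size_t elem_size)
{
	/*
	 * kcalloc(n, size, flags) returns NULL when n * size would
	 * overflow; kzalloc(n * size, flags) silently wraps instead.
	 */
	return kcalloc(n, elem_size, GFP_KERNEL);
}
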
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1713f5f7a58b..67f18ecdb3bf 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1623,6 +1623,7 @@ error:
1623 1623
1624static int b43legacy_upload_microcode(struct b43legacy_wldev *dev) 1624static int b43legacy_upload_microcode(struct b43legacy_wldev *dev)
1625{ 1625{
1626 struct wiphy *wiphy = dev->wl->hw->wiphy;
1626 const size_t hdr_len = sizeof(struct b43legacy_fw_header); 1627 const size_t hdr_len = sizeof(struct b43legacy_fw_header);
1627 const __be32 *data; 1628 const __be32 *data;
1628 unsigned int i; 1629 unsigned int i;
@@ -1732,6 +1733,10 @@ static int b43legacy_upload_microcode(struct b43legacy_wldev *dev)
1732 dev->fw.rev = fwrev; 1733 dev->fw.rev = fwrev;
1733 dev->fw.patch = fwpatch; 1734 dev->fw.patch = fwpatch;
1734 1735
1736 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u",
1737 dev->fw.rev, dev->fw.patch);
1738 wiphy->hw_version = dev->dev->id.coreid;
1739
1735 return 0; 1740 return 0;
1736 1741
1737error: 1742error:
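
Both b43 and b43legacy now publish the parsed firmware revision through cfg80211 once the microcode upload succeeds. wiphy->fw_version and wiphy->hw_version are the real cfg80211 fields (surfaced to userspace via ethtool -i and nl80211); the wrapper below is only an illustration of filling them:

#include <net/cfg80211.h>

static void demo_report_versions(struct wiphy *wiphy,
				 u16 fw_rev, u16 fw_patch, u32 coreid)
{
	/* fw_version is a fixed-size array; snprintf() truncates safely. */
	snprintf(wiphy->fw_version, sizeof(wiphy->fw_version),
		 "%u.%u", fw_rev, fw_patch);
	wiphy->hw_version = coreid;
}
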
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index a85e43a8d758..6038633ef361 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1696,7 +1696,7 @@ static int prism2_request_scan(struct net_device *dev)
1696 hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE, 1696 hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
1697 HFA384X_ROAMING_FIRMWARE); 1697 HFA384X_ROAMING_FIRMWARE);
1698 1698
1699 return 0; 1699 return ret;
1700} 1700}
1701 1701
1702#else /* !PRISM2_NO_STATION_MODES */ 1702#else /* !PRISM2_NO_STATION_MODES */
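
The one-line hostap change above fixes a classic swallowed-error bug: ret was computed from the scan request, but a hardcoded return 0 hid any failure from the caller. The corrected shape, with illustrative names:

#include <linux/errno.h>

struct demo_dev;

int demo_issue_scan(struct demo_dev *dev);
void demo_restore_roaming(struct demo_dev *dev);

static int demo_request_scan(struct demo_dev *dev)
{
	int ret = 0;

	if (demo_issue_scan(dev))
		ret = -EINVAL;

	/* cleanup that must run on success and failure alike */
	demo_restore_roaming(dev);

	return ret;	/* not 'return 0;': propagate the failure */
}
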
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 996e9d7d7586..61915f371416 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1921,9 +1921,9 @@ static int ipw2100_net_init(struct net_device *dev)
1921 1921
1922 bg_band->band = IEEE80211_BAND_2GHZ; 1922 bg_band->band = IEEE80211_BAND_2GHZ;
1923 bg_band->n_channels = geo->bg_channels; 1923 bg_band->n_channels = geo->bg_channels;
1924 bg_band->channels = 1924 bg_band->channels = kcalloc(geo->bg_channels,
1925 kzalloc(geo->bg_channels * 1925 sizeof(struct ieee80211_channel),
1926 sizeof(struct ieee80211_channel), GFP_KERNEL); 1926 GFP_KERNEL);
1927 if (!bg_band->channels) { 1927 if (!bg_band->channels) {
1928 ipw2100_down(priv); 1928 ipw2100_down(priv);
1929 return -ENOMEM; 1929 return -ENOMEM;
@@ -3056,9 +3056,9 @@ static void ipw2100_tx_send_commands(struct ipw2100_priv *priv)
3056 3056
3057 packet = list_entry(element, struct ipw2100_tx_packet, list); 3057 packet = list_entry(element, struct ipw2100_tx_packet, list);
3058 3058
3059 IPW_DEBUG_TX("using TBD at virt=%p, phys=%p\n", 3059 IPW_DEBUG_TX("using TBD at virt=%p, phys=%04X\n",
3060 &txq->drv[txq->next], 3060 &txq->drv[txq->next],
3061 (void *)(txq->nic + txq->next * 3061 (u32) (txq->nic + txq->next *
3062 sizeof(struct ipw2100_bd))); 3062 sizeof(struct ipw2100_bd)));
3063 3063
3064 packet->index = txq->next; 3064 packet->index = txq->next;
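
The ipw2100 hunks make two independent fixes: the channel array moves to kcalloc() (same overflow rationale as in phy_n.c above), and a debug print stops passing an integer bus offset to %p, which expects a genuine kernel pointer. The corrected print, restated with illustrative names:

#include <linux/kernel.h>

struct demo_bd { u32 host_addr; u32 buf_length; };	/* illustrative */

static void demo_debug_tbd(struct demo_bd *ring, u32 nic_base, u32 next)
{
	/* virt is a real pointer (%p); phys is integer arithmetic on a
	 * device-relative base, so print it as a plain hex value. */
	pr_debug("using TBD at virt=%p, phys=%04X\n",
		 &ring[next],
		 (u32)(nic_base + next * sizeof(struct demo_bd)));
}
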
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index cb2552a6777c..0f2508384c75 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11467,9 +11467,9 @@ static int ipw_net_init(struct net_device *dev)
11467 11467
11468 bg_band->band = IEEE80211_BAND_2GHZ; 11468 bg_band->band = IEEE80211_BAND_2GHZ;
11469 bg_band->n_channels = geo->bg_channels; 11469 bg_band->n_channels = geo->bg_channels;
11470 bg_band->channels = 11470 bg_band->channels = kcalloc(geo->bg_channels,
11471 kzalloc(geo->bg_channels * 11471 sizeof(struct ieee80211_channel),
11472 sizeof(struct ieee80211_channel), GFP_KERNEL); 11472 GFP_KERNEL);
11473 /* translate geo->bg to bg_band.channels */ 11473 /* translate geo->bg to bg_band.channels */
11474 for (i = 0; i < geo->bg_channels; i++) { 11474 for (i = 0; i < geo->bg_channels; i++) {
11475 bg_band->channels[i].band = IEEE80211_BAND_2GHZ; 11475 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
@@ -11502,9 +11502,9 @@ static int ipw_net_init(struct net_device *dev)
11502 11502
11503 a_band->band = IEEE80211_BAND_5GHZ; 11503 a_band->band = IEEE80211_BAND_5GHZ;
11504 a_band->n_channels = geo->a_channels; 11504 a_band->n_channels = geo->a_channels;
11505 a_band->channels = 11505 a_band->channels = kcalloc(geo->a_channels,
11506 kzalloc(geo->a_channels * 11506 sizeof(struct ieee80211_channel),
11507 sizeof(struct ieee80211_channel), GFP_KERNEL); 11507 GFP_KERNEL);
11508 /* translate geo->bg to a_band.channels */ 11508 /* translate geo->bg to a_band.channels */
11509 for (i = 0; i < geo->a_channels; i++) { 11509 for (i = 0; i < geo->a_channels; i++) {
11510 a_band->channels[i].band = IEEE80211_BAND_2GHZ; 11510 a_band->channels[i].band = IEEE80211_BAND_2GHZ;
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index a51e4da1bdfc..b82364258dc5 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -3,6 +3,9 @@ config IWLWIFI
3 depends on PCI && MAC80211 3 depends on PCI && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 5
6menu "Debugging Options"
7 depends on IWLWIFI
8
6config IWLWIFI_DEBUG 9config IWLWIFI_DEBUG
7 bool "Enable full debugging output in iwlagn and iwl3945 drivers" 10 bool "Enable full debugging output in iwlagn and iwl3945 drivers"
8 depends on IWLWIFI 11 depends on IWLWIFI
@@ -36,6 +39,12 @@ config IWLWIFI_DEBUGFS
36 is a low-impact option that allows getting insight into the 39 is a low-impact option that allows getting insight into the
37 driver's state at runtime. 40 driver's state at runtime.
38 41
42config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
43 bool "Experimental uCode support"
44 depends on IWLWIFI && IWLWIFI_DEBUG
45 ---help---
46 Enable use of experimental ucode for testing and debugging.
47
39config IWLWIFI_DEVICE_TRACING 48config IWLWIFI_DEVICE_TRACING
40 bool "iwlwifi device access tracing" 49 bool "iwlwifi device access tracing"
41 depends on IWLWIFI 50 depends on IWLWIFI
@@ -53,6 +62,7 @@ config IWLWIFI_DEVICE_TRACING
53 62
54 If unsure, say Y so we can help you better when problems 63 If unsure, say Y so we can help you better when problems
55 occur. 64 occur.
65endmenu
56 66
57config IWLAGN 67config IWLAGN
58 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)" 68 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 728bb858ba97..493163925a45 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o 12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o 13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o 14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
15iwlagn-objs += iwl-agn-tt.o
15iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o 16iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
16 17
17iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 18iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 0b779a41a142..674fb93ae17f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -130,7 +130,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
130 sizeof(struct iwlagn_scd_bc_tbl); 130 sizeof(struct iwlagn_scd_bc_tbl);
131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
132 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 132 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
133 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; 133 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
134 134
135 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 135 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
136 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 136 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -217,7 +217,7 @@ static struct iwl_lib_ops iwl1000_lib = {
217 .set_ct_kill = iwl1000_set_ct_threshold, 217 .set_ct_kill = iwl1000_set_ct_threshold,
218 }, 218 },
219 .manage_ibss_station = iwlagn_manage_ibss_station, 219 .manage_ibss_station = iwlagn_manage_ibss_station,
220 .update_bcast_station = iwl_update_bcast_station, 220 .update_bcast_stations = iwl_update_bcast_stations,
221 .debugfs_ops = { 221 .debugfs_ops = {
222 .rx_stats_read = iwl_ucode_rx_stats_read, 222 .rx_stats_read = iwl_ucode_rx_stats_read,
223 .tx_stats_read = iwl_ucode_tx_stats_read, 223 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -229,6 +229,11 @@ static struct iwl_lib_ops iwl1000_lib = {
229 .check_ack_health = iwl_good_ack_health, 229 .check_ack_health = iwl_good_ack_health,
230 .txfifo_flush = iwlagn_txfifo_flush, 230 .txfifo_flush = iwlagn_txfifo_flush,
231 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 231 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
232 .tt_ops = {
233 .lower_power_detection = iwl_tt_is_low_power_state,
234 .tt_power_mode = iwl_tt_current_power_mode,
235 .ct_kill_check = iwl_check_for_ct_kill,
236 }
232}; 237};
233 238
234static const struct iwl_ops iwl1000_ops = { 239static const struct iwl_ops iwl1000_ops = {
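
The iwl-1000.c hunks show the ops-table style iwlwifi uses for per-device behaviour: a static struct of function pointers, here gaining a nested tt_ops block for thermal throttling alongside the renamed update_bcast_stations hook. The pattern, reduced to a sketch with illustrative names:

#include <linux/types.h>

struct demo_priv;

struct demo_tt_ops {		/* optional thermal-throttling hooks */
	bool (*lower_power_detection)(struct demo_priv *priv);
	u8 (*tt_power_mode)(struct demo_priv *priv);
	bool (*ct_kill_check)(struct demo_priv *priv);
};

struct demo_lib_ops {
	int (*set_hw_params)(struct demo_priv *priv);
	struct demo_tt_ops tt_ops;	/* left zeroed if unsupported */
};

/* Core code feature-tests the hooks instead of knowing device types: */
static inline bool demo_tt_supported(const struct demo_lib_ops *lib)
{
	return lib->tt_ops.ct_kill_check != NULL;
}
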
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 7c731a793632..65b5834da28c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -62,7 +62,7 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions. 64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
65 * Please use iwl-3945-commands.h for uCode API definitions. 65 * Please use iwl-commands.h for uCode API definitions.
66 * Please use iwl-3945.h for driver implementation definitions. 66 * Please use iwl-3945.h for driver implementation definitions.
67 */ 67 */
68 68
@@ -226,6 +226,7 @@ struct iwl3945_eeprom {
226 226
227/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ 227/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
228#define IWL39_NUM_QUEUES 5 228#define IWL39_NUM_QUEUES 5
229#define IWL39_CMD_QUEUE_NUM 4
229 230
230#define IWL_DEFAULT_TX_RETRY 15 231#define IWL_DEFAULT_TX_RETRY 15
231 232
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 8e84a08ff951..d707f5bb1a8b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -343,7 +343,7 @@ void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 s
343 int i; 343 int i;
344 344
345 IWL_DEBUG_INFO(priv, "enter\n"); 345 IWL_DEBUG_INFO(priv, "enter\n");
346 if (sta_id == priv->hw_params.bcast_sta_id) 346 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
347 goto out; 347 goto out;
348 348
349 psta = (struct iwl3945_sta_priv *) sta->drv_priv; 349 psta = (struct iwl3945_sta_priv *) sta->drv_priv;
@@ -932,7 +932,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
932 932
933 rcu_read_lock(); 933 rcu_read_lock();
934 934
935 sta = ieee80211_find_sta(priv->vif, 935 sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
936 priv->stations[sta_id].sta.sta.addr); 936 priv->stations[sta_id].sta.sta.addr);
937 if (!sta) { 937 if (!sta) {
938 IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n"); 938 IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
@@ -949,7 +949,8 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
949 switch (priv->band) { 949 switch (priv->band) {
950 case IEEE80211_BAND_2GHZ: 950 case IEEE80211_BAND_2GHZ:
951 /* TODO: this always does G, not a regression */ 951 /* TODO: this always does G, not a regression */
952 if (priv->active_rxon.flags & RXON_FLG_TGG_PROTECT_MSK) { 952 if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
953 RXON_FLG_TGG_PROTECT_MSK) {
953 rs_sta->tgg = 1; 954 rs_sta->tgg = 1;
954 rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot; 955 rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
955 } else 956 } else
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8ccfcd08218d..5d09686c3389 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -245,7 +245,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
245 break; 245 break;
246 case IEEE80211_BAND_2GHZ: 246 case IEEE80211_BAND_2GHZ:
247 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 247 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
248 iwl_is_associated(priv)) { 248 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
249 if (rate == IWL_RATE_11M_INDEX) 249 if (rate == IWL_RATE_11M_INDEX)
250 next_rate = IWL_RATE_5M_INDEX; 250 next_rate = IWL_RATE_5M_INDEX;
251 } 251 }
@@ -273,7 +273,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
273 struct iwl_queue *q = &txq->q; 273 struct iwl_queue *q = &txq->q;
274 struct iwl_tx_info *tx_info; 274 struct iwl_tx_info *tx_info;
275 275
276 BUG_ON(txq_id == IWL_CMD_QUEUE_NUM); 276 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
277 277
278 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; 278 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
279 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 279 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
@@ -285,7 +285,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
285 } 285 }
286 286
287 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 287 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
288 (txq_id != IWL_CMD_QUEUE_NUM) && 288 (txq_id != IWL39_CMD_QUEUE_NUM) &&
289 priv->mac80211_registered) 289 priv->mac80211_registered)
290 iwl_wake_queue(priv, txq_id); 290 iwl_wake_queue(priv, txq_id);
291} 291}
@@ -760,7 +760,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
760 data_retry_limit = IWL_DEFAULT_TX_RETRY; 760 data_retry_limit = IWL_DEFAULT_TX_RETRY;
761 tx_cmd->data_retry_limit = data_retry_limit; 761 tx_cmd->data_retry_limit = data_retry_limit;
762 762
763 if (tx_id >= IWL_CMD_QUEUE_NUM) 763 if (tx_id >= IWL39_CMD_QUEUE_NUM)
764 rts_retry_limit = 3; 764 rts_retry_limit = 3;
765 else 765 else
766 rts_retry_limit = 7; 766 rts_retry_limit = 7;
@@ -909,7 +909,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
909 909
910 /* Tx queue(s) */ 910 /* Tx queue(s) */
911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
912 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 912 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
913 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 913 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
914 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 914 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
915 txq_id); 915 txq_id);
@@ -1072,7 +1072,7 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1072 if (priv->txq) 1072 if (priv->txq)
1073 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 1073 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1074 txq_id++) 1074 txq_id++)
1075 if (txq_id == IWL_CMD_QUEUE_NUM) 1075 if (txq_id == IWL39_CMD_QUEUE_NUM)
1076 iwl_cmd_queue_free(priv); 1076 iwl_cmd_queue_free(priv);
1077 else 1077 else
1078 iwl_tx_queue_free(priv, txq_id); 1078 iwl_tx_queue_free(priv, txq_id);
@@ -1439,17 +1439,18 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1439 int rate_idx, i; 1439 int rate_idx, i;
1440 const struct iwl_channel_info *ch_info = NULL; 1440 const struct iwl_channel_info *ch_info = NULL;
1441 struct iwl3945_txpowertable_cmd txpower = { 1441 struct iwl3945_txpowertable_cmd txpower = {
1442 .channel = priv->active_rxon.channel, 1442 .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
1443 }; 1443 };
1444 u16 chan;
1445
1446 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1444 1447
1445 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1448 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1446 ch_info = iwl_get_channel_info(priv, 1449 ch_info = iwl_get_channel_info(priv, priv->band, chan);
1447 priv->band,
1448 le16_to_cpu(priv->active_rxon.channel));
1449 if (!ch_info) { 1450 if (!ch_info) {
1450 IWL_ERR(priv, 1451 IWL_ERR(priv,
1451 "Failed to get channel info for channel %d [%d]\n", 1452 "Failed to get channel info for channel %d [%d]\n",
1452 le16_to_cpu(priv->active_rxon.channel), priv->band); 1453 chan, priv->band);
1453 return -EINVAL; 1454 return -EINVAL;
1454 } 1455 }
1455 1456
@@ -1710,7 +1711,8 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1710 return 0; 1711 return 0;
1711} 1712}
1712 1713
1713static int iwl3945_send_rxon_assoc(struct iwl_priv *priv) 1714static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1715 struct iwl_rxon_context *ctx)
1714{ 1716{
1715 int rc = 0; 1717 int rc = 0;
1716 struct iwl_rx_packet *pkt; 1718 struct iwl_rx_packet *pkt;
@@ -1721,8 +1723,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1721 .flags = CMD_WANT_SKB, 1723 .flags = CMD_WANT_SKB,
1722 .data = &rxon_assoc, 1724 .data = &rxon_assoc,
1723 }; 1725 };
1724 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; 1726 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
1725 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; 1727 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
1726 1728
1727 if ((rxon1->flags == rxon2->flags) && 1729 if ((rxon1->flags == rxon2->flags) &&
1728 (rxon1->filter_flags == rxon2->filter_flags) && 1730 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1732,10 +1734,10 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1732 return 0; 1734 return 0;
1733 } 1735 }
1734 1736
1735 rxon_assoc.flags = priv->staging_rxon.flags; 1737 rxon_assoc.flags = ctx->staging.flags;
1736 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; 1738 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1737 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; 1739 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1738 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; 1740 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1739 rxon_assoc.reserved = 0; 1741 rxon_assoc.reserved = 0;
1740 1742
1741 rc = iwl_send_cmd_sync(priv, &cmd); 1743 rc = iwl_send_cmd_sync(priv, &cmd);
@@ -1761,14 +1763,14 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1761 * function correctly transitions out of the RXON_ASSOC_MSK state if 1763 * function correctly transitions out of the RXON_ASSOC_MSK state if
1762 * a HW tune is required based on the RXON structure changes. 1764 * a HW tune is required based on the RXON structure changes.
1763 */ 1765 */
1764static int iwl3945_commit_rxon(struct iwl_priv *priv) 1766static int iwl3945_commit_rxon(struct iwl_priv *priv,
1767 struct iwl_rxon_context *ctx)
1765{ 1768{
1766 /* cast away the const for active_rxon in this function */ 1769 /* cast away the const for active_rxon in this function */
1767 struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 1770 struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
1768 struct iwl3945_rxon_cmd *staging_rxon = (void *)&priv->staging_rxon; 1771 struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
1769 int rc = 0; 1772 int rc = 0;
1770 bool new_assoc = 1773 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1771 !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
1772 1774
1773 if (!iwl_is_alive(priv)) 1775 if (!iwl_is_alive(priv))
1774 return -1; 1776 return -1;
@@ -1781,7 +1783,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1781 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); 1783 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1782 staging_rxon->flags |= iwl3945_get_antenna_flags(priv); 1784 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1783 1785
1784 rc = iwl_check_rxon_cmd(priv); 1786 rc = iwl_check_rxon_cmd(priv, ctx);
1785 if (rc) { 1787 if (rc) {
1786 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 1788 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1787 return -EINVAL; 1789 return -EINVAL;
@@ -1790,8 +1792,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1790 /* If we don't need to send a full RXON, we can use 1792 /* If we don't need to send a full RXON, we can use
1791 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter 1793 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1792 * and other flags for the current radio configuration. */ 1794 * and other flags for the current radio configuration. */
1793 if (!iwl_full_rxon_required(priv)) { 1795 if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) {
1794 rc = iwl_send_rxon_assoc(priv); 1796 rc = iwl_send_rxon_assoc(priv,
1797 &priv->contexts[IWL_RXON_CTX_BSS]);
1795 if (rc) { 1798 if (rc) {
1796 IWL_ERR(priv, "Error setting RXON_ASSOC " 1799 IWL_ERR(priv, "Error setting RXON_ASSOC "
1797 "configuration (%d).\n", rc); 1800 "configuration (%d).\n", rc);
@@ -1807,7 +1810,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1807 * an RXON_ASSOC and the new config wants the associated mask enabled, 1810 * an RXON_ASSOC and the new config wants the associated mask enabled,
1808 * we must clear the associated from the active configuration 1811 * we must clear the associated from the active configuration
1809 * before we apply the new config */ 1812 * before we apply the new config */
1810 if (iwl_is_associated(priv) && new_assoc) { 1813 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
1811 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 1814 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1812 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1815 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1813 1816
@@ -1819,7 +1822,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1819 active_rxon->reserved5 = 0; 1822 active_rxon->reserved5 = 0;
1820 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1823 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1821 sizeof(struct iwl3945_rxon_cmd), 1824 sizeof(struct iwl3945_rxon_cmd),
1822 &priv->active_rxon); 1825 &priv->contexts[IWL_RXON_CTX_BSS].active);
1823 1826
1824 /* If the mask clearing failed then we set 1827 /* If the mask clearing failed then we set
1825 * active_rxon back to what it was previously */ 1828 * active_rxon back to what it was previously */
@@ -1829,8 +1832,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1829 "configuration (%d).\n", rc); 1832 "configuration (%d).\n", rc);
1830 return rc; 1833 return rc;
1831 } 1834 }
1832 iwl_clear_ucode_stations(priv); 1835 iwl_clear_ucode_stations(priv,
1833 iwl_restore_stations(priv); 1836 &priv->contexts[IWL_RXON_CTX_BSS]);
1837 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1834 } 1838 }
1835 1839
1836 IWL_DEBUG_INFO(priv, "Sending RXON\n" 1840 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1848,7 +1852,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1848 staging_rxon->reserved4 = 0; 1852 staging_rxon->reserved4 = 0;
1849 staging_rxon->reserved5 = 0; 1853 staging_rxon->reserved5 = 0;
1850 1854
1851 iwl_set_rxon_hwcrypto(priv, !iwl3945_mod_params.sw_crypto); 1855 iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
1852 1856
1853 /* Apply the new configuration */ 1857 /* Apply the new configuration */
1854 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1858 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
@@ -1862,8 +1866,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1862 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); 1866 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1863 1867
1864 if (!new_assoc) { 1868 if (!new_assoc) {
1865 iwl_clear_ucode_stations(priv); 1869 iwl_clear_ucode_stations(priv,
1866 iwl_restore_stations(priv); 1870 &priv->contexts[IWL_RXON_CTX_BSS]);
1871 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1867 } 1872 }
1868 1873
1869 /* If we issue a new RXON command which required a tune then we must 1874 /* If we issue a new RXON command which required a tune then we must
@@ -2302,8 +2307,10 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2302 int ret; 2307 int ret;
2303 2308
2304 if (add) { 2309 if (add) {
2305 ret = iwl_add_bssid_station(priv, vif->bss_conf.bssid, false, 2310 ret = iwl_add_bssid_station(
2306 &vif_priv->ibss_bssid_sta_id); 2311 priv, &priv->contexts[IWL_RXON_CTX_BSS],
2312 vif->bss_conf.bssid, false,
2313 &vif_priv->ibss_bssid_sta_id);
2307 if (ret) 2314 if (ret)
2308 return ret; 2315 return ret;
2309 2316
@@ -2366,7 +2373,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2366 * 1M CCK rates */ 2373 * 1M CCK rates */
2367 2374
2368 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 2375 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2369 iwl_is_associated(priv)) { 2376 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
2370 2377
2371 index = IWL_FIRST_CCK_RATE; 2378 index = IWL_FIRST_CCK_RATE;
2372 for (i = IWL_RATE_6M_INDEX_TABLE; 2379 for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2421,7 +2428,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2421 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 2428 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2422 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 2429 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2423 priv->hw_params.max_stations = IWL3945_STATION_COUNT; 2430 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
2424 priv->hw_params.bcast_sta_id = IWL3945_BROADCAST_ID; 2431 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
2432
2433 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2425 2434
2426 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; 2435 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2427 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL; 2436 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
@@ -2439,7 +2448,8 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
2439 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u; 2448 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
2440 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); 2449 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2441 2450
2442 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; 2451 tx_beacon_cmd->tx.sta_id =
2452 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2443 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2453 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2444 2454
2445 frame_size = iwl3945_fill_beacon_frame(priv, 2455 frame_size = iwl3945_fill_beacon_frame(priv,
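
Nearly every iwl-3945.c hunk above is mechanical fallout from one refactor: RXON state that used to sit directly in iwl_priv (staging_rxon, active_rxon, bcast_sta_id, the vif pointer) now lives in a per-context structure indexed by IWL_RXON_CTX_BSS, and helpers such as send_rxon_assoc and commit_rxon gain an explicit context argument so that additional contexts can be driven concurrently later. The shape of that change, sketched with illustrative types:

#include <net/mac80211.h>

struct demo_rxon_cmd { __le16 channel; __le32 flags; /* ... */ };

enum demo_rxon_ctx_id { DEMO_RXON_CTX_BSS, NUM_DEMO_RXON_CTX };

struct demo_rxon_context {
	struct demo_rxon_cmd active;	/* configuration the uCode runs */
	struct demo_rxon_cmd staging;	/* configuration to commit next */
	u8 bcast_sta_id;
	struct ieee80211_vif *vif;
};

struct demo_priv {
	struct demo_rxon_context contexts[NUM_DEMO_RXON_CTX];
};

/*
 * Call sites change from priv->active_rxon.channel to
 * priv->contexts[DEMO_RXON_CTX_BSS].active.channel, and each helper
 * takes a struct demo_rxon_context *ctx instead of digging into priv.
 */
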
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d92b72909233..1d6a46d4db59 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -347,7 +347,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
347 struct iwl_chain_noise_data *data = &(priv->chain_noise_data); 347 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
348 348
349 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 349 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
350 iwl_is_associated(priv)) { 350 iwl_is_any_associated(priv)) {
351 struct iwl_calib_diff_gain_cmd cmd; 351 struct iwl_calib_diff_gain_cmd cmd;
352 352
353 /* clear data for chain noise calibration algorithm */ 353 /* clear data for chain noise calibration algorithm */
@@ -576,7 +576,7 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
576 /* Activate all Tx DMA/FIFO channels */ 576 /* Activate all Tx DMA/FIFO channels */
577 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6)); 577 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
578 578
579 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 579 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
580 580
581 /* make sure all queue are not stopped */ 581 /* make sure all queue are not stopped */
582 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); 582 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
@@ -587,6 +587,7 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
587 priv->txq_ctx_active_msk = 0; 587 priv->txq_ctx_active_msk = 0;
588 /* Map each Tx/cmd queue to its corresponding fifo */ 588 /* Map each Tx/cmd queue to its corresponding fifo */
589 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); 589 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
590
590 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 591 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
591 int ac = default_queue_to_tx_fifo[i]; 592 int ac = default_queue_to_tx_fifo[i];
592 593
@@ -656,7 +657,7 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
656 sizeof(struct iwl4965_scd_bc_tbl); 657 sizeof(struct iwl4965_scd_bc_tbl);
657 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 658 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
658 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 659 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
659 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 660 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
660 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE; 661 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
661 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE; 662 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
662 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE; 663 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
@@ -1374,6 +1375,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1374 u8 band = 0; 1375 u8 band = 0;
1375 bool is_ht40 = false; 1376 bool is_ht40 = false;
1376 u8 ctrl_chan_high = 0; 1377 u8 ctrl_chan_high = 0;
1378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1377 1379
1378 if (test_bit(STATUS_SCANNING, &priv->status)) { 1380 if (test_bit(STATUS_SCANNING, &priv->status)) {
1379 /* If this gets hit a lot, switch it to a BUG() and catch 1381 /* If this gets hit a lot, switch it to a BUG() and catch
@@ -1385,17 +1387,16 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1385 1387
1386 band = priv->band == IEEE80211_BAND_2GHZ; 1388 band = priv->band == IEEE80211_BAND_2GHZ;
1387 1389
1388 is_ht40 = is_ht40_channel(priv->active_rxon.flags); 1390 is_ht40 = is_ht40_channel(ctx->active.flags);
1389 1391
1390 if (is_ht40 && 1392 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1391 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1392 ctrl_chan_high = 1; 1393 ctrl_chan_high = 1;
1393 1394
1394 cmd.band = band; 1395 cmd.band = band;
1395 cmd.channel = priv->active_rxon.channel; 1396 cmd.channel = ctx->active.channel;
1396 1397
1397 ret = iwl4965_fill_txpower_tbl(priv, band, 1398 ret = iwl4965_fill_txpower_tbl(priv, band,
1398 le16_to_cpu(priv->active_rxon.channel), 1399 le16_to_cpu(ctx->active.channel),
1399 is_ht40, ctrl_chan_high, &cmd.tx_power); 1400 is_ht40, ctrl_chan_high, &cmd.tx_power);
1400 if (ret) 1401 if (ret)
1401 goto out; 1402 goto out;
@@ -1406,12 +1407,13 @@ out:
1406 return ret; 1407 return ret;
1407} 1408}
1408 1409
1409static int iwl4965_send_rxon_assoc(struct iwl_priv *priv) 1410static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1411 struct iwl_rxon_context *ctx)
1410{ 1412{
1411 int ret = 0; 1413 int ret = 0;
1412 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1414 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1413 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; 1415 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
1414 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; 1416 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
1415 1417
1416 if ((rxon1->flags == rxon2->flags) && 1418 if ((rxon1->flags == rxon2->flags) &&
1417 (rxon1->filter_flags == rxon2->filter_flags) && 1419 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1426,16 +1428,16 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
 		return 0;
 	}
 
-	rxon_assoc.flags = priv->staging_rxon.flags;
-	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
-	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
-	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
+	rxon_assoc.flags = ctx->staging.flags;
+	rxon_assoc.filter_flags = ctx->staging.filter_flags;
+	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
 	rxon_assoc.reserved = 0;
 	rxon_assoc.ofdm_ht_single_stream_basic_rates =
-		priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
+		ctx->staging.ofdm_ht_single_stream_basic_rates;
 	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
-		priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
-	rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
+		ctx->staging.ofdm_ht_dual_stream_basic_rates;
+	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
 
 	ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
 				     sizeof(rxon_assoc), &rxon_assoc, NULL);
@@ -1448,6 +1450,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
 static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
 				     struct ieee80211_channel_switch *ch_switch)
 {
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	int rc;
 	u8 band = 0;
 	bool is_ht40 = false;
@@ -1458,22 +1461,22 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
 	u16 ch;
 	u32 tsf_low;
 	u8 switch_count;
-	u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
-	struct ieee80211_vif *vif = priv->vif;
+	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+	struct ieee80211_vif *vif = ctx->vif;
 	band = priv->band == IEEE80211_BAND_2GHZ;
 
-	is_ht40 = is_ht40_channel(priv->staging_rxon.flags);
+	is_ht40 = is_ht40_channel(ctx->staging.flags);
 
 	if (is_ht40 &&
-	    (priv->staging_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+	    (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
 		ctrl_chan_high = 1;
 
 	cmd.band = band;
 	cmd.expect_beacon = 0;
-	ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+	ch = ch_switch->channel->hw_value;
 	cmd.channel = cpu_to_le16(ch);
-	cmd.rxon_flags = priv->staging_rxon.flags;
-	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
+	cmd.rxon_flags = ctx->staging.flags;
+	cmd.rxon_filter_flags = ctx->staging.filter_flags;
 	switch_count = ch_switch->count;
 	tsf_low = ch_switch->timestamp & 0x0ffffffff;
 	/*
@@ -1508,7 +1511,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
 		cmd.expect_beacon = is_channel_radar(ch_info);
 	else {
 		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			priv->active_rxon.channel, ch);
+			ctx->active.channel, ch);
 		return -EFAULT;
 	}
1514 1517
@@ -2007,7 +2010,7 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
 		start = IWL_STA_ID;
 
 	if (is_broadcast_ether_addr(addr))
-		return priv->hw_params.bcast_sta_id;
+		return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 	for (i = start; i < priv->hw_params.max_stations; i++)
@@ -2280,7 +2283,7 @@ static struct iwl_lib_ops iwl4965_lib = {
 		.set_ct_kill = iwl4965_set_ct_threshold,
 	},
 	.manage_ibss_station = iwlagn_manage_ibss_station,
-	.update_bcast_station = iwl_update_bcast_station,
+	.update_bcast_stations = iwl_update_bcast_stations,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
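
The substitutions running through this file (priv->staging_rxon and priv->active_rxon becoming ctx->staging and ctx->active) are the core of the series: all RXON state moves into per-context structures so a BSS and a PAN interface can coexist on one device. A minimal userspace sketch of the shape of that lookup; the types and fields here are illustrative only, the real struct iwl_rxon_context carries far more state:

	/* Sketch of the per-context RXON split; field names follow the
	 * diff, sizes and types are assumptions for illustration. */
	#include <stdio.h>

	enum iwl_rxon_context_id {
		IWL_RXON_CTX_BSS,
		IWL_RXON_CTX_PAN,
		NUM_IWL_RXON_CTX
	};

	struct iwl_rxon_cmd { unsigned int channel; unsigned int flags; };

	struct iwl_rxon_context {
		struct iwl_rxon_cmd staging;	/* next RXON to be committed */
		struct iwl_rxon_cmd active;	/* RXON currently in the uCode */
		unsigned char bcast_sta_id;	/* now per-context, see above */
	};

	struct iwl_priv { struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; };

	int main(void)
	{
		struct iwl_priv priv = { { { { 0 } } } };
		/* what used to be priv->active_rxon is now per-context: */
		struct iwl_rxon_context *ctx = &priv.contexts[IWL_RXON_CTX_BSS];

		ctx->staging.channel = 36;
		ctx->active = ctx->staging;	/* "commit" the staged RXON */
		printf("BSS context active channel: %u\n", ctx->active.channel);
		return 0;
	}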
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 146e6431ae95..3975e45e7500 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -62,7 +62,7 @@
  *****************************************************************************/
 /*
  * Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
- * Use iwl-5000-commands.h for uCode API definitions.
+ * Use iwl-commands.h for uCode API definitions.
  */
 
 #ifndef __iwl_5000_hw_h__
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 48bdcd8d2e94..1dbb1246c083 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -180,7 +180,7 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
 			sizeof(struct iwlagn_scd_bc_tbl);
 	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
 	priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
-	priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
+	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
 
 	priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
 	priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -227,7 +227,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
 			sizeof(struct iwlagn_scd_bc_tbl);
 	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
 	priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
-	priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
+	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
 
 	priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
 	priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -275,14 +275,19 @@ static void iwl5150_temperature(struct iwl_priv *priv)
 static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
 				     struct ieee80211_channel_switch *ch_switch)
 {
+	/*
+	 * MULTI-FIXME
+	 * See iwl_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	struct iwl5000_channel_switch_cmd cmd;
 	const struct iwl_channel_info *ch_info;
 	u32 switch_time_in_usec, ucode_switch_time;
 	u16 ch;
 	u32 tsf_low;
 	u8 switch_count;
-	u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
-	struct ieee80211_vif *vif = priv->vif;
+	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+	struct ieee80211_vif *vif = ctx->vif;
 	struct iwl_host_cmd hcmd = {
 		.id = REPLY_CHANNEL_SWITCH,
 		.len = sizeof(cmd),
@@ -291,12 +296,12 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
 	};
 
 	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
-	ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+	ch = ch_switch->channel->hw_value;
 	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
-		      priv->active_rxon.channel, ch);
+		      ctx->active.channel, ch);
 	cmd.channel = cpu_to_le16(ch);
-	cmd.rxon_flags = priv->staging_rxon.flags;
-	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
+	cmd.rxon_flags = ctx->staging.flags;
+	cmd.rxon_filter_flags = ctx->staging.filter_flags;
 	switch_count = ch_switch->count;
 	tsf_low = ch_switch->timestamp & 0x0ffffffff;
 	/*
@@ -331,7 +336,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
 		cmd.expect_beacon = is_channel_radar(ch_info);
 	else {
 		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			priv->active_rxon.channel, ch);
+			ctx->active.channel, ch);
 		return -EFAULT;
 	}
 	priv->switch_rxon.channel = cmd.channel;
@@ -393,7 +398,7 @@ static struct iwl_lib_ops iwl5000_lib = {
 		.set_ct_kill = iwl5000_set_ct_threshold,
 	},
 	.manage_ibss_station = iwlagn_manage_ibss_station,
-	.update_bcast_station = iwl_update_bcast_station,
+	.update_bcast_stations = iwl_update_bcast_stations,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -405,6 +410,11 @@ static struct iwl_lib_ops iwl5000_lib = {
 	.check_ack_health = iwl_good_ack_health,
 	.txfifo_flush = iwlagn_txfifo_flush,
 	.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+	.tt_ops = {
+		.lower_power_detection = iwl_tt_is_low_power_state,
+		.tt_power_mode = iwl_tt_current_power_mode,
+		.ct_kill_check = iwl_check_for_ct_kill,
+	}
 };
 
 static struct iwl_lib_ops iwl5150_lib = {
@@ -459,7 +469,7 @@ static struct iwl_lib_ops iwl5150_lib = {
 		.set_ct_kill = iwl5150_set_ct_threshold,
 	},
 	.manage_ibss_station = iwlagn_manage_ibss_station,
-	.update_bcast_station = iwl_update_bcast_station,
+	.update_bcast_stations = iwl_update_bcast_stations,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -470,6 +480,11 @@ static struct iwl_lib_ops iwl5150_lib = {
 	.check_ack_health = iwl_good_ack_health,
 	.txfifo_flush = iwlagn_txfifo_flush,
 	.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+	.tt_ops = {
+		.lower_power_detection = iwl_tt_is_low_power_state,
+		.tt_power_mode = iwl_tt_current_power_mode,
+		.ct_kill_check = iwl_check_for_ct_kill,
+	}
 };
 
 static const struct iwl_ops iwl5000_ops = {
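
The new tt_ops bundle exposes the thermal-throttle queries (iwl_tt_is_low_power_state, iwl_tt_current_power_mode, iwl_check_for_ct_kill) through the per-device lib ops instead of direct calls, so devices without advanced throttling can simply leave the hooks unset. A self-contained sketch of that optional function-pointer dispatch; the struct layout here is assumed, not the driver's real definition:

	/* Mock of the tt_ops dispatch pattern; names follow the diff. */
	#include <stdbool.h>
	#include <stdio.h>

	struct iwl_priv;	/* opaque, as seen by the caller */

	struct iwl_tt_ops {
		bool (*lower_power_detection)(struct iwl_priv *priv);
		unsigned char (*tt_power_mode)(struct iwl_priv *priv);
		bool (*ct_kill_check)(struct iwl_priv *priv);
	};

	static bool fake_low_power(struct iwl_priv *priv)
	{
		(void)priv;
		return false;	/* pretend the device is not throttled */
	}

	int main(void)
	{
		struct iwl_tt_ops tt_ops = {
			.lower_power_detection = fake_low_power,
		};

		/* core code checks the hook exists before calling it */
		if (tt_ops.lower_power_detection)
			printf("low power: %d\n",
			       tt_ops.lower_power_detection(NULL));
		return 0;
	}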
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index ddba39999997..47891e16a758 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -62,7 +62,7 @@
  *****************************************************************************/
 /*
  * Please use this file (iwl-6000-hw.h) only for hardware-related definitions.
- * Use iwl-5000-commands.h for uCode API definitions.
+ * Use iwl-commands.h for uCode API definitions.
  */
 
 #ifndef __iwl_6000_hw_h__
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index cee06b968de8..2fdba088bd27 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -52,7 +52,7 @@
 /* Highest firmware API version supported */
 #define IWL6000_UCODE_API_MAX 4
 #define IWL6050_UCODE_API_MAX 4
-#define IWL6000G2_UCODE_API_MAX 4
+#define IWL6000G2_UCODE_API_MAX 5
 
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
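
Raising IWL6000G2_UCODE_API_MAX from 4 to 5 lets the driver ask for the newer firmware image first and fall back to older ones. A sketch of the usual iwlwifi request loop from api_max down to api_min; the fw_name_pre string below is an assumption for illustration, not copied from the driver:

	#include <stdio.h>

	int main(void)
	{
		const char *fw_name_pre = "iwlwifi-6000g2b-";	/* assumed */
		int api_max = 5, api_min = 4;
		char buf[64];

		/* try the newest API first, stop at the first image found */
		for (int api = api_max; api >= api_min; api--) {
			snprintf(buf, sizeof(buf), "%s%d.ucode",
				 fw_name_pre, api);
			printf("would request: %s\n", buf);
		}
		return 0;
	}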
@@ -161,7 +161,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
 			sizeof(struct iwlagn_scd_bc_tbl);
 	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
 	priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
-	priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
+	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
 
 	priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
 	priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
@@ -198,14 +198,19 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
 static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
 				     struct ieee80211_channel_switch *ch_switch)
 {
+	/*
+	 * MULTI-FIXME
+	 * See iwl_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	struct iwl6000_channel_switch_cmd cmd;
 	const struct iwl_channel_info *ch_info;
 	u32 switch_time_in_usec, ucode_switch_time;
 	u16 ch;
 	u32 tsf_low;
 	u8 switch_count;
-	u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
-	struct ieee80211_vif *vif = priv->vif;
+	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+	struct ieee80211_vif *vif = ctx->vif;
 	struct iwl_host_cmd hcmd = {
 		.id = REPLY_CHANNEL_SWITCH,
 		.len = sizeof(cmd),
@@ -214,12 +219,12 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
 	};
 
 	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
-	ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+	ch = ch_switch->channel->hw_value;
 	IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
-		      priv->active_rxon.channel, ch);
+		      ctx->active.channel, ch);
 	cmd.channel = cpu_to_le16(ch);
-	cmd.rxon_flags = priv->staging_rxon.flags;
-	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
+	cmd.rxon_flags = ctx->staging.flags;
+	cmd.rxon_filter_flags = ctx->staging.filter_flags;
 	switch_count = ch_switch->count;
 	tsf_low = ch_switch->timestamp & 0x0ffffffff;
 	/*
@@ -254,7 +259,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
 		cmd.expect_beacon = is_channel_radar(ch_info);
 	else {
 		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			priv->active_rxon.channel, ch);
+			ctx->active.channel, ch);
 		return -EFAULT;
 	}
 	priv->switch_rxon.channel = cmd.channel;
@@ -318,7 +323,7 @@ static struct iwl_lib_ops iwl6000_lib = {
 		.set_calib_version = iwl6000_set_calib_version,
 	},
 	.manage_ibss_station = iwlagn_manage_ibss_station,
-	.update_bcast_station = iwl_update_bcast_station,
+	.update_bcast_stations = iwl_update_bcast_stations,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -330,6 +335,86 @@ static struct iwl_lib_ops iwl6000_lib = {
 	.check_ack_health = iwl_good_ack_health,
 	.txfifo_flush = iwlagn_txfifo_flush,
 	.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+	.tt_ops = {
+		.lower_power_detection = iwl_tt_is_low_power_state,
+		.tt_power_mode = iwl_tt_current_power_mode,
+		.ct_kill_check = iwl_check_for_ct_kill,
+	}
+};
+
+static struct iwl_lib_ops iwl6000g2b_lib = {
+	.set_hw_params = iwl6000_hw_set_hw_params,
+	.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+	.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+	.txq_set_sched = iwlagn_txq_set_sched,
+	.txq_agg_enable = iwlagn_txq_agg_enable,
+	.txq_agg_disable = iwlagn_txq_agg_disable,
+	.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+	.txq_free_tfd = iwl_hw_txq_free_tfd,
+	.txq_init = iwl_hw_tx_queue_init,
+	.rx_handler_setup = iwlagn_bt_rx_handler_setup,
+	.setup_deferred_work = iwlagn_bt_setup_deferred_work,
+	.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
+	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+	.load_ucode = iwlagn_load_ucode,
+	.dump_nic_event_log = iwl_dump_nic_event_log,
+	.dump_nic_error_log = iwl_dump_nic_error_log,
+	.dump_csr = iwl_dump_csr,
+	.dump_fh = iwl_dump_fh,
+	.init_alive_start = iwlagn_init_alive_start,
+	.alive_notify = iwlagn_alive_notify,
+	.send_tx_power = iwlagn_send_tx_power,
+	.update_chain_flags = iwl_update_chain_flags,
+	.set_channel_switch = iwl6000_hw_channel_switch,
+	.apm_ops = {
+		.init = iwl_apm_init,
+		.stop = iwl_apm_stop,
+		.config = iwl6000_nic_config,
+		.set_pwr_src = iwl_set_pwr_src,
+	},
+	.eeprom_ops = {
+		.regulatory_bands = {
+			EEPROM_REG_BAND_1_CHANNELS,
+			EEPROM_REG_BAND_2_CHANNELS,
+			EEPROM_REG_BAND_3_CHANNELS,
+			EEPROM_REG_BAND_4_CHANNELS,
+			EEPROM_REG_BAND_5_CHANNELS,
+			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+			EEPROM_REG_BAND_52_HT40_CHANNELS
+		},
+		.verify_signature = iwlcore_eeprom_verify_signature,
+		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+		.release_semaphore = iwlcore_eeprom_release_semaphore,
+		.calib_version = iwlagn_eeprom_calib_version,
+		.query_addr = iwlagn_eeprom_query_addr,
+		.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+	},
+	.post_associate = iwl_post_associate,
+	.isr = iwl_isr_ict,
+	.config_ap = iwl_config_ap,
+	.temp_ops = {
+		.temperature = iwlagn_temperature,
+		.set_ct_kill = iwl6000_set_ct_threshold,
+		.set_calib_version = iwl6000_set_calib_version,
+	},
+	.manage_ibss_station = iwlagn_manage_ibss_station,
+	.update_bcast_stations = iwl_update_bcast_stations,
+	.debugfs_ops = {
+		.rx_stats_read = iwl_ucode_rx_stats_read,
+		.tx_stats_read = iwl_ucode_tx_stats_read,
+		.general_stats_read = iwl_ucode_general_stats_read,
+		.bt_stats_read = iwl_ucode_bt_stats_read,
+	},
+	.recover_from_tx_stall = iwl_bg_monitor_recover,
+	.check_plcp_health = iwl_good_plcp_health,
+	.check_ack_health = iwl_good_ack_health,
+	.txfifo_flush = iwlagn_txfifo_flush,
+	.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+	.tt_ops = {
+		.lower_power_detection = iwl_tt_is_low_power_state,
+		.tt_power_mode = iwl_tt_current_power_mode,
+		.ct_kill_check = iwl_check_for_ct_kill,
+	}
 };
 
 static const struct iwl_ops iwl6000_ops = {
@@ -339,21 +424,9 @@ static const struct iwl_ops iwl6000_ops = {
 	.led = &iwlagn_led_ops,
 };
 
-static void do_not_send_bt_config(struct iwl_priv *priv)
-{
-}
-
-static struct iwl_hcmd_ops iwl6000g2b_hcmd = {
-	.rxon_assoc = iwlagn_send_rxon_assoc,
-	.commit_rxon = iwl_commit_rxon,
-	.set_rxon_chain = iwl_set_rxon_chain,
-	.set_tx_ant = iwlagn_send_tx_ant_config,
-	.send_bt_config = do_not_send_bt_config,
-};
-
 static const struct iwl_ops iwl6000g2b_ops = {
-	.lib = &iwl6000_lib,
-	.hcmd = &iwl6000g2b_hcmd,
+	.lib = &iwl6000g2b_lib,
+	.hcmd = &iwlagn_bt_hcmd,
 	.utils = &iwlagn_hcmd_utils,
 	.led = &iwlagn_led_ops,
 };
@@ -494,7 +567,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
 	.support_ct_kill_exit = true,
-	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
 	.chain_noise_scale = 1000,
 	.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
 	.max_event_log_size = 512,
@@ -502,6 +575,11 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
 	.chain_noise_calib_by_driver = true,
 	.need_dc_calib = true,
 	.bt_statistics = true,
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.advanced_bt_coexist = true,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
 };
 
 struct iwl_cfg iwl6000g2b_2abg_cfg = {
@@ -530,7 +608,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
 	.support_ct_kill_exit = true,
-	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
 	.chain_noise_scale = 1000,
 	.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
 	.max_event_log_size = 512,
@@ -538,6 +616,11 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
 	.chain_noise_calib_by_driver = true,
 	.need_dc_calib = true,
 	.bt_statistics = true,
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.advanced_bt_coexist = true,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
 };
 
 struct iwl_cfg iwl6000g2b_2bgn_cfg = {
@@ -568,7 +651,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
 	.support_ct_kill_exit = true,
-	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
 	.chain_noise_scale = 1000,
 	.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
 	.max_event_log_size = 512,
@@ -576,6 +659,11 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
 	.chain_noise_calib_by_driver = true,
 	.need_dc_calib = true,
 	.bt_statistics = true,
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.advanced_bt_coexist = true,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
 };
 
 struct iwl_cfg iwl6000g2b_2bg_cfg = {
@@ -604,7 +692,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
 	.support_ct_kill_exit = true,
-	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
 	.chain_noise_scale = 1000,
 	.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
 	.max_event_log_size = 512,
@@ -612,6 +700,11 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
 	.chain_noise_calib_by_driver = true,
 	.need_dc_calib = true,
 	.bt_statistics = true,
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.advanced_bt_coexist = true,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
 };
 
 struct iwl_cfg iwl6000g2b_bgn_cfg = {
@@ -642,7 +735,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
 	.support_ct_kill_exit = true,
-	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
 	.chain_noise_scale = 1000,
 	.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
 	.max_event_log_size = 512,
@@ -650,6 +743,11 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
 	.chain_noise_calib_by_driver = true,
 	.need_dc_calib = true,
 	.bt_statistics = true,
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.advanced_bt_coexist = true,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
 };
 
 struct iwl_cfg iwl6000g2b_bg_cfg = {
@@ -678,7 +776,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
 	.support_ct_kill_exit = true,
-	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
 	.chain_noise_scale = 1000,
 	.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
 	.max_event_log_size = 512,
@@ -686,6 +784,11 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
 	.chain_noise_calib_by_driver = true,
 	.need_dc_calib = true,
 	.bt_statistics = true,
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.advanced_bt_coexist = true,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
 };
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index c4c5691032a6..84ad62958535 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -625,7 +625,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
 
 	data = &(priv->sensitivity_data);
 
-	if (!iwl_is_associated(priv)) {
+	if (!iwl_is_any_associated(priv)) {
 		IWL_DEBUG_CALIB(priv, "<< - not associated\n");
 		return;
 	}
@@ -763,6 +763,12 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
 	unsigned long flags;
 	struct statistics_rx_non_phy *rx_info;
 	u8 first_chain;
+	/*
+	 * MULTI-FIXME:
+	 * When we support multiple interfaces on different channels,
+	 * this must be modified/fixed.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
 	if (priv->disable_chain_noise_cal)
 		return;
@@ -793,8 +799,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
 		return;
 	}
 
-	rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK);
-	rxon_chnum = le16_to_cpu(priv->staging_rxon.channel);
+	rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
+	rxon_chnum = le16_to_cpu(ctx->staging.channel);
 	if (priv->cfg->bt_statistics) {
 		stat_band24 = !!(((struct iwl_bt_notif_statistics *)
 				stat_resp)->flag &
@@ -914,7 +920,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
 	 * To be safe, simply mask out any chains that we know
 	 * are not on the device.
 	 */
-	active_chains &= priv->hw_params.valid_rx_ant;
+	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		active_chains &= first_antenna(priv->hw_params.valid_rx_ant);
+	} else
+		active_chains &= priv->hw_params.valid_rx_ant;
 
 	num_tx_chains = 0;
 	for (i = 0; i < NUM_RX_CHAINS; i++) {
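
When advanced BT coexistence runs in full-concurrency mode the radio behaves as 1x1, so the valid chain mask is collapsed to its first antenna. A userspace reimplementation of the idea behind first_antenna() (the driver carries its own inline helper; this is just the bit trick):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define ANT_A 1	/* bit 0 */
	#define ANT_B 2	/* bit 1 */
	#define ANT_C 4	/* bit 2 */

	static unsigned char first_antenna(unsigned char mask)
	{
		if (!mask)
			return ANT_A;	/* assumed fallback */
		return 1 << (ffs(mask) - 1);	/* lowest set bit */
	}

	int main(void)
	{
		unsigned char active_chains = ANT_A | ANT_B;
		/* operated as 1x1 in full concurrency mode */
		printf("0x%x -> 0x%x\n", active_chains,
		       first_antenna(active_chains));
		return 0;
	}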
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 75b901b3eb1e..6fb52abafc8d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -37,12 +37,13 @@
 #include "iwl-io.h"
 #include "iwl-agn.h"
 
-int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
+int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
+			   struct iwl_rxon_context *ctx)
 {
 	int ret = 0;
 	struct iwl5000_rxon_assoc_cmd rxon_assoc;
-	const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
-	const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
+	const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
+	const struct iwl_rxon_cmd *rxon2 = &ctx->active;
 
 	if ((rxon1->flags == rxon2->flags) &&
 	    (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -60,23 +61,23 @@ int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
 		return 0;
 	}
 
-	rxon_assoc.flags = priv->staging_rxon.flags;
-	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
-	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
-	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
+	rxon_assoc.flags = ctx->staging.flags;
+	rxon_assoc.filter_flags = ctx->staging.filter_flags;
+	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
 	rxon_assoc.reserved1 = 0;
 	rxon_assoc.reserved2 = 0;
 	rxon_assoc.reserved3 = 0;
 	rxon_assoc.ofdm_ht_single_stream_basic_rates =
-		priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
+		ctx->staging.ofdm_ht_single_stream_basic_rates;
 	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
-		priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
-	rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
+		ctx->staging.ofdm_ht_dual_stream_basic_rates;
+	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
 	rxon_assoc.ofdm_ht_triple_stream_basic_rates =
-		priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
-	rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
+		ctx->staging.ofdm_ht_triple_stream_basic_rates;
+	rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
 
-	ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+	ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd,
 				     sizeof(rxon_assoc), &rxon_assoc, NULL);
 	if (ret)
 		return ret;
@@ -184,7 +185,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
 	int ret;
 
 	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
-	    iwl_is_associated(priv)) {
+	    iwl_is_any_associated(priv)) {
 		struct iwl_calib_chain_noise_reset_cmd cmd;
 
 		/* clear data for chain noise calibration algorithm */
@@ -235,13 +236,13 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 	/* data from PHY/DSP regarding signal strength, etc.,
 	 * contents are always there, not configurable by host
 	 */
-	struct iwl5000_non_cfg_phy *ncphy =
-		(struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+	struct iwlagn_non_cfg_phy *ncphy =
+		(struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
 	u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
 	u8 agc;
 
-	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
-	agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
+	agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
 
 	/* Find max rssi among 3 possible receivers.
 	 * These values are measured by the digital signal processor (DSP).
@@ -249,11 +250,14 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 	 * if the radio's automatic gain control (AGC) is working right.
 	 * AGC value (see below) will provide the "interesting" info.
 	 */
-	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
-	rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
-	rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
-	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
-	rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
+	rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
+			IWLAGN_OFDM_RSSI_A_BIT_POS;
+	rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
+			IWLAGN_OFDM_RSSI_B_BIT_POS;
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
+	rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
+			IWLAGN_OFDM_RSSI_C_BIT_POS;
 
 	max_rssi = max_t(u32, rssi_a, rssi_b);
 	max_rssi = max_t(u32, max_rssi, rssi_c);
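
For reference, the arithmetic at the end of iwlagn_calc_rssi(): take the strongest chain, subtract the AGC reading and a fixed offset to land in dBm. The mask and shift values below are illustrative, not the IWLAGN_* constants from the header, and the 44 dB offset is an assumption standing in for IWLAGN_RSSI_OFFSET:

	#include <stdio.h>
	#include <stdint.h>

	#define RSSI_A_MSK	0x00ffu
	#define RSSI_A_POS	0
	#define RSSI_B_MSK	0xff00u
	#define RSSI_B_POS	8
	#define RSSI_OFFSET	44	/* stand-in for IWLAGN_RSSI_OFFSET */

	int main(void)
	{
		/* one DSP word carrying rssi_a = 55 and rssi_b = 60 */
		uint32_t val = (60u << RSSI_B_POS) | 55u;
		uint32_t rssi_a = (val & RSSI_A_MSK) >> RSSI_A_POS;
		uint32_t rssi_b = (val & RSSI_B_MSK) >> RSSI_B_POS;
		uint32_t max_rssi = rssi_a > rssi_b ? rssi_a : rssi_b;
		uint8_t agc = 30;

		/* max - agc - offset, as in the driver's return statement */
		printf("signal = %d dBm\n", (int)max_rssi - agc - RSSI_OFFSET);
		return 0;
	}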
@@ -266,12 +270,95 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 	return max_rssi - agc - IWLAGN_RSSI_OFFSET;
 }
 
+static int iwlagn_set_pan_params(struct iwl_priv *priv)
+{
+	struct iwl_wipan_params_cmd cmd;
+	struct iwl_rxon_context *ctx_bss, *ctx_pan;
+	int slot0 = 300, slot1 = 0;
+	int ret;
+
+	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
+		return 0;
+
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+	lockdep_assert_held(&priv->mutex);
+
+	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
+	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	/* only 2 slots are currently allowed */
+	cmd.num_slots = 2;
+
+	cmd.slots[0].type = 0; /* BSS */
+	cmd.slots[1].type = 1; /* PAN */
+
+	if (ctx_bss->vif && ctx_pan->vif) {
+		int bcnint = ctx_pan->vif->bss_conf.beacon_int;
+
+		/* should be set, but seems unused?? */
+		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
+
+		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
+		    bcnint &&
+		    bcnint != ctx_bss->vif->bss_conf.beacon_int) {
+			IWL_ERR(priv,
+				"beacon intervals don't match (%d, %d)\n",
+				ctx_bss->vif->bss_conf.beacon_int,
+				ctx_pan->vif->bss_conf.beacon_int);
+		} else
+			bcnint = max_t(int, bcnint,
+				       ctx_bss->vif->bss_conf.beacon_int);
+		if (!bcnint)
+			bcnint = 100;
+		slot0 = bcnint / 2;
+		slot1 = bcnint - slot0;
+
+		if (test_bit(STATUS_SCAN_HW, &priv->status) ||
+		    (!ctx_bss->vif->bss_conf.idle &&
+		     !ctx_bss->vif->bss_conf.assoc)) {
+			slot0 = bcnint * 3 - 20;
+			slot1 = 20;
+		} else if (!ctx_pan->vif->bss_conf.idle &&
+			   !ctx_pan->vif->bss_conf.assoc) {
+			slot1 = bcnint * 3 - 20;
+			slot0 = 20;
+		}
+	} else if (ctx_pan->vif) {
+		slot0 = 0;
+		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
+					ctx_pan->vif->bss_conf.beacon_int;
+		slot1 = max_t(int, 100, slot1);
+	}
+
+	cmd.slots[0].width = cpu_to_le16(slot0);
+	cmd.slots[1].width = cpu_to_le16(slot1);
+
+	ret = iwl_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, sizeof(cmd), &cmd);
+	if (ret)
+		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
+
+	return ret;
+}
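
Worked numbers for the slot split above, stripped of the driver types: with both contexts up and a 100 TU beacon interval the time is halved between BSS and PAN; while scanning (or before the BSS side associates) the BSS slot stretches to nearly three beacon intervals and PAN keeps a 20 TU sliver:

	#include <stdio.h>

	int main(void)
	{
		int bcnint = 100;		/* beacon interval, in TU */
		int slot0 = bcnint / 2;		/* BSS slot */
		int slot1 = bcnint - slot0;	/* PAN slot */

		printf("steady state: slot0=%d slot1=%d\n", slot0, slot1);

		/* scanning, or BSS context not idle and not associated */
		slot0 = bcnint * 3 - 20;
		slot1 = 20;
		printf("scanning:     slot0=%d slot1=%d\n", slot0, slot1);
		return 0;
	}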
+
 struct iwl_hcmd_ops iwlagn_hcmd = {
 	.rxon_assoc = iwlagn_send_rxon_assoc,
 	.commit_rxon = iwl_commit_rxon,
 	.set_rxon_chain = iwl_set_rxon_chain,
 	.set_tx_ant = iwlagn_send_tx_ant_config,
 	.send_bt_config = iwl_send_bt_config,
+	.set_pan_params = iwlagn_set_pan_params,
+};
+
+struct iwl_hcmd_ops iwlagn_bt_hcmd = {
+	.rxon_assoc = iwlagn_send_rxon_assoc,
+	.commit_rxon = iwl_commit_rxon,
+	.set_rxon_chain = iwl_set_rxon_chain,
+	.set_tx_ant = iwlagn_send_tx_ant_config,
+	.send_bt_config = iwlagn_send_advance_bt_config,
+	.set_pan_params = iwlagn_set_pan_params,
 };
 
 struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 9dd9e64c2b0b..a8f2adfd799e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -247,7 +247,14 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
 		struct iwl_ht_agg *agg;
 
 		agg = &priv->stations[sta_id].tid[tid].agg;
-
+		/*
+		 * If the BT kill count is non-zero, we'll get this
+		 * notification again.
+		 */
+		if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
+		    priv->cfg->advanced_bt_coexist) {
+			IWL_WARN(priv, "receive reply tx with bt_kill\n");
+		}
 		iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
 
 		/* check if BAR is needed */
@@ -1098,7 +1105,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
 		if (chan->band != band)
 			continue;
 
-		channel = ieee80211_frequency_to_channel(chan->center_freq);
+		channel = chan->hw_value;
 		scan_ch->channel = cpu_to_le16(channel);
 
 		ch_info = iwl_get_channel_info(priv, band, channel);
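
Reading chan->hw_value avoids recomputing the channel number from the center frequency on every scan pass; the two agree because hw_value is filled with the channel number when the band is registered. A sketch of the 2.4 GHz relation (channel 14 is special-cased, as in the real cfg80211 helper):

	#include <stdio.h>

	static int freq_to_chan_24ghz(int mhz)
	{
		/* ieee80211_frequency_to_channel() boils down to this
		 * for the 2.4 GHz band */
		return mhz == 2484 ? 14 : (mhz - 2407) / 5;
	}

	int main(void)
	{
		struct { int center_freq; int hw_value; } chan = { 2412, 1 };

		printf("computed %d, stored %d\n",
		       freq_to_chan_24ghz(chan.center_freq), chan.hw_value);
		return 0;
	}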
@@ -1156,6 +1163,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	};
 	struct iwl_scan_cmd *scan;
 	struct ieee80211_conf *conf = NULL;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	u32 rate_flags = 0;
 	u16 cmd_len;
 	u16 rx_chain = 0;
@@ -1168,6 +1176,9 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	u8 active_chains;
 	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
 
+	if (vif)
+		ctx = iwl_rxon_ctx_from_vif(vif);
+
 	conf = ieee80211_get_hw_conf(priv->hw);
 
 	cancel_delayed_work(&priv->scan_check);
@@ -1225,7 +1236,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
 	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
 
-	if (iwl_is_associated(priv)) {
+	if (iwl_is_any_associated(priv)) {
 		u16 interval = 0;
 		u32 extra;
 		u32 suspend_time = 100;
@@ -1276,13 +1287,15 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
 
 	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
-	scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
+	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
 	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 
 	switch (priv->scan_band) {
 	case IEEE80211_BAND_2GHZ:
 		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
-		chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
+		chan_mod = le32_to_cpu(
+			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+			RXON_FLG_CHANNEL_MODE_MSK)
 					>> RXON_FLG_CHANNEL_MODE_POS;
 		if (chan_mod == CHANNEL_MODE_PURE_40) {
 			rate = IWL_RATE_6M_PLCP;
@@ -1290,6 +1303,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 			rate = IWL_RATE_1M_PLCP;
 			rate_flags = RATE_MCS_CCK_MSK;
 		}
+		/*
+		 * Internal scans are passive, so we can indiscriminately set
+		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
+		 */
+		if (priv->cfg->advanced_bt_coexist)
+			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
 		scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
 		break;
 	case IEEE80211_BAND_5GHZ:
@@ -1327,6 +1346,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	if (priv->cfg->scan_tx_antennas[band])
 		scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
 
+	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		scan_tx_antennas =
+			first_antenna(priv->cfg->scan_tx_antennas[band]);
+	}
+
 	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
 						    scan_tx_antennas);
 	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
@@ -1345,6 +1370,11 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
 		rx_ant = first_antenna(active_chains);
 	}
+	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		rx_ant = first_antenna(rx_ant);
+	}
+
 	/* MIMO is not used here, but value is required */
 	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
 	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
@@ -1394,6 +1424,11 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	scan->len = cpu_to_le16(cmd.len);
 
 	set_bit(STATUS_SCAN_HW, &priv->status);
+
+	if (priv->cfg->ops->hcmd->set_pan_params &&
+	    priv->cfg->ops->hcmd->set_pan_params(priv))
+		goto done;
+
 	if (iwl_send_cmd_sync(priv, &cmd))
 		goto done;
 
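
set_pan_params joins the optional per-device hooks: the caller tests the pointer before calling and aborts the scan if the command fails. The same pattern as a self-contained mock (these types are stand-ins, not driver code):

	#include <stdio.h>

	struct priv;	/* opaque */
	struct hcmd_ops { int (*set_pan_params)(struct priv *p); };

	static int pan_params_fail(struct priv *p) { (void)p; return -1; }

	static int start_scan(struct priv *p, const struct hcmd_ops *hcmd)
	{
		/* call the hook only if the device provides it */
		if (hcmd->set_pan_params && hcmd->set_pan_params(p))
			return -1;	/* "goto done" in the driver */
		return 0;		/* proceed to send the scan command */
	}

	int main(void)
	{
		struct hcmd_ops with = { .set_pan_params = pan_params_fail };
		struct hcmd_ops without = { 0 };

		printf("with hook: %d, without: %d\n",
		       start_scan(NULL, &with), start_scan(NULL, &without));
		return 0;
	}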
@@ -1420,7 +1455,8 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
 	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
 
 	if (add)
-		return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
+		return iwl_add_bssid_station(priv, vif_priv->ctx,
+					     vif->bss_conf.bssid, true,
 					     &vif_priv->ibss_bssid_sta_id);
 	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
 				  vif->bss_conf.bssid);
@@ -1453,7 +1489,7 @@ int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
 
 	/* waiting for all the tx frames complete might take a while */
 	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-		if (cnt == IWL_CMD_QUEUE_NUM)
+		if (cnt == priv->cmd_queue)
 			continue;
 		txq = &priv->txq[cnt];
 		q = &txq->q;
@@ -1518,3 +1554,377 @@ done:
 	ieee80211_wake_queues(priv->hw);
 	mutex_unlock(&priv->mutex);
 }
+
+/*
+ * BT coex
+ */
+/*
+ * Macros to access the lookup table.
+ *
+ * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
+ * wifi_prio, wifi_txrx and wifi_sh_ant_req.
+ *
+ * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
+ *
+ * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
+ * one after another in 32-bit registers, and "registers" 0 through 7 contain
+ * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
+ *
+ * These macros encode that format.
+ */
+#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
+		  wifi_txrx, wifi_sh_ant_req) \
+	(bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
+	(wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
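
To make the scheme above concrete: the seven 1-bit inputs pack into an index 0..127, WLAN_ACTIVE occupies one bit per entry in words 8..11, and words 0..7 hold WLAN_KILL and ANT_SWITCH as interleaved bit pairs. A worked example for bt3_prio=1 and wifi_req=1, reproducing the macros' index math:

	#include <stdio.h>

	int main(void)
	{
		/* bt3_prio=1, bt3_txrx=0, bt_rf_act=0, wifi_req=1,
		 * wifi_prio=0, wifi_txrx=0, wifi_sh_ant_req=0 */
		unsigned val = 1 | (0 << 1) | (0 << 2) | (1 << 3);

		printf("index %u\n", val);
		/* WLAN_ACTIVE: 32 entries per word, starting at word 8 */
		printf("WLAN_ACTIVE: word %u, bit %u\n",
		       8 + (val >> 5), val & 0x1f);
		/* WLAN_KILL/ANT_SWITCH: 16 entries per word, bit pairs */
		printf("WLAN_KILL:   word %u, bit %u\n",
		       val >> 4, (val << 1) & 0x1e);
		printf("ANT_SWITCH:  word %u, bit %u\n",
		       val >> 4, ((val << 1) & 0x1e) + 1);
		return 0;
	}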
+
+#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
+	lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
+#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
+				 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
+	(!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
+			bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
+			wifi_sh_ant_req))))
+#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
+				wifi_prio, wifi_txrx, wifi_sh_ant_req) \
+	LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
+			bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
+			wifi_sh_ant_req))
+#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
+				  wifi_req, wifi_prio, wifi_txrx, \
+				  wifi_sh_ant_req) \
+	LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
+			bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
+			wifi_sh_ant_req))
+
+#define LUT_WLAN_KILL_OP(lut, op, val) \
+	lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
+#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
+			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
+	(!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
+			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
+#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
+			  wifi_prio, wifi_txrx, wifi_sh_ant_req) \
+	LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
+			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
+#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
+			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
+	LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
+			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
+
+#define LUT_ANT_SWITCH_OP(lut, op, val) \
+	lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
+#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
+			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
+	(!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
+			wifi_req, wifi_prio, wifi_txrx, \
+			wifi_sh_ant_req))))
+#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
+			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
+	LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
+			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
+#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
+			     wifi_prio, wifi_txrx, wifi_sh_ant_req) \
+	LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
+			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
+
+static const __le32 iwlagn_def_3w_lookup[12] = {
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaeaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xcc00ff28),
+	cpu_to_le32(0x0000aaaa),
+	cpu_to_le32(0xcc00aaaa),
+	cpu_to_le32(0x0000aaaa),
+	cpu_to_le32(0xc0004000),
+	cpu_to_le32(0x00004000),
+	cpu_to_le32(0xf0005000),
+	cpu_to_le32(0xf0004000),
+};
+
+static const __le32 iwlagn_concurrent_lookup[12] = {
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0x00000000),
+	cpu_to_le32(0x00000000),
+	cpu_to_le32(0x00000000),
+	cpu_to_le32(0x00000000),
+};
+
+void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
+{
+	struct iwlagn_bt_cmd bt_cmd = {
+		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
+		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
+		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
+		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
+	};
+
+	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
+			sizeof(bt_cmd.bt3_lookup_table));
+
+	bt_cmd.prio_boost = priv->cfg->bt_prio_boost;
+	bt_cmd.kill_ack_mask = priv->kill_ack_mask;
+	bt_cmd.kill_cts_mask = priv->kill_cts_mask;
+	bt_cmd.valid = priv->bt_valid;
+
+	/*
+	 * Configure BT coex mode to "no coexistence" when the
+	 * user disabled BT coexistence, we have no interface
+	 * (might be in monitor mode), or the interface is in
+	 * IBSS mode (no proper uCode support for coex then).
+	 */
+	if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
+		bt_cmd.flags = 0;
+	} else {
+		bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
+					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
+		if (priv->bt_ch_announce)
+			bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
+		IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
+	}
+	if (priv->bt_full_concurrent)
+		memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
+		       sizeof(iwlagn_concurrent_lookup));
+	else
+		memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
+		       sizeof(iwlagn_def_3w_lookup));
+
+	IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
+		       bt_cmd.flags ? "active" : "disabled",
+		       priv->bt_full_concurrent ?
+		       "full concurrency" : "3-wire");
+
+	if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
+		IWL_ERR(priv, "failed to send BT Coex Config\n");
+
+	/*
+	 * When we are doing a restart, need to also reconfigure BT
+	 * SCO to the device. If not doing a restart, bt_sco_active
+	 * will always be false, so there's no need to have an extra
+	 * variable to check for it.
+	 */
+	if (priv->bt_sco_active) {
+		struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
+
+		if (priv->bt_sco_active)
+			sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
+		if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
+				     sizeof(sco_cmd), &sco_cmd))
+			IWL_ERR(priv, "failed to send BT SCO command\n");
+	}
+}
+
+static void iwlagn_bt_traffic_change_work(struct work_struct *work)
+{
+	struct iwl_priv *priv =
+		container_of(work, struct iwl_priv, bt_traffic_change_work);
+	struct iwl_rxon_context *ctx;
+	int smps_request = -1;
+
+	IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
+		       priv->bt_traffic_load);
+
+	switch (priv->bt_traffic_load) {
+	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+		smps_request = IEEE80211_SMPS_AUTOMATIC;
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+		smps_request = IEEE80211_SMPS_DYNAMIC;
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+		smps_request = IEEE80211_SMPS_STATIC;
+		break;
+	default:
+		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
+			priv->bt_traffic_load);
+		break;
+	}
+
+	mutex_lock(&priv->mutex);
+
+	if (priv->cfg->ops->lib->update_chain_flags)
+		priv->cfg->ops->lib->update_chain_flags(priv);
+
+	if (smps_request != -1) {
+		for_each_context(priv, ctx) {
+			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
+				ieee80211_request_smps(ctx->vif, smps_request);
+		}
+	}
+
+	mutex_unlock(&priv->mutex);
+}
1765
1766static void iwlagn_print_uartmsg(struct iwl_priv *priv,
1767 struct iwl_bt_uart_msg *uart_msg)
1768{
1769 IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
1770 "Update Req = 0x%X",
1771 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
1772 BT_UART_MSG_FRAME1MSGTYPE_POS,
1773 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
1774 BT_UART_MSG_FRAME1SSN_POS,
1775 (BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
1776 BT_UART_MSG_FRAME1UPDATEREQ_POS);
1777
1778 IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
1779 "Chl_SeqN = 0x%X, In band = 0x%X",
1780 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
1781 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
1782 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
1783 BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
1784 (BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
1785 BT_UART_MSG_FRAME2CHLSEQN_POS,
1786 (BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
1787 BT_UART_MSG_FRAME2INBAND_POS);
1788
1789 IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
1790 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
1791 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
1792 BT_UART_MSG_FRAME3SCOESCO_POS,
1793 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
1794 BT_UART_MSG_FRAME3SNIFF_POS,
1795 (BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
1796 BT_UART_MSG_FRAME3A2DP_POS,
1797 (BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
1798 BT_UART_MSG_FRAME3ACL_POS,
1799 (BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
1800 BT_UART_MSG_FRAME3MASTER_POS,
1801 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
1802 BT_UART_MSG_FRAME3OBEX_POS);
1803
1804 IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
1805 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
1806 BT_UART_MSG_FRAME4IDLEDURATION_POS);
1807
1808 IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
1809 "eSCO Retransmissions = 0x%X",
1810 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
1811 BT_UART_MSG_FRAME5TXACTIVITY_POS,
1812 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
1813 BT_UART_MSG_FRAME5RXACTIVITY_POS,
1814 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
1815 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
1816
1817 IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
1818 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
1819 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
1820 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
1821 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
1822
1823 IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
1824 "0x%X, Connectable = 0x%X",
1825 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
1826 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
1827 (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
1828 BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
1829 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
1830 BT_UART_MSG_FRAME7CONNECTABLE_POS);
1831}
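
Every field above is pulled out with the same mask-and-shift idiom. As an
illustration only (this helper does not exist in the driver), the repetition
could be captured with a token-pasting macro:

/*
 * Hypothetical helper: extract a bit-field given the
 * BT_UART_MSG_<name>_MSK / _POS pairs used above, e.g.
 * UART_MSG_FIELD(uart_msg->frame1, FRAME1SSN).
 */
#define UART_MSG_FIELD(frame, name) \
	(((frame) & BT_UART_MSG_##name##_MSK) >> BT_UART_MSG_##name##_POS)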
1832
1833static void iwlagn_set_kill_ack_msk(struct iwl_priv *priv,
1834 struct iwl_bt_uart_msg *uart_msg)
1835{
1836 u8 kill_ack_msk;
1837 __le32 bt_kill_ack_msg[2] = {
1838 cpu_to_le32(0xFFFFFFF), cpu_to_le32(0xFFFFFC00) };
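	/* entry 0 applies while frame3 reports SCO/eSCO, sniff or A2DP
	 * activity; entry 1 when none of those bits are set (the index
	 * is computed below) */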
1839
1840 kill_ack_msk = (((BT_UART_MSG_FRAME3A2DP_MSK |
1841 BT_UART_MSG_FRAME3SNIFF_MSK |
1842 BT_UART_MSG_FRAME3SCOESCO_MSK) &
1843 uart_msg->frame3) == 0) ? 1 : 0;
1844 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_ack_msk]) {
1845 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
1846 priv->kill_ack_mask = bt_kill_ack_msg[kill_ack_msk];
1847 /* schedule to send runtime bt_config */
1848 queue_work(priv->workqueue, &priv->bt_runtime_config);
1849 }
1851}
1852
1853void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
1854 struct iwl_rx_mem_buffer *rxb)
1855{
1856 unsigned long flags;
1857 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1858 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
1859 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
1860 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
1861 u8 last_traffic_load;
1862
1863 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
1864 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
1865 IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
1866 IWL_DEBUG_NOTIF(priv, " CI compliance: %d\n",
1867 coex->bt_ci_compliance);
1868 iwlagn_print_uartmsg(priv, uart_msg);
1869
1870 last_traffic_load = priv->notif_bt_traffic_load;
1871 priv->notif_bt_traffic_load = coex->bt_traffic_load;
1872 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
1873 if (priv->bt_status != coex->bt_status ||
1874 last_traffic_load != coex->bt_traffic_load) {
1875 if (coex->bt_status) {
1876 /* BT on */
1877 if (!priv->bt_ch_announce)
1878 priv->bt_traffic_load =
1879 IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1880 else
1881 priv->bt_traffic_load =
1882 coex->bt_traffic_load;
1883 } else {
1884 /* BT off */
1885 priv->bt_traffic_load =
1886 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
1887 }
1888 priv->bt_status = coex->bt_status;
1889 queue_work(priv->workqueue,
1890 &priv->bt_traffic_change_work);
1891 }
1892 if (priv->bt_sco_active !=
1893 (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
1894 priv->bt_sco_active = uart_msg->frame3 &
1895 BT_UART_MSG_FRAME3SCOESCO_MSK;
1896 if (priv->bt_sco_active)
1897 sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
1898 iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
1899 sizeof(sco_cmd), &sco_cmd, NULL);
1900 }
1901 }
1902
1903 iwlagn_set_kill_ack_msk(priv, uart_msg);
1904
1905 /* FIXME: based on notification, adjust the prio_boost */
1906
1907 spin_lock_irqsave(&priv->lock, flags);
1908 priv->bt_ci_compliance = coex->bt_ci_compliance;
1909 spin_unlock_irqrestore(&priv->lock, flags);
1910}
1911
1912void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
1913{
1914 iwlagn_rx_handler_setup(priv);
1915 priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
1916 iwlagn_bt_coex_profile_notif;
1917}
1918
1919void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
1920{
1921 iwlagn_setup_deferred_work(priv);
1922
1923 INIT_WORK(&priv->bt_traffic_change_work,
1924 iwlagn_bt_traffic_change_work);
1925}
1926
1927void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
1928{
1929 cancel_work_sync(&priv->bt_traffic_change_work);
1930}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 23e5c42e7d7e..57629fba3a7d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -82,6 +82,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
82 struct iwl_lq_sta *lq_sta); 82 struct iwl_lq_sta *lq_sta);
83static void rs_fill_link_cmd(struct iwl_priv *priv, 83static void rs_fill_link_cmd(struct iwl_priv *priv,
84 struct iwl_lq_sta *lq_sta, u32 rate_n_flags); 84 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
85static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
85 86
86 87
87#ifdef CONFIG_MAC80211_DEBUGFS 88#ifdef CONFIG_MAC80211_DEBUGFS
@@ -300,7 +301,19 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
300 struct ieee80211_sta *sta) 301 struct ieee80211_sta *sta)
301{ 302{
302 int ret = -EAGAIN; 303 int ret = -EAGAIN;
303 u32 load = rs_tl_get_load(lq_data, tid); 304 u32 load;
305
306 /*
307 * Don't create TX aggregation sessions when in high
308 * BT traffic, as they would just be disrupted by BT.
309 */
310 if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
311 IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
312 priv->bt_traffic_load);
313 return ret;
314 }
315
316 load = rs_tl_get_load(lq_data, tid);
304 317
305 if (load > IWL_AGG_LOAD_THRESHOLD) { 318 if (load > IWL_AGG_LOAD_THRESHOLD) {
306 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 319 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
@@ -502,6 +515,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
502 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags); 515 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
503 u8 mcs; 516 u8 mcs;
504 517
518 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
505 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); 519 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
506 520
507 if (*rate_idx == IWL_RATE_INVALID) { 521 if (*rate_idx == IWL_RATE_INVALID) {
@@ -588,11 +602,13 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
588 * Green-field mode is valid if the station supports it and 602 * Green-field mode is valid if the station supports it and
589 * there are no non-GF stations present in the BSS. 603 * there are no non-GF stations present in the BSS.
590 */ 604 */
591static inline u8 rs_use_green(struct ieee80211_sta *sta, 605static bool rs_use_green(struct ieee80211_sta *sta)
592 struct iwl_ht_config *ht_conf)
593{ 606{
607 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
608 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
609
594 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && 610 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
595 !(ht_conf->non_GF_STA_present); 611 !(ctx->ht.non_gf_sta_present);
596} 612}
597 613
598/** 614/**
@@ -744,6 +760,32 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
744 (a->is_SGI == b->is_SGI); 760 (a->is_SGI == b->is_SGI);
745} 761}
746 762
763static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
764 struct iwl_lq_sta *lq_sta)
765{
766 struct iwl_scale_tbl_info *tbl;
767 bool full_concurrent;
768 unsigned long flags;
769
770 spin_lock_irqsave(&priv->lock, flags);
771 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
772 full_concurrent = true;
773 else
774 full_concurrent = false;
775 spin_unlock_irqrestore(&priv->lock, flags);
776
777 if (priv->bt_full_concurrent != full_concurrent) {
778 priv->bt_full_concurrent = full_concurrent;
779
780 /* Update uCode's rate table. */
781 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
782 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
783 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
784
785 queue_work(priv->workqueue, &priv->bt_full_concurrency);
786 }
787}
788
747/* 789/*
748 * mac80211 sends us Tx status 790 * mac80211 sends us Tx status
749 */ 791 */
@@ -763,6 +805,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
763 u32 tx_rate; 805 u32 tx_rate;
764 struct iwl_scale_tbl_info tbl_type; 806 struct iwl_scale_tbl_info tbl_type;
765 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; 807 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
808 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
809 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
766 810
767 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 811 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
768 812
@@ -829,7 +873,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
829 lq_sta->missed_rate_counter++; 873 lq_sta->missed_rate_counter++;
830 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { 874 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
831 lq_sta->missed_rate_counter = 0; 875 lq_sta->missed_rate_counter = 0;
832 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false); 876 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
833 } 877 }
834 /* Regardless, ignore this status info for outdated rate */ 878 /* Regardless, ignore this status info for outdated rate */
835 return; 879 return;
@@ -848,7 +892,20 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
848 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 892 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
849 } else { 893 } else {
850 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); 894 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
851 return; 895 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
896 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
897 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
898 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
899 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
900 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
901 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
902 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
903 /*
904 * No matching table was found, so bypass the data collection
905 * and continue with rate scaling to find a usable rate table.
906 */
907 rs_stay_in_table(lq_sta, true);
908 goto done;
852 } 909 }
853 910
854 /* 911 /*
@@ -909,10 +966,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
909 } 966 }
910 /* The last TX rate is cached in lq_sta; it's set in if/else above */ 967 /* The last TX rate is cached in lq_sta; it's set in if/else above */
911 lq_sta->last_rate_n_flags = tx_rate; 968 lq_sta->last_rate_n_flags = tx_rate;
912 969done:
913 /* See if there's a better rate or modulation mode to try. */ 970 /* See if there's a better rate or modulation mode to try. */
914 if (sta && sta->supp_rates[sband->band]) 971 if (sta && sta->supp_rates[sband->band])
915 rs_rate_scale_perform(priv, skb, sta, lq_sta); 972 rs_rate_scale_perform(priv, skb, sta, lq_sta);
973
974 /* Is there a need to switch between full concurrency and 3-wire? */
975 if (priv->bt_ant_couple_ok)
976 rs_bt_update_lq(priv, ctx, lq_sta);
916} 977}
917 978
918/* 979/*
@@ -1106,6 +1167,8 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1106 u16 rate_mask; 1167 u16 rate_mask;
1107 s32 rate; 1168 s32 rate;
1108 s8 is_green = lq_sta->is_green; 1169 s8 is_green = lq_sta->is_green;
1170 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1171 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1109 1172
1110 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) 1173 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1111 return -1; 1174 return -1;
@@ -1126,7 +1189,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1126 tbl->max_search = IWL_MAX_SEARCH; 1189 tbl->max_search = IWL_MAX_SEARCH;
1127 rate_mask = lq_sta->active_mimo2_rate; 1190 rate_mask = lq_sta->active_mimo2_rate;
1128 1191
1129 if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) 1192 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1130 tbl->is_ht40 = 1; 1193 tbl->is_ht40 = 1;
1131 else 1194 else
1132 tbl->is_ht40 = 0; 1195 tbl->is_ht40 = 0;
@@ -1160,6 +1223,8 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
1160 u16 rate_mask; 1223 u16 rate_mask;
1161 s32 rate; 1224 s32 rate;
1162 s8 is_green = lq_sta->is_green; 1225 s8 is_green = lq_sta->is_green;
1226 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1227 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1163 1228
1164 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) 1229 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1165 return -1; 1230 return -1;
@@ -1180,7 +1245,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
1180 tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; 1245 tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
1181 rate_mask = lq_sta->active_mimo3_rate; 1246 rate_mask = lq_sta->active_mimo3_rate;
1182 1247
1183 if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) 1248 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1184 tbl->is_ht40 = 1; 1249 tbl->is_ht40 = 1;
1185 else 1250 else
1186 tbl->is_ht40 = 0; 1251 tbl->is_ht40 = 0;
@@ -1215,6 +1280,8 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1215 u16 rate_mask; 1280 u16 rate_mask;
1216 u8 is_green = lq_sta->is_green; 1281 u8 is_green = lq_sta->is_green;
1217 s32 rate; 1282 s32 rate;
1283 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1284 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1218 1285
1219 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) 1286 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1220 return -1; 1287 return -1;
@@ -1227,7 +1294,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1227 tbl->max_search = IWL_MAX_SEARCH; 1294 tbl->max_search = IWL_MAX_SEARCH;
1228 rate_mask = lq_sta->active_siso_rate; 1295 rate_mask = lq_sta->active_siso_rate;
1229 1296
1230 if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) 1297 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1231 tbl->is_ht40 = 1; 1298 tbl->is_ht40 = 1;
1232 else 1299 else
1233 tbl->is_ht40 = 0; 1300 tbl->is_ht40 = 0;
@@ -1265,18 +1332,52 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1265 struct iwl_rate_scale_data *window = &(tbl->win[index]); 1332 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1266 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1333 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1267 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1334 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1268 u8 start_action = tbl->action; 1335 u8 start_action;
1269 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1336 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1270 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1337 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1271 int ret = 0; 1338 int ret = 0;
1272 u8 update_search_tbl_counter = 0; 1339 u8 update_search_tbl_counter = 0;
1273 1340
1341 switch (priv->bt_traffic_load) {
1342 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1343 /* nothing */
1344 break;
1345 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1346 /* avoid antenna B unless MIMO */
1347 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1348 if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
1349 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1350 break;
1351 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1352 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1353 /* avoid antenna B and MIMO */
1354 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1355 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1356 tbl->action != IWL_LEGACY_SWITCH_SISO)
1357 tbl->action = IWL_LEGACY_SWITCH_SISO;
1358 break;
1359 default:
1360 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1361 break;
1362 }
1363
1274 if (!iwl_ht_enabled(priv)) 1364 if (!iwl_ht_enabled(priv))
1275 /* stay in Legacy */ 1365 /* stay in Legacy */
1276 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; 1366 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1277 else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && 1367 else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1278 tbl->action > IWL_LEGACY_SWITCH_SISO) 1368 tbl->action > IWL_LEGACY_SWITCH_SISO)
1279 tbl->action = IWL_LEGACY_SWITCH_SISO; 1369 tbl->action = IWL_LEGACY_SWITCH_SISO;
1370
1371 /* configure as 1x1 if bt full concurrency */
1372 if (priv->bt_full_concurrent) {
1373 if (!iwl_ht_enabled(priv))
1374 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1375 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1376 tbl->action = IWL_LEGACY_SWITCH_SISO;
1377 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1378 }
1379
1380 start_action = tbl->action;
1280 for (; ;) { 1381 for (; ;) {
1281 lq_sta->action_counter++; 1382 lq_sta->action_counter++;
1282 switch (tbl->action) { 1383 switch (tbl->action) {
@@ -1291,7 +1392,10 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1291 break; 1392 break;
1292 1393
1293 /* Don't change antenna if success has been great */ 1394 /* Don't change antenna if success has been great */
1294 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1395 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1396 !priv->bt_full_concurrent &&
1397 priv->bt_traffic_load ==
1398 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1295 break; 1399 break;
1296 1400
1297 /* Set up search table to try other antenna */ 1401 /* Set up search table to try other antenna */
@@ -1403,31 +1507,64 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1403 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 1507 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1404 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1508 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1405 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1509 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1406 u8 start_action = tbl->action; 1510 u8 start_action;
1407 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1511 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1408 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1512 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1409 u8 update_search_tbl_counter = 0; 1513 u8 update_search_tbl_counter = 0;
1410 int ret; 1514 int ret;
1411 1515
1516 switch (priv->bt_traffic_load) {
1517 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1518 /* nothing */
1519 break;
1520 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1521 /* avoid antenna B unless MIMO */
1522 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1523 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
1524 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1525 break;
1526 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1527 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1528 /* avoid antenna B and MIMO */
1529 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1530 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1531 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1532 break;
1533 default:
1534 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1535 break;
1536 }
1537
1412 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && 1538 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1413 tbl->action > IWL_SISO_SWITCH_ANTENNA2) { 1539 tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
1414 /* stay in SISO */ 1540 /* stay in SISO */
1415 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1541 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1416 } 1542 }
1543
1544 /* configure as 1x1 if bt full concurrency */
1545 if (priv->bt_full_concurrent) {
1546 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1547 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1548 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1549 }
1550
1551 start_action = tbl->action;
1417 for (;;) { 1552 for (;;) {
1418 lq_sta->action_counter++; 1553 lq_sta->action_counter++;
1419 switch (tbl->action) { 1554 switch (tbl->action) {
1420 case IWL_SISO_SWITCH_ANTENNA1: 1555 case IWL_SISO_SWITCH_ANTENNA1:
1421 case IWL_SISO_SWITCH_ANTENNA2: 1556 case IWL_SISO_SWITCH_ANTENNA2:
1422 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); 1557 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1423
1424 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && 1558 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1425 tx_chains_num <= 1) || 1559 tx_chains_num <= 1) ||
1426 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && 1560 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1427 tx_chains_num <= 2)) 1561 tx_chains_num <= 2))
1428 break; 1562 break;
1429 1563
1430 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1564 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1565 !priv->bt_full_concurrent &&
1566 priv->bt_traffic_load ==
1567 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1431 break; 1568 break;
1432 1569
1433 memcpy(search_tbl, tbl, sz); 1570 memcpy(search_tbl, tbl, sz);
@@ -1541,18 +1678,47 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1541 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 1678 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1542 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1679 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1543 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1680 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1544 u8 start_action = tbl->action; 1681 u8 start_action;
1545 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1682 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1546 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1683 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1547 u8 update_search_tbl_counter = 0; 1684 u8 update_search_tbl_counter = 0;
1548 int ret; 1685 int ret;
1549 1686
1687 switch (priv->bt_traffic_load) {
1688 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1689 /* nothing */
1690 break;
1691 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1692 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1693 /* avoid antenna B and MIMO */
1694 if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
1695 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1696 break;
1697 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1698 /* avoid antenna B unless MIMO */
1699 if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
1700 tbl->action == IWL_MIMO2_SWITCH_SISO_C)
1701 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1702 break;
1703 default:
1704 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1705 break;
1706 }
1707
1550 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && 1708 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1551 (tbl->action < IWL_MIMO2_SWITCH_SISO_A || 1709 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1552 tbl->action > IWL_MIMO2_SWITCH_SISO_C)) { 1710 tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
1553 /* switch in SISO */ 1711 /* switch in SISO */
1554 tbl->action = IWL_MIMO2_SWITCH_SISO_A; 1712 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1555 } 1713 }
1714
1715 /* configure as 1x1 if bt full concurrency */
1716 if (priv->bt_full_concurrent &&
1717 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1718 tbl->action > IWL_MIMO2_SWITCH_SISO_C))
1719 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1720
1721 start_action = tbl->action;
1556 for (;;) { 1722 for (;;) {
1557 lq_sta->action_counter++; 1723 lq_sta->action_counter++;
1558 switch (tbl->action) { 1724 switch (tbl->action) {
@@ -1682,18 +1848,47 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1682 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 1848 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1683 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1849 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1684 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1850 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1685 u8 start_action = tbl->action; 1851 u8 start_action;
1686 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1852 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1687 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1853 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1688 int ret; 1854 int ret;
1689 u8 update_search_tbl_counter = 0; 1855 u8 update_search_tbl_counter = 0;
1690 1856
1857 switch (priv->bt_traffic_load) {
1858 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1859 /* nothing */
1860 break;
1861 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1862 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1863 /* avoid antenna B and MIMO */
1864 if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
1865 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1866 break;
1867 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1868 /* avoid antenna B unless MIMO */
1869 if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
1870 tbl->action == IWL_MIMO3_SWITCH_SISO_C)
1871 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1872 break;
1873 default:
1874 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1875 break;
1876 }
1877
1691 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && 1878 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1692 (tbl->action < IWL_MIMO3_SWITCH_SISO_A || 1879 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
1693 tbl->action > IWL_MIMO3_SWITCH_SISO_C)) { 1880 tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
1694 /* switch in SISO */ 1881 /* switch in SISO */
1695 tbl->action = IWL_MIMO3_SWITCH_SISO_A; 1882 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1696 } 1883 }
1884
1885 /* configure as 1x1 if bt full concurrency */
1886 if (priv->bt_full_concurrent &&
1887 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
1888 tbl->action > IWL_MIMO3_SWITCH_SISO_C))
1889 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1890
1891 start_action = tbl->action;
1697 for (;;) { 1892 for (;;) {
1698 lq_sta->action_counter++; 1893 lq_sta->action_counter++;
1699 switch (tbl->action) { 1894 switch (tbl->action) {
@@ -1820,7 +2015,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1820 * 2) # times calling this function 2015 * 2) # times calling this function
1821 * 3) elapsed time in this mode (not used, for now) 2016 * 3) elapsed time in this mode (not used, for now)
1822 */ 2017 */
1823static void rs_stay_in_table(struct iwl_lq_sta *lq_sta) 2018static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1824{ 2019{
1825 struct iwl_scale_tbl_info *tbl; 2020 struct iwl_scale_tbl_info *tbl;
1826 int i; 2021 int i;
@@ -1851,7 +2046,8 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1851 * allow a new search. Also (below) reset all bitmaps and 2046 * allow a new search. Also (below) reset all bitmaps and
1852 * stats in active history. 2047 * stats in active history.
1853 */ 2048 */
1854 if ((lq_sta->total_failed > lq_sta->max_failure_limit) || 2049 if (force_search ||
2050 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1855 (lq_sta->total_success > lq_sta->max_success_limit) || 2051 (lq_sta->total_success > lq_sta->max_success_limit) ||
1856 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) 2052 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1857 && (flush_interval_passed))) { 2053 && (flush_interval_passed))) {
@@ -1900,6 +2096,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1900 * return rate_n_flags as used in the table 2096 * return rate_n_flags as used in the table
1901 */ 2097 */
1902static u32 rs_update_rate_tbl(struct iwl_priv *priv, 2098static u32 rs_update_rate_tbl(struct iwl_priv *priv,
2099 struct iwl_rxon_context *ctx,
1903 struct iwl_lq_sta *lq_sta, 2100 struct iwl_lq_sta *lq_sta,
1904 struct iwl_scale_tbl_info *tbl, 2101 struct iwl_scale_tbl_info *tbl,
1905 int index, u8 is_green) 2102 int index, u8 is_green)
@@ -1909,7 +2106,7 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
1909 /* Update uCode's rate table. */ 2106 /* Update uCode's rate table. */
1910 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); 2107 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
1911 rs_fill_link_cmd(priv, lq_sta, rate); 2108 rs_fill_link_cmd(priv, lq_sta, rate);
1912 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false); 2109 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1913 2110
1914 return rate; 2111 return rate;
1915} 2112}
@@ -1948,6 +2145,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1948 s32 sr; 2145 s32 sr;
1949 u8 tid = MAX_TID_COUNT; 2146 u8 tid = MAX_TID_COUNT;
1950 struct iwl_tid_data *tid_data; 2147 struct iwl_tid_data *tid_data;
2148 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2149 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1951 2150
1952 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); 2151 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1953 2152
@@ -1986,7 +2185,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1986 if (is_legacy(tbl->lq_type)) 2185 if (is_legacy(tbl->lq_type))
1987 lq_sta->is_green = 0; 2186 lq_sta->is_green = 0;
1988 else 2187 else
1989 lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config); 2188 lq_sta->is_green = rs_use_green(sta);
1990 is_green = lq_sta->is_green; 2189 is_green = lq_sta->is_green;
1991 2190
1992 /* current tx rate */ 2191 /* current tx rate */
@@ -2025,7 +2224,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2025 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 2224 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2026 /* get "active" rate info */ 2225 /* get "active" rate info */
2027 index = iwl_hwrate_to_plcp_idx(tbl->current_rate); 2226 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2028 rate = rs_update_rate_tbl(priv, lq_sta, 2227 rate = rs_update_rate_tbl(priv, ctx, lq_sta,
2029 tbl, index, is_green); 2228 tbl, index, is_green);
2030 } 2229 }
2031 return; 2230 return;
@@ -2067,7 +2266,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2067 2266
2068 /* Should we stay with this modulation mode, 2267 /* Should we stay with this modulation mode,
2069 * or search for a new one? */ 2268 * or search for a new one? */
2070 rs_stay_in_table(lq_sta); 2269 rs_stay_in_table(lq_sta, false);
2071 2270
2072 goto out; 2271 goto out;
2073 } 2272 }
@@ -2215,6 +2414,28 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2215 if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI && 2414 if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI &&
2216 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) 2415 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
2217 scale_action = -1; 2416 scale_action = -1;
2417
2418 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2419 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2420 if (lq_sta->last_bt_traffic > priv->bt_traffic_load) {
2421 /*
2422 * BT traffic is decreasing; leave scale_action alone, as
2423 * we do not want to force a scale-up unless rate scaling
2424 * already thinks that is a good idea.
2425 */
2426 } else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) {
2427 scale_action = -1;
2428 }
2429 }
2430 lq_sta->last_bt_traffic = priv->bt_traffic_load;
2431
2432 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2433 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2434 /* search for a new modulation */
2435 rs_stay_in_table(lq_sta, true);
2436 goto lq_update;
2437 }
2438
2218 switch (scale_action) { 2439 switch (scale_action) {
2219 case -1: 2440 case -1:
2220 /* Decrease starting rate, update uCode's rate table */ 2441 /* Decrease starting rate, update uCode's rate table */
@@ -2245,13 +2466,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2245lq_update: 2466lq_update:
2246 /* Replace uCode's rate table for the destination station. */ 2467 /* Replace uCode's rate table for the destination station. */
2247 if (update_lq) 2468 if (update_lq)
2248 rate = rs_update_rate_tbl(priv, lq_sta, 2469 rate = rs_update_rate_tbl(priv, ctx, lq_sta,
2249 tbl, index, is_green); 2470 tbl, index, is_green);
2250 2471
2251 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) { 2472 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
2252 /* Should we stay with this modulation mode, 2473 /* Should we stay with this modulation mode,
2253 * or search for a new one? */ 2474 * or search for a new one? */
2254 rs_stay_in_table(lq_sta); 2475 rs_stay_in_table(lq_sta, false);
2255 } 2476 }
2256 /* 2477 /*
2257 * Search for new modulation mode if we're: 2478 * Search for new modulation mode if we're:
@@ -2287,7 +2508,7 @@ lq_update:
2287 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n", 2508 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
2288 tbl->current_rate, index); 2509 tbl->current_rate, index);
2289 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); 2510 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2290 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false); 2511 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2291 } else 2512 } else
2292 done_search = 1; 2513 done_search = 1;
2293 } 2514 }
@@ -2357,12 +2578,17 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2357 int rate_idx; 2578 int rate_idx;
2358 int i; 2579 int i;
2359 u32 rate; 2580 u32 rate;
2360 u8 use_green = rs_use_green(sta, &priv->current_ht_config); 2581 u8 use_green = rs_use_green(sta);
2361 u8 active_tbl = 0; 2582 u8 active_tbl = 0;
2362 u8 valid_tx_ant; 2583 u8 valid_tx_ant;
2584 struct iwl_station_priv *sta_priv;
2585 struct iwl_rxon_context *ctx;
2363 2586
2364 if (!sta || !lq_sta) 2587 if (!sta || !lq_sta)
2365 goto out; 2588 return;
2589
2590 sta_priv = (void *)sta->drv_priv;
2591 ctx = sta_priv->common.ctx;
2366 2592
2367 i = lq_sta->last_txrate_idx; 2593 i = lq_sta->last_txrate_idx;
2368 2594
@@ -2394,9 +2620,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2394 rs_set_expected_tpt_table(lq_sta, tbl); 2620 rs_set_expected_tpt_table(lq_sta, tbl);
2395 rs_fill_link_cmd(NULL, lq_sta, rate); 2621 rs_fill_link_cmd(NULL, lq_sta, rate);
2396 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; 2622 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2397 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_SYNC, true); 2623 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2398 out:
2399 return;
2400} 2624}
2401 2625
2402static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, 2626static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
@@ -2524,7 +2748,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2524 lq_sta->is_dup = 0; 2748 lq_sta->is_dup = 0;
2525 lq_sta->max_rate_idx = -1; 2749 lq_sta->max_rate_idx = -1;
2526 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; 2750 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2527 lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config); 2751 lq_sta->is_green = rs_use_green(sta);
2528 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); 2752 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2529 lq_sta->band = priv->band; 2753 lq_sta->band = priv->band;
2530 /* 2754 /*
@@ -2594,10 +2818,15 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2594 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2818 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2595 2819
2596 /* Interpret new_rate (rate_n_flags) */ 2820 /* Interpret new_rate (rate_n_flags) */
2597 memset(&tbl_type, 0, sizeof(tbl_type));
2598 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, 2821 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2599 &tbl_type, &rate_idx); 2822 &tbl_type, &rate_idx);
2600 2823
2824 if (priv && priv->bt_full_concurrent) {
2825 /* 1x1 only */
2826 tbl_type.ant_type =
2827 first_antenna(priv->hw_params.valid_tx_ant);
2828 }
2829
2601 /* How many times should we repeat the initial rate? */ 2830 /* How many times should we repeat the initial rate? */
2602 if (is_legacy(tbl_type.lq_type)) { 2831 if (is_legacy(tbl_type.lq_type)) {
2603 ant_toggle_cnt = 1; 2832 ant_toggle_cnt = 1;
@@ -2622,9 +2851,12 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2622 2851
2623 index++; 2852 index++;
2624 repeat_rate--; 2853 repeat_rate--;
2625 2854 if (priv) {
2626 if (priv) 2855 if (priv->bt_full_concurrent)
2627 valid_tx_ant = priv->hw_params.valid_tx_ant; 2856 valid_tx_ant = ANT_A;
2857 else
2858 valid_tx_ant = priv->hw_params.valid_tx_ant;
2859 }
2628 2860
2629 /* Fill rest of rate table */ 2861 /* Fill rest of rate table */
2630 while (index < LINK_QUAL_MAX_RETRY_NUM) { 2862 while (index < LINK_QUAL_MAX_RETRY_NUM) {
@@ -2639,7 +2871,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2639 rs_toggle_antenna(valid_tx_ant, 2871 rs_toggle_antenna(valid_tx_ant,
2640 &new_rate, &tbl_type)) 2872 &new_rate, &tbl_type))
2641 ant_toggle_cnt = 1; 2873 ant_toggle_cnt = 1;
2642} 2874 }
2643 2875
2644 /* Override next rate if needed for debug purposes */ 2876 /* Override next rate if needed for debug purposes */
2645 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2877 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
@@ -2654,6 +2886,12 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2654 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, 2886 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
2655 &rate_idx); 2887 &rate_idx);
2656 2888
2889 if (priv && priv->bt_full_concurrent) {
2890 /* 1x1 only */
2891 tbl_type.ant_type =
2892 first_antenna(priv->hw_params.valid_tx_ant);
2893 }
2894
2657 /* Indicate to uCode which entries might be MIMO. 2895 /* Indicate to uCode which entries might be MIMO.
2658 * If initial rate was MIMO, this will finally end up 2896 * If initial rate was MIMO, this will finally end up
2659 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ 2897 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@@ -2694,8 +2932,18 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2694 2932
2695 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 2933 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2696 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 2934 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2935
2697 lq_cmd->agg_params.agg_time_limit = 2936 lq_cmd->agg_params.agg_time_limit =
2698 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); 2937 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2938 /*
2939 * override the default if the per-device config provides a
2940 * value; the limit is passed to uCode in usec
2941 */
2942 if (priv && priv->cfg->agg_time_limit &&
2943 priv->cfg->agg_time_limit >= LINK_QUAL_AGG_TIME_LIMIT_MIN &&
2944 priv->cfg->agg_time_limit <= LINK_QUAL_AGG_TIME_LIMIT_MAX)
2945 lq_cmd->agg_params.agg_time_limit =
2946 cpu_to_le16(priv->cfg->agg_time_limit);
2699} 2947}
2700 2948
2701static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 2949static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2760,6 +3008,9 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2760 char buf[64]; 3008 char buf[64];
2761 int buf_size; 3009 int buf_size;
2762 u32 parsed_rate; 3010 u32 parsed_rate;
3011 struct iwl_station_priv *sta_priv =
3012 container_of(lq_sta, struct iwl_station_priv, lq_sta);
3013 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2763 3014
2764 priv = lq_sta->drv; 3015 priv = lq_sta->drv;
2765 memset(buf, 0, sizeof(buf)); 3016 memset(buf, 0, sizeof(buf));
@@ -2782,7 +3033,8 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2782 3033
2783 if (lq_sta->dbg_fixed_rate) { 3034 if (lq_sta->dbg_fixed_rate) {
2784 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); 3035 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2785 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false); 3036 iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
3037 false);
2786 } 3038 }
2787 3039
2788 return count; 3040 return count;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 8292f6d48ec6..3970ab1deaf9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -432,6 +432,8 @@ struct iwl_lq_sta {
432 u32 last_rate_n_flags; 432 u32 last_rate_n_flags;
433 /* packets destined for this STA are aggregated */ 433 /* packets destined for this STA are aggregated */
434 u8 is_agg; 434 u8 is_agg;
435 /* BT traffic this sta was last updated in */
436 u8 last_bt_traffic;
435}; 437};
436 438
437static inline u8 num_of_ant(u8 mask) 439static inline u8 num_of_ant(u8 mask)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
new file mode 100644
index 000000000000..07b2c6cadf51
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -0,0 +1,704 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-agn-tt.h"
44
45/* default Thermal Throttling transaction table
46 * Current state | Throttling Down | Throttling Up
47 *=============================================================================
48 * Condition Nxt State Condition Nxt State Condition Nxt State
49 *-----------------------------------------------------------------------------
50 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
51 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
52 * IWL_TI_2 T >= 114 CT_KILL N/A N/A T<=100 TI_1
53 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
54 *=============================================================================
55 */
56static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
57 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
58 {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
59 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
60};
61static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
62 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
63 {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
64 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
65};
66static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
67 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
68 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
69 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
70};
71static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
72 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
73 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
74 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
75};
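
The four tables above sit back to back in tt->transaction, one block of
(IWL_TI_STATE_MAX - 1) entries per current state, and iwl_advance_tt_handler()
below walks the block for the current state until the temperature falls inside
an entry's range. A minimal stand-alone sketch of that lookup, with simplified
types and with CT_KILL_THRESHOLD assumed to be 114 and CT_KILL_EXIT_THRESHOLD
95 (the values the table in the comment above suggests):

#include <stdio.h>

enum tt_state { TI_0, TI_1, TI_2, TI_CT_KILL, TI_STATE_MAX };

struct tt_trans {
	enum tt_state next;
	int low, high;
};

/* flat table: (TI_STATE_MAX - 1) entries per current state */
static const struct tt_trans transaction[] = {
	/* from TI_0 */
	{ TI_0, -273, 104 }, { TI_1, 105, 113 }, { TI_CT_KILL, 114, 999 },
	/* from TI_1 */
	{ TI_0, -273, 95 }, { TI_2, 110, 113 }, { TI_CT_KILL, 114, 999 },
	/* from TI_2 */
	{ TI_1, -273, 100 }, { TI_CT_KILL, 114, 999 }, { TI_CT_KILL, 114, 999 },
	/* from TI_CT_KILL */
	{ TI_0, -273, 95 }, { TI_CT_KILL, 96, 999 }, { TI_CT_KILL, 96, 999 },
};

static enum tt_state tt_next_state(enum tt_state old, int temp)
{
	int i;

	for (i = 0; i < TI_STATE_MAX - 1; i++) {
		const struct tt_trans *t =
			&transaction[old * (TI_STATE_MAX - 1) + i];

		if (temp >= t->low && temp <= t->high)
			return t->next;
	}
	return old;	/* no entry matched: keep the current state */
}

int main(void)
{
	/* cooling from TI_1 to 90 C falls back to TI_0; heating to
	 * 120 C from any state lands in TI_CT_KILL */
	printf("%d %d\n", tt_next_state(TI_1, 90), tt_next_state(TI_0, 120));
	return 0;
}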
76
77/* Advanced Thermal Throttling default restriction table */
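/* one row per state (IWL_TI_0 .. IWL_TI_CT_KILL); the field order is
 * assumed to be { tx_stream, rx_stream, is_ht } as read by the
 * accessor functions below */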
78static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
79 {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
80 {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
81 {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
82 {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
83};
84
85bool iwl_tt_is_low_power_state(struct iwl_priv *priv)
86{
87 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
88
89 if (tt->state >= IWL_TI_1)
90 return true;
91 return false;
92}
93
94u8 iwl_tt_current_power_mode(struct iwl_priv *priv)
95{
96 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
97
98 return tt->tt_power_mode;
99}
100
101bool iwl_ht_enabled(struct iwl_priv *priv)
102{
103 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
104 struct iwl_tt_restriction *restriction;
105
106 if (!priv->thermal_throttle.advanced_tt)
107 return true;
108 restriction = tt->restriction + tt->state;
109 return restriction->is_ht;
110}
111
112static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
113{
114 s32 temp = priv->temperature; /* degrees Celsius unless the device reports Kelvin */
115 bool within_margin = false;
116
117 if (priv->cfg->temperature_kelvin)
118 temp = KELVIN_TO_CELSIUS(priv->temperature);
119
120 if (!priv->thermal_throttle.advanced_tt)
121 within_margin = (temp + IWL_TT_CT_KILL_MARGIN) >=
122 CT_KILL_THRESHOLD_LEGACY;
123 else
124 within_margin = (temp + IWL_TT_CT_KILL_MARGIN) >=
125 CT_KILL_THRESHOLD;
126 return within_margin;
127}
128
129bool iwl_check_for_ct_kill(struct iwl_priv *priv)
130{
131 bool is_ct_kill = false;
132
133 if (iwl_within_ct_kill_margin(priv)) {
134 iwl_tt_enter_ct_kill(priv);
135 is_ct_kill = true;
136 }
137 return is_ct_kill;
138}
139
140enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
141{
142 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
143 struct iwl_tt_restriction *restriction;
144
145 if (!priv->thermal_throttle.advanced_tt)
146 return IWL_ANT_OK_MULTI;
147 restriction = tt->restriction + tt->state;
148 return restriction->tx_stream;
149}
150
151enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
152{
153 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
154 struct iwl_tt_restriction *restriction;
155
156 if (!priv->thermal_throttle.advanced_tt)
157 return IWL_ANT_OK_MULTI;
158 restriction = tt->restriction + tt->state;
159 return restriction->rx_stream;
160}
161
162#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
163#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
164
165/*
166 * Toggle the bit to wake up uCode and check the temperature.
167 * If the temperature is below CT, uCode stays awake and sends a card
168 * state notification with the CT_KILL bit clear to tell Thermal
169 * Throttling Management to change state; otherwise, uCode goes back
170 * to sleep without doing anything, and the driver should keep the
171 * 5-second timer running until the temperature drops below CT.
172 */
173static void iwl_tt_check_exit_ct_kill(unsigned long data)
174{
175 struct iwl_priv *priv = (struct iwl_priv *)data;
176 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
177 unsigned long flags;
178
179 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
180 return;
181
182 if (tt->state == IWL_TI_CT_KILL) {
183 if (priv->thermal_throttle.ct_kill_toggle) {
184 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
185 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
186 priv->thermal_throttle.ct_kill_toggle = false;
187 } else {
188 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
189 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
190 priv->thermal_throttle.ct_kill_toggle = true;
191 }
192 iwl_read32(priv, CSR_UCODE_DRV_GP1);
193 spin_lock_irqsave(&priv->reg_lock, flags);
194 if (!iwl_grab_nic_access(priv))
195 iwl_release_nic_access(priv);
196 spin_unlock_irqrestore(&priv->reg_lock, flags);
197
198 /* Reschedule the ct_kill timer to occur in
199 * CT_KILL_EXIT_DURATION seconds to ensure we get a
200 * thermal update */
201 IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
202 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
203 jiffies + CT_KILL_EXIT_DURATION * HZ);
204 }
205}
206
207static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
208 bool stop)
209{
210 if (stop) {
211 IWL_DEBUG_POWER(priv, "Stop all queues\n");
212 if (priv->mac80211_registered)
213 ieee80211_stop_queues(priv->hw);
214 IWL_DEBUG_POWER(priv,
215 "Schedule 5-second CT_KILL timer\n");
216 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
217 jiffies + CT_KILL_EXIT_DURATION * HZ);
218 } else {
219 IWL_DEBUG_POWER(priv, "Wake all queues\n");
220 if (priv->mac80211_registered)
221 ieee80211_wake_queues(priv->hw);
222 }
223}
224
225static void iwl_tt_ready_for_ct_kill(unsigned long data)
226{
227 struct iwl_priv *priv = (struct iwl_priv *)data;
228 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
229
230 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
231 return;
232
233 /* temperature timer expired, ready to go into CT_KILL state */
234 if (tt->state != IWL_TI_CT_KILL) {
235 IWL_DEBUG_POWER(priv, "entering CT_KILL state when "
236 "temperature timer expired\n");
237 tt->state = IWL_TI_CT_KILL;
238 set_bit(STATUS_CT_KILL, &priv->status);
239 iwl_perform_ct_kill_task(priv, true);
240 }
241}
242
243static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
244{
245 IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
246 /* make request to retrieve statistics information */
247 iwl_send_statistics_request(priv, CMD_SYNC, false);
248 /* Reschedule the ct_kill wait timer */
249 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
250 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
251}
252
253#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
254#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
255#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
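
Together with iwl_legacy_tt_handler() below, these thresholds form a simple
staircase from temperature to state. A stand-alone sketch of that mapping
(CT_KILL_THRESHOLD_LEGACY is defined elsewhere; 114 here is only an
assumption):

/* sketch only, not driver code; 114 stands in for
 * CT_KILL_THRESHOLD_LEGACY, whose value is not visible here */
static int legacy_tt_state(int temp_celsius)
{
	if (temp_celsius >= 114)	/* IWL_MINIMAL_POWER_THRESHOLD */
		return 3;		/* IWL_TI_CT_KILL */
	if (temp_celsius >= 100)	/* IWL_REDUCED_PERFORMANCE_THRESHOLD_2 */
		return 2;		/* IWL_TI_2 */
	if (temp_celsius >= 90)		/* IWL_REDUCED_PERFORMANCE_THRESHOLD_1 */
		return 1;		/* IWL_TI_1 */
	return 0;			/* IWL_TI_0 */
}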
256
257/*
258 * Legacy thermal throttling
259 * 1) Avoid NIC destruction due to high temperatures
260 * The chip identifies dangerously high temperatures that can
261 * harm the device and powers itself down
262 * 2) Avoid NIC power-down due to high temperature
263 * Throttle early enough to lower the power consumption before
264 * drastic steps are needed
265 */
266static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
267{
268 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
269 enum iwl_tt_state old_state;
270
271#ifdef CONFIG_IWLWIFI_DEBUG
272 if ((tt->tt_previous_temp) &&
273 (temp > tt->tt_previous_temp) &&
274 ((temp - tt->tt_previous_temp) >
275 IWL_TT_INCREASE_MARGIN)) {
276 IWL_DEBUG_POWER(priv,
277 "Temperature increased by %d degrees Celsius\n",
278 (temp - tt->tt_previous_temp));
279 }
280#endif
281 old_state = tt->state;
282 /* in Celsius */
283 if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
284 tt->state = IWL_TI_CT_KILL;
285 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
286 tt->state = IWL_TI_2;
287 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
288 tt->state = IWL_TI_1;
289 else
290 tt->state = IWL_TI_0;
291
292#ifdef CONFIG_IWLWIFI_DEBUG
293 tt->tt_previous_temp = temp;
294#endif
295 /* stop ct_kill_waiting_tm timer */
296 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
297 if (tt->state != old_state) {
298 switch (tt->state) {
299 case IWL_TI_0:
300 /*
301 * When the system is ready to go back to IWL_TI_0
302 * we only have to call iwl_power_update_mode() to
303 * do so.
304 */
305 break;
306 case IWL_TI_1:
307 tt->tt_power_mode = IWL_POWER_INDEX_3;
308 break;
309 case IWL_TI_2:
310 tt->tt_power_mode = IWL_POWER_INDEX_4;
311 break;
312 default:
313 tt->tt_power_mode = IWL_POWER_INDEX_5;
314 break;
315 }
316 mutex_lock(&priv->mutex);
317 if (old_state == IWL_TI_CT_KILL)
318 clear_bit(STATUS_CT_KILL, &priv->status);
319 if (tt->state != IWL_TI_CT_KILL &&
320 iwl_power_update_mode(priv, true)) {
321 /* TT state not updated
322 * try again during the next temperature read
323 */
324 if (old_state == IWL_TI_CT_KILL)
325 set_bit(STATUS_CT_KILL, &priv->status);
326 tt->state = old_state;
327 IWL_ERR(priv, "Cannot update power mode, "
328 "TT state not updated\n");
329 } else {
330 if (tt->state == IWL_TI_CT_KILL) {
331 if (force) {
332 set_bit(STATUS_CT_KILL, &priv->status);
333 iwl_perform_ct_kill_task(priv, true);
334 } else {
335 iwl_prepare_ct_kill_task(priv);
336 tt->state = old_state;
337 }
338 } else if (old_state == IWL_TI_CT_KILL &&
339 tt->state != IWL_TI_CT_KILL)
340 iwl_perform_ct_kill_task(priv, false);
341 IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
342 tt->state);
343 IWL_DEBUG_POWER(priv, "Power Index change to %u\n",
344 tt->tt_power_mode);
345 }
346 mutex_unlock(&priv->mutex);
347 }
348}
349
350/*
351 * Advanced thermal throttling
352 * 1) Avoid NIC destruction due to high temperatures
353 * The chip identifies dangerously high temperatures that can
354 * harm the device and powers itself down
355 * 2) Avoid NIC power-down due to high temperature
356 * Throttle early enough to lower the power consumption before
357 * drastic steps are needed
358 * Actions include relaxing the power down sleep thresholds and
359 * decreasing the number of TX streams
360 * 3) Avoid throughput performance impact as much as possible
361 *
362 *=============================================================================
363 * Condition Nxt State Condition Nxt State Condition Nxt State
364 *-----------------------------------------------------------------------------
365 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
366 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
367 * IWL_TI_2 T >= 114 CT_KILL N/A N/A T<=100 TI_1
368 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
369 *=============================================================================
370 */
371static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
372{
373 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
374 int i;
375 bool changed = false;
376 enum iwl_tt_state old_state;
377 struct iwl_tt_trans *transaction;
378
379 old_state = tt->state;
380 for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
381 /* based on the current TT state,
382 * find the corresponding transition table;
383 * each table has (IWL_TI_STATE_MAX - 1) entries.
384 * tt->transaction + (old_state * (IWL_TI_STATE_MAX - 1))
385 * advances to the correct table. then, based on the
386 * current temperature, go through all the possible
387 * (IWL_TI_STATE_MAX - 1) entries in the current table
388 * to find the next state to transition to and to see
389 * whether a transition is needed at all
390 */
391 transaction = tt->transaction +
392 ((old_state * (IWL_TI_STATE_MAX - 1)) + i);
393 if (temp >= transaction->tt_low &&
394 temp <= transaction->tt_high) {
395#ifdef CONFIG_IWLWIFI_DEBUG
396 if ((tt->tt_previous_temp) &&
397 (temp > tt->tt_previous_temp) &&
398 ((temp - tt->tt_previous_temp) >
399 IWL_TT_INCREASE_MARGIN)) {
400 IWL_DEBUG_POWER(priv,
401 "Temperature increased by %d "
402 "degrees Celsius\n",
403 (temp - tt->tt_previous_temp));
404 }
405 tt->tt_previous_temp = temp;
406#endif
407 if (old_state !=
408 transaction->next_state) {
409 changed = true;
410 tt->state =
411 transaction->next_state;
412 }
413 break;
414 }
415 }
416 /* stop ct_kill_waiting_tm timer */
417 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
418 if (changed) {
419 if (tt->state >= IWL_TI_1) {
420 /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
421 tt->tt_power_mode = IWL_POWER_INDEX_5;
422
423 if (!iwl_ht_enabled(priv)) {
424 struct iwl_rxon_context *ctx;
425
426 for_each_context(priv, ctx) {
427 struct iwl_rxon_cmd *rxon;
428
429 rxon = &ctx->staging;
430
431 /* disable HT */
432 rxon->flags &= ~(
433 RXON_FLG_CHANNEL_MODE_MSK |
434 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
435 RXON_FLG_HT40_PROT_MSK |
436 RXON_FLG_HT_PROT_MSK);
437 }
438 } else {
439 /* check HT capability and set it
440 * according to the system HT capability,
441 * in case it was disabled before */
442 iwl_set_rxon_ht(priv, &priv->current_ht_config);
443 }
444
445 } else {
446 /*
447 * restore system power setting -- it will be
448 * recalculated automatically.
449 */
450
451 /* check HT capability and set it
452 * according to the system HT capability,
453 * in case it was disabled before */
454 iwl_set_rxon_ht(priv, &priv->current_ht_config);
455 }
456 mutex_lock(&priv->mutex);
457 if (old_state == IWL_TI_CT_KILL)
458 clear_bit(STATUS_CT_KILL, &priv->status);
459 if (tt->state != IWL_TI_CT_KILL &&
460 iwl_power_update_mode(priv, true)) {
461 /* TT state not updated;
462 * try again during the next temperature read
463 */
464 IWL_ERR(priv, "Cannot update power mode, "
465 "TT state not updated\n");
466 if (old_state == IWL_TI_CT_KILL)
467 set_bit(STATUS_CT_KILL, &priv->status);
468 tt->state = old_state;
469 } else {
470 IWL_DEBUG_POWER(priv,
471 "Thermal Throttling to new state: %u\n",
472 tt->state);
473 if (old_state != IWL_TI_CT_KILL &&
474 tt->state == IWL_TI_CT_KILL) {
475 if (force) {
476 IWL_DEBUG_POWER(priv,
477 "Enter IWL_TI_CT_KILL\n");
478 set_bit(STATUS_CT_KILL, &priv->status);
479 iwl_perform_ct_kill_task(priv, true);
480 } else {
481 iwl_prepare_ct_kill_task(priv);
482 tt->state = old_state;
483 }
484 } else if (old_state == IWL_TI_CT_KILL &&
485 tt->state != IWL_TI_CT_KILL) {
486 IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
487 iwl_perform_ct_kill_task(priv, false);
488 }
489 }
490 mutex_unlock(&priv->mutex);
491 }
492}
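The flattened-table lookup described in the comment inside iwl_advance_tt_handler() is easier to see in isolation. Below is a self-contained, compilable sketch of the same indexing scheme; the table name and temperature windows are illustrative only (they mirror the state matrix above, not the driver's real tt_range_0..tt_range_3 data).

#include <stdio.h>

enum tt_state { TI_0, TI_1, TI_2, TI_CT_KILL, TI_STATE_MAX };

struct tt_trans { enum tt_state next; int low, high; };

/* flattened [state][entry] table, (TI_STATE_MAX - 1) entries per row */
static const struct tt_trans table[TI_STATE_MAX * (TI_STATE_MAX - 1)] = {
	{TI_1, 105, 113}, {TI_CT_KILL, 114, 999}, {TI_0, 0, 104},	/* TI_0 */
	{TI_0,   0,  95}, {TI_2, 110, 113}, {TI_CT_KILL, 114, 999},	/* TI_1 */
	{TI_1,   0, 100}, {TI_CT_KILL, 114, 999}, {TI_2, 101, 113},	/* TI_2 */
	{TI_0,   0,  95}, {TI_CT_KILL, 96, 999}, {TI_CT_KILL, 96, 999},/* CT_KILL */
};

static enum tt_state next_state(enum tt_state cur, int temp)
{
	/* same pointer arithmetic as the driver: select the row... */
	const struct tt_trans *row = table + cur * (TI_STATE_MAX - 1);
	int i;

	/* ...then scan its entries for a matching temperature window */
	for (i = 0; i < TI_STATE_MAX - 1; i++)
		if (temp >= row[i].low && temp <= row[i].high)
			return row[i].next;
	return cur;	/* no window matched: stay in the current state */
}

int main(void)
{
	printf("%d\n", next_state(TI_0, 107));	/* prints 1, i.e. TI_1 */
	return 0;
}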
493
494/* Card State Notification indicates the critical temperature was reached.
495 * If PSP is not enabled, no thermal throttling is performed and
496 * the GP1 bit is simply set to acknowledge the event;
497 * otherwise, go into the IWL_TI_CT_KILL state.
498 * Since the Card State Notification does not provide any temperature
499 * reading, for legacy mode
500 * just pass the CT_KILL temperature to iwl_legacy_tt_handler();
501 * for advanced mode
502 * pass CT_KILL_THRESHOLD + 1 to make sure we move into IWL_TI_CT_KILL state
503 */
504static void iwl_bg_ct_enter(struct work_struct *work)
505{
506 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
507 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
508
509 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
510 return;
511
512 if (!iwl_is_ready(priv))
513 return;
514
515 if (tt->state != IWL_TI_CT_KILL) {
516 IWL_ERR(priv, "Device reached critical temperature "
517 "- ucode going to sleep!\n");
518 if (!priv->thermal_throttle.advanced_tt)
519 iwl_legacy_tt_handler(priv,
520 IWL_MINIMAL_POWER_THRESHOLD,
521 true);
522 else
523 iwl_advance_tt_handler(priv,
524 CT_KILL_THRESHOLD + 1, true);
525 }
526}
527
528/* Card State Notification indicates the device is back out of the critical
529 * temperature range; since the notification carries no temperature reading,
530 * pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature
531 * to iwl_legacy_tt_handler() to get out of the IWL_TI_CT_KILL state
532 */
533static void iwl_bg_ct_exit(struct work_struct *work)
534{
535 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
536 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
537
538 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
539 return;
540
541 if (!iwl_is_ready(priv))
542 return;
543
544 /* stop ct_kill_exit_tm timer */
545 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
546
547 if (tt->state == IWL_TI_CT_KILL) {
548 IWL_ERR(priv,
549 "Device temperature below critical"
550 "- ucode awake!\n");
551 /*
552 * exit from CT_KILL state
553 * reset the current temperature reading
554 */
555 priv->temperature = 0;
556 if (!priv->thermal_throttle.advanced_tt)
557 iwl_legacy_tt_handler(priv,
558 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
559 true);
560 else
561 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
562 true);
563 }
564}
565
566void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
567{
568 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
569 return;
570
571 IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
572 queue_work(priv->workqueue, &priv->ct_enter);
573}
574EXPORT_SYMBOL(iwl_tt_enter_ct_kill);
575
576void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
577{
578 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
579 return;
580
581 IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
582 queue_work(priv->workqueue, &priv->ct_exit);
583}
584EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
585
586static void iwl_bg_tt_work(struct work_struct *work)
587{
588 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
588 s32 temp = priv->temperature; /* degrees Celsius unless otherwise specified */
590
591 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
592 return;
593
594 if (priv->cfg->temperature_kelvin)
595 temp = KELVIN_TO_CELSIUS(priv->temperature);
596
597 if (!priv->thermal_throttle.advanced_tt)
598 iwl_legacy_tt_handler(priv, temp, false);
599 else
600 iwl_advance_tt_handler(priv, temp, false);
601}
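For devices that report temperature in Kelvin, the conversion above relies on a macro kept in a shared driver header (not shown in this patch); presumably it is the simple offset form:

/* assumed definition -- not quoted from the header */
#define KELVIN_TO_CELSIUS(x) ((x) - 273)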
602
603void iwl_tt_handler(struct iwl_priv *priv)
604{
605 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
606 return;
607
608 IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
609 queue_work(priv->workqueue, &priv->tt_work);
610}
611EXPORT_SYMBOL(iwl_tt_handler);
612
613/* Thermal throttling initialization
614 * For advanced thermal throttling:
615 * Initialize the Thermal Index and temperature threshold table;
616 * Initialize the thermal throttling restriction table
617 */
618void iwl_tt_initialize(struct iwl_priv *priv)
619{
620 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
621 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
622 struct iwl_tt_trans *transaction;
623
624 IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
625
626 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
627
628 tt->state = IWL_TI_0;
629 init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
630 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
631 priv->thermal_throttle.ct_kill_exit_tm.function =
632 iwl_tt_check_exit_ct_kill;
633 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
634 priv->thermal_throttle.ct_kill_waiting_tm.data =
635 (unsigned long)priv;
636 priv->thermal_throttle.ct_kill_waiting_tm.function =
637 iwl_tt_ready_for_ct_kill;
638 /* setup deferred ct kill work */
639 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
640 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
641 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
642
643 if (priv->cfg->adv_thermal_throttle) {
644 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
645 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
646 IWL_TI_STATE_MAX, GFP_KERNEL);
647 tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
648 IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
649 GFP_KERNEL);
650 if (!tt->restriction || !tt->transaction) {
651 IWL_ERR(priv, "Fallback to Legacy Throttling\n");
652 priv->thermal_throttle.advanced_tt = false;
653 kfree(tt->restriction);
654 tt->restriction = NULL;
655 kfree(tt->transaction);
656 tt->transaction = NULL;
657 } else {
658 transaction = tt->transaction +
659 (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
660 memcpy(transaction, &tt_range_0[0], size);
661 transaction = tt->transaction +
662 (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
663 memcpy(transaction, &tt_range_1[0], size);
664 transaction = tt->transaction +
665 (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
666 memcpy(transaction, &tt_range_2[0], size);
667 transaction = tt->transaction +
668 (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
669 memcpy(transaction, &tt_range_3[0], size);
670 size = sizeof(struct iwl_tt_restriction) *
671 IWL_TI_STATE_MAX;
672 memcpy(tt->restriction,
673 &restriction_range[0], size);
674 priv->thermal_throttle.advanced_tt = true;
675 }
676 } else {
677 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
678 priv->thermal_throttle.advanced_tt = false;
679 }
680}
681EXPORT_SYMBOL(iwl_tt_initialize);
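The two open-coded init_timer()/.data/.function triples in iwl_tt_initialize() could be written more compactly with the kernel's setup_timer() helper of the same era; an equivalent sketch (not part of this patch):

setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
	    iwl_tt_check_exit_ct_kill, (unsigned long)priv);
setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
	    iwl_tt_ready_for_ct_kill, (unsigned long)priv);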
682
683/* clean up thermal throttling management related memory and timers */
684void iwl_tt_exit(struct iwl_priv *priv)
685{
686 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
687
688 /* stop ct_kill_exit_tm timer if activated */
689 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
690 /* stop ct_kill_waiting_tm timer if activated */
691 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
692 cancel_work_sync(&priv->tt_work);
693 cancel_work_sync(&priv->ct_enter);
694 cancel_work_sync(&priv->ct_exit);
695
696 if (priv->thermal_throttle.advanced_tt) {
697 /* free advanced thermal throttling memory */
698 kfree(tt->restriction);
699 tt->restriction = NULL;
700 kfree(tt->transaction);
701 tt->transaction = NULL;
702 }
703}
704EXPORT_SYMBOL(iwl_tt_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
new file mode 100644
index 000000000000..d55060427cac
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
@@ -0,0 +1,129 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_tt_setting_h__
29#define __iwl_tt_setting_h__
30
31#include "iwl-commands.h"
32
33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
37
38enum iwl_antenna_ok {
39 IWL_ANT_OK_NONE,
40 IWL_ANT_OK_SINGLE,
41 IWL_ANT_OK_MULTI,
42};
43
44/* Thermal Throttling State Machine states */
45enum iwl_tt_state {
46 IWL_TI_0, /* normal temperature, system power state */
47 IWL_TI_1, /* high temperature detect, low power state */
48 IWL_TI_2, /* higher temperature detected, lower power state */
49 IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */
50 IWL_TI_STATE_MAX
51};
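The throttling code compares these states directly (e.g. tt->state >= IWL_TI_1) and multiplies the enum value to index the flattened transition table, so the declaration order above is load-bearing. A hypothetical compile-time guard, e.g. placed inside iwl_tt_initialize(), would make that explicit:

BUILD_BUG_ON(IWL_TI_0 != 0);
BUILD_BUG_ON(IWL_TI_CT_KILL != IWL_TI_STATE_MAX - 1);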
52
53/**
54 * struct iwl_tt_restriction - Thermal Throttling restriction table
55 * @tx_stream: number of tx streams allowed
56 * @is_ht: ht enable/disable
57 * @rx_stream: number of rx streams allowed
58 *
59 * This table is used by advanced thermal throttling management
60 * based on the current thermal throttling state, and determines
61 * the number of tx/rx streams and the status of HT operation.
62 */
63struct iwl_tt_restriction {
64 enum iwl_antenna_ok tx_stream;
65 enum iwl_antenna_ok rx_stream;
66 bool is_ht;
67};
68
69/**
70 * struct iwl_tt_trans - Thermal Throttling transaction table
71 * @next_state: next thermal throttling mode
72 * @tt_low: low temperature threshold to change state
73 * @tt_high: high temperature threshold to change state
74 *
75 * This is used by the advanced thermal throttling algorithm
76 * to determine the next thermal state to go to, based on the
77 * current temperature.
78 */
79struct iwl_tt_trans {
80 enum iwl_tt_state next_state;
81 u32 tt_low;
82 u32 tt_high;
83};
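Concretely, one row of this table for IWL_TI_0 could encode the state matrix from iwl-agn-tt.c as below; the array name and exact bounds are illustrative (the driver's real rows are the tt_range_0..tt_range_3 arrays copied in iwl_tt_initialize()):

/* illustrative example row, not the driver's actual data */
static const struct iwl_tt_trans tt_range_0_example[IWL_TI_STATE_MAX - 1] = {
	{IWL_TI_1, 105, 113},			/* 114 > T >= 105: go to TI_1 */
	{IWL_TI_CT_KILL, 114, IWL_ABSOLUTE_MAX},/* T >= 114: CT kill */
	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},	/* otherwise stay in TI_0 */
};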
84
85/**
86 * struct iwl_tt_mgmt - Thermal Throttling Management structure
87 * @advanced_tt: advanced thermal throttle required
88 * @state: current Thermal Throttling state
89 * @tt_power_mode: Thermal Throttling power mode index
90 *		used to set the power level while the
91 *		thermal throttling state != IWL_TI_0;
92 *		tt_power_mode is set to a different
93 *		power mode based on the current tt state
94 * @tt_previous_temp: last measured temperature
95 * @restriction: ptr to restriction tbl, used by advanced
96 *		thermal throttling to determine how many tx/rx streams
97 *		should be used in a tt state and whether HT may be enabled
98 * @transaction: ptr to adv trans table, used by advanced thermal throttling
99 *		state transitions
100 * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature
101 * @ct_kill_exit_tm: timer to exit thermal kill
102 */
103struct iwl_tt_mgmt {
104 enum iwl_tt_state state;
105 bool advanced_tt;
106 u8 tt_power_mode;
107 bool ct_kill_toggle;
108#ifdef CONFIG_IWLWIFI_DEBUG
109 s32 tt_previous_temp;
110#endif
111 struct iwl_tt_restriction *restriction;
112 struct iwl_tt_trans *transaction;
113 struct timer_list ct_kill_exit_tm;
114 struct timer_list ct_kill_waiting_tm;
115};
116
117u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
118bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
119bool iwl_ht_enabled(struct iwl_priv *priv);
120bool iwl_check_for_ct_kill(struct iwl_priv *priv);
121enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
122enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
123void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
124void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
125void iwl_tt_handler(struct iwl_priv *priv);
126void iwl_tt_initialize(struct iwl_priv *priv);
127void iwl_tt_exit(struct iwl_priv *priv);
128
129#endif /* __iwl_tt_setting_h__ */
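Taken together, the API declared above is meant to be driven from the core driver roughly in this order (a sketch of the intended call flow, not code from this series):

/*
 *	iwl_tt_initialize(priv);	set up timers, works, adv. tables
 *	...
 *	iwl_tt_handler(priv);		on each temperature/statistics update
 *	iwl_tt_enter_ct_kill(priv);	on a critical-temperature card state
 *	iwl_tt_exit_ct_kill(priv);	when the card reports recovery
 *	...
 *	iwl_tt_exit(priv);		tear down timers, works and tables
 */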
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 69155aa448fb..5950184d9860 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -71,18 +71,6 @@ static const u8 tid_to_ac[] = {
 	2, 3, 3, 2, 1, 1, 0, 0
 };
 
-static const u8 ac_to_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-};
-
-static inline int get_fifo_from_ac(u8 ac)
-{
-	return ac_to_fifo[ac];
-}
-
 static inline int get_ac_from_tid(u16 tid)
 {
 	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
@@ -92,10 +80,10 @@ static inline int get_ac_from_tid(u16 tid)
 	return -EINVAL;
 }
 
-static inline int get_fifo_from_tid(u16 tid)
+static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
 {
 	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return get_fifo_from_ac(tid_to_ac[tid]);
+		return ctx->ac_to_fifo[tid_to_ac[tid]];
 
 	/* no support for TIDs 8-15 yet */
 	return -EINVAL;
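With this change the old global ac_to_fifo map becomes per-context data. The BSS and PAN contexts presumably carry maps along these lines; the names and contents here are an assumption, since the actual arrays are defined elsewhere in this series:

/* assumed per-context AC->FIFO maps (illustrative) */
static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN, IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN, IWL_TX_FIFO_BK_IPAN,
};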
@@ -118,7 +106,7 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 
 	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
 
-	if (txq_id != IWL_CMD_QUEUE_NUM) {
+	if (txq_id != priv->cmd_queue) {
 		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
 		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
 
@@ -155,7 +143,7 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
-	if (txq_id != IWL_CMD_QUEUE_NUM)
+	if (txq_id != priv->cmd_queue)
 		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
 
 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -333,19 +321,15 @@ void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
 	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
 }
 
-static inline int get_queue_from_ac(u16 ac)
-{
-	return ac;
-}
-
 /*
  * handle build REPLY_TX command notification.
  */
 static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
-					struct iwl_tx_cmd *tx_cmd,
-					struct ieee80211_tx_info *info,
-					struct ieee80211_hdr *hdr,
-					u8 std_id)
+					struct sk_buff *skb,
+					struct iwl_tx_cmd *tx_cmd,
+					struct ieee80211_tx_info *info,
+					struct ieee80211_hdr *hdr,
+					u8 std_id)
 {
 	__le16 fc = hdr->frame_control;
 	__le32 tx_flags = tx_cmd->tx_flags;
@@ -365,6 +349,12 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 
 	if (ieee80211_is_back_req(fc))
 		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+	else if (info->band == IEEE80211_BAND_2GHZ &&
+		 priv->cfg->advanced_bt_coexist &&
+		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
+		  ieee80211_is_reassoc_req(fc) ||
+		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
+		tx_flags |= TX_CMD_FLG_IGNORE_BT;
 
 
 	tx_cmd->sta_id = std_id;
@@ -454,7 +444,12 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
 		rate_flags |= RATE_MCS_CCK_MSK;
 
 	/* Set up antennas */
-	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+				first_antenna(priv->hw_params.valid_tx_ant));
+	} else
+		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
 					      priv->hw_params.valid_tx_ant);
 	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 
@@ -470,8 +465,8 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 {
 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 
-	switch (keyconf->alg) {
-	case ALG_CCMP:
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
 		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
 		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
 		if (info->flags & IEEE80211_TX_CTL_AMPDU)
@@ -479,20 +474,20 @@
 		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
 		break;
 
-	case ALG_TKIP:
+	case WLAN_CIPHER_SUITE_TKIP:
 		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
 		ieee80211_get_tkip_key(keyconf, skb_frag,
 				IEEE80211_TKIP_P2_KEY, tx_cmd->key);
 		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
 		break;
 
-	case ALG_WEP:
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
 		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
 			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
 
-		if (keyconf->keylen == WEP_KEY_LEN_128)
-			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-
 		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
 
 		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
@@ -500,7 +495,7 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 		break;
 
 	default:
-		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
+		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
 		break;
 	}
 }
@@ -519,6 +514,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_tx_cmd *tx_cmd;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	int swq_id, txq_id;
 	dma_addr_t phys_addr;
 	dma_addr_t txcmd_phys;
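iwlagn_tx_skb() now defaults to the BSS context and, as the next hunk shows, re-resolves it from the transmitting vif. The helper itself is not part of this hunk; presumably it reduces to reading a context pointer out of the vif's driver-private area, roughly (assumed shape):

/* assumption: how a vif maps back to its RXON context */
static inline struct iwl_rxon_context *
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	return vif_priv->ctx;
}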
@@ -533,6 +529,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	u8 *qc = NULL;
 	unsigned long flags;
 
+	if (info->control.vif)
+		ctx = iwl_rxon_ctx_from_vif(info->control.vif);
+
 	spin_lock_irqsave(&priv->lock, flags);
 	if (iwl_is_rfkill(priv)) {
 		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
@@ -553,7 +552,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	hdr_len = ieee80211_hdrlen(fc);
 
 	/* Find index into station table for destination station */
-	sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
+	sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
 	if (sta_id == IWL_INVALID_STATION) {
 		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 			       hdr->addr1);
@@ -565,8 +564,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (sta)
 		sta_priv = (void *)sta->drv_priv;
 
-	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
-	    sta_priv->asleep) {
+	if (sta_priv && sta_priv->asleep) {
 		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
 		/*
 		 * This sends an asynchronous command to the device,
@@ -580,7 +578,20 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
 	}
 
-	txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
+	/*
+	 * Send this frame after DTIM -- there's a special queue
+	 * reserved for this for contexts that support AP mode.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+		txq_id = ctx->mcast_queue;
+		/*
+		 * The microcode will clear the more data
+		 * bit in the last frame it transmits.
+		 */
+		hdr->frame_control |=
+			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	} else
+		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
 
 	/* irqs already disabled/saved above when locking priv->lock */
 	spin_lock(&priv->sta_lock);
@@ -625,6 +636,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Set up driver data for this TFD */
 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
 	txq->txb[q->write_ptr].skb = skb;
+	txq->txb[q->write_ptr].ctx = ctx;
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_cmd = txq->cmd[q->write_ptr];
@@ -655,7 +667,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
 
 	/* TODO need this for burst mode later on */
-	iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
 	iwl_dbg_log_tx_data_frame(priv, len, hdr);
 
 	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
@@ -813,7 +825,7 @@ void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
 	/* Tx queues */
 	if (priv->txq) {
 		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-			if (txq_id == IWL_CMD_QUEUE_NUM)
+			if (txq_id == priv->cmd_queue)
 				iwl_cmd_queue_free(priv);
 			else
 				iwl_tx_queue_free(priv, txq_id);
@@ -870,9 +882,9 @@ int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/* Alloc and init all Tx queues, including the command queue (#4) */
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+		slots_num = (txq_id == priv->cmd_queue) ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
 				       txq_id);
@@ -910,7 +922,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
 
 	/* Alloc and init all Tx queues, including the command queue (#4) */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+		slots_num = txq_id == priv->cmd_queue ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
 	}
@@ -968,7 +980,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	unsigned long flags;
 	struct iwl_tid_data *tid_data;
 
-	tx_fifo = get_fifo_from_tid(tid);
+	tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
 	if (unlikely(tx_fifo < 0))
 		return tx_fifo;
 
@@ -1024,12 +1036,12 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta, u16 tid)
 {
-	int tx_fifo_id, txq_id, sta_id, ssn = -1;
+	int tx_fifo_id, txq_id, sta_id, ssn;
 	struct iwl_tid_data *tid_data;
 	int write_ptr, read_ptr;
 	unsigned long flags;
 
-	tx_fifo_id = get_fifo_from_tid(tid);
+	tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
 	if (unlikely(tx_fifo_id < 0))
 		return tx_fifo_id;
 
@@ -1042,21 +1054,26 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 
-	if (priv->stations[sta_id].tid[tid].agg.state ==
-				IWL_EMPTYING_HW_QUEUE_ADDBA) {
-		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
-		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-		spin_unlock_irqrestore(&priv->sta_lock, flags);
-		return 0;
-	}
-
-	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
-		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-
 	tid_data = &priv->stations[sta_id].tid[tid];
 	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
 	txq_id = tid_data->agg.txq_id;
+
+	switch (priv->stations[sta_id].tid[tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+		goto turn_off;
+	case IWL_AGG_ON:
+		break;
+	default:
+		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+	}
+
 	write_ptr = priv->txq[txq_id].q.write_ptr;
 	read_ptr = priv->txq[txq_id].q.read_ptr;
 
@@ -1070,6 +1087,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	}
 
 	IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ turn_off:
 	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
 
 	/* do not restore/save irqs */
@@ -1098,6 +1116,9 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
 	struct iwl_queue *q = &priv->txq[txq_id].q;
 	u8 *addr = priv->stations[sta_id].sta.sta.addr;
 	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+	struct iwl_rxon_context *ctx;
+
+	ctx = &priv->contexts[priv->stations[sta_id].ctxid];
 
 	lockdep_assert_held(&priv->sta_lock);
 
@@ -1108,12 +1129,12 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
 		if ((txq_id == tid_data->agg.txq_id) &&
 		    (q->read_ptr == q->write_ptr)) {
 			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
-			int tx_fifo = get_fifo_from_tid(tid);
+			int tx_fifo = get_fifo_from_tid(ctx, tid);
 			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
 			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
 							     ssn, tx_fifo);
 			tid_data->agg.state = IWL_AGG_OFF;
-			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
 		}
 		break;
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1121,7 +1142,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
 		if (tid_data->tfds_in_queue == 0) {
 			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
 			tid_data->agg.state = IWL_AGG_ON;
-			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
 		}
 		break;
 	}
@@ -1129,14 +1150,14 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
 	return 0;
 }
 
-static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
 	struct ieee80211_sta *sta;
 	struct iwl_station_priv *sta_priv;
 
 	rcu_read_lock();
-	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+	sta = ieee80211_find_sta(tx_info->ctx->vif, hdr->addr1);
 	if (sta) {
 		sta_priv = (void *)sta->drv_priv;
 		/* avoid atomic ops if this isn't a client */
@@ -1146,7 +1167,7 @@ static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
 	}
 	rcu_read_unlock();
 
-	ieee80211_tx_status_irqsafe(priv->hw, skb);
+	ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
 }
 
 int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
@@ -1169,7 +1190,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		iwlagn_tx_status(priv, tx_info->skb);
+		iwlagn_tx_status(priv, tx_info);
 
 		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
 		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 6f77441cb65a..a7961bf395fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -52,6 +52,19 @@ static const s8 iwlagn_default_queue_to_tx_fifo[] = {
 	IWL_TX_FIFO_UNUSED,
 };
 
+static const s8 iwlagn_ipan_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWL_TX_FIFO_BK_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWLAGN_CMD_FIFO_NUM,
+};
+
 static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
 	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
 	 0, COEX_UNASSOC_IDLE_FLAGS},
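Reading the new table: the index is the TX queue number and the value is the hardware FIFO it drains into. Queues 0-3 carry the BSS ACs, 4-7 the PAN ACs (in reverse order), the repeated BE_IPAN entry at index 8 appears to back the PAN multicast queue used by iwlagn_tx_skb() earlier in this patch, and queue 9 is the command queue -- which is why the code below moves from the fixed IWL_CMD_QUEUE_NUM to priv->cmd_queue. For instance (illustrative):

/* resolving the FIFO for the assumed PAN multicast queue */
s8 fifo = iwlagn_ipan_queue_to_tx_fifo[8];	/* == IWL_TX_FIFO_BE_IPAN */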
@@ -329,8 +342,54 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
 				sizeof(coex_cmd), &coex_cmd);
 }
 
+static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	0, 0, 0, 0, 0, 0, 0
+};
+
+static void iwlagn_send_prio_tbl(struct iwl_priv *priv)
+{
+	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
+
+	memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
+		sizeof(iwlagn_bt_prio_tbl));
+	if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PRIO_TABLE,
+				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
+		IWL_ERR(priv, "failed to send BT prio tbl command\n");
+}
+
+static void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
+{
+	struct iwl_bt_coex_prot_env_cmd env_cmd;
+
+	env_cmd.action = action;
+	env_cmd.type = type;
+	if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV,
+				sizeof(env_cmd), &env_cmd))
+		IWL_ERR(priv, "failed to send BT env command\n");
+}
+
+
 int iwlagn_alive_notify(struct iwl_priv *priv)
 {
+	const s8 *queues;
 	u32 a;
 	unsigned long flags;
 	int i, chan;
@@ -365,7 +424,7 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
 			reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
 	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
-		IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
+		IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
 	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
 
 	/* initiate the queues */
@@ -391,7 +450,13 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
 	/* Activate all Tx DMA/FIFO channels */
 	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
 
-	iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+	/* map queues to FIFOs */
+	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
+		queues = iwlagn_ipan_queue_to_tx_fifo;
+	else
+		queues = iwlagn_default_queue_to_tx_fifo;
+
+	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);
 
 	/* make sure all queue are not stopped */
 	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
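The context bitmap keeps the selection above compact: with only the BSS context enabled, priv->valid_contexts equals BIT(IWL_RXON_CTX_BSS), so any other value implies the PAN context exists and the ten-entry IPAN map must be used. The same test in isolation (illustrative):

bool pan_enabled = priv->valid_contexts != BIT(IWL_RXON_CTX_BSS);
const s8 *queues = pan_enabled ? iwlagn_ipan_queue_to_tx_fifo
			       : iwlagn_default_queue_to_tx_fifo;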
@@ -400,11 +465,12 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
 
 	/* reset to 0 to enable all the queue first */
 	priv->txq_ctx_active_msk = 0;
-	/* map qos queues to fifos one-to-one */
+
 	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
+	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
 
-	for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
-		int ac = iwlagn_default_queue_to_tx_fifo[i];
+	for (i = 0; i < 10; i++) {
+		int ac = queues[i];
 
 		iwl_txq_ctx_activate(priv, i);
 
@@ -416,6 +482,25 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	if (priv->cfg->advanced_bt_coexist) {
+		/* Configure Bluetooth device coexistence support */
+		/* need to perform this before any calibration */
+		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
+		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
+		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
+		priv->cfg->ops->hcmd->send_bt_config(priv);
+		priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
+
+		if (bt_coex_active && priv->iw_mode != NL80211_IFTYPE_ADHOC) {
+			iwlagn_send_prio_tbl(priv);
+			iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+					BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+			iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+					BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+		}
+
+	}
+
 	iwlagn_send_wimax_coex(priv);
 
 	iwlagn_set_Xtal_calib(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 10d7b9b7f064..ad0e67f5c0d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
@@ -86,6 +87,9 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("iwl4965");
 
+static int iwlagn_ant_coupling;
+static bool iwlagn_bt_ch_announce = 1;
+
 /**
  * iwl_commit_rxon - commit staging_rxon to hardware
 *
@@ -94,21 +98,22 @@ MODULE_ALIAS("iwl4965");
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
-int iwl_commit_rxon(struct iwl_priv *priv)
+int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	/* cast away the const for active_rxon in this function */
-	struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
+	struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
 	int ret;
 	bool new_assoc =
-		!!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
+		!!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+	bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
 
 	if (!iwl_is_alive(priv))
 		return -EBUSY;
 
 	/* always get timestamp with Rx frame */
-	priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
+	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
 
-	ret = iwl_check_rxon_cmd(priv);
+	ret = iwl_check_rxon_cmd(priv, ctx);
 	if (ret) {
 		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
 		return -EINVAL;
@@ -119,7 +124,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 	 * abort any previous channel switch if still in process
 	 */
 	if (priv->switch_rxon.switch_in_progress &&
-	    (priv->switch_rxon.channel != priv->staging_rxon.channel)) {
+	    (priv->switch_rxon.channel != ctx->staging.channel)) {
 		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
 			      le16_to_cpu(priv->switch_rxon.channel));
 		iwl_chswitch_done(priv, false);
@@ -128,15 +133,15 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 	/* If we don't need to send a full RXON, we can use
 	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
 	 * and other flags for the current radio configuration. */
-	if (!iwl_full_rxon_required(priv)) {
-		ret = iwl_send_rxon_assoc(priv);
+	if (!iwl_full_rxon_required(priv, ctx)) {
+		ret = iwl_send_rxon_assoc(priv, ctx);
 		if (ret) {
 			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
 			return ret;
 		}
 
-		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
-		iwl_print_rx_config_cmd(priv);
+		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+		iwl_print_rx_config_cmd(priv, ctx);
 		return 0;
 	}
 
@@ -144,13 +149,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 	 * an RXON_ASSOC and the new config wants the associated mask enabled,
 	 * we must clear the associated from the active configuration
 	 * before we apply the new config */
-	if (iwl_is_associated(priv) && new_assoc) {
+	if (iwl_is_associated_ctx(ctx) && new_assoc) {
 		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
 		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 
-		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
+		ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
 				       sizeof(struct iwl_rxon_cmd),
-				       &priv->active_rxon);
+				       active_rxon);
 
 		/* If the mask clearing failed then we set
 		 * active_rxon back to what it was previously */
@@ -159,9 +164,9 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 			IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
 			return ret;
 		}
-		iwl_clear_ucode_stations(priv);
-		iwl_restore_stations(priv);
-		ret = iwl_restore_default_wep_keys(priv);
+		iwl_clear_ucode_stations(priv, ctx);
+		iwl_restore_stations(priv, ctx);
+		ret = iwl_restore_default_wep_keys(priv, ctx);
 		if (ret) {
 			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
 			return ret;
@@ -173,27 +178,46 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 		       "* channel = %d\n"
 		       "* bssid = %pM\n",
 		       (new_assoc ? "" : "out"),
-		       le16_to_cpu(priv->staging_rxon.channel),
-		       priv->staging_rxon.bssid_addr);
+		       le16_to_cpu(ctx->staging.channel),
+		       ctx->staging.bssid_addr);
 
-	iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);
+	iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
+
+	if (!old_assoc) {
+		/*
+		 * First of all, before setting associated, we need to
+		 * send RXON timing so the device knows about the DTIM
+		 * period and other timing values
+		 */
+		ret = iwl_send_rxon_timing(priv, ctx);
+		if (ret) {
+			IWL_ERR(priv, "Error setting RXON timing!\n");
+			return ret;
+		}
+	}
+
+	if (priv->cfg->ops->hcmd->set_pan_params) {
+		ret = priv->cfg->ops->hcmd->set_pan_params(priv);
+		if (ret)
+			return ret;
+	}
 
 	/* Apply the new configuration
 	 * RXON unassoc clears the station table in uCode so restoration of
 	 * stations is needed after it (the RXON command) completes
 	 */
 	if (!new_assoc) {
-		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
-			      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
+		ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
+			      sizeof(struct iwl_rxon_cmd), &ctx->staging);
 		if (ret) {
 			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
 			return ret;
 		}
 		IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
-		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
-		iwl_clear_ucode_stations(priv);
-		iwl_restore_stations(priv);
-		ret = iwl_restore_default_wep_keys(priv);
+		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+		iwl_clear_ucode_stations(priv, ctx);
+		iwl_restore_stations(priv, ctx);
+		ret = iwl_restore_default_wep_keys(priv, ctx);
 		if (ret) {
 			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
 			return ret;
@@ -205,15 +229,15 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 	/* Apply the new configuration
 	 * RXON assoc doesn't clear the station table in uCode,
 	 */
-	ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
-		      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
+	ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
+		      sizeof(struct iwl_rxon_cmd), &ctx->staging);
 	if (ret) {
 		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
 		return ret;
 	}
-	memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
+	memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
 	}
-	iwl_print_rx_config_cmd(priv);
+	iwl_print_rx_config_cmd(priv, ctx);
 
 	iwl_init_sensitivity(priv);
 
@@ -230,10 +254,14 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 
 void iwl_update_chain_flags(struct iwl_priv *priv)
 {
+	struct iwl_rxon_context *ctx;
 
-	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv);
-	iwlcore_commit_rxon(priv);
+	if (priv->cfg->ops->hcmd->set_rxon_chain) {
+		for_each_context(priv, ctx) {
+			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+			iwlcore_commit_rxon(priv, ctx);
+		}
+	}
 }
 
 static void iwl_clear_free_frames(struct iwl_priv *priv)
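for_each_context() is the iteration primitive this series leans on; its definition is not part of this diff (it lives in iwl-dev.h in this series). Presumably it walks the fixed context array and skips contexts that are not valid, along these lines (an assumption, not the verified macro):

#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
		if (priv->valid_contexts & BIT(ctx->ctxid))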
@@ -337,6 +365,13 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
337 * beacon contents. 365 * beacon contents.
338 */ 366 */
339 367
368 lockdep_assert_held(&priv->mutex);
369
370 if (!priv->beacon_ctx) {
371 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
372 return -EINVAL;
373 }
374
340 /* Initialize memory */ 375 /* Initialize memory */
341 tx_beacon_cmd = &frame->u.beacon; 376 tx_beacon_cmd = &frame->u.beacon;
342 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); 377 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
@@ -349,7 +384,7 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
349 384
350 /* Set up TX command fields */ 385 /* Set up TX command fields */
351 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); 386 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
352 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; 387 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
353 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 388 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
354 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | 389 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
355 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; 390 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
@@ -359,7 +394,7 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
359 frame_size); 394 frame_size);
360 395
361 /* Set up packet rate and flags */ 396 /* Set up packet rate and flags */
362 rate = iwl_rate_get_lowest_plcp(priv); 397 rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx);
363 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 398 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
364 priv->hw_params.valid_tx_ant); 399 priv->hw_params.valid_tx_ant);
365 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 400 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
@@ -592,23 +627,84 @@ static void iwl_bg_beacon_update(struct work_struct *work)
592 container_of(work, struct iwl_priv, beacon_update); 627 container_of(work, struct iwl_priv, beacon_update);
593 struct sk_buff *beacon; 628 struct sk_buff *beacon;
594 629
595 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 630 mutex_lock(&priv->mutex);
596 beacon = ieee80211_beacon_get(priv->hw, priv->vif); 631 if (!priv->beacon_ctx) {
632 IWL_ERR(priv, "updating beacon w/o beacon context!\n");
633 goto out;
634 }
597 635
636 if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
637 /*
638 * The ucode will send beacon notifications even in
639 * IBSS mode, but we don't want to process them. But
640 * we need to defer the type check to here due to
641 * requiring locking around the beacon_ctx access.
642 */
643 goto out;
644 }
645
646 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
647 beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
598 if (!beacon) { 648 if (!beacon) {
599 IWL_ERR(priv, "update beacon failed\n"); 649 IWL_ERR(priv, "update beacon failed\n");
600 return; 650 goto out;
601 } 651 }
602 652
603 mutex_lock(&priv->mutex);
604 /* new beacon skb is allocated every time; dispose previous.*/ 653 /* new beacon skb is allocated every time; dispose previous.*/
605 if (priv->ibss_beacon) 654 if (priv->ibss_beacon)
606 dev_kfree_skb(priv->ibss_beacon); 655 dev_kfree_skb(priv->ibss_beacon);
607 656
608 priv->ibss_beacon = beacon; 657 priv->ibss_beacon = beacon;
609 mutex_unlock(&priv->mutex);
610 658
611 iwl_send_beacon_cmd(priv); 659 iwl_send_beacon_cmd(priv);
660 out:
661 mutex_unlock(&priv->mutex);
662}
663
664static void iwl_bg_bt_runtime_config(struct work_struct *work)
665{
666 struct iwl_priv *priv =
667 container_of(work, struct iwl_priv, bt_runtime_config);
668
669 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
670 return;
671
672 /* dont send host command if rf-kill is on */
673 if (!iwl_is_ready_rf(priv))
674 return;
675 priv->cfg->ops->hcmd->send_bt_config(priv);
676}
677
678static void iwl_bg_bt_full_concurrency(struct work_struct *work)
679{
680 struct iwl_priv *priv =
681 container_of(work, struct iwl_priv, bt_full_concurrency);
682 struct iwl_rxon_context *ctx;
683
684 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
685 return;
686
687 /* dont send host command if rf-kill is on */
688 if (!iwl_is_ready_rf(priv))
689 return;
690
691 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
692 priv->bt_full_concurrent ?
693 "full concurrency" : "3-wire");
694
695 /*
696 * LQ & RXON updated cmds must be sent before BT Config cmd
697 * to avoid 3-wire collisions
698 */
699 mutex_lock(&priv->mutex);
700 for_each_context(priv, ctx) {
701 if (priv->cfg->ops->hcmd->set_rxon_chain)
702 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
703 iwlcore_commit_rxon(priv, ctx);
704 }
705 mutex_unlock(&priv->mutex);
706
707 priv->cfg->ops->hcmd->send_bt_config(priv);
612} 708}
613 709
614/** 710/**
@@ -763,10 +859,10 @@ static void iwl_bg_ucode_trace(unsigned long data)
763static void iwl_rx_beacon_notif(struct iwl_priv *priv, 859static void iwl_rx_beacon_notif(struct iwl_priv *priv,
764 struct iwl_rx_mem_buffer *rxb) 860 struct iwl_rx_mem_buffer *rxb)
765{ 861{
766#ifdef CONFIG_IWLWIFI_DEBUG
767 struct iwl_rx_packet *pkt = rxb_addr(rxb); 862 struct iwl_rx_packet *pkt = rxb_addr(rxb);
768 struct iwl4965_beacon_notif *beacon = 863 struct iwl4965_beacon_notif *beacon =
769 (struct iwl4965_beacon_notif *)pkt->u.raw; 864 (struct iwl4965_beacon_notif *)pkt->u.raw;
865#ifdef CONFIG_IWLWIFI_DEBUG
770 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 866 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
771 867
772 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 868 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -778,8 +874,9 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
778 le32_to_cpu(beacon->low_tsf), rate); 874 le32_to_cpu(beacon->low_tsf), rate);
779#endif 875#endif
780 876
781 if ((priv->iw_mode == NL80211_IFTYPE_AP) && 877 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
782 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 878
879 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
783 queue_work(priv->workqueue, &priv->beacon_update); 880 queue_work(priv->workqueue, &priv->beacon_update);
784} 881}
785 882
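The two hunks above split the beacon work across contexts: the notification handler now only records ibss_mgr_status and queues the work, while every check that touches priv->beacon_ctx happens in process context under priv->mutex. A minimal sketch of the resulting flow, assuming the names used in the patch (error paths and the debug print omitted):

    /* interrupt path -- no mutex held, so no beacon_ctx access */
    static void rx_beacon_notif_sketch(struct iwl_priv *priv)
    {
            if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
                    queue_work(priv->workqueue, &priv->beacon_update);
    }

    /* process context -- beacon_ctx may only be dereferenced here */
    static void bg_beacon_update_sketch(struct work_struct *work)
    {
            struct iwl_priv *priv =
                    container_of(work, struct iwl_priv, beacon_update);

            mutex_lock(&priv->mutex);
            if (priv->beacon_ctx &&
                priv->beacon_ctx->vif->type == NL80211_IFTYPE_AP)
                    iwl_send_beacon_cmd(priv);
            mutex_unlock(&priv->mutex);
    }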
@@ -1650,30 +1747,44 @@ static void iwl_nic_start(struct iwl_priv *priv)
1650struct iwlagn_ucode_capabilities { 1747struct iwlagn_ucode_capabilities {
1651 u32 max_probe_length; 1748 u32 max_probe_length;
1652 u32 standard_phy_calibration_size; 1749 u32 standard_phy_calibration_size;
1750 bool pan;
1653}; 1751};
1654 1752
1655static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); 1753static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
1656static int iwl_mac_setup_register(struct iwl_priv *priv, 1754static int iwl_mac_setup_register(struct iwl_priv *priv,
1657 struct iwlagn_ucode_capabilities *capa); 1755 struct iwlagn_ucode_capabilities *capa);
1658 1756
1757#define UCODE_EXPERIMENTAL_INDEX 100
1758#define UCODE_EXPERIMENTAL_TAG "exp"
1759
1659static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) 1760static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
1660{ 1761{
1661 const char *name_pre = priv->cfg->fw_name_pre; 1762 const char *name_pre = priv->cfg->fw_name_pre;
1763 char tag[8];
1662 1764
1663 if (first) 1765 if (first) {
1766#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
1767 priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
1768 strcpy(tag, UCODE_EXPERIMENTAL_TAG);
1769 } else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
1770#endif
1664 priv->fw_index = priv->cfg->ucode_api_max; 1771 priv->fw_index = priv->cfg->ucode_api_max;
1665 else 1772 sprintf(tag, "%d", priv->fw_index);
1773 } else {
1666 priv->fw_index--; 1774 priv->fw_index--;
1775 sprintf(tag, "%d", priv->fw_index);
1776 }
1667 1777
1668 if (priv->fw_index < priv->cfg->ucode_api_min) { 1778 if (priv->fw_index < priv->cfg->ucode_api_min) {
1669 IWL_ERR(priv, "no suitable firmware found!\n"); 1779 IWL_ERR(priv, "no suitable firmware found!\n");
1670 return -ENOENT; 1780 return -ENOENT;
1671 } 1781 }
1672 1782
1673 sprintf(priv->firmware_name, "%s%d%s", 1783 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1674 name_pre, priv->fw_index, ".ucode");
1675 1784
1676 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", 1785 IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
1786 (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
1787 ? "EXPERIMENTAL " : "",
1677 priv->firmware_name); 1788 priv->firmware_name);
1678 1789
1679 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, 1790 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
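For readers tracing the fallback order: with CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE set, the first request is for an "exp"-tagged image, after which fw_index counts down from ucode_api_max to ucode_api_min before the driver gives up with -ENOENT. A standalone sketch of the name construction, with "iwlwifi-6000-" standing in for priv->cfg->fw_name_pre (the prefix is illustrative only):

    #include <stdio.h>
    #include <string.h>

    static void build_fw_name(char *out, size_t len,
                              int fw_index, int experimental)
    {
            char tag[8];

            if (experimental)
                    strcpy(tag, "exp");     /* UCODE_EXPERIMENTAL_TAG */
            else
                    snprintf(tag, sizeof(tag), "%d", fw_index);
            snprintf(out, len, "%s%s.ucode", "iwlwifi-6000-", tag);
    }

    /* build_fw_name(buf, sizeof(buf), 4, 0) -> "iwlwifi-6000-4.ucode"
     * build_fw_name(buf, sizeof(buf), 0, 1) -> "iwlwifi-6000-exp.ucode" */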
@@ -1874,6 +1985,11 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1874 capa->max_probe_length = 1985 capa->max_probe_length =
1875 le32_to_cpup((__le32 *)tlv_data); 1986 le32_to_cpup((__le32 *)tlv_data);
1876 break; 1987 break;
1988 case IWL_UCODE_TLV_PAN:
1989 if (tlv_len)
1990 goto invalid_tlv_len;
1991 capa->pan = true;
1992 break;
1877 case IWL_UCODE_TLV_INIT_EVTLOG_PTR: 1993 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
1878 if (tlv_len != sizeof(u32)) 1994 if (tlv_len != sizeof(u32))
1879 goto invalid_tlv_len; 1995 goto invalid_tlv_len;
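The new case is a flag-style TLV: its presence alone carries the information, so any payload length is treated as malformed. Contrast with a value TLV, which must match its type's exact size. A sketch of the two shapes (the IWL_UCODE_TLV_PROBE_MAX_LEN label is assumed from the body visible just above the hunk):

    switch (tlv_type) {
    case IWL_UCODE_TLV_PAN:                 /* flag TLV: length must be 0 */
            if (tlv_len)
                    goto invalid_tlv_len;
            capa->pan = true;
            break;
    case IWL_UCODE_TLV_PROBE_MAX_LEN:       /* value TLV: exactly a u32 */
            if (tlv_len != sizeof(u32))
                    goto invalid_tlv_len;
            capa->max_probe_length = le32_to_cpup((__le32 *)tlv_data);
            break;
    }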
@@ -1968,8 +2084,10 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1968 memset(&pieces, 0, sizeof(pieces)); 2084 memset(&pieces, 0, sizeof(pieces));
1969 2085
1970 if (!ucode_raw) { 2086 if (!ucode_raw) {
1971 IWL_ERR(priv, "request for firmware file '%s' failed.\n", 2087 if (priv->fw_index <= priv->cfg->ucode_api_max)
1972 priv->firmware_name); 2088 IWL_ERR(priv,
2089 "request for firmware file '%s' failed.\n",
2090 priv->firmware_name);
1973 goto try_again; 2091 goto try_again;
1974 } 2092 }
1975 2093
@@ -2016,7 +2134,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2016 api_max, api_ver); 2134 api_max, api_ver);
2017 2135
2018 if (build) 2136 if (build)
2019 sprintf(buildstr, " build %u", build); 2137 sprintf(buildstr, " build %u%s", build,
2138 (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
2139 ? " (EXP)" : "");
2020 else 2140 else
2021 buildstr[0] = '\0'; 2141 buildstr[0] = '\0';
2022 2142
@@ -2145,6 +2265,12 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2145 priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size; 2265 priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size;
2146 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr; 2266 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
2147 2267
2268 if (ucode_capa.pan) {
2269 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
2270 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
2271 } else
2272 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2273
2148 /* Copy images into buffers for card's bus-master reads ... */ 2274 /* Copy images into buffers for card's bus-master reads ... */
2149 2275
2150 /* Runtime instructions (first block of data in file) */ 2276 /* Runtime instructions (first block of data in file) */
@@ -2543,6 +2669,9 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2543 return pos; 2669 return pos;
2544 } 2670 }
2545 2671
2672 /* enable/disable bt channel announcement */
2673 priv->bt_ch_announce = iwlagn_bt_ch_announce;
2674
2546#ifdef CONFIG_IWLWIFI_DEBUG 2675#ifdef CONFIG_IWLWIFI_DEBUG
2547 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) 2676 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
2548 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) 2677 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
@@ -2589,6 +2718,52 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2589 return pos; 2718 return pos;
2590} 2719}
2591 2720
2721static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2722{
2723 struct iwl_ct_kill_config cmd;
2724 struct iwl_ct_kill_throttling_config adv_cmd;
2725 unsigned long flags;
2726 int ret = 0;
2727
2728 spin_lock_irqsave(&priv->lock, flags);
2729 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2730 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
2731 spin_unlock_irqrestore(&priv->lock, flags);
2732 priv->thermal_throttle.ct_kill_toggle = false;
2733
2734 if (priv->cfg->support_ct_kill_exit) {
2735 adv_cmd.critical_temperature_enter =
2736 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2737 adv_cmd.critical_temperature_exit =
2738 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
2739
2740 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
2741 sizeof(adv_cmd), &adv_cmd);
2742 if (ret)
2743 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
2744 else
2745 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
2746 "succeeded, "
2747 "critical temperature enter is %d,"
2748 "exit is %d\n",
2749 priv->hw_params.ct_kill_threshold,
2750 priv->hw_params.ct_kill_exit_threshold);
2751 } else {
2752 cmd.critical_temperature_R =
2753 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2754
2755 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
2756 sizeof(cmd), &cmd);
2757 if (ret)
2758 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
2759 else
2760 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
2761 "succeeded, "
2762 "critical temperature is %d\n",
2763 priv->hw_params.ct_kill_threshold);
2764 }
2765}
2766
2592/** 2767/**
2593 * iwl_alive_start - called after REPLY_ALIVE notification received 2768 * iwl_alive_start - called after REPLY_ALIVE notification received
2594 * from protocol/runtime uCode (initialization uCode's 2769 * from protocol/runtime uCode (initialization uCode's
@@ -2597,6 +2772,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2597static void iwl_alive_start(struct iwl_priv *priv) 2772static void iwl_alive_start(struct iwl_priv *priv)
2598{ 2773{
2599 int ret = 0; 2774 int ret = 0;
2775 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2600 2776
2601 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 2777 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2602 2778
@@ -2645,27 +2821,31 @@ static void iwl_alive_start(struct iwl_priv *priv)
2645 if (priv->cfg->ops->hcmd->set_tx_ant) 2821 if (priv->cfg->ops->hcmd->set_tx_ant)
2646 priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant); 2822 priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);
2647 2823
2648 if (iwl_is_associated(priv)) { 2824 if (iwl_is_associated_ctx(ctx)) {
2649 struct iwl_rxon_cmd *active_rxon = 2825 struct iwl_rxon_cmd *active_rxon =
2650 (struct iwl_rxon_cmd *)&priv->active_rxon; 2826 (struct iwl_rxon_cmd *)&ctx->active;
2651 /* apply any changes in staging */ 2827 /* apply any changes in staging */
2652 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2828 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2653 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2829 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2654 } else { 2830 } else {
2831 struct iwl_rxon_context *tmp;
2655 /* Initialize our rx_config data */ 2832 /* Initialize our rx_config data */
2656 iwl_connection_init_rx_config(priv, NULL); 2833 for_each_context(priv, tmp)
2834 iwl_connection_init_rx_config(priv, tmp);
2657 2835
2658 if (priv->cfg->ops->hcmd->set_rxon_chain) 2836 if (priv->cfg->ops->hcmd->set_rxon_chain)
2659 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2837 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2660 } 2838 }
2661 2839
2662 /* Configure Bluetooth device coexistence support */ 2840 if (!priv->cfg->advanced_bt_coexist) {
2663 priv->cfg->ops->hcmd->send_bt_config(priv); 2841 /* Configure Bluetooth device coexistence support */
2842 priv->cfg->ops->hcmd->send_bt_config(priv);
2843 }
2664 2844
2665 iwl_reset_run_time_calib(priv); 2845 iwl_reset_run_time_calib(priv);
2666 2846
2667 /* Configure the adapter for unassociated operation */ 2847 /* Configure the adapter for unassociated operation */
2668 iwlcore_commit_rxon(priv); 2848 iwlcore_commit_rxon(priv, ctx);
2669 2849
2670 /* At this point, the NIC is initialized and operational */ 2850 /* At this point, the NIC is initialized and operational */
2671 iwl_rf_kill_ct_config(priv); 2851 iwl_rf_kill_ct_config(priv);
@@ -2698,10 +2878,22 @@ static void __iwl_down(struct iwl_priv *priv)
2698 if (!exit_pending) 2878 if (!exit_pending)
2699 set_bit(STATUS_EXIT_PENDING, &priv->status); 2879 set_bit(STATUS_EXIT_PENDING, &priv->status);
2700 2880
2701 iwl_clear_ucode_stations(priv); 2881 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
 2702 iwl_dealloc_bcast_station(priv); 2882 * to prevent the timer from being re-armed */
2883 if (priv->cfg->ops->lib->recover_from_tx_stall)
2884 del_timer_sync(&priv->monitor_recover);
2885
2886 iwl_clear_ucode_stations(priv, NULL);
2887 iwl_dealloc_bcast_stations(priv);
2703 iwl_clear_driver_stations(priv); 2888 iwl_clear_driver_stations(priv);
2704 2889
2890 /* reset BT coex data */
2891 priv->bt_status = 0;
2892 priv->bt_traffic_load = priv->cfg->bt_init_traffic_load;
2893 priv->bt_sco_active = false;
2894 priv->bt_full_concurrent = false;
2895 priv->bt_ci_compliance = 0;
2896
2705 /* Unblock any waiting calls */ 2897 /* Unblock any waiting calls */
2706 wake_up_interruptible_all(&priv->wait_command_queue); 2898 wake_up_interruptible_all(&priv->wait_command_queue);
2707 2899
@@ -2834,6 +3026,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
2834 3026
2835static int __iwl_up(struct iwl_priv *priv) 3027static int __iwl_up(struct iwl_priv *priv)
2836{ 3028{
3029 struct iwl_rxon_context *ctx;
2837 int i; 3030 int i;
2838 int ret; 3031 int ret;
2839 3032
@@ -2847,9 +3040,13 @@ static int __iwl_up(struct iwl_priv *priv)
2847 return -EIO; 3040 return -EIO;
2848 } 3041 }
2849 3042
2850 ret = iwl_alloc_bcast_station(priv, true); 3043 for_each_context(priv, ctx) {
2851 if (ret) 3044 ret = iwl_alloc_bcast_station(priv, ctx, true);
2852 return ret; 3045 if (ret) {
3046 iwl_dealloc_bcast_stations(priv);
3047 return ret;
3048 }
3049 }
2853 3050
2854 iwl_prepare_card_hw(priv); 3051 iwl_prepare_card_hw(priv);
2855 3052
@@ -2874,6 +3071,12 @@ static int __iwl_up(struct iwl_priv *priv)
2874 3071
2875 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 3072 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2876 3073
3074 /* must be initialised before iwl_hw_nic_init */
3075 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
3076 priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
3077 else
3078 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
3079
2877 ret = iwlagn_hw_nic_init(priv); 3080 ret = iwlagn_hw_nic_init(priv);
2878 if (ret) { 3081 if (ret) {
2879 IWL_ERR(priv, "Unable to init nic\n"); 3082 IWL_ERR(priv, "Unable to init nic\n");
@@ -3004,11 +3207,42 @@ static void iwl_bg_restart(struct work_struct *data)
3004 return; 3207 return;
3005 3208
3006 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { 3209 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
3210 struct iwl_rxon_context *ctx;
3211 bool bt_sco, bt_full_concurrent;
3212 u8 bt_ci_compliance;
3213 u8 bt_load;
3214 u8 bt_status;
3215
3007 mutex_lock(&priv->mutex); 3216 mutex_lock(&priv->mutex);
3008 priv->vif = NULL; 3217 for_each_context(priv, ctx)
3218 ctx->vif = NULL;
3009 priv->is_open = 0; 3219 priv->is_open = 0;
3220
3221 /*
3222 * __iwl_down() will clear the BT status variables,
3223 * which is correct, but when we restart we really
3224 * want to keep them so restore them afterwards.
3225 *
3226 * The restart process will later pick them up and
3227 * re-configure the hw when we reconfigure the BT
3228 * command.
3229 */
3230 bt_sco = priv->bt_sco_active;
3231 bt_full_concurrent = priv->bt_full_concurrent;
3232 bt_ci_compliance = priv->bt_ci_compliance;
3233 bt_load = priv->bt_traffic_load;
3234 bt_status = priv->bt_status;
3235
3236 __iwl_down(priv);
3237
3238 priv->bt_sco_active = bt_sco;
3239 priv->bt_full_concurrent = bt_full_concurrent;
3240 priv->bt_ci_compliance = bt_ci_compliance;
3241 priv->bt_traffic_load = bt_load;
3242 priv->bt_status = bt_status;
3243
3010 mutex_unlock(&priv->mutex); 3244 mutex_unlock(&priv->mutex);
3011 iwl_down(priv); 3245 iwl_cancel_deferred_work(priv);
3012 ieee80211_restart_hw(priv->hw); 3246 ieee80211_restart_hw(priv->hw);
3013 } else { 3247 } else {
3014 iwl_down(priv); 3248 iwl_down(priv);
@@ -3039,12 +3273,15 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
3039 3273
3040void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif) 3274void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3041{ 3275{
3276 struct iwl_rxon_context *ctx;
3042 struct ieee80211_conf *conf = NULL; 3277 struct ieee80211_conf *conf = NULL;
3043 int ret = 0; 3278 int ret = 0;
3044 3279
3045 if (!vif || !priv->is_open) 3280 if (!vif || !priv->is_open)
3046 return; 3281 return;
3047 3282
3283 ctx = iwl_rxon_ctx_from_vif(vif);
3284
3048 if (vif->type == NL80211_IFTYPE_AP) { 3285 if (vif->type == NL80211_IFTYPE_AP) {
3049 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); 3286 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3050 return; 3287 return;
@@ -3057,44 +3294,42 @@ void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3057 3294
3058 conf = ieee80211_get_hw_conf(priv->hw); 3295 conf = ieee80211_get_hw_conf(priv->hw);
3059 3296
3060 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3297 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3061 iwlcore_commit_rxon(priv); 3298 iwlcore_commit_rxon(priv, ctx);
3062 3299
3063 iwl_setup_rxon_timing(priv, vif); 3300 ret = iwl_send_rxon_timing(priv, ctx);
3064 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3065 sizeof(priv->rxon_timing), &priv->rxon_timing);
3066 if (ret) 3301 if (ret)
 3067 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3302 IWL_WARN(priv, "RXON timing failed - "
3068 "Attempting to continue.\n"); 3303 "Attempting to continue.\n");
3069 3304
3070 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3305 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3071 3306
3072 iwl_set_rxon_ht(priv, &priv->current_ht_config); 3307 iwl_set_rxon_ht(priv, &priv->current_ht_config);
3073 3308
3074 if (priv->cfg->ops->hcmd->set_rxon_chain) 3309 if (priv->cfg->ops->hcmd->set_rxon_chain)
3075 priv->cfg->ops->hcmd->set_rxon_chain(priv); 3310 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
3076 3311
3077 priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid); 3312 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
3078 3313
3079 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3314 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3080 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3315 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3081 3316
3082 if (vif->bss_conf.use_short_preamble) 3317 if (vif->bss_conf.use_short_preamble)
3083 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3318 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3084 else 3319 else
3085 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3320 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3086 3321
3087 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3322 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3088 if (vif->bss_conf.use_short_slot) 3323 if (vif->bss_conf.use_short_slot)
3089 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 3324 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3090 else 3325 else
3091 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3326 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3092 } 3327 }
3093 3328
3094 iwlcore_commit_rxon(priv); 3329 iwlcore_commit_rxon(priv, ctx);
3095 3330
3096 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3331 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3097 vif->bss_conf.aid, priv->active_rxon.bssid_addr); 3332 vif->bss_conf.aid, ctx->active.bssid_addr);
3098 3333
3099 switch (vif->type) { 3334 switch (vif->type) {
3100 case NL80211_IFTYPE_STATION: 3335 case NL80211_IFTYPE_STATION:
@@ -3137,11 +3372,14 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3137{ 3372{
3138 int ret; 3373 int ret;
3139 struct ieee80211_hw *hw = priv->hw; 3374 struct ieee80211_hw *hw = priv->hw;
3375 struct iwl_rxon_context *ctx;
3376
3140 hw->rate_control_algorithm = "iwl-agn-rs"; 3377 hw->rate_control_algorithm = "iwl-agn-rs";
3141 3378
3142 /* Tell mac80211 our characteristics */ 3379 /* Tell mac80211 our characteristics */
3143 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3380 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3144 IEEE80211_HW_AMPDU_AGGREGATION | 3381 IEEE80211_HW_AMPDU_AGGREGATION |
3382 IEEE80211_HW_NEED_DTIM_PERIOD |
3145 IEEE80211_HW_SPECTRUM_MGMT; 3383 IEEE80211_HW_SPECTRUM_MGMT;
3146 3384
3147 if (!priv->cfg->broken_powersave) 3385 if (!priv->cfg->broken_powersave)
@@ -3155,9 +3393,10 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3155 hw->sta_data_size = sizeof(struct iwl_station_priv); 3393 hw->sta_data_size = sizeof(struct iwl_station_priv);
3156 hw->vif_data_size = sizeof(struct iwl_vif_priv); 3394 hw->vif_data_size = sizeof(struct iwl_vif_priv);
3157 3395
3158 hw->wiphy->interface_modes = 3396 for_each_context(priv, ctx) {
3159 BIT(NL80211_IFTYPE_STATION) | 3397 hw->wiphy->interface_modes |= ctx->interface_modes;
3160 BIT(NL80211_IFTYPE_ADHOC); 3398 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
3399 }
3161 3400
3162 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3401 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3163 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3402 WIPHY_FLAG_DISABLE_BEACON_HINTS;
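With the per-context mode fields set up at probe time later in this patch, the loop above composes the wiphy's advertised interface modes from both contexts. Spelled out (values taken from the context initialization added to iwl_pci_probe below):

    /* BSS context */
    interface_modes           = BIT(NL80211_IFTYPE_STATION);
    exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);

    /* PAN context */
    interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);

    /* resulting wiphy->interface_modes: STATION | ADHOC | AP */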
@@ -3285,24 +3524,25 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3285 3524
3286void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3525void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3287{ 3526{
3527 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
3288 int ret = 0; 3528 int ret = 0;
3289 3529
3530 lockdep_assert_held(&priv->mutex);
3531
3290 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3532 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3291 return; 3533 return;
3292 3534
3293 /* The following should be done only at AP bring up */ 3535 /* The following should be done only at AP bring up */
3294 if (!iwl_is_associated(priv)) { 3536 if (!iwl_is_associated_ctx(ctx)) {
3295 3537
3296 /* RXON - unassoc (to set timing command) */ 3538 /* RXON - unassoc (to set timing command) */
3297 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3539 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3298 iwlcore_commit_rxon(priv); 3540 iwlcore_commit_rxon(priv, ctx);
3299 3541
3300 /* RXON Timing */ 3542 /* RXON Timing */
3301 iwl_setup_rxon_timing(priv, vif); 3543 ret = iwl_send_rxon_timing(priv, ctx);
3302 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3303 sizeof(priv->rxon_timing), &priv->rxon_timing);
3304 if (ret) 3544 if (ret)
3305 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3545 IWL_WARN(priv, "RXON timing failed - "
3306 "Attempting to continue.\n"); 3546 "Attempting to continue.\n");
3307 3547
3308 /* AP has all antennas */ 3548 /* AP has all antennas */
@@ -3310,28 +3550,30 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3310 priv->hw_params.valid_rx_ant; 3550 priv->hw_params.valid_rx_ant;
3311 iwl_set_rxon_ht(priv, &priv->current_ht_config); 3551 iwl_set_rxon_ht(priv, &priv->current_ht_config);
3312 if (priv->cfg->ops->hcmd->set_rxon_chain) 3552 if (priv->cfg->ops->hcmd->set_rxon_chain)
3313 priv->cfg->ops->hcmd->set_rxon_chain(priv); 3553 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
3314 3554
3315 priv->staging_rxon.assoc_id = 0; 3555 ctx->staging.assoc_id = 0;
3316 3556
3317 if (vif->bss_conf.use_short_preamble) 3557 if (vif->bss_conf.use_short_preamble)
3318 priv->staging_rxon.flags |= 3558 ctx->staging.flags |=
3319 RXON_FLG_SHORT_PREAMBLE_MSK; 3559 RXON_FLG_SHORT_PREAMBLE_MSK;
3320 else 3560 else
3321 priv->staging_rxon.flags &= 3561 ctx->staging.flags &=
3322 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3562 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3323 3563
3324 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3564 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3325 if (vif->bss_conf.use_short_slot) 3565 if (vif->bss_conf.use_short_slot)
3326 priv->staging_rxon.flags |= 3566 ctx->staging.flags |=
3327 RXON_FLG_SHORT_SLOT_MSK; 3567 RXON_FLG_SHORT_SLOT_MSK;
3328 else 3568 else
3329 priv->staging_rxon.flags &= 3569 ctx->staging.flags &=
3330 ~RXON_FLG_SHORT_SLOT_MSK; 3570 ~RXON_FLG_SHORT_SLOT_MSK;
3331 } 3571 }
3572 /* need to send beacon cmd before committing assoc RXON! */
3573 iwl_send_beacon_cmd(priv);
3332 /* restore RXON assoc */ 3574 /* restore RXON assoc */
3333 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3575 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3334 iwlcore_commit_rxon(priv); 3576 iwlcore_commit_rxon(priv, ctx);
3335 } 3577 }
3336 iwl_send_beacon_cmd(priv); 3578 iwl_send_beacon_cmd(priv);
3337 3579
@@ -3348,9 +3590,11 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
3348{ 3590{
3349 3591
3350 struct iwl_priv *priv = hw->priv; 3592 struct iwl_priv *priv = hw->priv;
3593 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3594
3351 IWL_DEBUG_MAC80211(priv, "enter\n"); 3595 IWL_DEBUG_MAC80211(priv, "enter\n");
3352 3596
3353 iwl_update_tkip_key(priv, keyconf, sta, 3597 iwl_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
3354 iv32, phase1key); 3598 iv32, phase1key);
3355 3599
3356 IWL_DEBUG_MAC80211(priv, "leave\n"); 3600 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3362,6 +3606,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3362 struct ieee80211_key_conf *key) 3606 struct ieee80211_key_conf *key)
3363{ 3607{
3364 struct iwl_priv *priv = hw->priv; 3608 struct iwl_priv *priv = hw->priv;
3609 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3610 struct iwl_rxon_context *ctx = vif_priv->ctx;
3365 int ret; 3611 int ret;
3366 u8 sta_id; 3612 u8 sta_id;
3367 bool is_default_wep_key = false; 3613 bool is_default_wep_key = false;
@@ -3373,7 +3619,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3373 return -EOPNOTSUPP; 3619 return -EOPNOTSUPP;
3374 } 3620 }
3375 3621
3376 sta_id = iwl_sta_id_or_broadcast(priv, sta); 3622 sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
3377 if (sta_id == IWL_INVALID_STATION) 3623 if (sta_id == IWL_INVALID_STATION)
3378 return -EINVAL; 3624 return -EINVAL;
3379 3625
@@ -3386,9 +3632,11 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3386 * in 1X mode. 3632 * in 1X mode.
3387 * In legacy wep mode, we use another host command to the uCode. 3633 * In legacy wep mode, we use another host command to the uCode.
3388 */ 3634 */
3389 if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) { 3635 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3636 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3637 !sta) {
3390 if (cmd == SET_KEY) 3638 if (cmd == SET_KEY)
3391 is_default_wep_key = !priv->key_mapping_key; 3639 is_default_wep_key = !ctx->key_mapping_keys;
3392 else 3640 else
3393 is_default_wep_key = 3641 is_default_wep_key =
3394 (key->hw_key_idx == HW_KEY_DEFAULT); 3642 (key->hw_key_idx == HW_KEY_DEFAULT);
@@ -3397,17 +3645,18 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3397 switch (cmd) { 3645 switch (cmd) {
3398 case SET_KEY: 3646 case SET_KEY:
3399 if (is_default_wep_key) 3647 if (is_default_wep_key)
3400 ret = iwl_set_default_wep_key(priv, key); 3648 ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
3401 else 3649 else
3402 ret = iwl_set_dynamic_key(priv, key, sta_id); 3650 ret = iwl_set_dynamic_key(priv, vif_priv->ctx,
3651 key, sta_id);
3403 3652
3404 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); 3653 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
3405 break; 3654 break;
3406 case DISABLE_KEY: 3655 case DISABLE_KEY:
3407 if (is_default_wep_key) 3656 if (is_default_wep_key)
3408 ret = iwl_remove_default_wep_key(priv, key); 3657 ret = iwl_remove_default_wep_key(priv, ctx, key);
3409 else 3658 else
3410 ret = iwl_remove_dynamic_key(priv, key, sta_id); 3659 ret = iwl_remove_dynamic_key(priv, ctx, key, sta_id);
3411 3660
3412 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); 3661 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
3413 break; 3662 break;
@@ -3476,8 +3725,8 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3476 3725
3477 sta_priv->lq_sta.lq.general_params.flags &= 3726 sta_priv->lq_sta.lq.general_params.flags &=
3478 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; 3727 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
3479 iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq, 3728 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
3480 CMD_ASYNC, false); 3729 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
3481 } 3730 }
3482 break; 3731 break;
3483 case IEEE80211_AMPDU_TX_OPERATIONAL: 3732 case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -3492,8 +3741,8 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3492 3741
3493 sta_priv->lq_sta.lq.general_params.flags |= 3742 sta_priv->lq_sta.lq.general_params.flags |=
3494 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; 3743 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
3495 iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq, 3744 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
3496 CMD_ASYNC, false); 3745 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
3497 } 3746 }
3498 ret = 0; 3747 ret = 0;
3499 break; 3748 break;
@@ -3539,6 +3788,7 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3539{ 3788{
3540 struct iwl_priv *priv = hw->priv; 3789 struct iwl_priv *priv = hw->priv;
3541 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 3790 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
3791 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3542 bool is_ap = vif->type == NL80211_IFTYPE_STATION; 3792 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3543 int ret; 3793 int ret;
3544 u8 sta_id; 3794 u8 sta_id;
@@ -3554,8 +3804,8 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3554 if (vif->type == NL80211_IFTYPE_AP) 3804 if (vif->type == NL80211_IFTYPE_AP)
3555 sta_priv->client = true; 3805 sta_priv->client = true;
3556 3806
3557 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap, 3807 ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
3558 &sta_id); 3808 is_ap, sta, &sta_id);
3559 if (ret) { 3809 if (ret) {
3560 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 3810 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3561 sta->addr, ret); 3811 sta->addr, ret);
@@ -3581,7 +3831,17 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3581 struct iwl_priv *priv = hw->priv; 3831 struct iwl_priv *priv = hw->priv;
3582 const struct iwl_channel_info *ch_info; 3832 const struct iwl_channel_info *ch_info;
3583 struct ieee80211_conf *conf = &hw->conf; 3833 struct ieee80211_conf *conf = &hw->conf;
3834 struct ieee80211_channel *channel = ch_switch->channel;
3584 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 3835 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
3836 /*
3837 * MULTI-FIXME
3838 * When we add support for multiple interfaces, we need to
3839 * revisit this. The channel switch command in the device
3840 * only affects the BSS context, but what does that really
3841 * mean? And what if we get a CSA on the second interface?
3842 * This needs a lot of work.
3843 */
3844 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3585 u16 ch; 3845 u16 ch;
3586 unsigned long flags = 0; 3846 unsigned long flags = 0;
3587 3847
@@ -3594,7 +3854,7 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3594 test_bit(STATUS_SCANNING, &priv->status)) 3854 test_bit(STATUS_SCANNING, &priv->status))
3595 goto out_exit; 3855 goto out_exit;
3596 3856
3597 if (!iwl_is_associated(priv)) 3857 if (!iwl_is_associated_ctx(ctx))
3598 goto out_exit; 3858 goto out_exit;
3599 3859
3600 /* channel switch in progress */ 3860 /* channel switch in progress */
@@ -3604,11 +3864,10 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3604 mutex_lock(&priv->mutex); 3864 mutex_lock(&priv->mutex);
3605 if (priv->cfg->ops->lib->set_channel_switch) { 3865 if (priv->cfg->ops->lib->set_channel_switch) {
3606 3866
3607 ch = ieee80211_frequency_to_channel( 3867 ch = channel->hw_value;
3608 ch_switch->channel->center_freq); 3868 if (le16_to_cpu(ctx->active.channel) != ch) {
3609 if (le16_to_cpu(priv->active_rxon.channel) != ch) {
3610 ch_info = iwl_get_channel_info(priv, 3869 ch_info = iwl_get_channel_info(priv,
3611 conf->channel->band, 3870 channel->band,
3612 ch); 3871 ch);
3613 if (!is_channel_valid(ch_info)) { 3872 if (!is_channel_valid(ch_info)) {
3614 IWL_DEBUG_MAC80211(priv, "invalid channel\n"); 3873 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
@@ -3619,34 +3878,31 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3619 priv->current_ht_config.smps = conf->smps_mode; 3878 priv->current_ht_config.smps = conf->smps_mode;
3620 3879
3621 /* Configure HT40 channels */ 3880 /* Configure HT40 channels */
3622 ht_conf->is_ht = conf_is_ht(conf); 3881 ctx->ht.enabled = conf_is_ht(conf);
3623 if (ht_conf->is_ht) { 3882 if (ctx->ht.enabled) {
3624 if (conf_is_ht40_minus(conf)) { 3883 if (conf_is_ht40_minus(conf)) {
3625 ht_conf->extension_chan_offset = 3884 ctx->ht.extension_chan_offset =
3626 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 3885 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
3627 ht_conf->is_40mhz = true; 3886 ctx->ht.is_40mhz = true;
3628 } else if (conf_is_ht40_plus(conf)) { 3887 } else if (conf_is_ht40_plus(conf)) {
3629 ht_conf->extension_chan_offset = 3888 ctx->ht.extension_chan_offset =
3630 IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 3889 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
3631 ht_conf->is_40mhz = true; 3890 ctx->ht.is_40mhz = true;
3632 } else { 3891 } else {
3633 ht_conf->extension_chan_offset = 3892 ctx->ht.extension_chan_offset =
3634 IEEE80211_HT_PARAM_CHA_SEC_NONE; 3893 IEEE80211_HT_PARAM_CHA_SEC_NONE;
3635 ht_conf->is_40mhz = false; 3894 ctx->ht.is_40mhz = false;
3636 } 3895 }
3637 } else 3896 } else
3638 ht_conf->is_40mhz = false; 3897 ctx->ht.is_40mhz = false;
3639 3898
3640 /* if we are switching from ht to 2.4 clear flags 3899 if ((le16_to_cpu(ctx->staging.channel) != ch))
3641 * from any ht related info since 2.4 does not 3900 ctx->staging.flags = 0;
3642 * support ht */
3643 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
3644 priv->staging_rxon.flags = 0;
3645 3901
3646 iwl_set_rxon_channel(priv, conf->channel); 3902 iwl_set_rxon_channel(priv, channel, ctx);
3647 iwl_set_rxon_ht(priv, ht_conf); 3903 iwl_set_rxon_ht(priv, ht_conf);
3648 iwl_set_flags_for_band(priv, conf->channel->band, 3904 iwl_set_flags_for_band(priv, ctx, channel->band,
3649 priv->vif); 3905 ctx->vif);
3650 spin_unlock_irqrestore(&priv->lock, flags); 3906 spin_unlock_irqrestore(&priv->lock, flags);
3651 3907
3652 iwl_set_rate(priv); 3908 iwl_set_rate(priv);
@@ -3663,7 +3919,7 @@ out:
3663 mutex_unlock(&priv->mutex); 3919 mutex_unlock(&priv->mutex);
3664out_exit: 3920out_exit:
3665 if (!priv->switch_rxon.switch_in_progress) 3921 if (!priv->switch_rxon.switch_in_progress)
3666 ieee80211_chswitch_done(priv->vif, false); 3922 ieee80211_chswitch_done(ctx->vif, false);
3667 IWL_DEBUG_MAC80211(priv, "leave\n"); 3923 IWL_DEBUG_MAC80211(priv, "leave\n");
3668} 3924}
3669 3925
@@ -3674,6 +3930,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3674{ 3930{
3675 struct iwl_priv *priv = hw->priv; 3931 struct iwl_priv *priv = hw->priv;
3676 __le32 filter_or = 0, filter_nand = 0; 3932 __le32 filter_or = 0, filter_nand = 0;
3933 struct iwl_rxon_context *ctx;
3677 3934
3678#define CHK(test, flag) do { \ 3935#define CHK(test, flag) do { \
3679 if (*total_flags & (test)) \ 3936 if (*total_flags & (test)) \
@@ -3693,10 +3950,11 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3693 3950
3694 mutex_lock(&priv->mutex); 3951 mutex_lock(&priv->mutex);
3695 3952
3696 priv->staging_rxon.filter_flags &= ~filter_nand; 3953 for_each_context(priv, ctx) {
3697 priv->staging_rxon.filter_flags |= filter_or; 3954 ctx->staging.filter_flags &= ~filter_nand;
3698 3955 ctx->staging.filter_flags |= filter_or;
3699 iwlcore_commit_rxon(priv); 3956 iwlcore_commit_rxon(priv, ctx);
3957 }
3700 3958
3701 mutex_unlock(&priv->mutex); 3959 mutex_unlock(&priv->mutex);
3702 3960
@@ -3765,6 +4023,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3765 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); 4023 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
3766 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); 4024 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
3767 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); 4025 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
4026 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
4027 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
3768 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); 4028 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
3769 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); 4029 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
3770 4030
@@ -3807,10 +4067,10 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3807 cancel_delayed_work(&priv->alive_start); 4067 cancel_delayed_work(&priv->alive_start);
3808 cancel_work_sync(&priv->run_time_calib_work); 4068 cancel_work_sync(&priv->run_time_calib_work);
3809 cancel_work_sync(&priv->beacon_update); 4069 cancel_work_sync(&priv->beacon_update);
4070 cancel_work_sync(&priv->bt_full_concurrency);
4071 cancel_work_sync(&priv->bt_runtime_config);
3810 del_timer_sync(&priv->statistics_periodic); 4072 del_timer_sync(&priv->statistics_periodic);
3811 del_timer_sync(&priv->ucode_trace); 4073 del_timer_sync(&priv->ucode_trace);
3812 if (priv->cfg->ops->lib->recover_from_tx_stall)
3813 del_timer_sync(&priv->monitor_recover);
3814} 4074}
3815 4075
3816static void iwl_init_hw_rates(struct iwl_priv *priv, 4076static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3865,10 +4125,22 @@ static int iwl_init_drv(struct iwl_priv *priv)
3865 4125
3866 /* Choose which receivers/antennas to use */ 4126 /* Choose which receivers/antennas to use */
3867 if (priv->cfg->ops->hcmd->set_rxon_chain) 4127 if (priv->cfg->ops->hcmd->set_rxon_chain)
3868 priv->cfg->ops->hcmd->set_rxon_chain(priv); 4128 priv->cfg->ops->hcmd->set_rxon_chain(priv,
4129 &priv->contexts[IWL_RXON_CTX_BSS]);
3869 4130
3870 iwl_init_scan_params(priv); 4131 iwl_init_scan_params(priv);
3871 4132
4133 /* init bt coex */
4134 if (priv->cfg->advanced_bt_coexist) {
4135 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
4136 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
4137 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
4138 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
4139 priv->bt_duration = BT_DURATION_LIMIT_DEF;
4140 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
4141 priv->dynamic_agg_thresh = BT_AGG_THRESHOLD_DEF;
4142 }
4143
3872 /* Set the tx_power_user_lmt to the lowest power level 4144 /* Set the tx_power_user_lmt to the lowest power level
3873 * this value will get overwritten by channel max power avg 4145 * this value will get overwritten by channel max power avg
3874 * from eeprom */ 4146 * from eeprom */
@@ -3923,11 +4195,60 @@ static struct ieee80211_ops iwl_hw_ops = {
3923 .sta_remove = iwl_mac_sta_remove, 4195 .sta_remove = iwl_mac_sta_remove,
3924 .channel_switch = iwl_mac_channel_switch, 4196 .channel_switch = iwl_mac_channel_switch,
3925 .flush = iwl_mac_flush, 4197 .flush = iwl_mac_flush,
4198 .tx_last_beacon = iwl_mac_tx_last_beacon,
4199};
4200
4201static void iwl_hw_detect(struct iwl_priv *priv)
4202{
4203 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
4204 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
4205 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
4206 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
4207}
4208
4209static int iwl_set_hw_params(struct iwl_priv *priv)
4210{
4211 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
4212 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
4213 if (priv->cfg->mod_params->amsdu_size_8K)
4214 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
4215 else
4216 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
4217
4218 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
4219
4220 if (priv->cfg->mod_params->disable_11n)
4221 priv->cfg->sku &= ~IWL_SKU_N;
4222
4223 /* Device-specific setup */
4224 return priv->cfg->ops->lib->set_hw_params(priv);
4225}
4226
4227static const u8 iwlagn_bss_ac_to_fifo[] = {
4228 IWL_TX_FIFO_VO,
4229 IWL_TX_FIFO_VI,
4230 IWL_TX_FIFO_BE,
4231 IWL_TX_FIFO_BK,
4232};
4233
4234static const u8 iwlagn_bss_ac_to_queue[] = {
4235 0, 1, 2, 3,
4236};
4237
4238static const u8 iwlagn_pan_ac_to_fifo[] = {
4239 IWL_TX_FIFO_VO_IPAN,
4240 IWL_TX_FIFO_VI_IPAN,
4241 IWL_TX_FIFO_BE_IPAN,
4242 IWL_TX_FIFO_BK_IPAN,
4243};
4244
4245static const u8 iwlagn_pan_ac_to_queue[] = {
4246 7, 6, 5, 4,
3926}; 4247};
3927 4248
3928static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 4249static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3929{ 4250{
3930 int err = 0; 4251 int err = 0, i;
3931 struct iwl_priv *priv; 4252 struct iwl_priv *priv;
3932 struct ieee80211_hw *hw; 4253 struct ieee80211_hw *hw;
3933 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 4254 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
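The four-entry tables defined just above are indexed by mac80211 access category (0..3 = VO, VI, BE, BK); each context maps the same AC onto its own TX FIFO and hardware queue, so the PAN context lands on queues 7..4 and cannot collide with the BSS queues 0..3. A hedged sketch of how such tables would be consumed (helper names hypothetical):

    static inline u8 ctx_ac_to_queue(const struct iwl_rxon_context *ctx, u16 ac)
    {
            return ctx->ac_to_queue[ac];    /* e.g. PAN VO -> queue 7 */
    }

    static inline u8 ctx_ac_to_fifo(const struct iwl_rxon_context *ctx, u16 ac)
    {
            return ctx->ac_to_fifo[ac];     /* e.g. PAN VO -> IWL_TX_FIFO_VO_IPAN */
    }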
@@ -3955,6 +4276,51 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3955 priv = hw->priv; 4276 priv = hw->priv;
3956 /* At this point both hw and priv are allocated. */ 4277 /* At this point both hw and priv are allocated. */
3957 4278
4279 /*
4280 * The default context is always valid,
4281 * more may be discovered when firmware
4282 * is loaded.
4283 */
4284 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
4285
4286 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
4287 priv->contexts[i].ctxid = i;
4288
4289 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
4290 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
4291 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
4292 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
4293 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
4294 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
4295 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
4296 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
4297 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
4298 BIT(NL80211_IFTYPE_ADHOC);
4299 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
4300 BIT(NL80211_IFTYPE_STATION);
4301 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
4302 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
4303 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
4304
4305 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
4306 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = REPLY_WIPAN_RXON_TIMING;
4307 priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd = REPLY_WIPAN_RXON_ASSOC;
4308 priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
4309 priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
4310 priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
4311 priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
4312 priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
4313 priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
4314 priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
4315 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
4316 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
4317 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
4318 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
4319 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
4320 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
4321
4322 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
4323
3958 SET_IEEE80211_DEV(hw, &pdev->dev); 4324 SET_IEEE80211_DEV(hw, &pdev->dev);
3959 4325
3960 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 4326 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
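The patch leans on a for_each_context() iterator throughout. A plausible definition, assuming valid_contexts is the bitmask over ctxid set just above (the real macro lives elsewhere in the driver and may differ):

    #define for_each_context(priv, ctx)                             \
            for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];           \
                 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)    \
                    if (priv->valid_contexts & BIT(ctx->ctxid))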
@@ -3962,12 +4328,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3962 priv->pci_dev = pdev; 4328 priv->pci_dev = pdev;
3963 priv->inta_mask = CSR_INI_SET_MASK; 4329 priv->inta_mask = CSR_INI_SET_MASK;
3964 4330
 4331 /* is antenna coupling more than 35 dB? */
4332 priv->bt_ant_couple_ok =
4333 (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
4334 true : false;
4335
4336 /* enable/disable bt channel announcement */
4337 priv->bt_ch_announce = iwlagn_bt_ch_announce;
4338
3965 if (iwl_alloc_traffic_mem(priv)) 4339 if (iwl_alloc_traffic_mem(priv))
3966 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 4340 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3967 4341
3968 /************************** 4342 /**************************
3969 * 2. Initializing PCI bus 4343 * 2. Initializing PCI bus
3970 **************************/ 4344 **************************/
4345 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
4346 PCIE_LINK_STATE_CLKPM);
4347
3971 if (pci_enable_device(pdev)) { 4348 if (pci_enable_device(pdev)) {
3972 err = -ENODEV; 4349 err = -ENODEV;
3973 goto out_ieee80211_free_hw; 4350 goto out_ieee80211_free_hw;
@@ -4492,3 +4869,11 @@ module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
4492 S_IRUGO); 4869 S_IRUGO);
4493MODULE_PARM_DESC(ucode_alternative, 4870MODULE_PARM_DESC(ucode_alternative,
4494 "specify ucode alternative to use from ucode file"); 4871 "specify ucode alternative to use from ucode file");
4872
4873module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
4874MODULE_PARM_DESC(antenna_coupling,
4875 "specify antenna coupling in dB (defualt: 0 dB)");
4876
4877module_param_named(bt_ch_announce, iwlagn_bt_ch_announce, bool, S_IRUGO);
4878MODULE_PARM_DESC(bt_ch_announce,
4879 "Enable BT channel announcement mode (default: enable)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index cc6464dc72e5..7c542a8c8f81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -95,6 +95,7 @@ extern struct iwl_cfg iwl1000_bg_cfg;
95 95
96extern struct iwl_mod_params iwlagn_mod_params; 96extern struct iwl_mod_params iwlagn_mod_params;
97extern struct iwl_hcmd_ops iwlagn_hcmd; 97extern struct iwl_hcmd_ops iwlagn_hcmd;
98extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
98extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; 99extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
99 100
100int iwl_reset_ict(struct iwl_priv *priv); 101int iwl_reset_ict(struct iwl_priv *priv);
@@ -223,7 +224,16 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
223 struct ieee80211_vif *vif, bool add); 224 struct ieee80211_vif *vif, bool add);
224 225
225/* hcmd */ 226/* hcmd */
226int iwlagn_send_rxon_assoc(struct iwl_priv *priv); 227int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
228 struct iwl_rxon_context *ctx);
227int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant); 229int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
228 230
231/* bt coex */
232void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
233void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
234 struct iwl_rx_mem_buffer *rxb);
235void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
236void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
237void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
238
229#endif /* __iwl_agn_h__ */ 239#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 60725a5c1b69..3e4ba31b5d59 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -62,7 +62,7 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions. 64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-4965-hw.h for hardware-related definitions. 65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions. 66 * Please use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
@@ -173,6 +173,23 @@ enum {
173 REPLY_RX_MPDU_CMD = 0xc1, 173 REPLY_RX_MPDU_CMD = 0xc1,
174 REPLY_RX = 0xc3, 174 REPLY_RX = 0xc3,
175 REPLY_COMPRESSED_BA = 0xc5, 175 REPLY_COMPRESSED_BA = 0xc5,
176
177 /* BT Coex */
178 REPLY_BT_COEX_PRIO_TABLE = 0xcc,
179 REPLY_BT_COEX_PROT_ENV = 0xcd,
180 REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
181 REPLY_BT_COEX_SCO = 0xcf,
182
183 /* PAN commands */
184 REPLY_WIPAN_PARAMS = 0xb2,
185 REPLY_WIPAN_RXON = 0xb3, /* use REPLY_RXON structure */
186 REPLY_WIPAN_RXON_TIMING = 0xb4, /* use REPLY_RXON_TIMING structure */
187 REPLY_WIPAN_RXON_ASSOC = 0xb6, /* use REPLY_RXON_ASSOC structure */
188 REPLY_WIPAN_QOS_PARAM = 0xb7, /* use REPLY_QOS_PARAM structure */
189 REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
190 REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
191 REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
192
176 REPLY_MAX = 0xff 193 REPLY_MAX = 0xff
177}; 194};
178 195
@@ -600,6 +617,9 @@ enum {
600 RXON_DEV_TYPE_ESS = 3, 617 RXON_DEV_TYPE_ESS = 3,
601 RXON_DEV_TYPE_IBSS = 4, 618 RXON_DEV_TYPE_IBSS = 4,
602 RXON_DEV_TYPE_SNIFFER = 6, 619 RXON_DEV_TYPE_SNIFFER = 6,
620 RXON_DEV_TYPE_CP = 7,
621 RXON_DEV_TYPE_2STA = 8,
622 RXON_DEV_TYPE_P2P = 9,
603}; 623};
604 624
605 625
@@ -816,7 +836,8 @@ struct iwl_rxon_time_cmd {
816 __le16 atim_window; 836 __le16 atim_window;
817 __le32 beacon_init_val; 837 __le32 beacon_init_val;
818 __le16 listen_interval; 838 __le16 listen_interval;
819 __le16 reserved; 839 u8 dtim_period;
840 u8 delta_cp_bss_tbtts;
820} __packed; 841} __packed;
821 842
822/* 843/*
@@ -953,11 +974,13 @@ struct iwl_qosparam_cmd {
953 974
954/* Special, dedicated locations within device's station table */ 975/* Special, dedicated locations within device's station table */
955#define IWL_AP_ID 0 976#define IWL_AP_ID 0
977#define IWL_AP_ID_PAN 1
956#define IWL_STA_ID 2 978#define IWL_STA_ID 2
957#define IWL3945_BROADCAST_ID 24 979#define IWL3945_BROADCAST_ID 24
958#define IWL3945_STATION_COUNT 25 980#define IWL3945_STATION_COUNT 25
959#define IWL4965_BROADCAST_ID 31 981#define IWL4965_BROADCAST_ID 31
960#define IWL4965_STATION_COUNT 32 982#define IWL4965_STATION_COUNT 32
983#define IWLAGN_PAN_BCAST_ID 14
961#define IWLAGN_BROADCAST_ID 15 984#define IWLAGN_BROADCAST_ID 15
962#define IWLAGN_STATION_COUNT 16 985#define IWLAGN_STATION_COUNT 16
963 986
@@ -966,6 +989,7 @@ struct iwl_qosparam_cmd {
966 989
967#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 990#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
968#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) 991#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
992#define STA_FLG_PAN_STATION cpu_to_le32(1 << 13)
969#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) 993#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
970#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) 994#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
971#define STA_FLG_MAX_AGG_SIZE_POS (19) 995#define STA_FLG_MAX_AGG_SIZE_POS (19)
@@ -994,6 +1018,7 @@ struct iwl_qosparam_cmd {
994#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000) 1018#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
995#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000) 1019#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
996#define STA_KEY_MAX_NUM 8 1020#define STA_KEY_MAX_NUM 8
1021#define STA_KEY_MAX_NUM_PAN 16
997 1022
998/* Flags indicate whether to modify vs. don't change various station params */ 1023/* Flags indicate whether to modify vs. don't change various station params */
999#define STA_MODIFY_KEY_MASK 0x01 1024#define STA_MODIFY_KEY_MASK 0x01
@@ -1056,7 +1081,8 @@ struct sta_id_modify {
1056 * 1081 *
1057 * The device contains an internal table of per-station information, 1082 * The device contains an internal table of per-station information,
1058 * with info on security keys, aggregation parameters, and Tx rates for 1083 * with info on security keys, aggregation parameters, and Tx rates for
1059 * initial Tx attempt and any retries (4965 uses REPLY_TX_LINK_QUALITY_CMD, 1084 * initial Tx attempt and any retries (agn devices uses
1085 * REPLY_TX_LINK_QUALITY_CMD,
1060 * 3945 uses REPLY_RATE_SCALE to set up rate tables). 1086 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
1061 * 1087 *
1062 * REPLY_ADD_STA sets up the table entry for one station, either creating 1088 * REPLY_ADD_STA sets up the table entry for one station, either creating
@@ -1367,21 +1393,24 @@ struct iwl4965_rx_non_cfg_phy {
1367} __packed; 1393} __packed;
1368 1394
1369 1395
1370#define IWL50_RX_RES_PHY_CNT 8 1396#define IWLAGN_RX_RES_PHY_CNT 8
1371#define IWL50_RX_RES_AGC_IDX 1 1397#define IWLAGN_RX_RES_AGC_IDX 1
1372#define IWL50_RX_RES_RSSI_AB_IDX 2 1398#define IWLAGN_RX_RES_RSSI_AB_IDX 2
1373#define IWL50_RX_RES_RSSI_C_IDX 3 1399#define IWLAGN_RX_RES_RSSI_C_IDX 3
1374#define IWL50_OFDM_AGC_MSK 0xfe00 1400#define IWLAGN_OFDM_AGC_MSK 0xfe00
1375#define IWL50_OFDM_AGC_BIT_POS 9 1401#define IWLAGN_OFDM_AGC_BIT_POS 9
1376#define IWL50_OFDM_RSSI_A_MSK 0x00ff 1402#define IWLAGN_OFDM_RSSI_INBAND_A_BITMSK 0x00ff
1377#define IWL50_OFDM_RSSI_A_BIT_POS 0 1403#define IWLAGN_OFDM_RSSI_ALLBAND_A_BITMSK 0xff00
1378#define IWL50_OFDM_RSSI_B_MSK 0xff0000 1404#define IWLAGN_OFDM_RSSI_A_BIT_POS 0
1379#define IWL50_OFDM_RSSI_B_BIT_POS 16 1405#define IWLAGN_OFDM_RSSI_INBAND_B_BITMSK 0xff0000
1380#define IWL50_OFDM_RSSI_C_MSK 0x00ff 1406#define IWLAGN_OFDM_RSSI_ALLBAND_B_BITMSK 0xff000000
1381#define IWL50_OFDM_RSSI_C_BIT_POS 0 1407#define IWLAGN_OFDM_RSSI_B_BIT_POS 16
1408#define IWLAGN_OFDM_RSSI_INBAND_C_BITMSK 0x00ff
1409#define IWLAGN_OFDM_RSSI_ALLBAND_C_BITMSK 0xff00
1410#define IWLAGN_OFDM_RSSI_C_BIT_POS 0
1382 1411
1383struct iwl5000_non_cfg_phy { 1412struct iwlagn_non_cfg_phy {
1384 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */ 1413 __le32 non_cfg_phy[IWLAGN_RX_RES_PHY_CNT]; /* up to 8 phy entries */
1385} __packed; 1414} __packed;
1386 1415
1387 1416
@@ -1401,7 +1430,7 @@ struct iwl_rx_phy_res {
1401 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ 1430 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1402 __le32 rate_n_flags; /* RATE_MCS_* */ 1431 __le32 rate_n_flags; /* RATE_MCS_* */
1403 __le16 byte_count; /* frame's byte-count */ 1432 __le16 byte_count; /* frame's byte-count */
1404 __le16 reserved3; 1433 __le16 frame_time; /* frame's time on the air */
1405} __packed; 1434} __packed;
1406 1435
1407struct iwl_rx_mpdu_res_start { 1436struct iwl_rx_mpdu_res_start {
@@ -1424,12 +1453,12 @@ struct iwl_rx_mpdu_res_start {
1424 * uCode handles all timing and protocol related to control frames 1453 * uCode handles all timing and protocol related to control frames
1425 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler 1454 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1426 * handle reception of block-acks; uCode updates the host driver via 1455 * handle reception of block-acks; uCode updates the host driver via
1427 * REPLY_COMPRESSED_BA (4965). 1456 * REPLY_COMPRESSED_BA.
1428 * 1457 *
1429 * uCode handles retrying Tx when an ACK is expected but not received. 1458 * uCode handles retrying Tx when an ACK is expected but not received.
1430 * This includes trying lower data rates than the one requested in the Tx 1459 * This includes trying lower data rates than the one requested in the Tx
1431 * command, as set up by the REPLY_RATE_SCALE (for 3945) or 1460 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
1432 * REPLY_TX_LINK_QUALITY_CMD (4965). 1461 * REPLY_TX_LINK_QUALITY_CMD (agn).
1433 * 1462 *
1434 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. 1463 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1435 * This command must be executed after every RXON command, before Tx can occur. 1464 * This command must be executed after every RXON command, before Tx can occur.
@@ -1465,7 +1494,7 @@ struct iwl_rx_mpdu_res_start {
 *	Set this for unicast frames, but not broadcast/multicast. */
 #define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
 
-/* For 4965:
+/* For agn devices:
 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
 *	Tx command's initial_rate_index indicates first rate to try;
 *	uCode walks through table for additional Tx attempts. */
@@ -1484,7 +1513,7 @@ struct iwl_rx_mpdu_res_start {
 */
 #define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
 
-/* Tx antenna selection field; used only for 3945, reserved (0) for 4965.
+/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
 #define TX_CMD_FLG_ANT_SEL_MSK	cpu_to_le32(0xf00)
 #define TX_CMD_FLG_ANT_A_MSK	cpu_to_le32(1 << 8)
@@ -1867,9 +1896,10 @@ enum {
 *	frame in this new agg block failed in previous agg block(s).
 *
 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
-* block-ack has not been received by the time the 4965 records this status.
+* block-ack has not been received by the time the agn device records
+* this status.
 * This status relates to reasons the tx might have been blocked or aborted
-* within the sending station (this 4965), rather than whether it was
+* within the sending station (this agn device), rather than whether it was
 * received successfully by the destination station.
 */
 struct agg_tx_status {
@@ -2092,8 +2122,8 @@ struct iwl_link_qual_general_params {
 } __packed;
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
-#define LINK_QUAL_AGG_TIME_LIMIT_MAX	(65535)
-#define LINK_QUAL_AGG_TIME_LIMIT_MIN	(0)
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN	(100)
 
 #define LINK_QUAL_AGG_DISABLE_START_DEF	(3)
 #define LINK_QUAL_AGG_DISABLE_START_MAX	(255)
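The tightened MIN/MAX bounds mean a per-device .cfg override must be
range-checked before use. A minimal sketch, assuming a hypothetical
cfg_time_limit value supplied by the device configuration:

	u16 agg_time_limit = LINK_QUAL_AGG_TIME_LIMIT_DEF;

	if (cfg_time_limit >= LINK_QUAL_AGG_TIME_LIMIT_MIN &&
	    cfg_time_limit <= LINK_QUAL_AGG_TIME_LIMIT_MAX)
		agg_time_limit = cfg_time_limit;	/* use the override */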
@@ -2110,8 +2140,10 @@ struct iwl_link_qual_general_params {
 */
 struct iwl_link_qual_agg_params {
 
-	/* Maximum number of uSec in aggregation.
-	 * Driver should set this to 4000 (4 milliseconds). */
+	/*
+	 * Maximum number of uSec in aggregation.
+	 * Default is set to 4000 (4 milliseconds) if not configured in .cfg.
+	 */
 	__le16 agg_time_limit;
 
 	/*
@@ -2135,14 +2167,16 @@ struct iwl_link_qual_agg_params {
 /*
 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
 *
-* For 4965 only; 3945 uses REPLY_RATE_SCALE.
+* For agn devices only; 3945 uses REPLY_RATE_SCALE.
 *
-* Each station in the 4965's internal station table has its own table of 16
+* Each station in the agn device's internal station table has its own table
+* of 16
 * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
 * an ACK is not received.  This command replaces the entire table for
 * one station.
 *
-* NOTE: Station must already be in 4965's station table.  Use REPLY_ADD_STA.
+* NOTE: Station must already be in agn device's station table.
+*	Use REPLY_ADD_STA.
 *
 * The rate scaling procedures described below work well.  Of course, other
 * procedures are possible, and may work better for particular environments.
@@ -2179,12 +2213,12 @@ struct iwl_link_qual_agg_params {
 *
 * ACCUMULATING HISTORY
 *
-* The rate scaling algorithm for 4965, as implemented in Linux driver, uses
-* two sets of frame Tx success history:  One for the current/active modulation
-* mode, and one for a speculative/search mode that is being attempted. If the
-* speculative mode turns out to be more effective (i.e. actual transfer
-* rate is better), then the driver continues to use the speculative mode
-* as the new current active mode.
+* The rate scaling algorithm for agn devices, as implemented in Linux driver,
+* uses two sets of frame Tx success history:  One for the current/active
+* modulation mode, and one for a speculative/search mode that is being
+* attempted. If the speculative mode turns out to be more effective (i.e.
+* actual transfer rate is better), then the driver continues to use the
+* speculative mode as the new current active mode.
 *
 * Each history set contains, separately for each possible rate, data for a
 * sliding window of the 62 most recent tx attempts at that rate.  The data
@@ -2195,12 +2229,12 @@ struct iwl_link_qual_agg_params {
 * The driver uses the bit map to remove successes from the success sum, as
 * the oldest tx attempts fall out of the window.
 *
-* When the 4965 makes multiple tx attempts for a given frame, each attempt
-* might be at a different rate, and have different modulation characteristics
-* (e.g. antenna, fat channel, short guard interval), as set up in the rate
-* scaling table in the Link Quality command.  The driver must determine
-* which rate table entry was used for each tx attempt, to determine which
-* rate-specific history to update, and record only those attempts that
-* match the modulation characteristics of the history set.
+* When the agn device makes multiple tx attempts for a given frame, each
+* attempt might be at a different rate, and have different modulation
+* characteristics (e.g. antenna, fat channel, short guard interval), as set
+* up in the rate scaling table in the Link Quality command.  The driver must
+* determine which rate table entry was used for each tx attempt, to determine
+* which rate-specific history to update, and record only those attempts that
+* match the modulation characteristics of the history set.
 *
 * When using block-ack (aggregation), all frames are transmitted at the same
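The 62-entry window described above can be sketched as a bitmap plus a
success sum; names below are illustrative, not the driver's actual rate
scaling state:

	struct rs_rate_window {
		u64 data;		/* bitmap of the last 62 attempts */
		s32 success_counter;	/* successes inside the window */
		s32 counter;		/* attempts inside the window */
	};

	static void rs_window_add(struct rs_rate_window *w, bool success)
	{
		/* drop the oldest attempt once the window is full */
		if (w->counter >= 62) {
			if (w->data & (1ULL << 61))
				w->success_counter--;
			w->counter--;
		}
		w->data = (w->data << 1) & ((1ULL << 62) - 1);
		if (success) {
			w->data |= 1;
			w->success_counter++;
		}
		w->counter++;
	}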
@@ -2330,7 +2364,7 @@ struct iwl_link_quality_cmd {
 	/*
 	 * Rate info; when using rate-scaling, Tx command's initial_rate_index
 	 * specifies 1st Tx rate attempted, via index into this table.
-	 * 4965 works its way through table when retrying Tx.
+	 * agn devices work their way through the table when retrying Tx.
 	 */
 	struct {
 		__le32 rate_n_flags;	/* RATE_MCS_*, IWL_RATE_* */
@@ -2363,10 +2397,26 @@ struct iwl_link_quality_cmd {
 #define BT_MAX_KILL_DEF		(0x5)
 #define BT_MAX_KILL_MAX		(0xFF)
 
+#define BT_DURATION_LIMIT_DEF	625
+#define BT_DURATION_LIMIT_MAX	1250
+#define BT_DURATION_LIMIT_MIN	625
+
+#define BT_ON_THRESHOLD_DEF	4
+#define BT_ON_THRESHOLD_MAX	1000
+#define BT_ON_THRESHOLD_MIN	1
+
+#define BT_FRAG_THRESHOLD_DEF	0
+#define BT_FRAG_THRESHOLD_MAX	0
+#define BT_FRAG_THRESHOLD_MIN	0
+
+#define BT_AGG_THRESHOLD_DEF	0
+#define BT_AGG_THRESHOLD_MAX	0
+#define BT_AGG_THRESHOLD_MIN	0
+
 /*
 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
 *
-* 3945 and 4965 support hardware handshake with Bluetooth device on
+* 3945 and agn devices support hardware handshake with Bluetooth device on
 * same platform.  Bluetooth device alerts wireless device when it will Tx;
 * wireless device can delay or kill its own Tx to accommodate.
 */
@@ -2379,6 +2429,74 @@ struct iwl_bt_cmd {
 	__le32 kill_cts_mask;
 } __packed;
 
+#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION	BIT(0)
+
+#define IWLAGN_BT_FLAG_COEX_MODE_MASK		(BIT(3)|BIT(4)|BIT(5))
+#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT		3
+#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED	0
+#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W	1
+#define IWLAGN_BT_FLAG_COEX_MODE_3W		2
+#define IWLAGN_BT_FLAG_COEX_MODE_4W		3
+
+#define IWLAGN_BT_FLAG_UCODE_DEFAULT		BIT(6)
+#define IWLAGN_BT_FLAG_NOCOEX_NOTIF		BIT(7)
+
+#define IWLAGN_BT_PRIO_BOOST_MAX	0xFF
+#define IWLAGN_BT_PRIO_BOOST_MIN	0x00
+#define IWLAGN_BT_PRIO_BOOST_DEFAULT	0xF0
+
+#define IWLAGN_BT_MAX_KILL_DEFAULT	5
+
+#define IWLAGN_BT3_T7_DEFAULT		1
+
+#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT	cpu_to_le32(0xffffffff)
+#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT	cpu_to_le32(0xffffffff)
+
+#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT	2
+
+#define IWLAGN_BT3_T2_DEFAULT		0xc
+
+#define IWLAGN_BT_VALID_ENABLE_FLAGS	cpu_to_le16(BIT(0))
+#define IWLAGN_BT_VALID_BOOST		cpu_to_le16(BIT(1))
+#define IWLAGN_BT_VALID_MAX_KILL	cpu_to_le16(BIT(2))
+#define IWLAGN_BT_VALID_3W_TIMERS	cpu_to_le16(BIT(3))
+#define IWLAGN_BT_VALID_KILL_ACK_MASK	cpu_to_le16(BIT(4))
+#define IWLAGN_BT_VALID_KILL_CTS_MASK	cpu_to_le16(BIT(5))
+#define IWLAGN_BT_VALID_BT4_TIMES	cpu_to_le16(BIT(6))
+#define IWLAGN_BT_VALID_3W_LUT		cpu_to_le16(BIT(7))
+
+#define IWLAGN_BT_ALL_VALID_MSK		(IWLAGN_BT_VALID_ENABLE_FLAGS | \
+					IWLAGN_BT_VALID_BOOST | \
+					IWLAGN_BT_VALID_MAX_KILL | \
+					IWLAGN_BT_VALID_3W_TIMERS | \
+					IWLAGN_BT_VALID_KILL_ACK_MASK | \
+					IWLAGN_BT_VALID_KILL_CTS_MASK | \
+					IWLAGN_BT_VALID_BT4_TIMES | \
+					IWLAGN_BT_VALID_3W_LUT)
+
+struct iwlagn_bt_cmd {
+	u8 flags;
+	u8 ledtime; /* unused */
+	u8 max_kill;
+	u8 bt3_timer_t7_value;
+	__le32 kill_ack_mask;
+	__le32 kill_cts_mask;
+	u8 bt3_prio_sample_time;
+	u8 bt3_timer_t2_value;
+	__le16 bt4_reaction_time; /* unused */
+	__le32 bt3_lookup_table[12];
+	__le16 bt4_decision_time; /* unused */
+	__le16 valid;
+	u8 prio_boost;
+	u8 reserved[3];
+};
+
+#define IWLAGN_BT_SCO_ACTIVE	cpu_to_le32(BIT(0))
+
+struct iwlagn_bt_sco_cmd {
+	__le32 flags;
+};
+
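For orientation, an illustrative initialization of the new command using the
defaults defined above; this is a sketch, not the driver's actual send path
(the priv state the real code consults is not shown in this patch):

	struct iwlagn_bt_cmd bt_cmd = {
		.flags = IWLAGN_BT_FLAG_CHANNEL_INHIBITION |
			 (IWLAGN_BT_FLAG_COEX_MODE_3W <<
				IWLAGN_BT_FLAG_COEX_MODE_SHIFT),
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		.kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
		.valid = IWLAGN_BT_ALL_VALID_MSK,
		.prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
	};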
 /******************************************************************************
 * (6)
 * Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2567,7 +2685,7 @@ struct iwl_powertable_cmd {
 
 /*
  * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
- * 3945 and 4965 identical.
+ * All devices identical.
 */
 struct iwl_sleep_notification {
 	u8 pm_sleep_mode;
@@ -2578,7 +2696,7 @@ struct iwl_sleep_notification {
 	__le32 bcon_timer;
 } __packed;
 
-/* Sleep states.  3945 and 4965 identical. */
+/* Sleep states.  All devices identical. */
 enum {
 	IWL_PM_NO_SLEEP = 0,
 	IWL_PM_SLP_MAC = 1,
@@ -2887,6 +3005,12 @@ struct iwl_scanstart_notification {
 #define SCAN_OWNER_STATUS 0x1;
 #define MEASURE_OWNER_STATUS 0x2;
 
+#define IWL_PROBE_STATUS_OK		0
+#define IWL_PROBE_STATUS_TX_FAILED	BIT(0)
+/* error statuses combined with TX_FAILED */
+#define IWL_PROBE_STATUS_FAIL_TTL	BIT(1)
+#define IWL_PROBE_STATUS_FAIL_BT	BIT(2)
+
 #define NUMBER_OF_STATISTICS 1	/* first __le32 is good CRC */
 /*
 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
@@ -2894,7 +3018,8 @@ struct iwl_scanstart_notification {
 struct iwl_scanresults_notification {
 	u8 channel;
 	u8 band;
-	u8 reserved[2];
+	u8 probe_status;
+	u8 num_probe_not_sent; /* not enough time to send */
 	__le32 tsf_low;
 	__le32 tsf_high;
 	__le32 statistics[NUMBER_OF_STATISTICS];
@@ -2906,7 +3031,7 @@ struct iwl_scanresults_notification {
 struct iwl_scancomplete_notification {
 	u8 scanned_channels;
 	u8 status;
-	u8 reserved;
+	u8 bt_status;	/* BT On/Off status */
 	u8 last_channel;
 	__le32 tsf_low;
 	__le32 tsf_high;
@@ -2919,6 +3044,11 @@ struct iwl_scancomplete_notification {
 *
 *****************************************************************************/
 
+enum iwl_ibss_manager {
+	IWL_NOT_IBSS_MANAGER = 0,
+	IWL_IBSS_MANAGER = 1,
+};
+
 /*
 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
 */
@@ -3260,7 +3390,7 @@ struct statistics_general_bt {
 
 /*
  * REPLY_STATISTICS_CMD = 0x9c,
- * 3945 and 4965 identical.
+ * All devices identical.
 *
 * This command triggers an immediate response containing uCode statistics.
 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
@@ -3598,7 +3728,7 @@ struct iwl_enhance_sensitivity_cmd {
 /**
 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
 *
-* This command sets the relative gains of 4965's 3 radio receiver chains.
+* This command sets the relative gains of the agn device's 3 radio receiver chains.
 *
 * After the first association, driver should accumulate signal and noise
 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
@@ -3955,6 +4085,201 @@ struct iwl_coex_event_resp {
 
 
 /******************************************************************************
4088 * Bluetooth Coexistence commands
4089 *
4090 *****************************************************************************/
4091
4092/*
4093 * BT Status notification
4094 * REPLY_BT_COEX_PROFILE_NOTIF = 0xce
4095 */
+enum iwl_bt_coex_profile_traffic_load {
+	IWL_BT_COEX_TRAFFIC_LOAD_NONE		= 0,
+	IWL_BT_COEX_TRAFFIC_LOAD_LOW		= 1,
+	IWL_BT_COEX_TRAFFIC_LOAD_HIGH		= 2,
+	IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS	= 3,
+	/*
+	 * There are no further values: even though the field carrying the
+	 * traffic load is a u8, the indication from the BT device only
+	 * has two bits.
+	 */
+};
4106
4107#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
4108#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
4109 (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
4110#define BT_UART_MSG_FRAME1SSN_POS (3)
4111#define BT_UART_MSG_FRAME1SSN_MSK \
4112 (0x3 << BT_UART_MSG_FRAME1SSN_POS)
4113#define BT_UART_MSG_FRAME1UPDATEREQ_POS (5)
4114#define BT_UART_MSG_FRAME1UPDATEREQ_MSK \
4115 (0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS)
4116#define BT_UART_MSG_FRAME1RESERVED_POS (6)
4117#define BT_UART_MSG_FRAME1RESERVED_MSK \
4118 (0x3 << BT_UART_MSG_FRAME1RESERVED_POS)
4119
4120#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS (0)
4121#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK \
4122 (0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS)
4123#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS (2)
4124#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK \
4125 (0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS)
4126#define BT_UART_MSG_FRAME2CHLSEQN_POS (4)
4127#define BT_UART_MSG_FRAME2CHLSEQN_MSK \
4128 (0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS)
4129#define BT_UART_MSG_FRAME2INBAND_POS (5)
4130#define BT_UART_MSG_FRAME2INBAND_MSK \
4131 (0x1 << BT_UART_MSG_FRAME2INBAND_POS)
4132#define BT_UART_MSG_FRAME2RESERVED_POS (6)
4133#define BT_UART_MSG_FRAME2RESERVED_MSK \
4134 (0x3 << BT_UART_MSG_FRAME2RESERVED_POS)
4135
4136#define BT_UART_MSG_FRAME3SCOESCO_POS (0)
4137#define BT_UART_MSG_FRAME3SCOESCO_MSK \
4138 (0x1 << BT_UART_MSG_FRAME3SCOESCO_POS)
4139#define BT_UART_MSG_FRAME3SNIFF_POS (1)
4140#define BT_UART_MSG_FRAME3SNIFF_MSK \
4141 (0x1 << BT_UART_MSG_FRAME3SNIFF_POS)
4142#define BT_UART_MSG_FRAME3A2DP_POS (2)
4143#define BT_UART_MSG_FRAME3A2DP_MSK \
4144 (0x1 << BT_UART_MSG_FRAME3A2DP_POS)
4145#define BT_UART_MSG_FRAME3ACL_POS (3)
4146#define BT_UART_MSG_FRAME3ACL_MSK \
4147 (0x1 << BT_UART_MSG_FRAME3ACL_POS)
4148#define BT_UART_MSG_FRAME3MASTER_POS (4)
4149#define BT_UART_MSG_FRAME3MASTER_MSK \
4150 (0x1 << BT_UART_MSG_FRAME3MASTER_POS)
4151#define BT_UART_MSG_FRAME3OBEX_POS (5)
4152#define BT_UART_MSG_FRAME3OBEX_MSK \
4153 (0x1 << BT_UART_MSG_FRAME3OBEX_POS)
4154#define BT_UART_MSG_FRAME3RESERVED_POS (6)
4155#define BT_UART_MSG_FRAME3RESERVED_MSK \
4156 (0x3 << BT_UART_MSG_FRAME3RESERVED_POS)
4157
4158#define BT_UART_MSG_FRAME4IDLEDURATION_POS (0)
4159#define BT_UART_MSG_FRAME4IDLEDURATION_MSK \
4160 (0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS)
4161#define BT_UART_MSG_FRAME4RESERVED_POS (6)
4162#define BT_UART_MSG_FRAME4RESERVED_MSK \
4163 (0x3 << BT_UART_MSG_FRAME4RESERVED_POS)
4164
4165#define BT_UART_MSG_FRAME5TXACTIVITY_POS (0)
4166#define BT_UART_MSG_FRAME5TXACTIVITY_MSK \
4167 (0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS)
4168#define BT_UART_MSG_FRAME5RXACTIVITY_POS (2)
4169#define BT_UART_MSG_FRAME5RXACTIVITY_MSK \
4170 (0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS)
4171#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS (4)
4172#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK \
4173 (0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS)
4174#define BT_UART_MSG_FRAME5RESERVED_POS (6)
4175#define BT_UART_MSG_FRAME5RESERVED_MSK \
4176 (0x3 << BT_UART_MSG_FRAME5RESERVED_POS)
4177
4178#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS (0)
4179#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK \
4180 (0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS)
4181#define BT_UART_MSG_FRAME6DISCOVERABLE_POS (5)
4182#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK \
4183 (0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS)
4184#define BT_UART_MSG_FRAME6RESERVED_POS (6)
4185#define BT_UART_MSG_FRAME6RESERVED_MSK \
4186 (0x3 << BT_UART_MSG_FRAME6RESERVED_POS)
4187
4188#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
4189#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
4190 (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
4191#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS (3)
4192#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK \
4193 (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS)
4194#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
4195#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
4196 (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
4197#define BT_UART_MSG_FRAME7RESERVED_POS (6)
4198#define BT_UART_MSG_FRAME7RESERVED_MSK \
4199 (0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
4200
4201
+struct iwl_bt_uart_msg {
+	u8 header;
+	u8 frame1;
+	u8 frame2;
+	u8 frame3;
+	u8 frame4;
+	u8 frame5;
+	u8 frame6;
+	u8 frame7;
+} __packed;
+
+struct iwl_bt_coex_profile_notif {
+	struct iwl_bt_uart_msg last_bt_uart_msg;
+	u8 bt_status; /* 0 - off, 1 - on */
+	u8 bt_traffic_load; /* 0 .. 3? */
+	u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
+	u8 reserved;
+} __packed;
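Each frameN byte is decoded with the paired POS/MSK macros above. A minimal
sketch (illustrative helper, not part of the patch) extracting the traffic
load bits from frame2:

	static inline u8 bt_uart_traffic_load(const struct iwl_bt_uart_msg *msg)
	{
		/* mask out the two traffic-load bits, then shift down */
		return (msg->frame2 & BT_UART_MSG_FRAME2TRAFFICLOAD_MSK) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS;
	}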
4220
4221#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS 0
4222#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK 0x1
4223#define IWL_BT_COEX_PRIO_TBL_PRIO_POS 1
4224#define IWL_BT_COEX_PRIO_TBL_PRIO_MASK 0x0e
4225#define IWL_BT_COEX_PRIO_TBL_RESERVED_POS 4
4226#define IWL_BT_COEX_PRIO_TBL_RESERVED_MASK 0xf0
4227#define IWL_BT_COEX_PRIO_TBL_PRIO_SHIFT 1
4228
4229/*
4230 * BT Coexistence Priority table
4231 * REPLY_BT_COEX_PRIO_TABLE = 0xcc
4232 */
4233enum bt_coex_prio_table_events {
4234 BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
4235 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
4236 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
4237 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, /* DC calib */
4238 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
4239 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
4240 BT_COEX_PRIO_TBL_EVT_DTIM = 6,
4241 BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
4242 BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
4243 BT_COEX_PRIO_TBL_EVT_RESERVED0 = 9,
4244 BT_COEX_PRIO_TBL_EVT_RESERVED1 = 10,
4245 BT_COEX_PRIO_TBL_EVT_RESERVED2 = 11,
4246 BT_COEX_PRIO_TBL_EVT_RESERVED3 = 12,
4247 BT_COEX_PRIO_TBL_EVT_RESERVED4 = 13,
4248 BT_COEX_PRIO_TBL_EVT_RESERVED5 = 14,
4249 BT_COEX_PRIO_TBL_EVT_RESERVED6 = 15,
4250 /* BT_COEX_PRIO_TBL_EVT_MAX should always be last */
4251 BT_COEX_PRIO_TBL_EVT_MAX,
4252};
4253
4254enum bt_coex_prio_table_priorities {
4255 BT_COEX_PRIO_TBL_DISABLED = 0,
4256 BT_COEX_PRIO_TBL_PRIO_LOW = 1,
4257 BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
4258 BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
4259 BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
4260 BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
4261 BT_COEX_PRIO_TBL_PRIO_RSRVD1 = 6,
4262 BT_COEX_PRIO_TBL_PRIO_RSRVD2 = 7,
4263 BT_COEX_PRIO_TBL_MAX,
4264};
4265
+struct iwl_bt_coex_prio_table_cmd {
+	u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
+} __packed;
4269
4270#define IWL_BT_COEX_ENV_CLOSE 0
4271#define IWL_BT_COEX_ENV_OPEN 1
4272/*
4273 * BT Protection Envelope
4274 * REPLY_BT_COEX_PROT_ENV = 0xcd
4275 */
+struct iwl_bt_coex_prot_env_cmd {
+	u8 action; /* 0 = closed, 1 = open */
+	u8 type; /* 0 .. 15 */
+	u8 reserved[2];
+} __packed;
4281
4282/******************************************************************************
 * (13)
 * Union of all expected notifications/responses:
 *
@@ -3993,6 +4318,7 @@ struct iwl_rx_packet {
 		struct iwl_missed_beacon_notif missed_beacon;
 		struct iwl_coex_medium_notification coex_medium_notif;
 		struct iwl_coex_event_resp coex_event;
+		struct iwl_bt_coex_profile_notif bt_coex_profile_notif;
 		__le32 status;
 		u8 raw[0];
 	} u;
@@ -4000,4 +4326,94 @@ struct iwl_rx_packet {
 
 int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
 
4329/*
4330 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
4331 */
4332
4333/**
4334 * struct iwl_wipan_slot
4335 * @width: Time in TU
4336 * @type:
4337 * 0 - BSS
4338 * 1 - PAN
4339 */
4340struct iwl_wipan_slot {
4341 __le16 width;
4342 u8 type;
4343 u8 reserved;
4344} __packed;
4345
4346#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_CTS BIT(1) /* reserved */
4347#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_QUIET BIT(2) /* reserved */
4348#define IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE BIT(3) /* reserved */
4349#define IWL_WIPAN_PARAMS_FLG_FILTER_BEACON_NOTIF BIT(4)
4350#define IWL_WIPAN_PARAMS_FLG_FULL_SLOTTED_MODE BIT(5)
4351
4352/**
4353 * struct iwl_wipan_params_cmd
4354 * @flags:
4355 * bit0: reserved
4356 * bit1: CP leave channel with CTS
 *	bit2: CP leave channel with Quiet
4358 * bit3: slotted mode
4359 * 1 - work in slotted mode
4360 * 0 - work in non slotted mode
4361 * bit4: filter beacon notification
4362 * bit5: full tx slotted mode. if this flag is set,
4363 * uCode will perform leaving channel methods in context switch
4364 * also when working in same channel mode
4365 * @num_slots: 1 - 10
4366 */
4367struct iwl_wipan_params_cmd {
4368 __le16 flags;
4369 u8 reserved;
4370 u8 num_slots;
4371 struct iwl_wipan_slot slots[10];
4372} __packed;
4373
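As an illustration of the slot table (a sketch; the slot widths here are
arbitrary, not values from the patch), an 80 TU beacon interval could be
split evenly between the BSS and PAN contexts with two slots:

	struct iwl_wipan_params_cmd cmd = {
		.num_slots = 2,
	};

	/* slot types follow the kernel-doc above: 0 - BSS, 1 - PAN */
	cmd.slots[0].width = cpu_to_le16(40);
	cmd.slots[0].type = 0;	/* BSS */
	cmd.slots[1].width = cpu_to_le16(40);
	cmd.slots[1].type = 1;	/* PAN */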
4374/*
4375 * REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9
4376 *
4377 * TODO: Figure out what this is used for,
4378 * it can only switch between 2.4 GHz
4379 * channels!!
4380 */
4381
4382struct iwl_wipan_p2p_channel_switch_cmd {
4383 __le16 channel;
4384 __le16 reserved;
4385};
4386
4387/*
4388 * REPLY_WIPAN_NOA_NOTIFICATION = 0xbc
4389 *
4390 * This is used by the device to notify us of the
4391 * NoA schedule it determined so we can forward it
4392 * to userspace for inclusion in probe responses.
4393 *
4394 * In beacons, the NoA schedule is simply appended
4395 * to the frame we give the device.
4396 */
4397
4398struct iwl_wipan_noa_descriptor {
4399 u8 count;
4400 __le32 duration;
4401 __le32 interval;
4402 __le32 starttime;
4403} __packed;
4404
4405struct iwl_wipan_noa_attribute {
4406 u8 id;
4407 __le16 length;
4408 u8 index;
4409 u8 ct_window;
4410 struct iwl_wipan_noa_descriptor descr0, descr1;
4411 u8 reserved;
4412} __packed;
4413
4414struct iwl_wipan_noa_notification {
4415 u32 noa_active;
4416 struct iwl_wipan_noa_attribute noa_attribute;
4417} __packed;
4418
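A hypothetical sketch of the forwarding described above: copy the NoA
attribute out of the notification into the probe-response data handed to
userspace (buffer management elided; helper name is not from the patch):

	static void fwd_noa(const struct iwl_wipan_noa_notification *notif,
			    u8 *buf)
	{
		/* byte order of noa_active is left as declared above */
		if (notif->noa_active)
			memcpy(buf, &notif->noa_attribute,
			       sizeof(notif->noa_attribute));
	}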
 #endif /* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 07dbc2796448..87a2e40972ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -64,7 +64,8 @@ MODULE_LICENSE("GPL");
  *
  * default: bt_coex_active = true (BT_COEX_ENABLE)
  */
-static bool bt_coex_active = true;
+bool bt_coex_active = true;
+EXPORT_SYMBOL_GPL(bt_coex_active);
 module_param(bt_coex_active, bool, S_IRUGO);
 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
 
@@ -146,6 +147,10 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
 	int i;
 	u8 ind = ant;
 
+	if (priv->band == IEEE80211_BAND_2GHZ &&
+	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
+		return 0;
+
 	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
 		ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
 		if (valid & BIT(ind))
@@ -183,38 +188,30 @@ out:
 }
 EXPORT_SYMBOL(iwl_alloc_all);
 
-void iwl_hw_detect(struct iwl_priv *priv)
-{
-	priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
-	priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
-	pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
-}
-EXPORT_SYMBOL(iwl_hw_detect);
-
 /*
  * QoS support
 */
-static void iwl_update_qos(struct iwl_priv *priv)
+static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	priv->qos_data.def_qos_parm.qos_flags = 0;
+	ctx->qos_data.def_qos_parm.qos_flags = 0;
 
-	if (priv->qos_data.qos_active)
-		priv->qos_data.def_qos_parm.qos_flags |=
+	if (ctx->qos_data.qos_active)
+		ctx->qos_data.def_qos_parm.qos_flags |=
 			QOS_PARAM_FLG_UPDATE_EDCA_MSK;
 
-	if (priv->current_ht_config.is_ht)
-		priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+	if (ctx->ht.enabled)
+		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
 
 	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
-		      priv->qos_data.qos_active,
-		      priv->qos_data.def_qos_parm.qos_flags);
+		      ctx->qos_data.qos_active,
+		      ctx->qos_data.def_qos_parm.qos_flags);
 
-	iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
+	iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
 			       sizeof(struct iwl_qosparam_cmd),
-			       &priv->qos_data.def_qos_parm, NULL);
+			       &ctx->qos_data.def_qos_parm, NULL);
 }
 
 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
@@ -247,7 +244,11 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
 		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
 
 	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+	if (priv->cfg->ampdu_factor)
+		ht_info->ampdu_factor = priv->cfg->ampdu_factor;
 	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+	if (priv->cfg->ampdu_density)
+		ht_info->ampdu_density = priv->cfg->ampdu_density;
 
 	ht_info->mcs.rx_mask[0] = 0xFF;
 	if (rx_chains_num >= 2)
@@ -440,15 +441,15 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
 		priv->current_ht_config.single_chain_sufficient;
 }
 
-static u8 iwl_is_channel_extension(struct iwl_priv *priv,
-				   enum ieee80211_band band,
-				   u16 channel, u8 extension_chan_offset)
+static bool iwl_is_channel_extension(struct iwl_priv *priv,
+				     enum ieee80211_band band,
+				     u16 channel, u8 extension_chan_offset)
 {
 	const struct iwl_channel_info *ch_info;
 
 	ch_info = iwl_get_channel_info(priv, band, channel);
 	if (!is_channel_valid(ch_info))
-		return 0;
+		return false;
 
 	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
 		return !(ch_info->ht40_extension_channel &
@@ -457,31 +458,31 @@ static u8 iwl_is_channel_extension(struct iwl_priv *priv,
 		return !(ch_info->ht40_extension_channel &
 					IEEE80211_CHAN_NO_HT40MINUS);
 
-	return 0;
+	return false;
 }
 
-u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
-			  struct ieee80211_sta_ht_cap *sta_ht_inf)
+bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_sta_ht_cap *ht_cap)
 {
-	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+		return false;
 
-	if (!ht_conf->is_ht || !ht_conf->is_40mhz)
-		return 0;
+	/*
+	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
+	 * the bit will not set if it is pure 40MHz case
+	 */
+	if (ht_cap && !ht_cap->ht_supported)
+		return false;
 
-	/* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
-	 * the bit will not set if it is pure 40MHz case
-	 */
-	if (sta_ht_inf) {
-		if (!sta_ht_inf->ht_supported)
-			return 0;
-	}
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	if (priv->disable_ht40)
-		return 0;
+		return false;
 #endif
+
 	return iwl_is_channel_extension(priv, priv->band,
-			le16_to_cpu(priv->staging_rxon.channel),
-			ht_conf->extension_chan_offset);
+			le16_to_cpu(ctx->staging.channel),
+			ctx->ht.extension_chan_offset);
 }
 EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
 
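Callers now pass the RXON context explicitly; for the pure/mixed 40 MHz
decision in _iwl_set_rxon_ht() later in this patch, the station capability
pointer is simply NULL:

	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL))
		/* configure pure or mixed 40 MHz channel mode */;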
@@ -499,51 +500,64 @@ static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
 	return new_val;
 }
 
-void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
+int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	u64 tsf;
 	s32 interval_tm, rem;
-	unsigned long flags;
 	struct ieee80211_conf *conf = NULL;
 	u16 beacon_int;
+	struct ieee80211_vif *vif = ctx->vif;
 
 	conf = ieee80211_get_hw_conf(priv->hw);
 
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
-	priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
+	lockdep_assert_held(&priv->mutex);
 
-	beacon_int = vif->bss_conf.beacon_int;
+	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
 
-	if (vif->type == NL80211_IFTYPE_ADHOC) {
-		/* TODO: we need to get atim_window from upper stack
-		 * for now we set to 0 */
-		priv->rxon_timing.atim_window = 0;
-	} else {
-		priv->rxon_timing.atim_window = 0;
-	}
+	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
+	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
 
-	beacon_int = iwl_adjust_beacon_interval(beacon_int,
+	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+	/*
+	 * TODO: For IBSS we need to get atim_window from mac80211,
+	 *	 for now just always use 0
+	 */
+	ctx->timing.atim_window = 0;
+
+	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
+	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION)) {
+		ctx->timing.beacon_interval =
+			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
+		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
+	} else {
+		beacon_int = iwl_adjust_beacon_interval(beacon_int,
 			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
-	priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
+		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+	}
 
 	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
 	interval_tm = beacon_int * TIME_UNIT;
 	rem = do_div(tsf, interval_tm);
-	priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
 
-	spin_unlock_irqrestore(&priv->lock, flags);
 	IWL_DEBUG_ASSOC(priv,
 			"beacon interval %d beacon timer %d beacon tim %d\n",
-			le16_to_cpu(priv->rxon_timing.beacon_interval),
-			le32_to_cpu(priv->rxon_timing.beacon_init_val),
-			le16_to_cpu(priv->rxon_timing.atim_window));
+			le16_to_cpu(ctx->timing.beacon_interval),
+			le32_to_cpu(ctx->timing.beacon_init_val),
+			le16_to_cpu(ctx->timing.atim_window));
+
+	return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
+				sizeof(ctx->timing), &ctx->timing);
 }
-EXPORT_SYMBOL(iwl_setup_rxon_timing);
+EXPORT_SYMBOL(iwl_send_rxon_timing);
 
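The new dtim_period assignment leans on the GCC shorthand a ?: b, which
evaluates to a when a is non-zero and to b otherwise, so a missing (zero)
DTIM period from mac80211 falls back to 1; the equivalent long form:

	if (vif && vif->bss_conf.dtim_period)
		ctx->timing.dtim_period = vif->bss_conf.dtim_period;
	else
		ctx->timing.dtim_period = 1;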
-void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
+void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   int hw_decrypt)
 {
-	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
 
 	if (hw_decrypt)
 		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
@@ -560,11 +574,11 @@ EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
  * be #ifdef'd out once the driver is stable and folks aren't actively
  * making changes
  */
-int iwl_check_rxon_cmd(struct iwl_priv *priv)
+int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	int error = 0;
 	int counter = 1;
-	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
 
 	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
 		error |= le32_to_cpu(rxon->flags &
@@ -636,66 +650,83 @@ EXPORT_SYMBOL(iwl_check_rxon_cmd);
  * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
  * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
  */
-int iwl_full_rxon_required(struct iwl_priv *priv)
+int iwl_full_rxon_required(struct iwl_priv *priv,
+			   struct iwl_rxon_context *ctx)
 {
+	const struct iwl_rxon_cmd *staging = &ctx->staging;
+	const struct iwl_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond)							\
+	if ((cond)) {							\
+		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
+		return 1;						\
+	}
+
+#define CHK_NEQ(c1, c2)						\
+	if ((c1) != (c2)) {					\
+		IWL_DEBUG_INFO(priv, "need full RXON - "	\
+			       #c1 " != " #c2 " - %d != %d\n",	\
+			       (c1), (c2));			\
+		return 1;					\
+	}
 
 	/* These items are only settable from the full RXON command */
-	if (!(iwl_is_associated(priv)) ||
-	    compare_ether_addr(priv->staging_rxon.bssid_addr,
-			       priv->active_rxon.bssid_addr) ||
-	    compare_ether_addr(priv->staging_rxon.node_addr,
-			       priv->active_rxon.node_addr) ||
-	    compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
-			       priv->active_rxon.wlap_bssid_addr) ||
-	    (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
-	    (priv->staging_rxon.channel != priv->active_rxon.channel) ||
-	    (priv->staging_rxon.air_propagation !=
-	     priv->active_rxon.air_propagation) ||
-	    (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
-	     priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
-	    (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
-	     priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
-	    (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
-	     priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
-	    (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
-		return 1;
+	CHK(!iwl_is_associated_ctx(ctx));
+	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+	CHK(compare_ether_addr(staging->wlap_bssid_addr,
+			       active->wlap_bssid_addr));
+	CHK_NEQ(staging->dev_type, active->dev_type);
+	CHK_NEQ(staging->channel, active->channel);
+	CHK_NEQ(staging->air_propagation, active->air_propagation);
+	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+		active->ofdm_ht_single_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+		active->ofdm_ht_dual_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
+		active->ofdm_ht_triple_stream_basic_rates);
+	CHK_NEQ(staging->assoc_id, active->assoc_id);
 
 	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
 	 * be updated with the RXON_ASSOC command -- however only some
 	 * flag transitions are allowed using RXON_ASSOC */
 
 	/* Check if we are not switching bands */
-	if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
-	    (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
-		return 1;
+	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+		active->flags & RXON_FLG_BAND_24G_MSK);
 
 	/* Check if we are switching association toggle */
-	if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
-	    (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
-		return 1;
+	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+		active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
 
 	return 0;
 }
 EXPORT_SYMBOL(iwl_full_rxon_required);
 
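As a concrete illustration, the CHK_NEQ invocation for the channel expands
to roughly the following (macro stringification shown inline):

	if ((staging->channel) != (active->channel)) {
		IWL_DEBUG_INFO(priv, "need full RXON - "
			       "staging->channel != active->channel - %d != %d\n",
			       (staging->channel), (active->channel));
		return 1;
	}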
-u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
+u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx)
 {
 	/*
 	 * Assign the lowest rate -- should really get this from
 	 * the beacon skb from mac80211.
 	 */
-	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
+	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
 		return IWL_RATE_1M_PLCP;
 	else
 		return IWL_RATE_6M_PLCP;
 }
 EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
 
-void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+static void _iwl_set_rxon_ht(struct iwl_priv *priv,
+			     struct iwl_ht_config *ht_conf,
+			     struct iwl_rxon_context *ctx)
 {
-	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
 
-	if (!ht_conf->is_ht) {
+	if (!ctx->ht.enabled) {
 		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
 					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
 					RXON_FLG_HT40_PROT_MSK |
@@ -703,22 +734,22 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 		return;
 	}
 
-	/* FIXME: if the definition of ht_protection changed, the "translation"
+	/* FIXME: if the definition of ht.protection changed, the "translation"
 	 * will be needed for rxon->flags
 	 */
-	rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
+	rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
 
 	/* Set up channel bandwidth:
 	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
 	/* clear the HT channel mode before set the mode */
 	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
 			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
-	if (iwl_is_ht40_tx_allowed(priv, NULL)) {
+	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
 		/* pure ht40 */
-		if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
 			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
 			/* Note: control channel is opposite of extension channel */
-			switch (ht_conf->extension_chan_offset) {
+			switch (ctx->ht.extension_chan_offset) {
 			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
 				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
 				break;
@@ -728,7 +759,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 			}
 		} else {
 			/* Note: control channel is opposite of extension channel */
-			switch (ht_conf->extension_chan_offset) {
+			switch (ctx->ht.extension_chan_offset) {
 			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
 				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
 				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
@@ -749,12 +780,20 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 	}
 
 	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv);
+		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 
 	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
 			"extension channel offset 0x%x\n",
-			le32_to_cpu(rxon->flags), ht_conf->ht_protection,
-			ht_conf->extension_chan_offset);
+			le32_to_cpu(rxon->flags), ctx->ht.protection,
+			ctx->ht.extension_chan_offset);
+}
+
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+{
+	struct iwl_rxon_context *ctx;
+
+	for_each_context(priv, ctx)
+		_iwl_set_rxon_ht(priv, ht_conf, ctx);
 }
 EXPORT_SYMBOL(iwl_set_rxon_ht);
 
@@ -775,6 +814,14 @@ EXPORT_SYMBOL(iwl_set_rxon_ht);
  */
 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
 {
+	if (priv->cfg->advanced_bt_coexist && (priv->bt_full_concurrent ||
+	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
+		/*
+		 * only use chain 'A' in bt high traffic load or
+		 * full concurrency mode
+		 */
+		return IWL_NUM_RX_CHAINS_SINGLE;
+	}
 	/* # of Rx chains to use when expecting MIMO. */
 	if (is_single_rx_stream(priv))
 		return IWL_NUM_RX_CHAINS_SINGLE;
@@ -819,7 +866,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
  * Selects how many and which Rx receivers/antennas/chains to use.
  * This should not be used for scan command ... it puts data in wrong place.
  */
-void iwl_set_rxon_chain(struct iwl_priv *priv)
+void iwl_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	bool is_single = is_single_rx_stream(priv);
 	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
@@ -831,11 +878,20 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
 	 * Before first association, we assume all antennas are connected.
 	 * Just after first association, iwl_chain_noise_calibration()
 	 *   checks which antennas actually *are* connected. */
 	if (priv->chain_noise_data.active_chains)
 		active_chains = priv->chain_noise_data.active_chains;
 	else
 		active_chains = priv->hw_params.valid_rx_ant;
 
+	if (priv->cfg->advanced_bt_coexist && (priv->bt_full_concurrent ||
+	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
+		/*
+		 * only use chain 'A' in bt high traffic load or
+		 * full concurrency mode
+		 */
+		active_chains = first_antenna(active_chains);
+	}
+
 	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
 
 	/* How many receivers should we use? */
@@ -856,15 +912,15 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
 	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
 	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
 
-	priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
+	ctx->staging.rx_chain = cpu_to_le16(rx_chain);
 
 	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
-		priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
 	else
-		priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
 
 	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
-			priv->staging_rxon.rx_chain,
+			ctx->staging.rx_chain,
 			active_rx_cnt, idle_rx_cnt);
 
 	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
@@ -872,39 +928,41 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwl_set_rxon_chain);
 
-/* Return valid channel */
+/* Return valid, unused, channel for a passive scan to reset the RF */
 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 				 enum ieee80211_band band)
 {
 	const struct iwl_channel_info *ch_info;
 	int i;
 	u8 channel = 0;
+	u8 min, max;
+	struct iwl_rxon_context *ctx;
 
-	/* only scan single channel, good enough to reset the RF */
-	/* pick the first valid not in-use channel */
 	if (band == IEEE80211_BAND_5GHZ) {
-		for (i = 14; i < priv->channel_count; i++) {
-			if (priv->channel_info[i].channel !=
-			    le16_to_cpu(priv->staging_rxon.channel)) {
-				channel = priv->channel_info[i].channel;
-				ch_info = iwl_get_channel_info(priv,
-					band, channel);
-				if (is_channel_valid(ch_info))
-					break;
-			}
-		}
+		min = 14;
+		max = priv->channel_count;
 	} else {
-		for (i = 0; i < 14; i++) {
-			if (priv->channel_info[i].channel !=
-			    le16_to_cpu(priv->staging_rxon.channel)) {
-				channel =
-					priv->channel_info[i].channel;
-				ch_info = iwl_get_channel_info(priv,
-					band, channel);
-				if (is_channel_valid(ch_info))
-					break;
-			}
+		min = 0;
+		max = 14;
+	}
+
+	for (i = min; i < max; i++) {
+		bool busy = false;
+
+		for_each_context(priv, ctx) {
+			busy = priv->channel_info[i].channel ==
+				le16_to_cpu(ctx->staging.channel);
+			if (busy)
+				break;
 		}
+
+		if (busy)
+			continue;
+
+		channel = priv->channel_info[i].channel;
+		ch_info = iwl_get_channel_info(priv, band, channel);
+		if (is_channel_valid(ch_info))
+			break;
 	}
 
 	return channel;
@@ -912,35 +970,27 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 EXPORT_SYMBOL(iwl_get_single_channel_number);
 
 /**
- * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
- * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
- * @channel: Any channel valid for the requested phymode
+ * iwl_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
 
- * In addition to setting the staging RXON, priv->phymode is also set.
- *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the phymode
+ * in the staging RXON flag structure based on the ch->band
 */
-int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
+int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+			 struct iwl_rxon_context *ctx)
 {
 	enum ieee80211_band band = ch->band;
-	u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
+	u16 channel = ch->hw_value;
 
-	if (!iwl_get_channel_info(priv, band, channel)) {
-		IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
-			       channel, band);
-		return -EINVAL;
-	}
-
-	if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
+	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
 	    (priv->band == band))
 		return 0;
 
-	priv->staging_rxon.channel = cpu_to_le16(channel);
+	ctx->staging.channel = cpu_to_le16(channel);
 	if (band == IEEE80211_BAND_5GHZ)
-		priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
 	else
-		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
 
 	priv->band = band;
 
@@ -951,24 +1001,25 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
 EXPORT_SYMBOL(iwl_set_rxon_channel);
 
 void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
 			    enum ieee80211_band band,
 			    struct ieee80211_vif *vif)
 {
 	if (band == IEEE80211_BAND_5GHZ) {
-		priv->staging_rxon.flags &=
+		ctx->staging.flags &=
 		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
 		      | RXON_FLG_CCK_MSK);
-		priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
 	} else {
 		/* Copied from iwl_post_associate() */
 		if (vif && vif->bss_conf.use_short_slot)
-			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
+			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
 		else
-			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
-		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
-		priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
-		priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
 	}
 }
 EXPORT_SYMBOL(iwl_set_flags_for_band);
@@ -977,35 +1028,34 @@ EXPORT_SYMBOL(iwl_set_flags_for_band);
  * initialize rxon structure with default values from eeprom
  */
 void iwl_connection_init_rx_config(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif)
+				   struct iwl_rxon_context *ctx)
 {
 	const struct iwl_channel_info *ch_info;
-	enum nl80211_iftype type = NL80211_IFTYPE_STATION;
 
-	if (vif)
-		type = vif->type;
+	memset(&ctx->staging, 0, sizeof(ctx->staging));
 
-	memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
-
-	switch (type) {
+	if (!ctx->vif) {
+		ctx->staging.dev_type = ctx->unused_devtype;
+	} else switch (ctx->vif->type) {
 	case NL80211_IFTYPE_AP:
-		priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
+		ctx->staging.dev_type = ctx->ap_devtype;
 		break;
 
 	case NL80211_IFTYPE_STATION:
-		priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
-		priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+		ctx->staging.dev_type = ctx->station_devtype;
+		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
 		break;
 
 	case NL80211_IFTYPE_ADHOC:
-		priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
-		priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
-		priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
+		ctx->staging.dev_type = ctx->ibss_devtype;
+		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
 						  RXON_FILTER_ACCEPT_GRP_MSK;
 		break;
 
 	default:
-		IWL_ERR(priv, "Unsupported interface type %d\n", type);
+		IWL_ERR(priv, "Unsupported interface type %d\n",
+			ctx->vif->type);
 		break;
 	}
 
@@ -1013,37 +1063,36 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
 	/* TODO: Figure out when short_preamble would be set and cache from
 	 * that */
 	if (!hw_to_local(priv->hw)->short_preamble)
-		priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 	else
-		priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 #endif
 
 	ch_info = iwl_get_channel_info(priv, priv->band,
-				       le16_to_cpu(priv->active_rxon.channel));
+				       le16_to_cpu(ctx->active.channel));
 
 	if (!ch_info)
 		ch_info = &priv->channel_info[0];
 
-	priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
+	ctx->staging.channel = cpu_to_le16(ch_info->channel);
 	priv->band = ch_info->band;
 
-	iwl_set_flags_for_band(priv, priv->band, vif);
+	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
 
-	priv->staging_rxon.ofdm_basic_rates =
+	ctx->staging.ofdm_basic_rates =
 	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-	priv->staging_rxon.cck_basic_rates =
+	ctx->staging.cck_basic_rates =
 	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
 
 	/* clear both MIX and PURE40 mode flag */
-	priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
 					RXON_FLG_CHANNEL_MODE_PURE_40);
+	if (ctx->vif)
+		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
 
-	if (vif)
-		memcpy(priv->staging_rxon.node_addr, vif->addr, ETH_ALEN);
-
-	priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
-	priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
-	priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
 }
 EXPORT_SYMBOL(iwl_connection_init_rx_config);
 
@@ -1051,6 +1100,7 @@ void iwl_set_rate(struct iwl_priv *priv)
 {
 	const struct ieee80211_supported_band *hw = NULL;
 	struct ieee80211_rate *rate;
+	struct iwl_rxon_context *ctx;
 	int i;
 
 	hw = iwl_get_hw_mode(priv, priv->band);
@@ -1069,21 +1119,29 @@ void iwl_set_rate(struct iwl_priv *priv)
 
 	IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
 
-	priv->staging_rxon.cck_basic_rates =
-	    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+	for_each_context(priv, ctx) {
+		ctx->staging.cck_basic_rates =
+		   (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
 
-	priv->staging_rxon.ofdm_basic_rates =
-	   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+		ctx->staging.ofdm_basic_rates =
+		   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+	}
 }
 EXPORT_SYMBOL(iwl_set_rate);
 
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
 {
+	/*
+	 * MULTI-FIXME
+	 * See iwl_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
 	if (priv->switch_rxon.switch_in_progress) {
-		ieee80211_chswitch_done(priv->vif, is_success);
+		ieee80211_chswitch_done(ctx->vif, is_success);
 		mutex_lock(&priv->mutex);
 		priv->switch_rxon.switch_in_progress = false;
 		mutex_unlock(&priv->mutex);
@@ -1094,14 +1152,19 @@ EXPORT_SYMBOL(iwl_chswitch_done);
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
 	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+	/*
+	 * MULTI-FIXME
+	 * See iwl_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
 	if (priv->switch_rxon.switch_in_progress) {
 		if (!le32_to_cpu(csa->status) &&
 		    (csa->channel == priv->switch_rxon.channel)) {
 			rxon->channel = csa->channel;
-			priv->staging_rxon.channel = csa->channel;
+			ctx->staging.channel = csa->channel;
 			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
 				      le16_to_cpu(csa->channel));
 			iwl_chswitch_done(priv, true);
@@ -1115,9 +1178,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 EXPORT_SYMBOL(iwl_rx_csa);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_print_rx_config_cmd(struct iwl_priv *priv)
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+			     struct iwl_rxon_context *ctx)
 {
-	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
 
 	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
 	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
@@ -1157,7 +1221,8 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
 	priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
-		iwl_print_rx_config_cmd(priv);
+		iwl_print_rx_config_cmd(priv,
+					&priv->contexts[IWL_RXON_CTX_BSS]);
 #endif
 
 	wake_up_interruptible(&priv->wait_command_queue);
@@ -1328,25 +1393,6 @@ out:
 EXPORT_SYMBOL(iwl_apm_init);
 
 
-int iwl_set_hw_params(struct iwl_priv *priv)
-{
-	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
-	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
-	if (priv->cfg->mod_params->amsdu_size_8K)
-		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
-	else
-		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
-	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
-	if (priv->cfg->mod_params->disable_11n)
-		priv->cfg->sku &= ~IWL_SKU_N;
-
-	/* Device-specific setup */
-	return priv->cfg->ops->lib->set_hw_params(priv);
-}
-EXPORT_SYMBOL(iwl_set_hw_params);
-
 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
 {
 	int ret = 0;
@@ -1496,76 +1542,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
 }
 EXPORT_SYMBOL(iwl_send_statistics_request);
 
-void iwl_rf_kill_ct_config(struct iwl_priv *priv)
-{
-	struct iwl_ct_kill_config cmd;
-	struct iwl_ct_kill_throttling_config adv_cmd;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	priv->thermal_throttle.ct_kill_toggle = false;
-
-	if (priv->cfg->support_ct_kill_exit) {
-		adv_cmd.critical_temperature_enter =
-			cpu_to_le32(priv->hw_params.ct_kill_threshold);
-		adv_cmd.critical_temperature_exit =
-			cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
-
-		ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
-				       sizeof(adv_cmd), &adv_cmd);
-		if (ret)
-			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
-		else
-			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
-					"succeeded, "
-					"critical temperature enter is %d,"
-					"exit is %d\n",
-					priv->hw_params.ct_kill_threshold,
-					priv->hw_params.ct_kill_exit_threshold);
-	} else {
-		cmd.critical_temperature_R =
-			cpu_to_le32(priv->hw_params.ct_kill_threshold);
-
-		ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
-				       sizeof(cmd), &cmd);
-		if (ret)
-			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
-		else
-			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
-					"succeeded, "
-					"critical temperature is %d\n",
-					priv->hw_params.ct_kill_threshold);
-	}
-}
-EXPORT_SYMBOL(iwl_rf_kill_ct_config);
-
-
-/*
- * CARD_STATE_CMD
- *
- * Use: Sets the device's internal card state to enable, disable, or halt
- *
- * When in the 'enable' state the card operates as normal.
- * When in the 'disable' state, the card enters into a low power mode.
- * When in the 'halt' state, the card is shut down and must be fully
- * restarted to come back on.
- */
-int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
-{
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_CARD_STATE_CMD,
-		.len = sizeof(u32),
-		.data = &flags,
-		.flags = meta_flag,
-	};
-
-	return iwl_send_cmd(priv, &cmd);
-}
-
 void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
 			   struct iwl_rx_mem_buffer *rxb)
 {
@@ -1614,6 +1590,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 		    const struct ieee80211_tx_queue_params *params)
 {
 	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx;
 	unsigned long flags;
 	int q;
 
@@ -1633,13 +1610,21 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
-	priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
-	priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
-	priv->qos_data.def_qos_parm.ac[q].edca_txop =
-			cpu_to_le16((params->txop * 32));
+	/*
+	 * MULTI-FIXME
+	 * This may need to be done per interface in nl80211/cfg80211/mac80211.
+	 */
+	for_each_context(priv, ctx) {
+		ctx->qos_data.def_qos_parm.ac[q].cw_min =
+			cpu_to_le16(params->cw_min);
+		ctx->qos_data.def_qos_parm.ac[q].cw_max =
+			cpu_to_le16(params->cw_max);
+		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+			cpu_to_le16((params->txop * 32));
 
-	priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1648,21 +1633,30 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 }
 EXPORT_SYMBOL(iwl_mac_conf_tx);
 
+int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
+
 static void iwl_ht_conf(struct iwl_priv *priv,
 			struct ieee80211_vif *vif)
 {
 	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 	struct ieee80211_sta *sta;
 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 
 	IWL_DEBUG_MAC80211(priv, "enter:\n");
 
-	if (!ht_conf->is_ht)
+	if (!ctx->ht.enabled)
 		return;
 
-	ht_conf->ht_protection =
+	ctx->ht.protection =
 		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-	ht_conf->non_GF_STA_present =
+	ctx->ht.non_gf_sta_present =
 		!!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
 
 	ht_conf->single_chain_sufficient = false;
@@ -1706,18 +1700,20 @@ static void iwl_ht_conf(struct iwl_priv *priv,
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static inline void iwl_set_no_assoc(struct iwl_priv *priv)
+static inline void iwl_set_no_assoc(struct iwl_priv *priv,
+				    struct ieee80211_vif *vif)
 {
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
 	iwl_led_disassociate(priv);
 	/*
 	 * inform the ucode that there is no longer an
 	 * association and that no more packets should be
 	 * sent
 	 */
-	priv->staging_rxon.filter_flags &=
-		~RXON_FILTER_ASSOC_MSK;
-	priv->staging_rxon.assoc_id = 0;
-	iwlcore_commit_rxon(priv);
+	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	ctx->staging.assoc_id = 0;
+	iwlcore_commit_rxon(priv, ctx);
 }
 
 static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -1728,6 +1724,14 @@ static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 
+	lockdep_assert_held(&priv->mutex);
+
+	if (!priv->beacon_ctx) {
+		IWL_ERR(priv, "update beacon but no beacon context!\n");
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
 	if (!iwl_is_ready_rf(priv)) {
 		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
 		return -EIO;
@@ -1746,7 +1750,7 @@ static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	priv->cfg->ops->lib->post_associate(priv, priv->vif);
+	priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif);
 
 	return 0;
 }
@@ -1757,6 +1761,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 			  u32 changes)
 {
 	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 	int ret;
 
 	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
@@ -1770,11 +1775,23 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		unsigned long flags;
 
 		spin_lock_irqsave(&priv->lock, flags);
-		priv->qos_data.qos_active = bss_conf->qos;
-		iwl_update_qos(priv);
+		ctx->qos_data.qos_active = bss_conf->qos;
+		iwl_update_qos(priv, ctx);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
+	if (changes & BSS_CHANGED_BEACON_ENABLED) {
+		/*
+		 * the add_interface code must make sure we only ever
+		 * have a single interface that could be beaconing at
+		 * any time.
+		 */
+		if (vif->bss_conf.enable_beacon)
+			priv->beacon_ctx = ctx;
+		else
+			priv->beacon_ctx = NULL;
+	}
+
 	if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
 		dev_kfree_skb(priv->ibss_beacon);
 		priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
@@ -1801,13 +1818,13 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 
 	/* mac80211 only sets assoc when in STATION mode */
 	if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
-		memcpy(priv->staging_rxon.bssid_addr,
+		memcpy(ctx->staging.bssid_addr,
 		       bss_conf->bssid, ETH_ALEN);
 
 		/* currently needed in a few places */
 		memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
 	} else {
-		priv->staging_rxon.filter_flags &=
+		ctx->staging.filter_flags &=
 			~RXON_FILTER_ASSOC_MSK;
 	}
 
@@ -1830,21 +1847,21 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
 				   bss_conf->use_short_preamble);
 		if (bss_conf->use_short_preamble)
-			priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 		else
-			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 	}
 
 	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
 		IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
 		if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
-			priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
+			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
 		else
-			priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 		if (bss_conf->use_cts_prot)
-			priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
+			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
 		else
-			priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
 	}
 
 	if (changes & BSS_CHANGED_BASIC_RATES) {
@@ -1854,12 +1871,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		 * like this here:
 		 *
 		if (A-band)
-			priv->staging_rxon.ofdm_basic_rates =
+			ctx->staging.ofdm_basic_rates =
 				bss_conf->basic_rates;
 		else
-			priv->staging_rxon.ofdm_basic_rates =
+			ctx->staging.ofdm_basic_rates =
 				bss_conf->basic_rates >> 4;
-			priv->staging_rxon.cck_basic_rates =
+			ctx->staging.cck_basic_rates =
 				bss_conf->basic_rates & 0xF;
 		 */
 	}
@@ -1868,7 +1885,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		iwl_ht_conf(priv, vif);
 
 		if (priv->cfg->ops->hcmd->set_rxon_chain)
-			priv->cfg->ops->hcmd->set_rxon_chain(priv);
+			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 	}
 
 	if (changes & BSS_CHANGED_ASSOC) {
@@ -1881,29 +1898,29 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 			if (!iwl_is_rfkill(priv))
 				priv->cfg->ops->lib->post_associate(priv, vif);
 		} else
-			iwl_set_no_assoc(priv);
+			iwl_set_no_assoc(priv, vif);
 	}
 
-	if (changes && iwl_is_associated(priv) && bss_conf->aid) {
+	if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
 		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
 				   changes);
-		ret = iwl_send_rxon_assoc(priv);
+		ret = iwl_send_rxon_assoc(priv, ctx);
 		if (!ret) {
 			/* Sync active_rxon with latest change. */
-			memcpy((void *)&priv->active_rxon,
-			       &priv->staging_rxon,
+			memcpy((void *)&ctx->active,
+			       &ctx->staging,
 			       sizeof(struct iwl_rxon_cmd));
 		}
 	}
 
 	if (changes & BSS_CHANGED_BEACON_ENABLED) {
 		if (vif->bss_conf.enable_beacon) {
-			memcpy(priv->staging_rxon.bssid_addr,
+			memcpy(ctx->staging.bssid_addr,
 			       bss_conf->bssid, ETH_ALEN);
 			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
 			iwlcore_config_ap(priv, vif);
 		} else
-			iwl_set_no_assoc(priv);
+			iwl_set_no_assoc(priv, vif);
 	}
 
 	if (changes & BSS_CHANGED_IBSS) {
@@ -1915,6 +1932,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 					bss_conf->bssid);
 	}
 
+	if (changes & BSS_CHANGED_IDLE &&
+	    priv->cfg->ops->hcmd->set_pan_params) {
+		if (priv->cfg->ops->hcmd->set_pan_params(priv))
+			IWL_ERR(priv, "failed to update PAN params\n");
+	}
+
 	mutex_unlock(&priv->mutex);
 
 	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1923,17 +1946,21 @@ EXPORT_SYMBOL(iwl_bss_info_changed);
 
 static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
 {
-	iwl_connection_init_rx_config(priv, vif);
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+	iwl_connection_init_rx_config(priv, ctx);
 
 	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv);
+		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 
-	return iwlcore_commit_rxon(priv);
+	return iwlcore_commit_rxon(priv, ctx);
 }
 
 int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
 	struct iwl_priv *priv = hw->priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *tmp, *ctx = NULL;
 	int err = 0;
 
 	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
@@ -1946,23 +1973,60 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 		goto out;
 	}
 
-	if (priv->vif) {
-		IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
+	for_each_context(priv, tmp) {
+		u32 possible_modes =
+			tmp->interface_modes | tmp->exclusive_interface_modes;
+
+		if (tmp->vif) {
+			/* check if this busy context is exclusive */
+			if (tmp->exclusive_interface_modes &
+						BIT(tmp->vif->type)) {
+				err = -EINVAL;
+				goto out;
+			}
+			continue;
+		}
+
+		if (!(possible_modes & BIT(vif->type)))
+			continue;
+
+		/* have maybe usable context w/o interface */
+		ctx = tmp;
+		break;
+	}
+
+	if (!ctx) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
 
-	priv->vif = vif;
+	vif_priv->ctx = ctx;
+	ctx->vif = vif;
+	/*
+	 * This variable will be correct only when there's just
+	 * a single context, but all code using it is for hardware
+	 * that supports only one context.
+	 */
 	priv->iw_mode = vif->type;
 
 	err = iwl_set_mode(priv, vif);
 	if (err)
 		goto out_err;
 
+	if (priv->cfg->advanced_bt_coexist &&
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+		/*
+		 * pretend to have high BT traffic as long as we
+		 * are operating in IBSS mode, as this will cause
+		 * the rate scaling etc. to behave as intended.
+		 */
+		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+	}
+
 	goto out;
 
  out_err:
-	priv->vif = NULL;
+	ctx->vif = NULL;
 	priv->iw_mode = NL80211_IFTYPE_STATION;
  out:
 	mutex_unlock(&priv->mutex);
@@ -1976,26 +2040,36 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
 			      struct ieee80211_vif *vif)
 {
 	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 	bool scan_completed = false;
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 
 	mutex_lock(&priv->mutex);
 
-	if (iwl_is_ready_rf(priv)) {
-		iwl_scan_cancel_timeout(priv, 100);
-		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-		iwlcore_commit_rxon(priv);
-	}
-	if (priv->vif == vif) {
-		priv->vif = NULL;
-		if (priv->scan_vif == vif) {
-			scan_completed = true;
-			priv->scan_vif = NULL;
-			priv->scan_request = NULL;
-		}
-		memset(priv->bssid, 0, ETH_ALEN);
+	WARN_ON(ctx->vif != vif);
+	ctx->vif = NULL;
+
+	iwl_scan_cancel_timeout(priv, 100);
+	iwl_set_mode(priv, vif);
+
+	if (priv->scan_vif == vif) {
+		scan_completed = true;
+		priv->scan_vif = NULL;
+		priv->scan_request = NULL;
 	}
+
+	/*
+	 * When removing the IBSS interface, overwrite the
+	 * BT traffic load with the stored one from the last
+	 * notification, if any. If this is a device that
+	 * doesn't implement this, this has no effect since
+	 * both values are the same and zero.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC)
+		priv->bt_traffic_load = priv->notif_bt_traffic_load;
+
+	memset(priv->bssid, 0, ETH_ALEN);
 	mutex_unlock(&priv->mutex);
 
 	if (scan_completed)
@@ -2014,7 +2088,9 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 	struct iwl_priv *priv = hw->priv;
 	const struct iwl_channel_info *ch_info;
 	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = conf->channel;
 	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+	struct iwl_rxon_context *ctx;
 	unsigned long flags = 0;
 	int ret = 0;
 	u16 ch;
@@ -2023,7 +2099,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 	mutex_lock(&priv->mutex);
 
 	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
-					conf->channel->hw_value, changed);
+					channel->hw_value, changed);
 
 	if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
 			test_bit(STATUS_SCANNING, &priv->status))) {
@@ -2044,7 +2120,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 		 * configured.
 		 */
 		if (priv->cfg->ops->hcmd->set_rxon_chain)
-			priv->cfg->ops->hcmd->set_rxon_chain(priv);
+			for_each_context(priv, ctx)
+				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 	}
 
 	/* during scanning mac80211 will delay channel setting until
@@ -2054,8 +2131,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 		if (scan_active)
 			goto set_ch_out;
 
-		ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
-		ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
+		ch = channel->hw_value;
+		ch_info = iwl_get_channel_info(priv, channel->band, ch);
 		if (!is_channel_valid(ch_info)) {
 			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
 			ret = -EINVAL;
@@ -2064,42 +2141,49 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 
 		spin_lock_irqsave(&priv->lock, flags);
 
-		/* Configure HT40 channels */
-		ht_conf->is_ht = conf_is_ht(conf);
-		if (ht_conf->is_ht) {
-			if (conf_is_ht40_minus(conf)) {
-				ht_conf->extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-				ht_conf->is_40mhz = true;
-			} else if (conf_is_ht40_plus(conf)) {
-				ht_conf->extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-				ht_conf->is_40mhz = true;
-			} else {
-				ht_conf->extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_NONE;
-				ht_conf->is_40mhz = false;
-			}
-		} else
-			ht_conf->is_40mhz = false;
-		/* Default to no protection. Protection mode will later be set
-		 * from BSS config in iwl_ht_conf */
-		ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+		for_each_context(priv, ctx) {
+			/* Configure HT40 channels */
+			ctx->ht.enabled = conf_is_ht(conf);
+			if (ctx->ht.enabled) {
+				if (conf_is_ht40_minus(conf)) {
+					ctx->ht.extension_chan_offset =
+						IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+					ctx->ht.is_40mhz = true;
+				} else if (conf_is_ht40_plus(conf)) {
+					ctx->ht.extension_chan_offset =
+						IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+					ctx->ht.is_40mhz = true;
+				} else {
+					ctx->ht.extension_chan_offset =
+						IEEE80211_HT_PARAM_CHA_SEC_NONE;
+					ctx->ht.is_40mhz = false;
+				}
+			} else
+				ctx->ht.is_40mhz = false;
 
-		/* if we are switching from ht to 2.4 clear flags
-		 * from any ht related info since 2.4 does not
-		 * support ht */
-		if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
-			priv->staging_rxon.flags = 0;
+			/*
+			 * Default to no protection. Protection mode will
+			 * later be set from BSS config in iwl_ht_conf
+			 */
+			ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+			/* if we are switching from ht to 2.4 clear flags
+			 * from any ht related info since 2.4 does not
+			 * support ht */
+			if ((le16_to_cpu(ctx->staging.channel) != ch))
+				ctx->staging.flags = 0;
 
-		iwl_set_rxon_channel(priv, conf->channel);
+			iwl_set_rxon_channel(priv, channel, ctx);
 			iwl_set_rxon_ht(priv, ht_conf);
+
+			iwl_set_flags_for_band(priv, ctx, channel->band,
+					       ctx->vif);
+		}
 
-		iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
 		spin_unlock_irqrestore(&priv->lock, flags);
 
-		if (priv->cfg->ops->lib->update_bcast_station)
-			ret = priv->cfg->ops->lib->update_bcast_station(priv);
+		if (priv->cfg->ops->lib->update_bcast_stations)
+			ret = priv->cfg->ops->lib->update_bcast_stations(priv);
 
  set_ch_out:
 		/* The list of supported rates and rate mask can be different
@@ -2130,12 +2214,13 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 	if (scan_active)
 		goto out;
 
-	if (memcmp(&priv->active_rxon,
-		   &priv->staging_rxon, sizeof(priv->staging_rxon)))
-		iwlcore_commit_rxon(priv);
-	else
-		IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
-
+	for_each_context(priv, ctx) {
+		if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+			iwlcore_commit_rxon(priv, ctx);
+		else
+			IWL_DEBUG_INFO(priv,
+				"Not re-sending same RXON configuration.\n");
+	}
 
 out:
 	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2148,6 +2233,8 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = hw->priv;
 	unsigned long flags;
+	/* IBSS can only be the IWL_RXON_CTX_BSS context */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
 	mutex_lock(&priv->mutex);
 	IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -2178,8 +2265,8 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
 	 * clear RXON_FILTER_ASSOC_MSK bit
 	 */
 	iwl_scan_cancel_timeout(priv, 100);
-	priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-	iwlcore_commit_rxon(priv);
+	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	iwlcore_commit_rxon(priv, ctx);
 
 	iwl_set_rate(priv);
 
@@ -2588,7 +2675,7 @@ static void iwl_force_rf_reset(struct iwl_priv *priv)
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	if (!iwl_is_associated(priv)) {
+	if (!iwl_is_any_associated(priv)) {
 		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
 		return;
 	}
@@ -2714,10 +2801,14 @@ static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
2714 "queue %d, not read %d time\n", 2801 "queue %d, not read %d time\n",
2715 q->id, 2802 q->id,
2716 q->repeat_same_read_ptr); 2803 q->repeat_same_read_ptr);
2717 mod_timer(&priv->monitor_recover, jiffies + 2804 if (!priv->cfg->advanced_bt_coexist) {
2718 msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS)); 2805 mod_timer(&priv->monitor_recover,
2806 jiffies + msecs_to_jiffies(
2807 IWL_ONE_HUNDRED_MSECS));
2808 return 1;
2809 }
2719 } 2810 }
2720 return 1; 2811 return 0;
2721 } else { 2812 } else {
2722 q->last_read_ptr = q->read_ptr; 2813 q->last_read_ptr = q->read_ptr;
2723 q->repeat_same_read_ptr = 0; 2814 q->repeat_same_read_ptr = 0;
@@ -2735,25 +2826,27 @@ void iwl_bg_monitor_recover(unsigned long data)
 		return;
 
 	/* monitor and check for stuck cmd queue */
-	if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
+	if (iwl_check_stuck_queue(priv, priv->cmd_queue))
 		return;
 
 	/* monitor and check for other stuck queues */
-	if (iwl_is_associated(priv)) {
+	if (iwl_is_any_associated(priv)) {
 		for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
 			/* skip as we already checked the command queue */
-			if (cnt == IWL_CMD_QUEUE_NUM)
+			if (cnt == priv->cmd_queue)
 				continue;
 			if (iwl_check_stuck_queue(priv, cnt))
 				return;
 		}
 	}
-	/*
-	 * Reschedule the timer to occur in
-	 * priv->cfg->monitor_recover_period
-	 */
-	mod_timer(&priv->monitor_recover,
-		  jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
+	if (priv->cfg->monitor_recover_period) {
+		/*
+		 * Reschedule the timer to occur in
+		 * priv->cfg->monitor_recover_period
+		 */
+		mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
+			  priv->cfg->monitor_recover_period));
+	}
 }
 EXPORT_SYMBOL(iwl_bg_monitor_recover);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 5e6ee3da6bbf..f7b57ed84f66 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -88,11 +88,13 @@ struct iwl_cmd;
 #define IWL_CMD(x) case x: return #x
 
 struct iwl_hcmd_ops {
-	int (*rxon_assoc)(struct iwl_priv *priv);
-	int (*commit_rxon)(struct iwl_priv *priv);
-	void (*set_rxon_chain)(struct iwl_priv *priv);
+	int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+	int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+	void (*set_rxon_chain)(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx);
 	int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
 	void (*send_bt_config)(struct iwl_priv *priv);
+	int (*set_pan_params)(struct iwl_priv *priv);
 };
 
 struct iwl_hcmd_utils_ops {
@@ -136,6 +138,12 @@ struct iwl_temp_ops {
 	void (*set_calib_version)(struct iwl_priv *priv);
 };
 
+struct iwl_tt_ops {
+	bool (*lower_power_detection)(struct iwl_priv *priv);
+	u8 (*tt_power_mode)(struct iwl_priv *priv);
+	bool (*ct_kill_check)(struct iwl_priv *priv);
+};
+
 struct iwl_lib_ops {
 	/* set hw dependent parameters */
 	int (*set_hw_params)(struct iwl_priv *priv);
@@ -199,7 +207,7 @@ struct iwl_lib_ops {
 	/* station management */
 	int (*manage_ibss_station)(struct iwl_priv *priv,
 				   struct ieee80211_vif *vif, bool add);
-	int (*update_bcast_station)(struct iwl_priv *priv);
+	int (*update_bcast_stations)(struct iwl_priv *priv);
 	/* recover from tx queue stall */
 	void (*recover_from_tx_stall)(unsigned long data);
 	/* check for plcp health */
@@ -212,6 +220,9 @@ struct iwl_lib_ops {
 	void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
 
 	struct iwl_debugfs_ops debugfs_ops;
+
+	/* thermal throttling */
+	struct iwl_tt_ops tt_ops;
 };
 
 struct iwl_led_ops {
@@ -269,6 +280,14 @@ struct iwl_mod_params {
  * @chain_noise_calib_by_driver: driver has the capability to perform
  *	chain noise calibration operation
  * @scan_antennas: available antenna for scan operation
+ * @advanced_bt_coexist: support advanced bt coexist
+ * @bt_init_traffic_load: specify initial bt traffic load
+ * @bt_prio_boost: default bt priority boost value
+ * @need_dc_calib: need to perform init dc calibration
+ * @bt_statistics: use BT version of statistics notification
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @ampdu_factor: Maximum A-MPDU length factor
+ * @ampdu_density: Minimum A-MPDU spacing
  *
  * We enable the driver to be backward compatible wrt API version. The
  * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -337,8 +356,14 @@ struct iwl_cfg {
 	const bool chain_noise_calib_by_driver;
 	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
 	u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+	bool advanced_bt_coexist;
+	u8 bt_init_traffic_load;
+	u8 bt_prio_boost;
 	const bool need_dc_calib;
 	const bool bt_statistics;
+	u16 agg_time_limit;
+	u8 ampdu_factor;
+	u8 ampdu_density;
 };
 
 /***************************
@@ -347,38 +372,41 @@ struct iwl_cfg {
 
 struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
 		struct ieee80211_ops *hw_ops);
-void iwl_hw_detect(struct iwl_priv *priv);
 void iwl_activate_qos(struct iwl_priv *priv);
 int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 		    const struct ieee80211_tx_queue_params *params);
-void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
-int iwl_check_rxon_cmd(struct iwl_priv *priv);
-int iwl_full_rxon_required(struct iwl_priv *priv);
-void iwl_set_rxon_chain(struct iwl_priv *priv);
-int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
+int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
+void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   int hw_decrypt);
+int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwl_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+			 struct iwl_rxon_context *ctx);
 void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
 			    enum ieee80211_band band,
 			    struct ieee80211_vif *vif);
 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 				  enum ieee80211_band band);
 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
-u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
-			 struct ieee80211_sta_ht_cap *sta_ht_inf);
+bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_sta_ht_cap *ht_cap);
 void iwl_connection_init_rx_config(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif);
+				   struct iwl_rxon_context *ctx);
 void iwl_set_rate(struct iwl_priv *priv);
 int iwl_set_decrypted_flag(struct iwl_priv *priv,
 			   struct ieee80211_hdr *hdr,
 			   u32 decrypt_res,
 			   struct ieee80211_rx_status *stats);
 void iwl_irq_handle_error(struct iwl_priv *priv);
-int iwl_set_hw_params(struct iwl_priv *priv);
 void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
 void iwl_bss_info_changed(struct ieee80211_hw *hw,
 			  struct ieee80211_vif *vif,
 			  struct ieee80211_bss_conf *bss_conf,
 			  u32 changes);
-int iwl_commit_rxon(struct iwl_priv *priv);
+int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
 int iwl_mac_add_interface(struct ieee80211_hw *hw,
 			  struct ieee80211_vif *vif);
 void iwl_mac_remove_interface(struct ieee80211_hw *hw,
@@ -496,7 +524,8 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
 
 int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
 
-u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
+u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx);
 
 u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
 
@@ -527,7 +556,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
 int iwl_mac_hw_scan(struct ieee80211_hw *hw,
 		    struct ieee80211_vif *vif,
 		    struct cfg80211_scan_request *req);
-void iwl_bg_start_internal_scan(struct work_struct *work);
 void iwl_internal_short_hw_scan(struct iwl_priv *priv);
 int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
 u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -539,9 +567,6 @@ u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
 u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 			       enum ieee80211_band band,
 			       struct ieee80211_vif *vif);
-void iwl_bg_scan_check(struct work_struct *data);
-void iwl_bg_abort_scan(struct work_struct *work);
-void iwl_bg_scan_completed(struct work_struct *work);
 void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
 
 /* For faster active scanning, scan will move to the next channel if fewer than
@@ -580,8 +605,6 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
 
 int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
 
-int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
-			u8 meta_flag);
 
 /*****************************************************
  * PCI						     *
@@ -616,9 +639,11 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv,
 void iwl_dump_csr(struct iwl_priv *priv);
 int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
 #ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_print_rx_config_cmd(struct iwl_priv *priv);
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+			     struct iwl_rxon_context *ctx);
 #else
-static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv)
+static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+					   struct iwl_rxon_context *ctx)
 {
 }
 #endif
@@ -695,23 +720,24 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
 	return iwl_is_ready(priv);
 }
 
-extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
 extern void iwl_send_bt_config(struct iwl_priv *priv);
 extern int iwl_send_statistics_request(struct iwl_priv *priv,
 				       u8 flags, bool clear);
-extern int iwl_send_lq_cmd(struct iwl_priv *priv,
+extern int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		struct iwl_link_quality_cmd *lq, u8 flags, bool init);
 void iwl_apm_stop(struct iwl_priv *priv);
 int iwl_apm_init(struct iwl_priv *priv);
 
-void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif);
-static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
+int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+static inline int iwl_send_rxon_assoc(struct iwl_priv *priv,
+				      struct iwl_rxon_context *ctx)
 {
-	return priv->cfg->ops->hcmd->rxon_assoc(priv);
+	return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
 }
-static inline int iwlcore_commit_rxon(struct iwl_priv *priv)
+static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
+				      struct iwl_rxon_context *ctx)
 {
-	return priv->cfg->ops->hcmd->commit_rxon(priv);
+	return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
 }
 static inline void iwlcore_config_ap(struct iwl_priv *priv,
 				     struct ieee80211_vif *vif)
@@ -723,4 +749,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
 {
 	return priv->hw->wiphy->bands[band];
 }
+
+extern bool bt_coex_active;
+extern bool bt_siso_mode;
+
 #endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index e96a1bb12783..0ee8f516c4ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -467,8 +467,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
 	for (i = 0; i < supp_band->n_channels; i++)
 		pos += scnprintf(buf + pos, bufsz - pos,
 				"%d: %ddBm: BSS%s%s, %s.\n",
-				ieee80211_frequency_to_channel(
-				channels[i].center_freq),
+				channels[i].hw_value,
 				channels[i].max_power,
 				channels[i].flags & IEEE80211_CHAN_RADAR ?
 				" (IEEE 802.11h required)" : "",
@@ -491,8 +490,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
 	for (i = 0; i < supp_band->n_channels; i++)
 		pos += scnprintf(buf + pos, bufsz - pos,
 				"%d: %ddBm: BSS%s%s, %s.\n",
-				ieee80211_frequency_to_channel(
-				channels[i].center_freq),
+				channels[i].hw_value,
 				channels[i].max_power,
 				channels[i].flags & IEEE80211_CHAN_RADAR ?
 				" (IEEE 802.11h required)" : "",
@@ -645,19 +643,25 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
645 size_t count, loff_t *ppos) 643 size_t count, loff_t *ppos)
646{ 644{
647 struct iwl_priv *priv = file->private_data; 645 struct iwl_priv *priv = file->private_data;
646 struct iwl_rxon_context *ctx;
648 int pos = 0, i; 647 int pos = 0, i;
649 char buf[256]; 648 char buf[256 * NUM_IWL_RXON_CTX];
650 const size_t bufsz = sizeof(buf); 649 const size_t bufsz = sizeof(buf);
651 650
652 for (i = 0; i < AC_NUM; i++) { 651 for_each_context(priv, ctx) {
653 pos += scnprintf(buf + pos, bufsz - pos, 652 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
654 "\tcw_min\tcw_max\taifsn\ttxop\n"); 653 ctx->ctxid);
655 pos += scnprintf(buf + pos, bufsz - pos, 654 for (i = 0; i < AC_NUM; i++) {
655 pos += scnprintf(buf + pos, bufsz - pos,
656 "\tcw_min\tcw_max\taifsn\ttxop\n");
657 pos += scnprintf(buf + pos, bufsz - pos,
656 "AC[%d]\t%u\t%u\t%u\t%u\n", i, 658 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
657 priv->qos_data.def_qos_parm.ac[i].cw_min, 659 ctx->qos_data.def_qos_parm.ac[i].cw_min,
658 priv->qos_data.def_qos_parm.ac[i].cw_max, 660 ctx->qos_data.def_qos_parm.ac[i].cw_max,
659 priv->qos_data.def_qos_parm.ac[i].aifsn, 661 ctx->qos_data.def_qos_parm.ac[i].aifsn,
660 priv->qos_data.def_qos_parm.ac[i].edca_txop); 662 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
663 }
664 pos += scnprintf(buf + pos, bufsz - pos, "\n");
661 } 665 }
662 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 666 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
663} 667}
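The QoS dump now walks every enabled context via for_each_context() (defined in the iwl-dev.h hunk below) and prefixes each block with its ctxid; the on-stack buffer scales with NUM_IWL_RXON_CTX so the extra per-context output cannot overflow the old fixed 256 bytes.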
@@ -732,7 +736,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
732 return -EFAULT; 736 return -EFAULT;
733 if (sscanf(buf, "%d", &ht40) != 1) 737 if (sscanf(buf, "%d", &ht40) != 1)
734 return -EFAULT; 738 return -EFAULT;
735 if (!iwl_is_associated(priv)) 739 if (!iwl_is_any_associated(priv))
736 priv->disable_ht40 = ht40 ? true : false; 740 priv->disable_ht40 = ht40 ? true : false;
737 else { 741 else {
738 IWL_ERR(priv, "Sta associated with AP - " 742 IWL_ERR(priv, "Sta associated with AP - "
@@ -1321,7 +1325,8 @@ static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
1321 int len = 0; 1325 int len = 0;
1322 char buf[20]; 1326 char buf[20];
1323 1327
1324 len = sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags)); 1328 len = sprintf(buf, "0x%04X\n",
1329 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1325 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 1330 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1326} 1331}
1327 1332
@@ -1334,7 +1339,7 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
1334 char buf[20]; 1339 char buf[20];
1335 1340
1336 len = sprintf(buf, "0x%04X\n", 1341 len = sprintf(buf, "0x%04X\n",
1337 le32_to_cpu(priv->active_rxon.filter_flags)); 1342 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1338 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 1343 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1339} 1344}
1340 1345
@@ -1529,6 +1534,76 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1529 user_buf, count, ppos); 1534 user_buf, count, ppos);
1530} 1535}
1531 1536
1537static ssize_t iwl_dbgfs_monitor_period_write(struct file *file,
1538 const char __user *user_buf,
1539 size_t count, loff_t *ppos) {
1540
1541 struct iwl_priv *priv = file->private_data;
1542 char buf[8];
1543 int buf_size;
1544 int period;
1545
1546 memset(buf, 0, sizeof(buf));
1547 buf_size = min(count, sizeof(buf) - 1);
1548 if (copy_from_user(buf, user_buf, buf_size))
1549 return -EFAULT;
1550 if (sscanf(buf, "%d", &period) != 1)
1551 return -EINVAL;
1552 if (period < 0 || period > IWL_MAX_MONITORING_PERIOD)
1553 priv->cfg->monitor_recover_period = IWL_DEF_MONITORING_PERIOD;
1554 else
1555 priv->cfg->monitor_recover_period = period;
1556
1557 if (priv->cfg->monitor_recover_period)
1558 mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
1559 priv->cfg->monitor_recover_period));
1560 else
1561 del_timer_sync(&priv->monitor_recover);
1562 return count;
1563}
1564
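The new monitor_period debugfs file takes a decimal period in milliseconds: values outside 0..IWL_MAX_MONITORING_PERIOD (60000, per the iwl-dev.h hunk below) fall back to IWL_DEF_MONITORING_PERIOD, a nonzero period rearms the monitor_recover timer immediately, and writing 0 disables it via del_timer_sync(). Writing the string "5000" to the file, for instance, sets a five second recovery check.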
1565static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
1566 char __user *user_buf,
1567 size_t count, loff_t *ppos) {
1568
1569 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1570 int pos = 0;
1571 char buf[200];
1572 const size_t bufsz = sizeof(buf);
1573 ssize_t ret;
1574
1575 pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
1576 priv->bt_full_concurrent ? "full concurrency" : "3-wire");
1577 pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
1578 "last traffic notif: %d\n",
1579 priv->bt_status ? "On" : "Off", priv->notif_bt_traffic_load);
1580 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
1581 "sco_active: %d, kill_ack_mask: %x, "
1582 "kill_cts_mask: %x\n",
1583 priv->bt_ch_announce, priv->bt_sco_active,
1584 priv->kill_ack_mask, priv->kill_cts_mask);
1585
1586 pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
1587 switch (priv->bt_traffic_load) {
1588 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1589 pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n");
1590 break;
1591 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1592 pos += scnprintf(buf + pos, bufsz - pos, "High\n");
1593 break;
1594 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1595 pos += scnprintf(buf + pos, bufsz - pos, "Low\n");
1596 break;
1597 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1598 default:
1599 pos += scnprintf(buf + pos, bufsz - pos, "None\n");
1600 break;
1601 }
1602
1603 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1604 return ret;
1605}
1606
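Given the format strings above, a bt_traffic read would produce output along these lines (values illustrative only):

	BT in 3-wire mode
	BT status: Off, last traffic notif: 0
	ch_announcement: 1, sco_active: 0, kill_ack_mask: 0, kill_cts_mask: 0
	bluetooth traffic load: None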
1532DEBUGFS_READ_FILE_OPS(rx_statistics); 1607DEBUGFS_READ_FILE_OPS(rx_statistics);
1533DEBUGFS_READ_FILE_OPS(tx_statistics); 1608DEBUGFS_READ_FILE_OPS(tx_statistics);
1534DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 1609DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1552,6 +1627,8 @@ DEBUGFS_READ_FILE_OPS(rxon_flags);
1552DEBUGFS_READ_FILE_OPS(rxon_filter_flags); 1627DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1553DEBUGFS_WRITE_FILE_OPS(txfifo_flush); 1628DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
1554DEBUGFS_READ_FILE_OPS(ucode_bt_stats); 1629DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
1630DEBUGFS_WRITE_FILE_OPS(monitor_period);
1631DEBUGFS_READ_FILE_OPS(bt_traffic);
1555 1632
1556/* 1633/*
1557 * Create the debugfs files and directories 1634 * Create the debugfs files and directories
@@ -1623,6 +1700,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1623 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); 1700 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
1624 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 1701 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1625 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 1702 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1703 DEBUGFS_ADD_FILE(monitor_period, dir_debug, S_IWUSR);
1704 if (priv->cfg->advanced_bt_coexist)
1705 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
1626 if (priv->cfg->sensitivity_calib_by_driver) 1706 if (priv->cfg->sensitivity_calib_by_driver)
1627 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, 1707 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1628 &priv->disable_sens_cal); 1708 &priv->disable_sens_cal);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2e97cd2fa98a..4dd38b7b8b74 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -47,6 +47,7 @@
47#include "iwl-led.h" 47#include "iwl-led.h"
48#include "iwl-power.h" 48#include "iwl-power.h"
49#include "iwl-agn-rs.h" 49#include "iwl-agn-rs.h"
50#include "iwl-agn-tt.h"
50 51
51struct iwl_tx_queue; 52struct iwl_tx_queue;
52 53
@@ -143,6 +144,7 @@ struct iwl_queue {
143/* One for each TFD */ 144/* One for each TFD */
144struct iwl_tx_info { 145struct iwl_tx_info {
145 struct sk_buff *skb; 146 struct sk_buff *skb;
147 struct iwl_rxon_context *ctx;
146}; 148};
147 149
148/** 150/**
@@ -252,10 +254,14 @@ struct iwl_channel_info {
252 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; 254 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
253}; 255};
254 256
255#define IWL_TX_FIFO_BK 0 257#define IWL_TX_FIFO_BK 0 /* shared */
256#define IWL_TX_FIFO_BE 1 258#define IWL_TX_FIFO_BE 1
257#define IWL_TX_FIFO_VI 2 259#define IWL_TX_FIFO_VI 2 /* shared */
258#define IWL_TX_FIFO_VO 3 260#define IWL_TX_FIFO_VO 3
261#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
262#define IWL_TX_FIFO_BE_IPAN 4
263#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
264#define IWL_TX_FIFO_VO_IPAN 5
259#define IWL_TX_FIFO_UNUSED -1 265#define IWL_TX_FIFO_UNUSED -1
260 266
261/* Minimum number of queues. MAX_NUM is defined in hw specific files. 267/* Minimum number of queues. MAX_NUM is defined in hw specific files.
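Net effect of the FIFO table: the new PAN context reuses the BK and VI FIFOs (hence the "shared" annotations), while PAN's BE and VO traffic gets the dedicated FIFOs 4 and 5, so the two contexts only ever contend on the two shared classes.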
@@ -264,11 +270,17 @@ struct iwl_channel_info {
264#define IWL_MIN_NUM_QUEUES 10 270#define IWL_MIN_NUM_QUEUES 10
265 271
266/* 272/*
267 * Queue #4 is the command queue for 3945/4965/5x00/1000/6x00, 273 * Command queue depends on iPAN support.
268 * the driver maps it into the appropriate device FIFO for the
269 * uCode.
270 */ 274 */
271#define IWL_CMD_QUEUE_NUM 4 275#define IWL_DEFAULT_CMD_QUEUE_NUM 4
276#define IWL_IPAN_CMD_QUEUE_NUM 9
277
278/*
279 * This queue number is required for proper operation
280 * because the ucode will stop/start the scheduler as
281 * required.
282 */
283#define IWL_IPAN_MCAST_QUEUE 8
272 284
273/* Power management (not Tx power) structures */ 285/* Power management (not Tx power) structures */
274 286
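The command queue is no longer a compile-time constant: uCode images that advertise PAN support (IWL_UCODE_TLV_PAN, added later in this header) move it from 4 to 9, freeing queue 8 for iPAN multicast. A minimal sketch of the selection, assuming valid_contexts is populated from the TLV during firmware load:

	if (priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))
		priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;	/* 9 */
	else
		priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;	/* 4 */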
@@ -420,7 +432,7 @@ struct iwl_tid_data {
420}; 432};
421 433
422struct iwl_hw_key { 434struct iwl_hw_key {
423 enum ieee80211_key_alg alg; 435 u32 cipher;
424 int keylen; 436 int keylen;
425 u8 keyidx; 437 u8 keyidx;
426 u8 key[32]; 438 u8 key[32];
@@ -434,7 +446,13 @@ union iwl_ht_rate_supp {
434 }; 446 };
435}; 447};
436 448
437#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3) 449#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
450#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
451#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
452#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
453#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
454#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
455#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
438 456
439/* 457/*
440 * Maximal MPDU density for TX aggregation 458 * Maximal MPDU density for TX aggregation
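The 8K/16K/32K/64K names follow the 802.11n rule that the maximum A-MPDU length a receiver advertises is 2^(13 + factor) - 1 octets; factor 0x3 therefore means 2^16 - 1 = 65535 bytes, the 64K value that CFG_HT_RX_AMPDU_FACTOR_DEF keeps as the default.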
@@ -443,19 +461,17 @@ union iwl_ht_rate_supp {
443 * 6 - 8us density 461 * 6 - 8us density
444 * 7 - 16us density 462 * 7 - 16us density
445 */ 463 */
464#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
446#define CFG_HT_MPDU_DENSITY_4USEC (0x5) 465#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
466#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
467#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
447#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC 468#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
469#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
470#define CFG_HT_MPDU_DENSITY_MIN (0x1)
448 471
449struct iwl_ht_config { 472struct iwl_ht_config {
450 /* self configuration data */
451 bool is_ht;
452 bool is_40mhz;
453 bool single_chain_sufficient; 473 bool single_chain_sufficient;
454 enum ieee80211_smps_mode smps; /* current smps mode */ 474 enum ieee80211_smps_mode smps; /* current smps mode */
455 /* BSS related data */
456 u8 extension_chan_offset;
457 u8 ht_protection;
458 u8 non_GF_STA_present;
459}; 475};
460 476
461/* QoS structures */ 477/* QoS structures */
@@ -473,12 +489,13 @@ struct iwl_qos_info {
473struct iwl_station_entry { 489struct iwl_station_entry {
474 struct iwl_addsta_cmd sta; 490 struct iwl_addsta_cmd sta;
475 struct iwl_tid_data tid[MAX_TID_COUNT]; 491 struct iwl_tid_data tid[MAX_TID_COUNT];
476 u8 used; 492 u8 used, ctxid;
477 struct iwl_hw_key keyinfo; 493 struct iwl_hw_key keyinfo;
478 struct iwl_link_quality_cmd *lq; 494 struct iwl_link_quality_cmd *lq;
479}; 495};
480 496
481struct iwl_station_priv_common { 497struct iwl_station_priv_common {
498 struct iwl_rxon_context *ctx;
482 u8 sta_id; 499 u8 sta_id;
483}; 500};
484 501
@@ -507,6 +524,7 @@ struct iwl_station_priv {
507 * space for us to put data into. 524 * space for us to put data into.
508 */ 525 */
509struct iwl_vif_priv { 526struct iwl_vif_priv {
527 struct iwl_rxon_context *ctx;
510 u8 ibss_bssid_sta_id; 528 u8 ibss_bssid_sta_id;
511}; 529};
512 530
@@ -564,6 +582,7 @@ enum iwl_ucode_tlv_type {
564 IWL_UCODE_TLV_INIT_DATA = 4, 582 IWL_UCODE_TLV_INIT_DATA = 4,
565 IWL_UCODE_TLV_BOOT = 5, 583 IWL_UCODE_TLV_BOOT = 5,
566 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ 584 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
585 IWL_UCODE_TLV_PAN = 7,
567 IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, 586 IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
568 IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, 587 IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
569 IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, 588 IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
@@ -658,7 +677,6 @@ struct iwl_sensitivity_ranges {
658 * @rx_page_order: Rx buffer page order 677 * @rx_page_order: Rx buffer page order
659 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR 678 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
660 * @max_stations: 679 * @max_stations:
661 * @bcast_sta_id:
662 * @ht40_channel: is 40MHz width possible in band 2.4 680 * @ht40_channel: is 40MHz width possible in band 2.4
663 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) 681 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
664 * @sw_crypto: 0 for hw, 1 for sw 682 * @sw_crypto: 0 for hw, 1 for sw
@@ -682,7 +700,6 @@ struct iwl_hw_params {
682 u32 rx_page_order; 700 u32 rx_page_order;
683 u32 rx_wrt_ptr_reg; 701 u32 rx_wrt_ptr_reg;
684 u8 max_stations; 702 u8 max_stations;
685 u8 bcast_sta_id;
686 u8 ht40_channel; 703 u8 ht40_channel;
687 u8 max_beacon_itrvl; /* in 1024 ms */ 704 u8 max_beacon_itrvl; /* in 1024 ms */
688 u32 max_inst_size; 705 u32 max_inst_size;
@@ -1052,7 +1069,10 @@ struct iwl_event_log {
1052#define IWL_DEF_MONITORING_PERIOD (1000) 1069#define IWL_DEF_MONITORING_PERIOD (1000)
1053#define IWL_LONG_MONITORING_PERIOD (5000) 1070#define IWL_LONG_MONITORING_PERIOD (5000)
1054#define IWL_ONE_HUNDRED_MSECS (100) 1071#define IWL_ONE_HUNDRED_MSECS (100)
1055#define IWL_SIXTY_SECS (60000) 1072#define IWL_MAX_MONITORING_PERIOD (60000)
1073
1074/* BT Antenna Coupling Threshold (dB) */
1075#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
1056 1076
1057enum iwl_reset { 1077enum iwl_reset {
1058 IWL_RF_RESET = 0, 1078 IWL_RF_RESET = 0,
@@ -1082,6 +1102,57 @@ struct iwl_force_reset {
1082 */ 1102 */
1083#define IWLAGN_EXT_BEACON_TIME_POS 22 1103#define IWLAGN_EXT_BEACON_TIME_POS 22
1084 1104
1105enum iwl_rxon_context_id {
1106 IWL_RXON_CTX_BSS,
1107 IWL_RXON_CTX_PAN,
1108
1109 NUM_IWL_RXON_CTX
1110};
1111
1112struct iwl_rxon_context {
1113 struct ieee80211_vif *vif;
1114
1115 const u8 *ac_to_fifo;
1116 const u8 *ac_to_queue;
1117 u8 mcast_queue;
1118
1119 enum iwl_rxon_context_id ctxid;
1120
1121 u32 interface_modes, exclusive_interface_modes;
1122 u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
1123
1124 /*
1125 * We declare this const so it can only be
1126 * changed via explicit cast within the
1127 * routines that actually update the physical
1128 * hardware.
1129 */
1130 const struct iwl_rxon_cmd active;
1131 struct iwl_rxon_cmd staging;
1132
1133 struct iwl_rxon_time_cmd timing;
1134
1135 struct iwl_qos_info qos_data;
1136
1137 u8 bcast_sta_id, ap_sta_id;
1138
1139 u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
1140 u8 qos_cmd;
1141 u8 wep_key_cmd;
1142
1143 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1144 u8 key_mapping_keys;
1145
1146 __le32 station_flags;
1147
1148 struct {
1149 bool non_gf_sta_present;
1150 u8 protection;
1151 bool enabled, is_40mhz;
1152 u8 extension_chan_offset;
1153 } ht;
1154};
1155
1085struct iwl_priv { 1156struct iwl_priv {
1086 1157
1087 /* ieee device used by generic ieee processing code */ 1158 /* ieee device used by generic ieee processing code */
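State that used to exist exactly once in iwl_priv (staging/active RXON, RXON timing, QoS, WEP keys, HT shadow state) is now instantiated per context, and code paths carry a pointer into priv->contexts[]. A minimal sketch of the staging/commit discipline implied by the const qualifier on active:

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
	iwlcore_commit_rxon(priv, ctx);
	/*
	 * On success the commit path is expected to mirror staging into
	 * active, casting away the const:
	 *	memcpy((void *)&ctx->active, &ctx->staging,
	 *	       sizeof(ctx->active));
	 */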
@@ -1110,6 +1181,9 @@ struct iwl_priv {
1110 u32 ucode_beacon_time; 1181 u32 ucode_beacon_time;
1111 int missed_beacon_threshold; 1182 int missed_beacon_threshold;
1112 1183
1184 /* track IBSS manager (last beacon) status */
1185 u32 ibss_manager;
1186
1113 /* storing the jiffies when the plcp error rate is received */ 1187 /* storing the jiffies when the plcp error rate is received */
1114 unsigned long plcp_jiffies; 1188 unsigned long plcp_jiffies;
1115 1189
@@ -1155,6 +1229,15 @@ struct iwl_priv {
1155 u32 hw_wa_rev; 1229 u32 hw_wa_rev;
1156 u8 rev_id; 1230 u8 rev_id;
1157 1231
1232 /* microcode/device supports multiple contexts */
1233 u8 valid_contexts;
1234
1235 /* command queue number */
1236 u8 cmd_queue;
1237
1238 /* max number of station keys */
1239 u8 sta_key_max_num;
1240
1158 /* EEPROM MAC addresses */ 1241 /* EEPROM MAC addresses */
1159 struct mac_address addresses[2]; 1242 struct mac_address addresses[2];
1160 1243
@@ -1172,15 +1255,7 @@ struct iwl_priv {
1172 u8 ucode_write_complete; /* the image write is complete */ 1255 u8 ucode_write_complete; /* the image write is complete */
1173 char firmware_name[25]; 1256 char firmware_name[25];
1174 1257
1175 1258 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1176 struct iwl_rxon_time_cmd rxon_timing;
1177
1178 /* We declare this const so it can only be
1179 * changed via explicit cast within the
1180 * routines that actually update the physical
1181 * hardware */
1182 const struct iwl_rxon_cmd active_rxon;
1183 struct iwl_rxon_cmd staging_rxon;
1184 1259
1185 struct iwl_switch_rxon switch_rxon; 1260 struct iwl_switch_rxon switch_rxon;
1186 1261
@@ -1242,8 +1317,6 @@ struct iwl_priv {
1242 spinlock_t sta_lock; 1317 spinlock_t sta_lock;
1243 int num_stations; 1318 int num_stations;
1244 struct iwl_station_entry stations[IWL_STATION_COUNT]; 1319 struct iwl_station_entry stations[IWL_STATION_COUNT];
1245 struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
1246 u8 key_mapping_key;
1247 unsigned long ucode_key_table; 1320 unsigned long ucode_key_table;
1248 1321
1249 /* queue refcounts */ 1322 /* queue refcounts */
@@ -1268,7 +1341,6 @@ struct iwl_priv {
1268 1341
1269 /* Last Rx'd beacon timestamp */ 1342 /* Last Rx'd beacon timestamp */
1270 u64 timestamp; 1343 u64 timestamp;
1271 struct ieee80211_vif *vif;
1272 1344
1273 union { 1345 union {
1274#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE) 1346#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
@@ -1348,12 +1420,27 @@ struct iwl_priv {
1348#endif 1420#endif
1349 }; 1421 };
1350 1422
1423 /* bt coex */
1424 u8 bt_status;
1425 u8 bt_traffic_load, notif_bt_traffic_load;
1426 bool bt_ch_announce;
1427 bool bt_sco_active;
1428 bool bt_full_concurrent;
1429 bool bt_ant_couple_ok;
1430 __le32 kill_ack_mask;
1431 __le32 kill_cts_mask;
1432 __le16 bt_valid;
1433 u16 bt_on_thresh;
1434 u16 bt_duration;
1435 u16 dynamic_frag_thresh;
1436 u16 dynamic_agg_thresh;
1437 u8 bt_ci_compliance;
1438 struct work_struct bt_traffic_change_work;
1439
1351 struct iwl_hw_params hw_params; 1440 struct iwl_hw_params hw_params;
1352 1441
1353 u32 inta_mask; 1442 u32 inta_mask;
1354 1443
1355 struct iwl_qos_info qos_data;
1356
1357 struct workqueue_struct *workqueue; 1444 struct workqueue_struct *workqueue;
1358 1445
1359 struct work_struct restart; 1446 struct work_struct restart;
@@ -1361,11 +1448,15 @@ struct iwl_priv {
1361 struct work_struct rx_replenish; 1448 struct work_struct rx_replenish;
1362 struct work_struct abort_scan; 1449 struct work_struct abort_scan;
1363 struct work_struct beacon_update; 1450 struct work_struct beacon_update;
1451 struct iwl_rxon_context *beacon_ctx;
1452
1364 struct work_struct tt_work; 1453 struct work_struct tt_work;
1365 struct work_struct ct_enter; 1454 struct work_struct ct_enter;
1366 struct work_struct ct_exit; 1455 struct work_struct ct_exit;
1367 struct work_struct start_internal_scan; 1456 struct work_struct start_internal_scan;
1368 struct work_struct tx_flush; 1457 struct work_struct tx_flush;
1458 struct work_struct bt_full_concurrency;
1459 struct work_struct bt_runtime_config;
1369 1460
1370 struct tasklet_struct irq_tasklet; 1461 struct tasklet_struct irq_tasklet;
1371 1462
@@ -1453,10 +1544,34 @@ static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
1453 return NULL; 1544 return NULL;
1454} 1545}
1455 1546
1547static inline struct iwl_rxon_context *
1548iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1549{
1550 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1551
1552 return vif_priv->ctx;
1553}
1554
1555#define for_each_context(priv, ctx) \
1556 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
1557 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1558 if (priv->valid_contexts & BIT(ctx->ctxid))
1559
1560static inline int iwl_is_associated(struct iwl_priv *priv,
1561 enum iwl_rxon_context_id ctxid)
1562{
1563 return (priv->contexts[ctxid].active.filter_flags &
1564 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1565}
1566
1567static inline int iwl_is_any_associated(struct iwl_priv *priv)
1568{
1569 return iwl_is_associated(priv, IWL_RXON_CTX_BSS);
1570}
1456 1571
1457static inline int iwl_is_associated(struct iwl_priv *priv) 1572static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
1458{ 1573{
1459 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; 1574 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1460} 1575}
1461 1576
1462static inline int is_channel_valid(const struct iwl_channel_info *ch_info) 1577static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
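for_each_context() visits only the contexts flagged in valid_contexts, so firmware without PAN support transparently degenerates to the single BSS context; note the trailing if in the macro, which means an else placed directly after a for_each_context block would bind to it. A minimal usage sketch, with iwl_update_qos() standing in as a hypothetical per-context helper:

	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx) {
		if (!iwl_is_associated_ctx(ctx))
			continue;
		iwl_update_qos(priv, ctx);	/* hypothetical helper */
	}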
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 258d059ef41f..c373b53babea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -97,6 +97,17 @@ const char *get_cmd_string(u8 cmd)
97 IWL_CMD(REPLY_TX_POWER_DBM_CMD); 97 IWL_CMD(REPLY_TX_POWER_DBM_CMD);
98 IWL_CMD(TEMPERATURE_NOTIFICATION); 98 IWL_CMD(TEMPERATURE_NOTIFICATION);
99 IWL_CMD(TX_ANT_CONFIGURATION_CMD); 99 IWL_CMD(TX_ANT_CONFIGURATION_CMD);
100 IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
101 IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
102 IWL_CMD(REPLY_BT_COEX_PROT_ENV);
103 IWL_CMD(REPLY_WIPAN_PARAMS);
104 IWL_CMD(REPLY_WIPAN_RXON);
105 IWL_CMD(REPLY_WIPAN_RXON_TIMING);
106 IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
107 IWL_CMD(REPLY_WIPAN_QOS_PARAM);
108 IWL_CMD(REPLY_WIPAN_WEPKEY);
109 IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
110 IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
100 default: 111 default:
101 return "UNKNOWN"; 112 return "UNKNOWN";
102 113
@@ -229,7 +240,7 @@ cancel:
229 * in later, it will possibly set an invalid 240 * in later, it will possibly set an invalid
230 * address (cmd->meta.source). 241 * address (cmd->meta.source).
231 */ 242 */
232 priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_idx].flags &= 243 priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
233 ~CMD_WANT_SKB; 244 ~CMD_WANT_SKB;
234 } 245 }
235fail: 246fail:
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index cda6a94d6cc9..63c0ab46261f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -192,47 +192,6 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
192 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1); 192 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
193} 193}
194 194
195/* default Thermal Throttling transaction table
196 * Current state | Throttling Down | Throttling Up
197 *=============================================================================
198 * Condition Nxt State Condition Nxt State Condition Nxt State
199 *-----------------------------------------------------------------------------
200 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
201 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
202 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
203 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
204 *=============================================================================
205 */
206static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
207 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
208 {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
209 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
210};
211static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
212 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
213 {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
214 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
215};
216static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
217 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
218 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
219 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
220};
221static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
222 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
223 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
224 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
225};
226
227/* Advance Thermal Throttling default restriction table */
228static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
229 {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
230 {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
231 {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
232 {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
233};
234
235
236static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv, 195static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
237 struct iwl_powertable_cmd *cmd) 196 struct iwl_powertable_cmd *cmd)
238{ 197{
@@ -308,7 +267,6 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
308int iwl_power_update_mode(struct iwl_priv *priv, bool force) 267int iwl_power_update_mode(struct iwl_priv *priv, bool force)
309{ 268{
310 int ret = 0; 269 int ret = 0;
311 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
312 bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS; 270 bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
313 bool update_chains; 271 bool update_chains;
314 struct iwl_powertable_cmd cmd; 272 struct iwl_powertable_cmd cmd;
@@ -325,9 +283,13 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
325 else if (priv->cfg->supports_idle && 283 else if (priv->cfg->supports_idle &&
326 priv->hw->conf.flags & IEEE80211_CONF_IDLE) 284 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
327 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20); 285 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
328 else if (tt->state >= IWL_TI_1) 286 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
329 iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper); 287 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
330 else if (!enabled) 288 priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) {
289 /* in thermal throttling low power state */
290 iwl_static_sleep_cmd(priv, &cmd,
291 priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper);
292 } else if (!enabled)
331 iwl_power_sleep_cam_cmd(priv, &cmd); 293 iwl_power_sleep_cam_cmd(priv, &cmd);
332 else if (priv->power_data.debug_sleep_level_override >= 0) 294 else if (priv->power_data.debug_sleep_level_override >= 0)
333 iwl_static_sleep_cmd(priv, &cmd, 295 iwl_static_sleep_cmd(priv, &cmd,
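The direct tt->state test is gone: the power code now asks the device-specific layer through tt_ops, matching the move of all thermal throttling code out of iwl-power.c (the tables and handlers deleted below, plus the new iwl-agn-tt.h include seen in the iwl-dev.h diff). A plausible shape for the two hooks used here, sketched on the assumption that the real ops table may carry more members:

	struct iwl_tt_ops {
		bool (*lower_power_detection)(struct iwl_priv *priv);
		u8 (*tt_power_mode)(struct iwl_priv *priv);
	};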
@@ -367,592 +329,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
367} 329}
368EXPORT_SYMBOL(iwl_power_update_mode); 330EXPORT_SYMBOL(iwl_power_update_mode);
369 331
370bool iwl_ht_enabled(struct iwl_priv *priv)
371{
372 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
373 struct iwl_tt_restriction *restriction;
374
375 if (!priv->thermal_throttle.advanced_tt)
376 return true;
377 restriction = tt->restriction + tt->state;
378 return restriction->is_ht;
379}
380EXPORT_SYMBOL(iwl_ht_enabled);
381
382bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
383{
384 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
385 bool within_margin = false;
386
387 if (priv->cfg->temperature_kelvin)
388 temp = KELVIN_TO_CELSIUS(priv->temperature);
389
390 if (!priv->thermal_throttle.advanced_tt)
391 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
392 CT_KILL_THRESHOLD_LEGACY) ? true : false;
393 else
394 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
395 CT_KILL_THRESHOLD) ? true : false;
396 return within_margin;
397}
398
399enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
400{
401 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
402 struct iwl_tt_restriction *restriction;
403
404 if (!priv->thermal_throttle.advanced_tt)
405 return IWL_ANT_OK_MULTI;
406 restriction = tt->restriction + tt->state;
407 return restriction->tx_stream;
408}
409EXPORT_SYMBOL(iwl_tx_ant_restriction);
410
411enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
412{
413 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
414 struct iwl_tt_restriction *restriction;
415
416 if (!priv->thermal_throttle.advanced_tt)
417 return IWL_ANT_OK_MULTI;
418 restriction = tt->restriction + tt->state;
419 return restriction->rx_stream;
420}
421
422#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
423#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
424
425/*
426 * toggle the bit to wake up uCode and check the temperature
427 * if the temperature is below CT, uCode will stay awake and send card
428 * state notification with CT_KILL bit clear to inform Thermal Throttling
429 * Management to change state. Otherwise, uCode will go back to sleep
430 * without doing anything, driver should continue the 5 seconds timer
431 * to wake up uCode for temperature check until temperature drop below CT
432 */
433static void iwl_tt_check_exit_ct_kill(unsigned long data)
434{
435 struct iwl_priv *priv = (struct iwl_priv *)data;
436 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
437 unsigned long flags;
438
439 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
440 return;
441
442 if (tt->state == IWL_TI_CT_KILL) {
443 if (priv->thermal_throttle.ct_kill_toggle) {
444 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
445 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
446 priv->thermal_throttle.ct_kill_toggle = false;
447 } else {
448 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
449 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
450 priv->thermal_throttle.ct_kill_toggle = true;
451 }
452 iwl_read32(priv, CSR_UCODE_DRV_GP1);
453 spin_lock_irqsave(&priv->reg_lock, flags);
454 if (!iwl_grab_nic_access(priv))
455 iwl_release_nic_access(priv);
456 spin_unlock_irqrestore(&priv->reg_lock, flags);
457
458 /* Reschedule the ct_kill timer to occur in
459 * CT_KILL_EXIT_DURATION seconds to ensure we get a
460 * thermal update */
461 IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
462 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
463 CT_KILL_EXIT_DURATION * HZ);
464 }
465}
466
467static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
468 bool stop)
469{
470 if (stop) {
471 IWL_DEBUG_POWER(priv, "Stop all queues\n");
472 if (priv->mac80211_registered)
473 ieee80211_stop_queues(priv->hw);
474 IWL_DEBUG_POWER(priv,
475 "Schedule 5 seconds CT_KILL Timer\n");
476 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
477 CT_KILL_EXIT_DURATION * HZ);
478 } else {
479 IWL_DEBUG_POWER(priv, "Wake all queues\n");
480 if (priv->mac80211_registered)
481 ieee80211_wake_queues(priv->hw);
482 }
483}
484
485static void iwl_tt_ready_for_ct_kill(unsigned long data)
486{
487 struct iwl_priv *priv = (struct iwl_priv *)data;
488 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
489
490 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
491 return;
492
493 /* temperature timer expired, ready to go into CT_KILL state */
494 if (tt->state != IWL_TI_CT_KILL) {
495 IWL_DEBUG_POWER(priv, "entering CT_KILL state when temperature timer expired\n");
496 tt->state = IWL_TI_CT_KILL;
497 set_bit(STATUS_CT_KILL, &priv->status);
498 iwl_perform_ct_kill_task(priv, true);
499 }
500}
501
502static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
503{
504 IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
505 /* make request to retrieve statistics information */
506 iwl_send_statistics_request(priv, CMD_SYNC, false);
507 /* Reschedule the ct_kill wait timer */
508 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
509 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
510}
511
512#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
513#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
514#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
515
516/*
517 * Legacy thermal throttling
518 * 1) Avoid NIC destruction due to high temperatures
519 * Chip will identify dangerously high temperatures that can
520 * harm the device and will power down
521 * 2) Avoid the NIC power down due to high temperature
522 * Throttle early enough to lower the power consumption before
523 * drastic steps are needed
524 */
525static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
526{
527 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
528 enum iwl_tt_state old_state;
529
530#ifdef CONFIG_IWLWIFI_DEBUG
531 if ((tt->tt_previous_temp) &&
532 (temp > tt->tt_previous_temp) &&
533 ((temp - tt->tt_previous_temp) >
534 IWL_TT_INCREASE_MARGIN)) {
535 IWL_DEBUG_POWER(priv,
536 "Temperature increase %d degree Celsius\n",
537 (temp - tt->tt_previous_temp));
538 }
539#endif
540 old_state = tt->state;
541 /* in Celsius */
542 if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
543 tt->state = IWL_TI_CT_KILL;
544 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
545 tt->state = IWL_TI_2;
546 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
547 tt->state = IWL_TI_1;
548 else
549 tt->state = IWL_TI_0;
550
551#ifdef CONFIG_IWLWIFI_DEBUG
552 tt->tt_previous_temp = temp;
553#endif
554 /* stop ct_kill_waiting_tm timer */
555 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
556 if (tt->state != old_state) {
557 switch (tt->state) {
558 case IWL_TI_0:
559 /*
560 * When the system is ready to go back to IWL_TI_0
561 * we only have to call iwl_power_update_mode() to
562 * do so.
563 */
564 break;
565 case IWL_TI_1:
566 tt->tt_power_mode = IWL_POWER_INDEX_3;
567 break;
568 case IWL_TI_2:
569 tt->tt_power_mode = IWL_POWER_INDEX_4;
570 break;
571 default:
572 tt->tt_power_mode = IWL_POWER_INDEX_5;
573 break;
574 }
575 mutex_lock(&priv->mutex);
576 if (old_state == IWL_TI_CT_KILL)
577 clear_bit(STATUS_CT_KILL, &priv->status);
578 if (tt->state != IWL_TI_CT_KILL &&
579 iwl_power_update_mode(priv, true)) {
580 /* TT state not updated
581 * try again during next temperature read
582 */
583 if (old_state == IWL_TI_CT_KILL)
584 set_bit(STATUS_CT_KILL, &priv->status);
585 tt->state = old_state;
586 IWL_ERR(priv, "Cannot update power mode, "
587 "TT state not updated\n");
588 } else {
589 if (tt->state == IWL_TI_CT_KILL) {
590 if (force) {
591 set_bit(STATUS_CT_KILL, &priv->status);
592 iwl_perform_ct_kill_task(priv, true);
593 } else {
594 iwl_prepare_ct_kill_task(priv);
595 tt->state = old_state;
596 }
597 } else if (old_state == IWL_TI_CT_KILL &&
598 tt->state != IWL_TI_CT_KILL)
599 iwl_perform_ct_kill_task(priv, false);
600 IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
601 tt->state);
602 IWL_DEBUG_POWER(priv, "Power Index change to %u\n",
603 tt->tt_power_mode);
604 }
605 mutex_unlock(&priv->mutex);
606 }
607}
608
609/*
610 * Advance thermal throttling
611 * 1) Avoid NIC destruction due to high temperatures
612 * Chip will identify dangerously high temperatures that can
613 * harm the device and will power down
614 * 2) Avoid the NIC power down due to high temperature
615 * Throttle early enough to lower the power consumption before
616 * drastic steps are needed
617 * Actions include relaxing the power down sleep thresholds and
618 * decreasing the number of TX streams
619 * 3) Avoid throughput performance impact as much as possible
620 *
621 *=============================================================================
622 * Condition Nxt State Condition Nxt State Condition Nxt State
623 *-----------------------------------------------------------------------------
624 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
625 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
626 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
627 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
628 *=============================================================================
629 */
630static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
631{
632 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
633 int i;
634 bool changed = false;
635 enum iwl_tt_state old_state;
636 struct iwl_tt_trans *transaction;
637
638 old_state = tt->state;
639 for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
640 /* based on the current TT state,
641 * find the curresponding transaction table
642 * each table has (IWL_TI_STATE_MAX - 1) entries
643 * tt->transaction + ((old_state * (IWL_TI_STATE_MAX - 1))
644 * will advance to the correct table.
645 * then based on the current temperature
646 * find the next state need to transaction to
647 * go through all the possible (IWL_TI_STATE_MAX - 1) entries
648 * in the current table to see if transaction is needed
649 */
650 transaction = tt->transaction +
651 ((old_state * (IWL_TI_STATE_MAX - 1)) + i);
652 if (temp >= transaction->tt_low &&
653 temp <= transaction->tt_high) {
654#ifdef CONFIG_IWLWIFI_DEBUG
655 if ((tt->tt_previous_temp) &&
656 (temp > tt->tt_previous_temp) &&
657 ((temp - tt->tt_previous_temp) >
658 IWL_TT_INCREASE_MARGIN)) {
659 IWL_DEBUG_POWER(priv,
660 "Temperature increase %d "
661 "degree Celsius\n",
662 (temp - tt->tt_previous_temp));
663 }
664 tt->tt_previous_temp = temp;
665#endif
666 if (old_state !=
667 transaction->next_state) {
668 changed = true;
669 tt->state =
670 transaction->next_state;
671 }
672 break;
673 }
674 }
675 /* stop ct_kill_waiting_tm timer */
676 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
677 if (changed) {
678 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
679
680 if (tt->state >= IWL_TI_1) {
681 /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
682 tt->tt_power_mode = IWL_POWER_INDEX_5;
683 if (!iwl_ht_enabled(priv))
684 /* disable HT */
685 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
686 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
687 RXON_FLG_HT40_PROT_MSK |
688 RXON_FLG_HT_PROT_MSK);
689 else {
690 /* check HT capability and set
691 * according to the system HT capability
692 * in case get disabled before */
693 iwl_set_rxon_ht(priv, &priv->current_ht_config);
694 }
695
696 } else {
697 /*
698 * restore system power setting -- it will be
699 * recalculated automatically.
700 */
701
702 /* check HT capability and set
703 * according to the system HT capability
704 * in case get disabled before */
705 iwl_set_rxon_ht(priv, &priv->current_ht_config);
706 }
707 mutex_lock(&priv->mutex);
708 if (old_state == IWL_TI_CT_KILL)
709 clear_bit(STATUS_CT_KILL, &priv->status);
710 if (tt->state != IWL_TI_CT_KILL &&
711 iwl_power_update_mode(priv, true)) {
712 /* TT state not updated
713 * try again during next temperature read
714 */
715 IWL_ERR(priv, "Cannot update power mode, "
716 "TT state not updated\n");
717 if (old_state == IWL_TI_CT_KILL)
718 set_bit(STATUS_CT_KILL, &priv->status);
719 tt->state = old_state;
720 } else {
721 IWL_DEBUG_POWER(priv,
722 "Thermal Throttling to new state: %u\n",
723 tt->state);
724 if (old_state != IWL_TI_CT_KILL &&
725 tt->state == IWL_TI_CT_KILL) {
726 if (force) {
727 IWL_DEBUG_POWER(priv,
728 "Enter IWL_TI_CT_KILL\n");
729 set_bit(STATUS_CT_KILL, &priv->status);
730 iwl_perform_ct_kill_task(priv, true);
731 } else {
732 iwl_prepare_ct_kill_task(priv);
733 tt->state = old_state;
734 }
735 } else if (old_state == IWL_TI_CT_KILL &&
736 tt->state != IWL_TI_CT_KILL) {
737 IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
738 iwl_perform_ct_kill_task(priv, false);
739 }
740 }
741 mutex_unlock(&priv->mutex);
742 }
743}
744
745/* Card State Notification indicated reach critical temperature
746 * if PSP not enable, no Thermal Throttling function will be performed
747 * just set the GP1 bit to acknowledge the event
748 * otherwise, go into IWL_TI_CT_KILL state
749 * since Card State Notification will not provide any temperature reading
750 * for Legacy mode
751 * so just pass the CT_KILL temperature to iwl_legacy_tt_handler()
752 * for advance mode
753 * pass CT_KILL_THRESHOLD+1 to make sure move into IWL_TI_CT_KILL state
754 */
755static void iwl_bg_ct_enter(struct work_struct *work)
756{
757 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
758 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
759
760 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
761 return;
762
763 if (!iwl_is_ready(priv))
764 return;
765
766 if (tt->state != IWL_TI_CT_KILL) {
767 IWL_ERR(priv, "Device reached critical temperature "
768 "- ucode going to sleep!\n");
769 if (!priv->thermal_throttle.advanced_tt)
770 iwl_legacy_tt_handler(priv,
771 IWL_MINIMAL_POWER_THRESHOLD,
772 true);
773 else
774 iwl_advance_tt_handler(priv,
775 CT_KILL_THRESHOLD + 1, true);
776 }
777}
778
779/* Card State Notification indicated out of critical temperature
780 * since Card State Notification will not provide any temperature reading
781 * so pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature
782 * to iwl_legacy_tt_handler() to get out of IWL_CT_KILL state
783 */
784static void iwl_bg_ct_exit(struct work_struct *work)
785{
786 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
787 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
788
789 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
790 return;
791
792 if (!iwl_is_ready(priv))
793 return;
794
795 /* stop ct_kill_exit_tm timer */
796 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
797
798 if (tt->state == IWL_TI_CT_KILL) {
799 IWL_ERR(priv,
800 "Device temperature below critical"
801 "- ucode awake!\n");
802 /*
803 * exit from CT_KILL state
804 * reset the current temperature reading
805 */
806 priv->temperature = 0;
807 if (!priv->thermal_throttle.advanced_tt)
808 iwl_legacy_tt_handler(priv,
809 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
810 true);
811 else
812 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
813 true);
814 }
815}
816
817void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
818{
819 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
820 return;
821
822 IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
823 queue_work(priv->workqueue, &priv->ct_enter);
824}
825EXPORT_SYMBOL(iwl_tt_enter_ct_kill);
826
827void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
828{
829 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
830 return;
831
832 IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
833 queue_work(priv->workqueue, &priv->ct_exit);
834}
835EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
836
837static void iwl_bg_tt_work(struct work_struct *work)
838{
839 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
840 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
841
842 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
843 return;
844
845 if (priv->cfg->temperature_kelvin)
846 temp = KELVIN_TO_CELSIUS(priv->temperature);
847
848 if (!priv->thermal_throttle.advanced_tt)
849 iwl_legacy_tt_handler(priv, temp, false);
850 else
851 iwl_advance_tt_handler(priv, temp, false);
852}
853
854void iwl_tt_handler(struct iwl_priv *priv)
855{
856 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
857 return;
858
859 IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
860 queue_work(priv->workqueue, &priv->tt_work);
861}
862EXPORT_SYMBOL(iwl_tt_handler);
863
864/* Thermal throttling initialization
865 * For advance thermal throttling:
866 * Initialize Thermal Index and temperature threshold table
867 * Initialize thermal throttling restriction table
868 */
869void iwl_tt_initialize(struct iwl_priv *priv)
870{
871 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
872 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
873 struct iwl_tt_trans *transaction;
874
875 IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
876
877 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
878
879 tt->state = IWL_TI_0;
880 init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
881 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
882 priv->thermal_throttle.ct_kill_exit_tm.function =
883 iwl_tt_check_exit_ct_kill;
884 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
885 priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv;
886 priv->thermal_throttle.ct_kill_waiting_tm.function =
887 iwl_tt_ready_for_ct_kill;
888 /* setup deferred ct kill work */
889 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
890 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
891 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
892
893 if (priv->cfg->adv_thermal_throttle) {
894 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
895 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
896 IWL_TI_STATE_MAX, GFP_KERNEL);
897 tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
898 IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
899 GFP_KERNEL);
900 if (!tt->restriction || !tt->transaction) {
901 IWL_ERR(priv, "Fallback to Legacy Throttling\n");
902 priv->thermal_throttle.advanced_tt = false;
903 kfree(tt->restriction);
904 tt->restriction = NULL;
905 kfree(tt->transaction);
906 tt->transaction = NULL;
907 } else {
908 transaction = tt->transaction +
909 (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
910 memcpy(transaction, &tt_range_0[0], size);
911 transaction = tt->transaction +
912 (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
913 memcpy(transaction, &tt_range_1[0], size);
914 transaction = tt->transaction +
915 (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
916 memcpy(transaction, &tt_range_2[0], size);
917 transaction = tt->transaction +
918 (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
919 memcpy(transaction, &tt_range_3[0], size);
920 size = sizeof(struct iwl_tt_restriction) *
921 IWL_TI_STATE_MAX;
922 memcpy(tt->restriction,
923 &restriction_range[0], size);
924 priv->thermal_throttle.advanced_tt = true;
925 }
926 } else {
927 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
928 priv->thermal_throttle.advanced_tt = false;
929 }
930}
931EXPORT_SYMBOL(iwl_tt_initialize);
932
933/* cleanup thermal throttling management related memory and timer */
934void iwl_tt_exit(struct iwl_priv *priv)
935{
936 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
937
938 /* stop ct_kill_exit_tm timer if activated */
939 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
940 /* stop ct_kill_waiting_tm timer if activated */
941 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
942 cancel_work_sync(&priv->tt_work);
943 cancel_work_sync(&priv->ct_enter);
944 cancel_work_sync(&priv->ct_exit);
945
946 if (priv->thermal_throttle.advanced_tt) {
947 /* free advance thermal throttling memory */
948 kfree(tt->restriction);
949 tt->restriction = NULL;
950 kfree(tt->transaction);
951 tt->transaction = NULL;
952 }
953}
954EXPORT_SYMBOL(iwl_tt_exit);
955
956/* initialize to default */ 332/* initialize to default */
957void iwl_power_initialize(struct iwl_priv *priv) 333void iwl_power_initialize(struct iwl_priv *priv)
958{ 334{
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 5db91c10dcc8..df81565a7cc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -30,90 +30,6 @@
30 30
31#include "iwl-commands.h" 31#include "iwl-commands.h"
32 32
33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
37
38enum iwl_antenna_ok {
39 IWL_ANT_OK_NONE,
40 IWL_ANT_OK_SINGLE,
41 IWL_ANT_OK_MULTI,
42};
43
44/* Thermal Throttling State Machine states */
45enum iwl_tt_state {
46 IWL_TI_0, /* normal temperature, system power state */
47 IWL_TI_1, /* high temperature detect, low power state */
48 IWL_TI_2, /* higher temperature detected, lower power state */
49 IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */
50 IWL_TI_STATE_MAX
51};
52
53/**
54 * struct iwl_tt_restriction - Thermal Throttling restriction table
55 * @tx_stream: number of tx stream allowed
56 * @is_ht: ht enable/disable
57 * @rx_stream: number of rx stream allowed
58 *
59 * This table is used by advance thermal throttling management
60 * based on the current thermal throttling state, and determines
61 * the number of tx/rx streams and the status of HT operation.
62 */
63struct iwl_tt_restriction {
64 enum iwl_antenna_ok tx_stream;
65 enum iwl_antenna_ok rx_stream;
66 bool is_ht;
67};
68
69/**
70 * struct iwl_tt_trans - Thermal Throttling transaction table
71 * @next_state: next thermal throttling mode
72 * @tt_low: low temperature threshold to change state
73 * @tt_high: high temperature threshold to change state
74 *
75 * This is used by the advanced thermal throttling algorithm
76 * to determine the next thermal state to go based on the
77 * current temperature.
78 */
79struct iwl_tt_trans {
80 enum iwl_tt_state next_state;
81 u32 tt_low;
82 u32 tt_high;
83};
84
85/**
86 * struct iwl_tt_mgnt - Thermal Throttling Management structure
87 * @advanced_tt: advanced thermal throttle required
88 * @state: current Thermal Throttling state
89 * @tt_power_mode: Thermal Throttling power mode index
90 * being used to set power level when
91 * when thermal throttling state != IWL_TI_0
92 * the tt_power_mode should set to different
93 * power mode based on the current tt state
94 * @tt_previous_temperature: last measured temperature
95 * @iwl_tt_restriction: ptr to restriction tbl, used by advance
96 * thermal throttling to determine how many tx/rx streams
97 * should be used in tt state; and can HT be enabled or not
98 * @iwl_tt_trans: ptr to adv trans table, used by advance thermal throttling
99 * state transaction
100 * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature
101 * @ct_kill_exit_tm: timer to exit thermal kill
102 */
103struct iwl_tt_mgmt {
104 enum iwl_tt_state state;
105 bool advanced_tt;
106 u8 tt_power_mode;
107 bool ct_kill_toggle;
108#ifdef CONFIG_IWLWIFI_DEBUG
109 s32 tt_previous_temp;
110#endif
111 struct iwl_tt_restriction *restriction;
112 struct iwl_tt_trans *transaction;
113 struct timer_list ct_kill_exit_tm;
114 struct timer_list ct_kill_waiting_tm;
115};
116
117enum iwl_power_level { 33enum iwl_power_level {
118 IWL_POWER_INDEX_1, 34 IWL_POWER_INDEX_1,
119 IWL_POWER_INDEX_2, 35 IWL_POWER_INDEX_2,
@@ -130,15 +46,6 @@ struct iwl_power_mgr {
130}; 46};
131 47
132int iwl_power_update_mode(struct iwl_priv *priv, bool force); 48int iwl_power_update_mode(struct iwl_priv *priv, bool force);
133bool iwl_ht_enabled(struct iwl_priv *priv);
134bool iwl_within_ct_kill_margin(struct iwl_priv *priv);
135enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
136enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
137void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
138void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
139void iwl_tt_handler(struct iwl_priv *priv);
140void iwl_tt_initialize(struct iwl_priv *priv);
141void iwl_tt_exit(struct iwl_priv *priv);
142void iwl_power_initialize(struct iwl_priv *priv); 49void iwl_power_initialize(struct iwl_priv *priv);
143 50
144extern bool no_sleep_autoadjust; 51extern bool no_sleep_autoadjust;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index b1f101caf19d..5469655646ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -306,7 +306,7 @@
306 * at a time, until receiving ACK from receiving station, or reaching 306 * at a time, until receiving ACK from receiving station, or reaching
307 * retry limit and giving up. 307 * retry limit and giving up.
308 * 308 *
309 * The command queue (#4) must use this mode! 309 * The command queue (#4/#9) must use this mode!
310 * This mode does not require use of the Byte Count table in host DRAM. 310 * This mode does not require use of the Byte Count table in host DRAM.
311 * 311 *
312 * Driver controls scheduler operation via 3 means: 312 * Driver controls scheduler operation via 3 means:
@@ -322,7 +322,7 @@
322 * (1024 bytes for each queue). 322 * (1024 bytes for each queue).
323 * 323 *
324 * After receiving "Alive" response from uCode, driver must initialize 324 * After receiving "Alive" response from uCode, driver must initialize
325 * the scheduler (especially for queue #4, the command queue, otherwise 325 * the scheduler (especially for queue #4/#9, the command queue, otherwise
326 * the driver can't issue commands!): 326 * the driver can't issue commands!):
327 */ 327 */
328 328
@@ -555,8 +555,9 @@
555#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ 555#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
556 ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc) 556 ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
557 557
558#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\ 558#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv) \
559 (~(1<<IWL_CMD_QUEUE_NUM))) 559 (((1<<(priv)->hw_params.max_txq_num) - 1) &\
560 (~(1<<(priv)->cmd_queue)))
560 561
561#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00) 562#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
562 563
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 79773e353baa..10be197b0f22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -228,7 +228,7 @@ void iwl_recover_from_statistics(struct iwl_priv *priv,
228{ 228{
229 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 229 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
230 return; 230 return;
231 if (iwl_is_associated(priv)) { 231 if (iwl_is_any_associated(priv)) {
232 if (priv->cfg->ops->lib->check_ack_health) { 232 if (priv->cfg->ops->lib->check_ack_health) {
233 if (!priv->cfg->ops->lib->check_ack_health( 233 if (!priv->cfg->ops->lib->check_ack_health(
234 priv, pkt)) { 234 priv, pkt)) {
@@ -266,7 +266,12 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
266{ 266{
267 u16 fc = le16_to_cpu(hdr->frame_control); 267 u16 fc = le16_to_cpu(hdr->frame_control);
268 268
269 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) 269 /*
270 * All contexts have the same setting here due to it being
271 * a module parameter, so OK to check any context.
272 */
273 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
274 RXON_FILTER_DIS_DECRYPT_MSK)
270 return 0; 275 return 0;
271 276
272 if (!(fc & IEEE80211_FCTL_PROTECTED)) 277 if (!(fc & IEEE80211_FCTL_PROTECTED))
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a4b3663a262f..7727f0966d31 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -206,7 +206,6 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
206static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, 206static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
207 struct iwl_rx_mem_buffer *rxb) 207 struct iwl_rx_mem_buffer *rxb)
208{ 208{
209#ifdef CONFIG_IWLWIFI_DEBUG
210 struct iwl_rx_packet *pkt = rxb_addr(rxb); 209 struct iwl_rx_packet *pkt = rxb_addr(rxb);
211 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 210 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
212 211
@@ -214,7 +213,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
214 scan_notif->scanned_channels, 213 scan_notif->scanned_channels,
215 scan_notif->tsf_low, 214 scan_notif->tsf_low,
216 scan_notif->tsf_high, scan_notif->status); 215 scan_notif->tsf_high, scan_notif->status);
217#endif
218 216
219 /* The HW is no longer scanning */ 217 /* The HW is no longer scanning */
220 clear_bit(STATUS_SCAN_HW, &priv->status); 218 clear_bit(STATUS_SCAN_HW, &priv->status);
@@ -236,6 +234,26 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
 
 	clear_bit(STATUS_SCANNING, &priv->status);
 
+	if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
+	    priv->cfg->advanced_bt_coexist && priv->bt_status !=
+	    scan_notif->bt_status) {
+		if (scan_notif->bt_status) {
+			/* BT on */
+			if (!priv->bt_ch_announce)
+				priv->bt_traffic_load =
+					IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+			/*
+			 * otherwise, no traffic load information provided
+			 * no changes made
+			 */
+		} else {
+			/* BT off */
+			priv->bt_traffic_load =
+				IWL_BT_COEX_TRAFFIC_LOAD_NONE;
+		}
+		priv->bt_status = scan_notif->bt_status;
+		queue_work(priv->workqueue, &priv->bt_traffic_change_work);
+	}
 	queue_work(priv->workqueue, &priv->scan_completed);
 }
 
@@ -268,18 +286,28 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 			       enum ieee80211_band band,
 			       struct ieee80211_vif *vif)
 {
+	struct iwl_rxon_context *ctx;
 	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
 	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
 	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
 
-	if (iwl_is_associated(priv)) {
-		/* If we're associated, we clamp the maximum passive
-		 * dwell time to be 98% of the beacon interval (minus
-		 * 2 * channel tune time) */
-		passive = vif ? vif->bss_conf.beacon_int : 0;
-		if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
-			passive = IWL_PASSIVE_DWELL_BASE;
-		passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+	if (iwl_is_any_associated(priv)) {
+		/*
+		 * If we're associated, we clamp the maximum passive
+		 * dwell time to be 98% of the smallest beacon interval
+		 * (minus 2 * channel tune time)
+		 */
+		for_each_context(priv, ctx) {
+			u16 value;
+
+			if (!iwl_is_associated_ctx(ctx))
+				continue;
+			value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+			if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
+				value = IWL_PASSIVE_DWELL_BASE;
+			value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+			passive = min(value, passive);
+		}
 	}
 
 	return passive;
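To see what the new loop computes: assuming the driver's usual constants (IWL_PASSIVE_DWELL_BASE of 100 and IWL_CHANNEL_TUNE_TIME of 5, both assumptions here, not shown in this hunk), a 100 TU beacon interval gives (100 * 98) / 100 - 10 = 88 TU of passive dwell, and taking min() across contexts keeps the scan dwell from outliving the shortest beacon period of any associated context. A standalone sketch of the same clamp:

/* Hypothetical standalone version of the per-context clamp above;
 * the two constants are assumed values. */
static u16 clamp_passive_dwell(u16 passive, u16 beacon_int)
{
	u16 value = beacon_int;

	if (value > 100 || !value)		/* IWL_PASSIVE_DWELL_BASE */
		value = 100;
	value = (value * 98) / 100 - 2 * 5;	/* e.g. 100 TU -> 88 TU */
	return value < passive ? value : passive;
}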
@@ -378,7 +406,7 @@ void iwl_internal_short_hw_scan(struct iwl_priv *priv)
 	queue_work(priv->workqueue, &priv->start_internal_scan);
 }
 
-void iwl_bg_start_internal_scan(struct work_struct *work)
+static void iwl_bg_start_internal_scan(struct work_struct *work)
 {
 	struct iwl_priv *priv =
 		container_of(work, struct iwl_priv, start_internal_scan);
@@ -418,9 +446,8 @@ void iwl_bg_start_internal_scan(struct work_struct *work)
  unlock:
 	mutex_unlock(&priv->mutex);
 }
-EXPORT_SYMBOL(iwl_bg_start_internal_scan);
 
-void iwl_bg_scan_check(struct work_struct *data)
+static void iwl_bg_scan_check(struct work_struct *data)
 {
 	struct iwl_priv *priv =
 		container_of(data, struct iwl_priv, scan_check.work);
@@ -439,7 +466,6 @@ void iwl_bg_scan_check(struct work_struct *data)
 	}
 	mutex_unlock(&priv->mutex);
 }
-EXPORT_SYMBOL(iwl_bg_scan_check);
 
 /**
  * iwl_fill_probe_req - fill in all required fields and IE for probe request
@@ -489,7 +515,7 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
 }
 EXPORT_SYMBOL(iwl_fill_probe_req);
 
-void iwl_bg_abort_scan(struct work_struct *work)
+static void iwl_bg_abort_scan(struct work_struct *work)
 {
 	struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
 
@@ -504,13 +530,14 @@ void iwl_bg_abort_scan(struct work_struct *work)
 	iwl_send_scan_abort(priv);
 	mutex_unlock(&priv->mutex);
 }
-EXPORT_SYMBOL(iwl_bg_abort_scan);
 
-void iwl_bg_scan_completed(struct work_struct *work)
+static void iwl_bg_scan_completed(struct work_struct *work)
 {
 	struct iwl_priv *priv =
 	    container_of(work, struct iwl_priv, scan_completed);
 	bool internal = false;
+	bool scan_completed = false;
+	struct iwl_rxon_context *ctx;
 
 	IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
 
@@ -521,7 +548,8 @@ void iwl_bg_scan_completed(struct work_struct *work)
 		priv->is_internal_short_scan = false;
 		IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
 		internal = true;
-	} else {
+	} else if (priv->scan_request) {
+		scan_completed = true;
 		priv->scan_request = NULL;
 		priv->scan_vif = NULL;
 	}
@@ -540,11 +568,13 @@ void iwl_bg_scan_completed(struct work_struct *work)
 	 * Since setting the RXON may have been deferred while
 	 * performing the scan, fire one off if needed
 	 */
-	if (memcmp(&priv->active_rxon,
-		   &priv->staging_rxon, sizeof(priv->staging_rxon)))
-		iwlcore_commit_rxon(priv);
+	for_each_context(priv, ctx)
+		iwlcore_commit_rxon(priv, ctx);
 
  out:
+	if (priv->cfg->ops->hcmd->set_pan_params)
+		priv->cfg->ops->hcmd->set_pan_params(priv);
+
 	mutex_unlock(&priv->mutex);
 
 	/*
@@ -552,10 +582,9 @@ void iwl_bg_scan_completed(struct work_struct *work)
 	 * into driver again into functions that will attempt to take
 	 * mutex.
 	 */
-	if (!internal)
+	if (scan_completed)
 		ieee80211_scan_completed(priv->hw, false);
 }
-EXPORT_SYMBOL(iwl_bg_scan_completed);
 
 void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
 {
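Making the four scan work handlers static (and dropping their EXPORT_SYMBOLs) is safe because their only consumer is the INIT_WORK wiring in this same file. The body of iwl_setup_scan_deferred_work() is not part of these hunks, but it presumably pairs each work item with its handler along these lines:

void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
{
	/* handlers are file-local now; only the work structs escape */
	INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
	INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
	INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
	INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
}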
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 7e0829be5e78..ccd09027c7cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -172,12 +172,14 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 EXPORT_SYMBOL(iwl_send_add_sta);
 
 static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
-				   struct ieee80211_sta_ht_cap *sta_ht_inf)
+				   struct ieee80211_sta *sta,
+				   struct iwl_rxon_context *ctx)
 {
+	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
 	__le32 sta_flags;
 	u8 mimo_ps_mode;
 
-	if (!sta_ht_inf || !sta_ht_inf->ht_supported)
+	if (!sta || !sta_ht_inf->ht_supported)
 		goto done;
 
 	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
@@ -211,7 +213,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
 	sta_flags |= cpu_to_le32(
 		(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
 
-	if (iwl_is_ht40_tx_allowed(priv, sta_ht_inf))
+	if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
 		sta_flags |= STA_FLG_HT40_EN_MSK;
 	else
 		sta_flags &= ~STA_FLG_HT40_EN_MSK;
@@ -226,9 +228,9 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
  *
  * should be called with sta_lock held
  */
-static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
-			   bool is_ap,
-			   struct ieee80211_sta_ht_cap *ht_info)
+static u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   const u8 *addr, bool is_ap,
+			   struct ieee80211_sta *sta)
 {
 	struct iwl_station_entry *station;
 	int i;
@@ -236,9 +238,9 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
 	u16 rate;
 
 	if (is_ap)
-		sta_id = IWL_AP_ID;
+		sta_id = ctx->ap_sta_id;
 	else if (is_broadcast_ether_addr(addr))
-		sta_id = priv->hw_params.bcast_sta_id;
+		sta_id = ctx->bcast_sta_id;
 	else
 		for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
 			if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
@@ -289,14 +291,22 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
 	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
 	station->sta.mode = 0;
 	station->sta.sta.sta_id = sta_id;
-	station->sta.station_flags = 0;
+	station->sta.station_flags = ctx->station_flags;
+	station->ctxid = ctx->ctxid;
+
+	if (sta) {
+		struct iwl_station_priv_common *sta_priv;
+
+		sta_priv = (void *)sta->drv_priv;
+		sta_priv->ctx = ctx;
+	}
 
 	/*
 	 * OK to call unconditionally, since local stations (IBSS BSSID
-	 * STA and broadcast STA) pass in a NULL ht_info, and mac80211
+	 * STA and broadcast STA) pass in a NULL sta, and mac80211
 	 * doesn't allow HT IBSS.
 	 */
-	iwl_set_ht_add_station(priv, sta_id, ht_info);
+	iwl_set_ht_add_station(priv, sta_id, sta, ctx);
 
 	/* 3945 only */
 	rate = (priv->band == IEEE80211_BAND_5GHZ) ?
@@ -313,10 +323,9 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
 /**
  * iwl_add_station_common -
  */
-int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
-			   bool is_ap,
-			   struct ieee80211_sta_ht_cap *ht_info,
-			   u8 *sta_id_r)
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   const u8 *addr, bool is_ap,
+			   struct ieee80211_sta *sta, u8 *sta_id_r)
 {
 	unsigned long flags_spin;
 	int ret = 0;
@@ -325,7 +334,7 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
 
 	*sta_id_r = 0;
 	spin_lock_irqsave(&priv->sta_lock, flags_spin);
-	sta_id = iwl_prep_station(priv, addr, is_ap, ht_info);
+	sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
 	if (sta_id == IWL_INVALID_STATION) {
 		IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
 			addr);
@@ -431,8 +440,8 @@ static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
  *
  * Function sleeps.
  */
-int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
-			  u8 *sta_id_r)
+int iwl_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			  const u8 *addr, bool init_rs, u8 *sta_id_r)
 {
 	int ret;
 	u8 sta_id;
@@ -442,7 +451,7 @@ int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
 	if (sta_id_r)
 		*sta_id_r = IWL_INVALID_STATION;
 
-	ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
+	ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
 	if (ret) {
 		IWL_ERR(priv, "Unable to add station %pM\n", addr);
 		return ret;
@@ -464,7 +473,7 @@ int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
 		return -ENOMEM;
 	}
 
-	ret = iwl_send_lq_cmd(priv, link_cmd, CMD_SYNC, true);
+	ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
 	if (ret)
 		IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
 
@@ -616,7 +625,8 @@ EXPORT_SYMBOL_GPL(iwl_remove_station);
  * other than explicit station management would cause this in
  * the ucode, e.g. unassociated RXON.
  */
-void iwl_clear_ucode_stations(struct iwl_priv *priv)
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx)
 {
 	int i;
 	unsigned long flags_spin;
@@ -626,6 +636,9 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv)
 
 	spin_lock_irqsave(&priv->sta_lock, flags_spin);
 	for (i = 0; i < priv->hw_params.max_stations; i++) {
+		if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+			continue;
+
 		if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
 			IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
 			priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
@@ -647,7 +660,7 @@ EXPORT_SYMBOL(iwl_clear_ucode_stations);
  *
  * Function sleeps.
  */
-void iwl_restore_stations(struct iwl_priv *priv)
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	struct iwl_addsta_cmd sta_cmd;
 	struct iwl_link_quality_cmd lq;
@@ -665,6 +678,8 @@ void iwl_restore_stations(struct iwl_priv *priv)
 	IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
 	spin_lock_irqsave(&priv->sta_lock, flags_spin);
 	for (i = 0; i < priv->hw_params.max_stations; i++) {
+		if (ctx->ctxid != priv->stations[i].ctxid)
+			continue;
 		if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
 		    !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
 			IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
@@ -700,7 +715,7 @@ void iwl_restore_stations(struct iwl_priv *priv)
 			 * current LQ command
 			 */
 			if (send_lq)
-				iwl_send_lq_cmd(priv, &lq, CMD_SYNC, true);
+				iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
 			spin_lock_irqsave(&priv->sta_lock, flags_spin);
 			priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
 		}
@@ -718,7 +733,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 {
 	int i;
 
-	for (i = 0; i < STA_KEY_MAX_NUM; i++)
+	for (i = 0; i < priv->sta_key_max_num; i++)
 		if (!test_and_set_bit(i, &priv->ucode_key_table))
 			return i;
 
@@ -726,7 +741,9 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
 
-static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
+static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
+				      struct iwl_rxon_context *ctx,
+				      bool send_if_empty)
 {
 	int i, not_empty = 0;
 	u8 buff[sizeof(struct iwl_wep_cmd) +
@@ -734,7 +751,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
 	struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
 	size_t cmd_size = sizeof(struct iwl_wep_cmd);
 	struct iwl_host_cmd cmd = {
-		.id = REPLY_WEPKEY,
+		.id = ctx->wep_key_cmd,
 		.data = wep_cmd,
 		.flags = CMD_SYNC,
 	};
@@ -746,16 +763,16 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
 
 	for (i = 0; i < WEP_KEYS_MAX ; i++) {
 		wep_cmd->key[i].key_index = i;
-		if (priv->wep_keys[i].key_size) {
+		if (ctx->wep_keys[i].key_size) {
 			wep_cmd->key[i].key_offset = i;
 			not_empty = 1;
 		} else {
 			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
 		}
 
-		wep_cmd->key[i].key_size = priv->wep_keys[i].key_size;
-		memcpy(&wep_cmd->key[i].key[3], priv->wep_keys[i].key,
-				priv->wep_keys[i].key_size);
+		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+				ctx->wep_keys[i].key_size);
 	}
 
 	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
@@ -771,15 +788,17 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
 	return 0;
 }
 
-int iwl_restore_default_wep_keys(struct iwl_priv *priv)
+int iwl_restore_default_wep_keys(struct iwl_priv *priv,
+				 struct iwl_rxon_context *ctx)
 {
 	lockdep_assert_held(&priv->mutex);
 
-	return iwl_send_static_wepkey_cmd(priv, 0);
+	return iwl_send_static_wepkey_cmd(priv, ctx, false);
 }
 EXPORT_SYMBOL(iwl_restore_default_wep_keys);
 
 int iwl_remove_default_wep_key(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx,
 			       struct ieee80211_key_conf *keyconf)
 {
 	int ret;
@@ -789,13 +808,13 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
 	IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
 		      keyconf->keyidx);
 
-	memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
+	memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
 	if (iwl_is_rfkill(priv)) {
 		IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
 		/* but keys in device are clear anyway so return success */
 		return 0;
 	}
-	ret = iwl_send_static_wepkey_cmd(priv, 1);
+	ret = iwl_send_static_wepkey_cmd(priv, ctx, 1);
 	IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
 		      keyconf->keyidx, ret);
 
@@ -804,6 +823,7 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
 EXPORT_SYMBOL(iwl_remove_default_wep_key);
 
 int iwl_set_default_wep_key(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
 			    struct ieee80211_key_conf *keyconf)
 {
 	int ret;
@@ -818,13 +838,13 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
 
 	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
 	keyconf->hw_key_idx = HW_KEY_DEFAULT;
-	priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
+	priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
 
-	priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
-	memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
+	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
 	       keyconf->keylen);
 
-	ret = iwl_send_static_wepkey_cmd(priv, 0);
+	ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
 	IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
 		      keyconf->keylen, keyconf->keyidx, ret);
 
@@ -833,8 +853,9 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
 EXPORT_SYMBOL(iwl_set_default_wep_key);
 
 static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
-					struct ieee80211_key_conf *keyconf,
-					u8 sta_id)
+					struct iwl_rxon_context *ctx,
+					struct ieee80211_key_conf *keyconf,
+					u8 sta_id)
 {
 	unsigned long flags;
 	__le16 key_flags = 0;
@@ -851,12 +872,12 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
 	if (keyconf->keylen == WEP_KEY_LEN_128)
 		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
 
-	if (sta_id == priv->hw_params.bcast_sta_id)
+	if (sta_id == ctx->bcast_sta_id)
 		key_flags |= STA_KEY_MULTICAST_MSK;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 
-	priv->stations[sta_id].keyinfo.alg = keyconf->alg;
+	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
 	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
 	priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
 
@@ -887,8 +908,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
 }
 
 static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
-					 struct ieee80211_key_conf *keyconf,
-					 u8 sta_id)
+					 struct iwl_rxon_context *ctx,
+					 struct ieee80211_key_conf *keyconf,
+					 u8 sta_id)
 {
 	unsigned long flags;
 	__le16 key_flags = 0;
@@ -900,13 +922,13 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
 	key_flags &= ~STA_KEY_FLG_INVALID;
 
-	if (sta_id == priv->hw_params.bcast_sta_id)
+	if (sta_id == ctx->bcast_sta_id)
 		key_flags |= STA_KEY_MULTICAST_MSK;
 
 	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].keyinfo.alg = keyconf->alg;
+	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
 	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
 
 	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
@@ -936,8 +958,9 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 }
 
 static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
-					 struct ieee80211_key_conf *keyconf,
-					 u8 sta_id)
+					 struct iwl_rxon_context *ctx,
+					 struct ieee80211_key_conf *keyconf,
+					 u8 sta_id)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -947,7 +970,7 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
 	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
 	key_flags &= ~STA_KEY_FLG_INVALID;
 
-	if (sta_id == priv->hw_params.bcast_sta_id)
+	if (sta_id == ctx->bcast_sta_id)
 		key_flags |= STA_KEY_MULTICAST_MSK;
 
 	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -955,7 +978,7 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 
-	priv->stations[sta_id].keyinfo.alg = keyconf->alg;
+	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
 	priv->stations[sta_id].keyinfo.keylen = 16;
 
 	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
@@ -982,8 +1005,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
 }
 
 void iwl_update_tkip_key(struct iwl_priv *priv,
-			 struct ieee80211_key_conf *keyconf,
-			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
+			 struct iwl_rxon_context *ctx,
+			 struct ieee80211_key_conf *keyconf,
+			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
 {
 	u8 sta_id;
 	unsigned long flags;
@@ -995,7 +1019,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
 		return;
 	}
 
-	sta_id = iwl_sta_id_or_broadcast(priv, sta);
+	sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);
 	if (sta_id == IWL_INVALID_STATION)
 		return;
 
@@ -1018,8 +1042,9 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
 EXPORT_SYMBOL(iwl_update_tkip_key);
 
 int iwl_remove_dynamic_key(struct iwl_priv *priv,
-			   struct ieee80211_key_conf *keyconf,
-			   u8 sta_id)
+			   struct iwl_rxon_context *ctx,
+			   struct ieee80211_key_conf *keyconf,
+			   u8 sta_id)
 {
 	unsigned long flags;
 	u16 key_flags;
@@ -1028,7 +1053,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
 
 	lockdep_assert_held(&priv->mutex);
 
-	priv->key_mapping_key--;
+	ctx->key_mapping_keys--;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 	key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
@@ -1080,34 +1105,36 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
 }
 EXPORT_SYMBOL(iwl_remove_dynamic_key);
 
-int iwl_set_dynamic_key(struct iwl_priv *priv,
+int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 			struct ieee80211_key_conf *keyconf, u8 sta_id)
 {
 	int ret;
 
 	lockdep_assert_held(&priv->mutex);
 
-	priv->key_mapping_key++;
+	ctx->key_mapping_keys++;
 	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
 
-	switch (keyconf->alg) {
-	case ALG_CCMP:
-		ret = iwl_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id);
 		break;
-	case ALG_TKIP:
-		ret = iwl_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
+	case WLAN_CIPHER_SUITE_TKIP:
+		ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id);
 		break;
-	case ALG_WEP:
-		ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id);
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id);
 		break;
 	default:
 		IWL_ERR(priv,
-			"Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
+			"Unknown alg: %s cipher = %x\n", __func__,
+			keyconf->cipher);
 		ret = -EINVAL;
 	}
 
-	IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n",
-		      keyconf->alg, keyconf->keylen, keyconf->keyidx,
+	IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
 		      sta_id, ret);
 
 	return ret;
@@ -1147,16 +1174,16 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
  * RXON flags are updated and when LQ command is updated.
  */
 static bool is_lq_table_valid(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx,
 			      struct iwl_link_quality_cmd *lq)
 {
 	int i;
-	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 
-	if (ht_conf->is_ht)
+	if (ctx->ht.enabled)
 		return true;
 
 	IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
-		       priv->active_rxon.channel);
+		       ctx->active.channel);
 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
 		if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
 			IWL_DEBUG_INFO(priv,
@@ -1178,7 +1205,7 @@ static bool is_lq_table_valid(struct iwl_priv *priv,
  * this case to clear the state indicating that station creation is in
  * progress.
  */
-int iwl_send_lq_cmd(struct iwl_priv *priv,
+int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		    struct iwl_link_quality_cmd *lq, u8 flags, bool init)
 {
 	int ret = 0;
@@ -1197,7 +1224,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
 	iwl_dump_lq_cmd(priv, lq);
 	BUG_ON(init && (cmd.flags & CMD_ASYNC));
 
-	if (is_lq_table_valid(priv, lq))
+	if (is_lq_table_valid(priv, ctx, lq))
 		ret = iwl_send_cmd(priv, &cmd);
 	else
 		ret = -EINVAL;
@@ -1223,14 +1250,15 @@ EXPORT_SYMBOL(iwl_send_lq_cmd);
  * and marks it driver active, so that it will be restored to the
  * device at the next best time.
  */
-int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq)
+int iwl_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			    bool init_lq)
 {
 	struct iwl_link_quality_cmd *link_cmd;
 	unsigned long flags;
 	u8 sta_id;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
-	sta_id = iwl_prep_station(priv, iwl_bcast_addr, false, NULL);
+	sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
 	if (sta_id == IWL_INVALID_STATION) {
 		IWL_ERR(priv, "Unable to prepare broadcast station\n");
 		spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -1265,11 +1293,12 @@ EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
  * Only used by iwlagn. Placed here to have all bcast station management
  * code together.
  */
-int iwl_update_bcast_station(struct iwl_priv *priv)
+static int iwl_update_bcast_station(struct iwl_priv *priv,
+				    struct iwl_rxon_context *ctx)
 {
 	unsigned long flags;
 	struct iwl_link_quality_cmd *link_cmd;
-	u8 sta_id = priv->hw_params.bcast_sta_id;
+	u8 sta_id = ctx->bcast_sta_id;
 
 	link_cmd = iwl_sta_alloc_lq(priv, sta_id);
 	if (!link_cmd) {
@@ -1287,9 +1316,23 @@ int iwl_update_bcast_station(struct iwl_priv *priv)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(iwl_update_bcast_station);
 
-void iwl_dealloc_bcast_station(struct iwl_priv *priv)
+int iwl_update_bcast_stations(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+	int ret = 0;
+
+	for_each_context(priv, ctx) {
+		ret = iwl_update_bcast_station(priv, ctx);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iwl_update_bcast_stations);
+
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
 {
 	unsigned long flags;
 	int i;
@@ -1307,7 +1350,7 @@ void iwl_dealloc_bcast_station(struct iwl_priv *priv)
 	}
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
-EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station);
+EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
 
 /**
  * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
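The common thread through the iwl-sta.c changes is that the flat priv->stations[] table now serves several RXON contexts at once: each entry is stamped with ctx->ctxid when it is prepared, and every walker either filters on that id or, when handed a NULL context, visits all entries. A minimal illustration of the tag-and-filter idiom, with simplified types that are not the driver's actual structures:

struct sta_entry {
	u8 used;
	u8 ctxid;	/* which RXON context owns this entry */
};

/* Visit only the stations belonging to one context; ctxid < 0 means all. */
static void for_each_ctx_station(struct sta_entry *tbl, int n, int ctxid,
				 void (*fn)(struct sta_entry *))
{
	int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].used)
			continue;
		if (ctxid >= 0 && tbl[i].ctxid != ctxid)
			continue;
		fn(&tbl[i]);
	}
}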
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index d38a350ba0bd..56bad3f60d81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -44,32 +44,37 @@
 
 
 int iwl_remove_default_wep_key(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx,
 			       struct ieee80211_key_conf *key);
 int iwl_set_default_wep_key(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
 			    struct ieee80211_key_conf *key);
-int iwl_restore_default_wep_keys(struct iwl_priv *priv);
-int iwl_set_dynamic_key(struct iwl_priv *priv,
+int iwl_restore_default_wep_keys(struct iwl_priv *priv,
+				 struct iwl_rxon_context *ctx);
+int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 			struct ieee80211_key_conf *key, u8 sta_id);
-int iwl_remove_dynamic_key(struct iwl_priv *priv,
+int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 			   struct ieee80211_key_conf *key, u8 sta_id);
 void iwl_update_tkip_key(struct iwl_priv *priv,
-			 struct ieee80211_key_conf *keyconf,
-			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-
-void iwl_restore_stations(struct iwl_priv *priv);
-void iwl_clear_ucode_stations(struct iwl_priv *priv);
-int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
-void iwl_dealloc_bcast_station(struct iwl_priv *priv);
-int iwl_update_bcast_station(struct iwl_priv *priv);
+			 struct iwl_rxon_context *ctx,
+			 struct ieee80211_key_conf *keyconf,
+			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
+
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx);
+int iwl_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			    bool init_lq);
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
+int iwl_update_bcast_stations(struct iwl_priv *priv);
 int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
 int iwl_send_add_sta(struct iwl_priv *priv,
 		     struct iwl_addsta_cmd *sta, u8 flags);
-int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
-			  u8 *sta_id_r);
-int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
-			   bool is_ap,
-			   struct ieee80211_sta_ht_cap *ht_info,
-			   u8 *sta_id_r);
+int iwl_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			  const u8 *addr, bool init_rs, u8 *sta_id_r);
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   const u8 *addr, bool is_ap,
+			   struct ieee80211_sta *sta, u8 *sta_id_r);
 int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
 		       const u8 *addr);
 int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -94,20 +99,25 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
 static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
 {
 	unsigned long flags;
+	struct iwl_rxon_context *ctx;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 	memset(priv->stations, 0, sizeof(priv->stations));
 	priv->num_stations = 0;
 
-	/*
-	 * Remove all key information that is not stored as part of station
-	 * information since mac80211 may not have had a
-	 * chance to remove all the keys. When device is reconfigured by
-	 * mac80211 after an error all keys will be reconfigured.
-	 */
 	priv->ucode_key_table = 0;
-	priv->key_mapping_key = 0;
-	memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
+
+	for_each_context(priv, ctx) {
+		/*
+		 * Remove all key information that is not stored as part
+		 * of station information since mac80211 may not have had
+		 * a chance to remove all the keys. When device is
+		 * reconfigured by mac80211 after an error all keys will
+		 * be reconfigured.
+		 */
+		memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+		ctx->key_mapping_keys = 0;
+	}
 
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
@@ -123,6 +133,7 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
 /**
  * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
  * @priv: iwl priv
+ * @context: the current context
  * @sta: mac80211 station
  *
  * In certain circumstances mac80211 passes a station pointer
@@ -131,12 +142,13 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
  * inline wraps that pattern.
  */
 static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
+					  struct iwl_rxon_context *context,
 					  struct ieee80211_sta *sta)
 {
 	int sta_id;
 
 	if (!sta)
-		return priv->hw_params.bcast_sta_id;
+		return context->bcast_sta_id;
 
 	sta_id = iwl_sta_id(sta);
 
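The header leans on the for_each_context() iterator used throughout the patch. Its definition is not part of this diff; in this driver generation it is plausibly a bounded walk over the priv->contexts[] array guarded by a validity bitmap, along the lines of the sketch below (macro body assumed, names taken from the patch):

#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
		if (priv->valid_contexts & BIT(ctx->ctxid))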
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index a81989c06983..347d3dc6a015 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -134,7 +134,7 @@ EXPORT_SYMBOL(iwl_tx_queue_free);
  */
 void iwl_cmd_queue_free(struct iwl_priv *priv)
 {
-	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
+	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
 	struct iwl_queue *q = &txq->q;
 	struct device *dev = &priv->pci_dev->dev;
 	int i;
@@ -271,7 +271,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 
 	/* Driver private data, only for Tx (not command) queues,
 	 * not shared with device. */
-	if (id != IWL_CMD_QUEUE_NUM) {
+	if (id != priv->cmd_queue) {
 		txq->txb = kzalloc(sizeof(txq->txb[0]) *
 				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
 		if (!txq->txb) {
@@ -314,13 +314,13 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 
 	/*
 	 * Alloc buffer array for commands (Tx or other types of commands).
-	 * For the command queue (#4), allocate command space + one big
+	 * For the command queue (#4/#9), allocate command space + one big
 	 * command for scan, since scan command is very huge; the system will
 	 * not have two scans at the same time, so only one is needed.
 	 * For normal Tx queues (all other queues), no super-size command
 	 * space is needed.
 	 */
-	if (txq_id == IWL_CMD_QUEUE_NUM)
+	if (txq_id == priv->cmd_queue)
 		actual_slots++;
 
 	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
@@ -355,7 +355,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	 * need an swq_id so don't set one to catch errors, all others can
 	 * be set up to the identity mapping.
 	 */
-	if (txq_id != IWL_CMD_QUEUE_NUM)
+	if (txq_id != priv->cmd_queue)
 		txq->swq_id = txq_id;
 
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@@ -385,7 +385,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 {
 	int actual_slots = slots_num;
 
-	if (txq_id == IWL_CMD_QUEUE_NUM)
+	if (txq_id == priv->cmd_queue)
 		actual_slots++;
 
 	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
@@ -413,7 +413,7 @@ EXPORT_SYMBOL(iwl_tx_queue_reset);
  */
 int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
-	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
+	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
 	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
@@ -422,6 +422,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	int len;
 	u32 idx;
 	u16 fix_size;
+	bool is_ct_kill = false;
 
 	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
 	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
@@ -443,9 +444,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		IWL_ERR(priv, "No space in command queue\n");
-		if (iwl_within_ct_kill_margin(priv))
-			iwl_tt_enter_ct_kill(priv);
-		else {
+		if (priv->cfg->ops->lib->tt_ops.ct_kill_check) {
+			is_ct_kill =
+				priv->cfg->ops->lib->tt_ops.ct_kill_check(priv);
+		}
+		if (!is_ct_kill) {
 			IWL_ERR(priv, "Restarting adapter due to queue full\n");
 			queue_work(priv->workqueue, &priv->restart);
 		}
@@ -480,7 +483,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	 * information */
 
 	out_cmd->hdr.flags = 0;
-	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
+	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
 			INDEX_TO_SEQ(q->write_ptr));
 	if (cmd->flags & CMD_SIZE_HUGE)
 		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
@@ -497,15 +500,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 				get_cmd_string(out_cmd->hdr.cmd),
 				out_cmd->hdr.cmd,
 				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
+				q->write_ptr, idx, priv->cmd_queue);
 		break;
 	default:
 		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
 				"%d bytes at %d[%d]:%d\n",
 				get_cmd_string(out_cmd->hdr.cmd),
 				out_cmd->hdr.cmd,
 				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
+				q->write_ptr, idx, priv->cmd_queue);
 	}
 #endif
 	txq->need_update = 1;
@@ -584,16 +587,16 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
-	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
+	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
 	 * in the queue management code. */
-	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
-		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
-		 txq_id, sequence,
-		 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
-		 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
+	if (WARN(txq_id != priv->cmd_queue,
+		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+		 txq_id, priv->cmd_queue, sequence,
+		 priv->txq[priv->cmd_queue].q.read_ptr,
+		 priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}
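Replacing the compile-time IWL_CMD_QUEUE_NUM with priv->cmd_queue lets PAN-capable devices relocate the host command queue, which is what the "#4/#9" in the updated comment refers to. The selection presumably happens once at setup; a sketch under the assumption that the two constants keep their usual values (IWL_CMD_QUEUE_NUM == 4, IWL_IPAN_CMD_QUEUE_NUM == 9):

/* Hypothetical per-firmware choice of the command queue index. */
static void iwl_pick_cmd_queue(struct iwl_priv *priv, bool pan_capable)
{
	if (pan_capable)
		priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;	/* queue 9 */
	else
		priv->cmd_queue = IWL_CMD_QUEUE_NUM;		/* queue 4 */
}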
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 59a308b02f95..68e624afb987 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
@@ -143,7 +144,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
 	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
 
-	if (sta_id == priv->hw_params.bcast_sta_id)
+	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
 		key_flags |= STA_KEY_MULTICAST_MSK;
 
 	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -151,7 +152,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 	key_flags &= ~STA_KEY_FLG_INVALID;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].keyinfo.alg = keyconf->alg;
+	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
 	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
 	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
 	       keyconf->keylen);
@@ -222,23 +223,25 @@ static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
 
 	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
 
-	switch (keyconf->alg) {
-	case ALG_CCMP:
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
 		ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
 		break;
-	case ALG_TKIP:
+	case WLAN_CIPHER_SUITE_TKIP:
 		ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
 		break;
-	case ALG_WEP:
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
 		ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
 		break;
 	default:
-		IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
+		IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
+			keyconf->cipher);
 		ret = -EINVAL;
 	}
 
-	IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n",
-		      keyconf->alg, keyconf->keylen, keyconf->keyidx,
+	IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
 		      sta_id, ret);
 
 	return ret;
@@ -254,10 +257,11 @@ static int iwl3945_remove_static_key(struct iwl_priv *priv)
 static int iwl3945_set_static_key(struct iwl_priv *priv,
 				struct ieee80211_key_conf *key)
 {
-	if (key->alg == ALG_WEP)
+	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
 		return -EOPNOTSUPP;
 
-	IWL_ERR(priv, "Static key invalid: alg %d\n", key->alg);
+	IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
 	return -EINVAL;
 }
 
@@ -313,7 +317,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
 				 int left)
 {
 
-	if (!iwl_is_associated(priv) || !priv->ibss_beacon)
+	if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->ibss_beacon)
 		return 0;
 
 	if (priv->ibss_beacon->len > left)
@@ -339,7 +343,8 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
 		return -ENOMEM;
 	}
 
-	rate = iwl_rate_get_lowest_plcp(priv);
+	rate = iwl_rate_get_lowest_plcp(priv,
+				&priv->contexts[IWL_RXON_CTX_BSS]);
 
 	frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
 
@@ -369,23 +374,25 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
 	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
 	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
 
-	switch (keyinfo->alg) {
-	case ALG_CCMP:
+	tx_cmd->sec_ctl = 0;
+
+	switch (keyinfo->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
 		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
 		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
 		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
 		break;
 
-	case ALG_TKIP:
+	case WLAN_CIPHER_SUITE_TKIP:
 		break;
 
-	case ALG_WEP:
-		tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
 			(info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
 
-		if (keyinfo->keylen == 13)
-			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-
 		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
 
 		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
@@ -393,7 +400,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
 		break;
 
 	default:
-		IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg);
+		IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
 		break;
 	}
 }
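The rewritten WEP arm above deserves a second look: WLAN_CIPHER_SUITE_WEP104 first ORs in TX_CMD_SEC_KEY128 and then deliberately falls through to the WEP40 arm for the shared setup, replacing the old keylen == 13 test, and clearing sec_ctl up front is what makes the |= accumulation safe. The same cipher-driven dispatch reduced to a self-contained helper (a sketch; the key-index handling here is illustrative, not the driver's code):

/* Hypothetical reduction of the switch above to its WEP logic. */
static u8 wep_sec_ctl(u32 cipher, u8 key_idx)
{
	u8 sec_ctl = 0;

	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP104:
		sec_ctl |= TX_CMD_SEC_KEY128;	/* 104-bit key */
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		sec_ctl |= TX_CMD_SEC_WEP |
			   (key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
		break;
	}
	return sec_ctl;
}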
@@ -506,7 +513,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
506 hdr_len = ieee80211_hdrlen(fc); 513 hdr_len = ieee80211_hdrlen(fc);
507 514
508 /* Find index into station table for destination station */ 515 /* Find index into station table for destination station */
509 sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta); 516 sta_id = iwl_sta_id_or_broadcast(
517 priv, &priv->contexts[IWL_RXON_CTX_BSS],
518 info->control.sta);
510 if (sta_id == IWL_INVALID_STATION) { 519 if (sta_id == IWL_INVALID_STATION) {
511 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 520 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
512 hdr->addr1); 521 hdr->addr1);
@@ -536,6 +545,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
536 /* Set up driver data for this TFD */ 545 /* Set up driver data for this TFD */
537 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 546 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
538 txq->txb[q->write_ptr].skb = skb; 547 txq->txb[q->write_ptr].skb = skb;
548 txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
539 549
540 /* Init first empty entry in queue's array of Tx/cmd buffers */ 550 /* Init first empty entry in queue's array of Tx/cmd buffers */
541 out_cmd = txq->cmd[idx]; 551 out_cmd = txq->cmd[idx];
@@ -677,11 +687,12 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
677 int rc; 687 int rc;
678 int spectrum_resp_status; 688 int spectrum_resp_status;
679 int duration = le16_to_cpu(params->duration); 689 int duration = le16_to_cpu(params->duration);
690 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
680 691
681 if (iwl_is_associated(priv)) 692 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
682 add_time = iwl_usecs_to_beacons(priv, 693 add_time = iwl_usecs_to_beacons(priv,
683 le64_to_cpu(params->start_time) - priv->_3945.last_tsf, 694 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
684 le16_to_cpu(priv->rxon_timing.beacon_interval)); 695 le16_to_cpu(ctx->timing.beacon_interval));
685 696
686 memset(&spectrum, 0, sizeof(spectrum)); 697 memset(&spectrum, 0, sizeof(spectrum));
687 698
@@ -692,18 +703,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
692 cmd.len = sizeof(spectrum); 703 cmd.len = sizeof(spectrum);
693 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 704 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
694 705
695 if (iwl_is_associated(priv)) 706 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
696 spectrum.start_time = 707 spectrum.start_time =
697 iwl_add_beacon_time(priv, 708 iwl_add_beacon_time(priv,
698 priv->_3945.last_beacon_time, add_time, 709 priv->_3945.last_beacon_time, add_time,
699 le16_to_cpu(priv->rxon_timing.beacon_interval)); 710 le16_to_cpu(ctx->timing.beacon_interval));
700 else 711 else
701 spectrum.start_time = 0; 712 spectrum.start_time = 0;
702 713
703 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); 714 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
704 spectrum.channels[0].channel = params->channel; 715 spectrum.channels[0].channel = params->channel;
705 spectrum.channels[0].type = type; 716 spectrum.channels[0].type = type;
706 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) 717 if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
707 spectrum.flags |= RXON_FLG_BAND_24G_MSK | 718 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
708 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; 719 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
709 720
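
The add_time computation reduces to integer division by the beacon interval
expressed in microseconds. A simplified sketch, assuming TIME_UNIT is the
1024-usec 802.11 time unit and ignoring the device-specific bit packing the
real iwl_usecs_to_beacons() performs:

static u32 usecs_to_beacons(u32 usec, u16 beacon_interval)
{
        u32 interval = (u32)beacon_interval * 1024;     /* TUs -> usec */

        if (!interval)
                return 0;
        return usec / interval;         /* whole beacon periods elapsed */
}
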
@@ -792,7 +803,8 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
792 struct sk_buff *beacon; 803 struct sk_buff *beacon;
793 804
794 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 805 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
795 beacon = ieee80211_beacon_get(priv->hw, priv->vif); 806 beacon = ieee80211_beacon_get(priv->hw,
807 priv->contexts[IWL_RXON_CTX_BSS].vif);
796 808
797 if (!beacon) { 809 if (!beacon) {
798 IWL_ERR(priv, "update beacon failed\n"); 810 IWL_ERR(priv, "update beacon failed\n");
@@ -813,9 +825,9 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
813static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, 825static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
814 struct iwl_rx_mem_buffer *rxb) 826 struct iwl_rx_mem_buffer *rxb)
815{ 827{
816#ifdef CONFIG_IWLWIFI_DEBUG
817 struct iwl_rx_packet *pkt = rxb_addr(rxb); 828 struct iwl_rx_packet *pkt = rxb_addr(rxb);
818 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 829 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
830#ifdef CONFIG_IWLWIFI_DEBUG
819 u8 rate = beacon->beacon_notify_hdr.rate; 831 u8 rate = beacon->beacon_notify_hdr.rate;
820 832
821 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 833 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -827,6 +839,8 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
827 le32_to_cpu(beacon->low_tsf), rate); 839 le32_to_cpu(beacon->low_tsf), rate);
828#endif 840#endif
829 841
842 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
843
830 if ((priv->iw_mode == NL80211_IFTYPE_AP) && 844 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
831 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 845 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
832 queue_work(priv->workqueue, &priv->beacon_update); 846 queue_work(priv->workqueue, &priv->beacon_update);
@@ -2460,6 +2474,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2460{ 2474{
2461 int thermal_spin = 0; 2475 int thermal_spin = 0;
2462 u32 rfkill; 2476 u32 rfkill;
2477 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2463 2478
2464 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 2479 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2465 2480
@@ -2517,22 +2532,22 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2517 2532
2518 iwl_power_update_mode(priv, true); 2533 iwl_power_update_mode(priv, true);
2519 2534
2520 if (iwl_is_associated(priv)) { 2535 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
2521 struct iwl3945_rxon_cmd *active_rxon = 2536 struct iwl3945_rxon_cmd *active_rxon =
2522 (struct iwl3945_rxon_cmd *)(&priv->active_rxon); 2537 (struct iwl3945_rxon_cmd *)(&ctx->active);
2523 2538
2524 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2539 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2525 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2540 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2526 } else { 2541 } else {
2527 /* Initialize our rx_config data */ 2542 /* Initialize our rx_config data */
2528 iwl_connection_init_rx_config(priv, NULL); 2543 iwl_connection_init_rx_config(priv, ctx);
2529 } 2544 }
2530 2545
2531 /* Configure Bluetooth device coexistence support */ 2546 /* Configure Bluetooth device coexistence support */
2532 priv->cfg->ops->hcmd->send_bt_config(priv); 2547 priv->cfg->ops->hcmd->send_bt_config(priv);
2533 2548
2534 /* Configure the adapter for unassociated operation */ 2549 /* Configure the adapter for unassociated operation */
2535 iwlcore_commit_rxon(priv); 2550 iwlcore_commit_rxon(priv, ctx);
2536 2551
2537 iwl3945_reg_txpower_periodic(priv); 2552 iwl3945_reg_txpower_periodic(priv);
2538 2553
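
This is the recurring shape of the whole conversion: every RXON mutation now
goes through the context's own staging copy and is pushed with the
two-argument iwlcore_commit_rxon(). In outline (iwl_set_assoc_filter is a
hypothetical helper; only fields visible in this patch are assumed):

static void iwl_set_assoc_filter(struct iwl_priv *priv,
                                 struct iwl_rxon_context *ctx, bool assoc)
{
        if (assoc)
                ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
        else
                ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;

        /* on success, ctx->active follows what ctx->staging requested */
        iwlcore_commit_rxon(priv, ctx);
}
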
@@ -2563,9 +2578,14 @@ static void __iwl3945_down(struct iwl_priv *priv)
2563 if (!exit_pending) 2578 if (!exit_pending)
2564 set_bit(STATUS_EXIT_PENDING, &priv->status); 2579 set_bit(STATUS_EXIT_PENDING, &priv->status);
2565 2580
 2581 /* Stop TX queues watchdog. We need the STATUS_EXIT_PENDING bit set
 2582 * to prevent the timer from being re-armed */
2583 if (priv->cfg->ops->lib->recover_from_tx_stall)
2584 del_timer_sync(&priv->monitor_recover);
2585
2566 /* Station information will now be cleared in device */ 2586 /* Station information will now be cleared in device */
2567 iwl_clear_ucode_stations(priv); 2587 iwl_clear_ucode_stations(priv, NULL);
2568 iwl_dealloc_bcast_station(priv); 2588 iwl_dealloc_bcast_stations(priv);
2569 iwl_clear_driver_stations(priv); 2589 iwl_clear_driver_stations(priv);
2570 2590
2571 /* Unblock any waiting calls */ 2591 /* Unblock any waiting calls */
@@ -2647,7 +2667,8 @@ static int __iwl3945_up(struct iwl_priv *priv)
2647{ 2667{
2648 int rc, i; 2668 int rc, i;
2649 2669
2650 rc = iwl_alloc_bcast_station(priv, false); 2670 rc = iwl_alloc_bcast_station(priv, &priv->contexts[IWL_RXON_CTX_BSS],
2671 false);
2651 if (rc) 2672 if (rc)
2652 return rc; 2673 return rc;
2653 2674
@@ -2870,7 +2891,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2870 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2891 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2871 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 2892 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2872 2893
2873 if (iwl_is_associated(priv)) { 2894 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
2874 u16 interval = 0; 2895 u16 interval = 0;
2875 u32 extra; 2896 u32 extra;
2876 u32 suspend_time = 100; 2897 u32 suspend_time = 100;
@@ -2931,7 +2952,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2931 /* We don't build a direct scan probe request; the uCode will do 2952 /* We don't build a direct scan probe request; the uCode will do
2932 * that based on the direct_mask added to each channel entry */ 2953 * that based on the direct_mask added to each channel entry */
2933 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 2954 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2934 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id; 2955 scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2935 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2956 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2936 2957
2937 /* flags + rate selection */ 2958 /* flags + rate selection */
@@ -3029,8 +3050,10 @@ static void iwl3945_bg_restart(struct work_struct *data)
3029 return; 3050 return;
3030 3051
3031 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { 3052 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
3053 struct iwl_rxon_context *ctx;
3032 mutex_lock(&priv->mutex); 3054 mutex_lock(&priv->mutex);
3033 priv->vif = NULL; 3055 for_each_context(priv, ctx)
3056 ctx->vif = NULL;
3034 priv->is_open = 0; 3057 priv->is_open = 0;
3035 mutex_unlock(&priv->mutex); 3058 mutex_unlock(&priv->mutex);
3036 iwl3945_down(priv); 3059 iwl3945_down(priv);
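
for_each_context() visits only the contexts a given device supports. One
plausible definition, consistent with the valid_contexts bitmap and the
per-context ctxid initialized in the probe path below:

#define for_each_context(priv, ctx)                             \
        for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];           \
             ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)    \
                if (priv->valid_contexts & BIT(ctx->ctxid))
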
@@ -3064,6 +3087,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3064{ 3087{
3065 int rc = 0; 3088 int rc = 0;
3066 struct ieee80211_conf *conf = NULL; 3089 struct ieee80211_conf *conf = NULL;
3090 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3067 3091
3068 if (!vif || !priv->is_open) 3092 if (!vif || !priv->is_open)
3069 return; 3093 return;
@@ -3074,7 +3098,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3074 } 3098 }
3075 3099
3076 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3100 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3077 vif->bss_conf.aid, priv->active_rxon.bssid_addr); 3101 vif->bss_conf.aid, ctx->active.bssid_addr);
3078 3102
3079 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3103 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3080 return; 3104 return;
@@ -3083,37 +3107,34 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3083 3107
3084 conf = ieee80211_get_hw_conf(priv->hw); 3108 conf = ieee80211_get_hw_conf(priv->hw);
3085 3109
3086 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3110 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3087 iwlcore_commit_rxon(priv); 3111 iwlcore_commit_rxon(priv, ctx);
3088 3112
3089 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); 3113 rc = iwl_send_rxon_timing(priv, ctx);
3090 iwl_setup_rxon_timing(priv, vif);
3091 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3092 sizeof(priv->rxon_timing), &priv->rxon_timing);
3093 if (rc) 3114 if (rc)
3094 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3115 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3095 "Attempting to continue.\n"); 3116 "Attempting to continue.\n");
3096 3117
3097 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3118 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3098 3119
3099 priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid); 3120 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
3100 3121
3101 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3122 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3102 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3123 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3103 3124
3104 if (vif->bss_conf.use_short_preamble) 3125 if (vif->bss_conf.use_short_preamble)
3105 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3126 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3106 else 3127 else
3107 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3128 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3108 3129
3109 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3130 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3110 if (vif->bss_conf.use_short_slot) 3131 if (vif->bss_conf.use_short_slot)
3111 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 3132 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3112 else 3133 else
3113 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3134 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3114 } 3135 }
3115 3136
3116 iwlcore_commit_rxon(priv); 3137 iwlcore_commit_rxon(priv, ctx);
3117 3138
3118 switch (vif->type) { 3139 switch (vif->type) {
3119 case NL80211_IFTYPE_STATION: 3140 case NL80211_IFTYPE_STATION:
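
Here and in iwl3945_config_ap() below, the open-coded memset /
iwl_setup_rxon_timing() / iwl_send_cmd_pdu() triple collapses into
iwl_send_rxon_timing(). A sketch of what such a helper folds together,
assuming it keeps the original steps and uses the per-context command id set
up in the probe path:

static int iwl_send_rxon_timing(struct iwl_priv *priv,
                                struct iwl_rxon_context *ctx)
{
        memset(&ctx->timing, 0, sizeof(ctx->timing));
        iwl_setup_rxon_timing(priv, ctx->vif);  /* beacon interval, tstamp */

        return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
                                sizeof(ctx->timing), &ctx->timing);
}
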
@@ -3250,48 +3271,45 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3250 3271
3251void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3272void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3252{ 3273{
3274 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3253 int rc = 0; 3275 int rc = 0;
3254 3276
3255 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3277 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3256 return; 3278 return;
3257 3279
3258 /* The following should be done only at AP bring up */ 3280 /* The following should be done only at AP bring up */
3259 if (!(iwl_is_associated(priv))) { 3281 if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) {
3260 3282
3261 /* RXON - unassoc (to set timing command) */ 3283 /* RXON - unassoc (to set timing command) */
3262 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3284 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3263 iwlcore_commit_rxon(priv); 3285 iwlcore_commit_rxon(priv, ctx);
3264 3286
3265 /* RXON Timing */ 3287 /* RXON Timing */
3266 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); 3288 rc = iwl_send_rxon_timing(priv, ctx);
3267 iwl_setup_rxon_timing(priv, vif);
3268 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3269 sizeof(priv->rxon_timing),
3270 &priv->rxon_timing);
3271 if (rc) 3289 if (rc)
3272 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3290 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3273 "Attempting to continue.\n"); 3291 "Attempting to continue.\n");
3274 3292
3275 priv->staging_rxon.assoc_id = 0; 3293 ctx->staging.assoc_id = 0;
3276 3294
3277 if (vif->bss_conf.use_short_preamble) 3295 if (vif->bss_conf.use_short_preamble)
3278 priv->staging_rxon.flags |= 3296 ctx->staging.flags |=
3279 RXON_FLG_SHORT_PREAMBLE_MSK; 3297 RXON_FLG_SHORT_PREAMBLE_MSK;
3280 else 3298 else
3281 priv->staging_rxon.flags &= 3299 ctx->staging.flags &=
3282 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3300 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3283 3301
3284 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3302 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3285 if (vif->bss_conf.use_short_slot) 3303 if (vif->bss_conf.use_short_slot)
3286 priv->staging_rxon.flags |= 3304 ctx->staging.flags |=
3287 RXON_FLG_SHORT_SLOT_MSK; 3305 RXON_FLG_SHORT_SLOT_MSK;
3288 else 3306 else
3289 priv->staging_rxon.flags &= 3307 ctx->staging.flags &=
3290 ~RXON_FLG_SHORT_SLOT_MSK; 3308 ~RXON_FLG_SHORT_SLOT_MSK;
3291 } 3309 }
3292 /* restore RXON assoc */ 3310 /* restore RXON assoc */
3293 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3311 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3294 iwlcore_commit_rxon(priv); 3312 iwlcore_commit_rxon(priv, ctx);
3295 } 3313 }
3296 iwl3945_send_beacon_cmd(priv); 3314 iwl3945_send_beacon_cmd(priv);
3297 3315
@@ -3317,10 +3335,11 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3317 return -EOPNOTSUPP; 3335 return -EOPNOTSUPP;
3318 } 3336 }
3319 3337
3320 static_key = !iwl_is_associated(priv); 3338 static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
3321 3339
3322 if (!static_key) { 3340 if (!static_key) {
3323 sta_id = iwl_sta_id_or_broadcast(priv, sta); 3341 sta_id = iwl_sta_id_or_broadcast(
3342 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
3324 if (sta_id == IWL_INVALID_STATION) 3343 if (sta_id == IWL_INVALID_STATION)
3325 return -EINVAL; 3344 return -EINVAL;
3326 } 3345 }
@@ -3371,8 +3390,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3371 sta_priv->common.sta_id = IWL_INVALID_STATION; 3390 sta_priv->common.sta_id = IWL_INVALID_STATION;
3372 3391
3373 3392
3374 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap, 3393 ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS],
3375 &sta_id); 3394 sta->addr, is_ap, sta, &sta_id);
3376 if (ret) { 3395 if (ret) {
3377 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 3396 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3378 sta->addr, ret); 3397 sta->addr, ret);
@@ -3399,6 +3418,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3399{ 3418{
3400 struct iwl_priv *priv = hw->priv; 3419 struct iwl_priv *priv = hw->priv;
3401 __le32 filter_or = 0, filter_nand = 0; 3420 __le32 filter_or = 0, filter_nand = 0;
3421 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3402 3422
3403#define CHK(test, flag) do { \ 3423#define CHK(test, flag) do { \
3404 if (*total_flags & (test)) \ 3424 if (*total_flags & (test)) \
@@ -3418,8 +3438,8 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3418 3438
3419 mutex_lock(&priv->mutex); 3439 mutex_lock(&priv->mutex);
3420 3440
3421 priv->staging_rxon.filter_flags &= ~filter_nand; 3441 ctx->staging.filter_flags &= ~filter_nand;
3422 priv->staging_rxon.filter_flags |= filter_or; 3442 ctx->staging.filter_flags |= filter_or;
3423 3443
3424 /* 3444 /*
3425 * Committing directly here breaks for some reason, 3445 * Committing directly here breaks for some reason,
@@ -3533,8 +3553,9 @@ static ssize_t show_flags(struct device *d,
3533 struct device_attribute *attr, char *buf) 3553 struct device_attribute *attr, char *buf)
3534{ 3554{
3535 struct iwl_priv *priv = dev_get_drvdata(d); 3555 struct iwl_priv *priv = dev_get_drvdata(d);
3556 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3536 3557
3537 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); 3558 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3538} 3559}
3539 3560
3540static ssize_t store_flags(struct device *d, 3561static ssize_t store_flags(struct device *d,
@@ -3543,17 +3564,18 @@ static ssize_t store_flags(struct device *d,
3543{ 3564{
3544 struct iwl_priv *priv = dev_get_drvdata(d); 3565 struct iwl_priv *priv = dev_get_drvdata(d);
3545 u32 flags = simple_strtoul(buf, NULL, 0); 3566 u32 flags = simple_strtoul(buf, NULL, 0);
3567 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3546 3568
3547 mutex_lock(&priv->mutex); 3569 mutex_lock(&priv->mutex);
3548 if (le32_to_cpu(priv->staging_rxon.flags) != flags) { 3570 if (le32_to_cpu(ctx->staging.flags) != flags) {
3549 /* Cancel any currently running scans... */ 3571 /* Cancel any currently running scans... */
3550 if (iwl_scan_cancel_timeout(priv, 100)) 3572 if (iwl_scan_cancel_timeout(priv, 100))
3551 IWL_WARN(priv, "Could not cancel scan.\n"); 3573 IWL_WARN(priv, "Could not cancel scan.\n");
3552 else { 3574 else {
3553 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", 3575 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
3554 flags); 3576 flags);
3555 priv->staging_rxon.flags = cpu_to_le32(flags); 3577 ctx->staging.flags = cpu_to_le32(flags);
3556 iwlcore_commit_rxon(priv); 3578 iwlcore_commit_rxon(priv, ctx);
3557 } 3579 }
3558 } 3580 }
3559 mutex_unlock(&priv->mutex); 3581 mutex_unlock(&priv->mutex);
@@ -3567,9 +3589,10 @@ static ssize_t show_filter_flags(struct device *d,
3567 struct device_attribute *attr, char *buf) 3589 struct device_attribute *attr, char *buf)
3568{ 3590{
3569 struct iwl_priv *priv = dev_get_drvdata(d); 3591 struct iwl_priv *priv = dev_get_drvdata(d);
3592 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3570 3593
3571 return sprintf(buf, "0x%04X\n", 3594 return sprintf(buf, "0x%04X\n",
3572 le32_to_cpu(priv->active_rxon.filter_flags)); 3595 le32_to_cpu(ctx->active.filter_flags));
3573} 3596}
3574 3597
3575static ssize_t store_filter_flags(struct device *d, 3598static ssize_t store_filter_flags(struct device *d,
@@ -3577,19 +3600,20 @@ static ssize_t store_filter_flags(struct device *d,
3577 const char *buf, size_t count) 3600 const char *buf, size_t count)
3578{ 3601{
3579 struct iwl_priv *priv = dev_get_drvdata(d); 3602 struct iwl_priv *priv = dev_get_drvdata(d);
3603 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3580 u32 filter_flags = simple_strtoul(buf, NULL, 0); 3604 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3581 3605
3582 mutex_lock(&priv->mutex); 3606 mutex_lock(&priv->mutex);
3583 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { 3607 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3584 /* Cancel any currently running scans... */ 3608 /* Cancel any currently running scans... */
3585 if (iwl_scan_cancel_timeout(priv, 100)) 3609 if (iwl_scan_cancel_timeout(priv, 100))
3586 IWL_WARN(priv, "Could not cancel scan.\n"); 3610 IWL_WARN(priv, "Could not cancel scan.\n");
3587 else { 3611 else {
3588 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " 3612 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3589 "0x%04X\n", filter_flags); 3613 "0x%04X\n", filter_flags);
3590 priv->staging_rxon.filter_flags = 3614 ctx->staging.filter_flags =
3591 cpu_to_le32(filter_flags); 3615 cpu_to_le32(filter_flags);
3592 iwlcore_commit_rxon(priv); 3616 iwlcore_commit_rxon(priv, ctx);
3593 } 3617 }
3594 } 3618 }
3595 mutex_unlock(&priv->mutex); 3619 mutex_unlock(&priv->mutex);
@@ -3637,8 +3661,9 @@ static ssize_t store_measurement(struct device *d,
3637 const char *buf, size_t count) 3661 const char *buf, size_t count)
3638{ 3662{
3639 struct iwl_priv *priv = dev_get_drvdata(d); 3663 struct iwl_priv *priv = dev_get_drvdata(d);
3664 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3640 struct ieee80211_measurement_params params = { 3665 struct ieee80211_measurement_params params = {
3641 .channel = le16_to_cpu(priv->active_rxon.channel), 3666 .channel = le16_to_cpu(ctx->active.channel),
3642 .start_time = cpu_to_le64(priv->_3945.last_tsf), 3667 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3643 .duration = cpu_to_le16(1), 3668 .duration = cpu_to_le16(1),
3644 }; 3669 };
@@ -3785,10 +3810,8 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3785 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3810 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3786 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3811 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3787 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); 3812 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3788 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 3813
3789 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 3814 iwl_setup_scan_deferred_work(priv);
3790 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
3791 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
3792 3815
3793 iwl3945_hw_setup_deferred_work(priv); 3816 iwl3945_hw_setup_deferred_work(priv);
3794 3817
@@ -3812,8 +3835,6 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3812 cancel_delayed_work(&priv->alive_start); 3835 cancel_delayed_work(&priv->alive_start);
3813 cancel_work_sync(&priv->start_internal_scan); 3836 cancel_work_sync(&priv->start_internal_scan);
3814 cancel_work_sync(&priv->beacon_update); 3837 cancel_work_sync(&priv->beacon_update);
3815 if (priv->cfg->ops->lib->recover_from_tx_stall)
3816 del_timer_sync(&priv->monitor_recover);
3817} 3838}
3818 3839
3819static struct attribute *iwl3945_sysfs_entries[] = { 3840static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3853,6 +3874,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
3853 .hw_scan = iwl_mac_hw_scan, 3874 .hw_scan = iwl_mac_hw_scan,
3854 .sta_add = iwl3945_mac_sta_add, 3875 .sta_add = iwl3945_mac_sta_add,
3855 .sta_remove = iwl_mac_sta_remove, 3876 .sta_remove = iwl_mac_sta_remove,
3877 .tx_last_beacon = iwl_mac_tx_last_beacon,
3856}; 3878};
3857 3879
3858static int iwl3945_init_drv(struct iwl_priv *priv) 3880static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3933,8 +3955,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3933 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3955 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3934 3956
3935 hw->wiphy->interface_modes = 3957 hw->wiphy->interface_modes =
3936 BIT(NL80211_IFTYPE_STATION) | 3958 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3937 BIT(NL80211_IFTYPE_ADHOC);
3938 3959
3939 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3960 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3940 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3961 WIPHY_FLAG_DISABLE_BEACON_HINTS;
@@ -3966,7 +3987,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3966 3987
3967static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3988static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3968{ 3989{
3969 int err = 0; 3990 int err = 0, i;
3970 struct iwl_priv *priv; 3991 struct iwl_priv *priv;
3971 struct ieee80211_hw *hw; 3992 struct ieee80211_hw *hw;
3972 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 3993 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -3988,6 +4009,27 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3988 priv = hw->priv; 4009 priv = hw->priv;
3989 SET_IEEE80211_DEV(hw, &pdev->dev); 4010 SET_IEEE80211_DEV(hw, &pdev->dev);
3990 4011
4012 priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
4013
4014 /* 3945 has only one valid context */
4015 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
4016
4017 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
4018 priv->contexts[i].ctxid = i;
4019
4020 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
4021 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
4022 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
4023 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
4024 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
4025 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
4026 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
4027 BIT(NL80211_IFTYPE_STATION) |
4028 BIT(NL80211_IFTYPE_ADHOC);
4029 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
4030 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
4031 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
4032
3991 /* 4033 /*
3992 * Disabling hardware scan means that mac80211 will perform scans 4034 * Disabling hardware scan means that mac80211 will perform scans
3993 * "the hard way", rather than using device's scan. 4035 * "the hard way", rather than using device's scan.
@@ -4009,6 +4051,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4009 /*************************** 4051 /***************************
4010 * 2. Initializing PCI bus 4052 * 2. Initializing PCI bus
4011 * *************************/ 4053 * *************************/
4054 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
4055 PCIE_LINK_STATE_CLKPM);
4056
4012 if (pci_enable_device(pdev)) { 4057 if (pci_enable_device(pdev)) {
4013 err = -ENODEV; 4058 err = -ENODEV;
4014 goto out_ieee80211_free_hw; 4059 goto out_ieee80211_free_hw;
@@ -4120,7 +4165,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4120 } 4165 }
4121 4166
4122 iwl_set_rxon_channel(priv, 4167 iwl_set_rxon_channel(priv,
4123 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]); 4168 &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
4169 &priv->contexts[IWL_RXON_CTX_BSS]);
4124 iwl3945_setup_deferred_work(priv); 4170 iwl3945_setup_deferred_work(priv);
4125 iwl3945_setup_rx_handlers(priv); 4171 iwl3945_setup_rx_handlers(priv);
4126 iwl_power_initialize(priv); 4172 iwl_power_initialize(priv);
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index c02fcedea9fa..a944893ae3ca 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -1195,11 +1195,8 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1195 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: " 1195 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
1196 "oid is 0x%x\n", hdr->oid); 1196 "oid is 0x%x\n", hdr->oid);
1197 1197
1198 if (hdr->oid <= WIFI_IF_NTFY_MAX) { 1198 set_bit(hdr->oid, &iwm->wifi_ntfy[0]);
1199 set_bit(hdr->oid, &iwm->wifi_ntfy[0]); 1199 wake_up_interruptible(&iwm->wifi_ntfy_queue);
1200 wake_up_interruptible(&iwm->wifi_ntfy_queue);
1201 } else
1202 return -EINVAL;
1203 1200
1204 switch (hdr->oid) { 1201 switch (hdr->oid) {
1205 case UMAC_WIFI_IF_CMD_SET_PROFILE: 1202 case UMAC_WIFI_IF_CMD_SET_PROFILE:
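
The notification path above is the usual bitmap-plus-waitqueue pattern: the
RX side records which oid completed and wakes any sleeper. In generic form
(names illustrative, not taken from this driver):

static void notify_oid(unsigned long *map, wait_queue_head_t *wq, u8 oid)
{
        set_bit(oid, map);              /* record the completed command */
        wake_up_interruptible(wq);
}

static int wait_for_oid(unsigned long *map, wait_queue_head_t *wq, u8 oid)
{
        /* sleeps until the RX path sets the bit, or a signal arrives */
        return wait_event_interruptible(*wq, test_bit(oid, map));
}
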
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 3e82f1627209..317f086ced0a 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -10,6 +10,7 @@
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/wait.h>
13#include <linux/ieee80211.h> 14#include <linux/ieee80211.h>
14#include <net/cfg80211.h> 15#include <net/cfg80211.h>
15#include <asm/unaligned.h> 16#include <asm/unaligned.h>
@@ -526,20 +527,31 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
526 527
527 pos = scanresp->bssdesc_and_tlvbuffer; 528 pos = scanresp->bssdesc_and_tlvbuffer;
528 529
530 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_RSP", scanresp->bssdesc_and_tlvbuffer,
531 scanresp->bssdescriptsize);
532
529 tsfdesc = pos + bsssize; 533 tsfdesc = pos + bsssize;
530 tsfsize = 4 + 8 * scanresp->nr_sets; 534 tsfsize = 4 + 8 * scanresp->nr_sets;
535 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TSF", (u8 *) tsfdesc, tsfsize);
531 536
532 /* Validity check: we expect a Marvell-Local TLV */ 537 /* Validity check: we expect a Marvell-Local TLV */
533 i = get_unaligned_le16(tsfdesc); 538 i = get_unaligned_le16(tsfdesc);
534 tsfdesc += 2; 539 tsfdesc += 2;
535 if (i != TLV_TYPE_TSFTIMESTAMP) 540 if (i != TLV_TYPE_TSFTIMESTAMP) {
541 lbs_deb_scan("scan response: invalid TSF Timestamp %d\n", i);
536 goto done; 542 goto done;
543 }
544
537 /* Validity check: the TLV holds TSF values with 8 bytes each, so 545 /* Validity check: the TLV holds TSF values with 8 bytes each, so
538 * the size in the TLV must match the nr_sets value */ 546 * the size in the TLV must match the nr_sets value */
539 i = get_unaligned_le16(tsfdesc); 547 i = get_unaligned_le16(tsfdesc);
540 tsfdesc += 2; 548 tsfdesc += 2;
541 if (i / 8 != scanresp->nr_sets) 549 if (i / 8 != scanresp->nr_sets) {
550 lbs_deb_scan("scan response: invalid number of TSF timestamp "
551 "sets (expected %d got %d)\n", scanresp->nr_sets,
552 i / 8);
542 goto done; 553 goto done;
554 }
543 555
544 for (i = 0; i < scanresp->nr_sets; i++) { 556 for (i = 0; i < scanresp->nr_sets; i++) {
545 const u8 *bssid; 557 const u8 *bssid;
@@ -581,8 +593,11 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
581 id = *pos++; 593 id = *pos++;
582 elen = *pos++; 594 elen = *pos++;
583 left -= 2; 595 left -= 2;
584 if (elen > left || elen == 0) 596 if (elen > left || elen == 0) {
597 lbs_deb_scan("scan response: invalid IE fmt\n");
585 goto done; 598 goto done;
599 }
600
586 if (id == WLAN_EID_DS_PARAMS) 601 if (id == WLAN_EID_DS_PARAMS)
587 chan_no = *pos; 602 chan_no = *pos;
588 if (id == WLAN_EID_SSID) { 603 if (id == WLAN_EID_SSID) {
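
The IE loop enforces the usual bounds rule for 802.11 information elements:
the length octet must fit within what remains of the buffer (and, in this
driver, must be non-zero). The same walk as a standalone helper (find_ie is
illustrative; cfg80211 provides a similar lookup of its own):

static const u8 *find_ie(u8 eid, const u8 *ies, int left)
{
        while (left >= 2) {
                u8 id = ies[0];
                u8 elen = ies[1];

                if (elen == 0 || elen > left - 2)
                        return NULL;    /* malformed element list */
                if (id == eid)
                        return ies;
                ies += 2 + elen;
                left -= 2 + elen;
        }
        return NULL;
}
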
@@ -613,7 +628,9 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
613 capa, intvl, ie, ielen, 628 capa, intvl, ie, ielen,
614 LBS_SCAN_RSSI_TO_MBM(rssi), 629 LBS_SCAN_RSSI_TO_MBM(rssi),
615 GFP_KERNEL); 630 GFP_KERNEL);
616 } 631 } else
632 lbs_deb_scan("scan response: missing BSS channel IE\n");
633
617 tsfdesc += 8; 634 tsfdesc += 8;
618 } 635 }
619 ret = 0; 636 ret = 0;
@@ -1103,7 +1120,7 @@ static int lbs_associate(struct lbs_private *priv,
1103 lbs_deb_hex(LBS_DEB_ASSOC, "Common Rates", tmp, pos - tmp); 1120 lbs_deb_hex(LBS_DEB_ASSOC, "Common Rates", tmp, pos - tmp);
1104 1121
1105 /* add auth type TLV */ 1122 /* add auth type TLV */
1106 if (priv->fwrelease >= 0x09000000) 1123 if (MRVL_FW_MAJOR_REV(priv->fwrelease) >= 9)
1107 pos += lbs_add_auth_type_tlv(pos, sme->auth_type); 1124 pos += lbs_add_auth_type_tlv(pos, sme->auth_type);
1108 1125
1109 /* add WPA/WPA2 TLV */ 1126 /* add WPA/WPA2 TLV */
@@ -1114,6 +1131,9 @@ static int lbs_associate(struct lbs_private *priv,
1114 (u16)(pos - (u8 *) &cmd->iebuf); 1131 (u16)(pos - (u8 *) &cmd->iebuf);
1115 cmd->hdr.size = cpu_to_le16(len); 1132 cmd->hdr.size = cpu_to_le16(len);
1116 1133
1134 lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_CMD", (u8 *) cmd,
1135 le16_to_cpu(cmd->hdr.size));
1136
1117 /* store for later use */ 1137 /* store for later use */
1118 memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN); 1138 memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN);
1119 1139
@@ -1121,14 +1141,28 @@ static int lbs_associate(struct lbs_private *priv,
1121 if (ret) 1141 if (ret)
1122 goto done; 1142 goto done;
1123 1143
1124
1125 /* generate connect message to cfg80211 */ 1144 /* generate connect message to cfg80211 */
1126 1145
1127 resp = (void *) cmd; /* recast for easier field access */ 1146 resp = (void *) cmd; /* recast for easier field access */
1128 status = le16_to_cpu(resp->statuscode); 1147 status = le16_to_cpu(resp->statuscode);
1129 1148
1130 /* Convert statis code of old firmware */ 1149 /* Older FW versions map the IEEE 802.11 Status Code in the association
1131 if (priv->fwrelease < 0x09000000) 1150 * response to the following values returned in resp->statuscode:
1151 *
1152 * IEEE Status Code Marvell Status Code
1153 * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
1154 * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1155 * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1156 * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1157 * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1158 * others -> 0x0003 ASSOC_RESULT_REFUSED
1159 *
1160 * Other response codes:
1161 * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
1162 * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
1163 * association response from the AP)
1164 */
1165 if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) {
1132 switch (status) { 1166 switch (status) {
1133 case 0: 1167 case 0:
1134 break; 1168 break;
@@ -1150,11 +1184,16 @@ static int lbs_associate(struct lbs_private *priv,
1150 break; 1184 break;
1151 default: 1185 default:
1152 lbs_deb_assoc("association failure %d\n", status); 1186 lbs_deb_assoc("association failure %d\n", status);
1153 status = WLAN_STATUS_UNSPECIFIED_FAILURE; 1187 /* v5 OLPC firmware does return the AP status code if
1188 * it's not one of the values above. Let that through.
1189 */
1190 break;
1191 }
1154 } 1192 }
1155 1193
1156 lbs_deb_assoc("status %d, capability 0x%04x\n", status, 1194 lbs_deb_assoc("status %d, statuscode 0x%04x, capability 0x%04x, "
1157 le16_to_cpu(resp->capability)); 1195 "aid 0x%04x\n", status, le16_to_cpu(resp->statuscode),
1196 le16_to_cpu(resp->capability), le16_to_cpu(resp->aid));
1158 1197
1159 resp_ie_len = le16_to_cpu(resp->hdr.size) 1198 resp_ie_len = le16_to_cpu(resp->hdr.size)
1160 - sizeof(resp->hdr) 1199 - sizeof(resp->hdr)
@@ -1174,7 +1213,6 @@ static int lbs_associate(struct lbs_private *priv,
1174 netif_tx_wake_all_queues(priv->dev); 1213 netif_tx_wake_all_queues(priv->dev);
1175 } 1214 }
1176 1215
1177
1178done: 1216done:
1179 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 1217 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1180 return ret; 1218 return ret;
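
MRVL_FW_MAJOR_REV() makes the firmware-version tests above self-describing.
Assuming the major revision occupies the top byte of the 32-bit fwrelease
word (exactly what the 0x09000000 comparison it replaces implies), the macro
and a call site reduce to:

#define MRVL_FW_MAJOR_REV(x)    ((x) >> 24)

        /* 0x09000000 -> major 9, so the old and new predicates agree */
        if (MRVL_FW_MAJOR_REV(priv->fwrelease) >= 9)
                pos += lbs_add_auth_type_tlv(pos, sme->auth_type);
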
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 1d141fefd767..2ae752d10065 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -8,7 +8,14 @@
8#define _LBS_DECL_H_ 8#define _LBS_DECL_H_
9 9
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/firmware.h>
11 12
13/* Should be terminated by a NULL entry */
14struct lbs_fw_table {
15 int model;
16 const char *helper;
17 const char *fwname;
18};
12 19
13struct lbs_private; 20struct lbs_private;
14struct sk_buff; 21struct sk_buff;
@@ -53,4 +60,10 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
53u32 lbs_fw_index_to_data_rate(u8 index); 60u32 lbs_fw_index_to_data_rate(u8 index);
54u8 lbs_data_rate_to_fw_index(u32 rate); 61u8 lbs_data_rate_to_fw_index(u32 rate);
55 62
63int lbs_get_firmware(struct device *dev, const char *user_helper,
64 const char *user_mainfw, u32 card_model,
65 const struct lbs_fw_table *fw_table,
66 const struct firmware **helper,
67 const struct firmware **mainfw);
68
56#endif 69#endif
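
lbs_get_firmware() centralizes the lookup each bus driver used to open-code.
One plausible shape for the table walk, assuming user-supplied names take
precedence and that a NULL fwname row (as in the CF8305 entries) means the
model needs no main image:

static int fw_table_lookup(struct device *dev, u32 model,
                           const struct lbs_fw_table *tbl,
                           const struct firmware **helper,
                           const struct firmware **mainfw)
{
        for (; tbl->helper; tbl++) {    /* table ends at a NULL entry */
                if (tbl->model != model)
                        continue;
                if (request_firmware(helper, tbl->helper, dev))
                        continue;       /* helper missing, try next row */
                if (!tbl->fwname ||
                    !request_firmware(mainfw, tbl->fwname, dev))
                        return 0;       /* got everything this row needs */
                release_firmware(*helper);
                *helper = NULL;
        }
        return -ENOENT;
}
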
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 9c298396be50..e213a5dc049d 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -48,7 +48,6 @@
48MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>"); 48MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>");
49MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards"); 49MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards");
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51MODULE_FIRMWARE("libertas_cs_helper.fw");
52 51
53 52
54 53
@@ -61,9 +60,34 @@ struct if_cs_card {
61 struct lbs_private *priv; 60 struct lbs_private *priv;
62 void __iomem *iobase; 61 void __iomem *iobase;
63 bool align_regs; 62 bool align_regs;
63 u32 model;
64}; 64};
65 65
66 66
67enum {
68 MODEL_UNKNOWN = 0x00,
69 MODEL_8305 = 0x01,
70 MODEL_8381 = 0x02,
71 MODEL_8385 = 0x03
72};
73
74static const struct lbs_fw_table fw_table[] = {
75 { MODEL_8305, "libertas/cf8305.bin", NULL },
76 { MODEL_8305, "libertas_cs_helper.fw", NULL },
77 { MODEL_8381, "libertas/cf8381_helper.bin", "libertas/cf8381.bin" },
78 { MODEL_8381, "libertas_cs_helper.fw", "libertas_cs.fw" },
79 { MODEL_8385, "libertas/cf8385_helper.bin", "libertas/cf8385.bin" },
80 { MODEL_8385, "libertas_cs_helper.fw", "libertas_cs.fw" },
81 { 0, NULL, NULL }
82};
83MODULE_FIRMWARE("libertas/cf8305.bin");
84MODULE_FIRMWARE("libertas/cf8381_helper.bin");
85MODULE_FIRMWARE("libertas/cf8381.bin");
86MODULE_FIRMWARE("libertas/cf8385_helper.bin");
87MODULE_FIRMWARE("libertas/cf8385.bin");
88MODULE_FIRMWARE("libertas_cs_helper.fw");
89MODULE_FIRMWARE("libertas_cs.fw");
90
67 91
68/********************************************************************/ 92/********************************************************************/
69/* Hardware access */ 93/* Hardware access */
@@ -289,22 +313,19 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
289#define CF8385_MANFID 0x02df 313#define CF8385_MANFID 0x02df
290#define CF8385_CARDID 0x8103 314#define CF8385_CARDID 0x8103
291 315
292static inline int if_cs_hw_is_cf8305(struct pcmcia_device *p_dev) 316/* FIXME: just use the 'driver_info' field of 'struct pcmcia_device_id' when
293{ 317 * that gets fixed. Currently there's no way to access it from the probe hook.
294 return (p_dev->manf_id == CF8305_MANFID && 318 */
295 p_dev->card_id == CF8305_CARDID); 319static inline u32 get_model(u16 manf_id, u16 card_id)
296}
297
298static inline int if_cs_hw_is_cf8381(struct pcmcia_device *p_dev)
299{
300 return (p_dev->manf_id == CF8381_MANFID &&
301 p_dev->card_id == CF8381_CARDID);
302}
303
304static inline int if_cs_hw_is_cf8385(struct pcmcia_device *p_dev)
305{ 320{
306 return (p_dev->manf_id == CF8385_MANFID && 321 /* NOTE: keep in sync with if_cs_ids */
307 p_dev->card_id == CF8385_CARDID); 322 if (manf_id == CF8305_MANFID && card_id == CF8305_CARDID)
323 return MODEL_8305;
324 else if (manf_id == CF8381_MANFID && card_id == CF8381_CARDID)
325 return MODEL_8381;
326 else if (manf_id == CF8385_MANFID && card_id == CF8385_CARDID)
327 return MODEL_8385;
328 return MODEL_UNKNOWN;
308} 329}
309 330
310/********************************************************************/ 331/********************************************************************/
@@ -558,12 +579,11 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
558 * 579 *
559 * Return 0 on success 580 * Return 0 on success
560 */ 581 */
561static int if_cs_prog_helper(struct if_cs_card *card) 582static int if_cs_prog_helper(struct if_cs_card *card, const struct firmware *fw)
562{ 583{
563 int ret = 0; 584 int ret = 0;
564 int sent = 0; 585 int sent = 0;
565 u8 scratch; 586 u8 scratch;
566 const struct firmware *fw;
567 587
568 lbs_deb_enter(LBS_DEB_CS); 588 lbs_deb_enter(LBS_DEB_CS);
569 589
@@ -589,14 +609,6 @@ static int if_cs_prog_helper(struct if_cs_card *card)
589 goto done; 609 goto done;
590 } 610 }
591 611
592 /* TODO: make firmware file configurable */
593 ret = request_firmware(&fw, "libertas_cs_helper.fw",
594 &card->p_dev->dev);
595 if (ret) {
596 lbs_pr_err("can't load helper firmware\n");
597 ret = -ENODEV;
598 goto done;
599 }
600 lbs_deb_cs("helper size %td\n", fw->size); 612 lbs_deb_cs("helper size %td\n", fw->size);
601 613
602 /* "Set the 5 bytes of the helper image to 0" */ 614 /* "Set the 5 bytes of the helper image to 0" */
@@ -635,7 +647,7 @@ static int if_cs_prog_helper(struct if_cs_card *card)
635 if (ret < 0) { 647 if (ret < 0) {
636 lbs_pr_err("can't download helper at 0x%x, ret %d\n", 648 lbs_pr_err("can't download helper at 0x%x, ret %d\n",
637 sent, ret); 649 sent, ret);
638 goto err_release; 650 goto done;
639 } 651 }
640 652
641 if (count == 0) 653 if (count == 0)
@@ -644,17 +656,14 @@ static int if_cs_prog_helper(struct if_cs_card *card)
644 sent += count; 656 sent += count;
645 } 657 }
646 658
647err_release:
648 release_firmware(fw);
649done: 659done:
650 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 660 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
651 return ret; 661 return ret;
652} 662}
653 663
654 664
655static int if_cs_prog_real(struct if_cs_card *card) 665static int if_cs_prog_real(struct if_cs_card *card, const struct firmware *fw)
656{ 666{
657 const struct firmware *fw;
658 int ret = 0; 667 int ret = 0;
659 int retry = 0; 668 int retry = 0;
660 int len = 0; 669 int len = 0;
@@ -662,21 +671,13 @@ static int if_cs_prog_real(struct if_cs_card *card)
662 671
663 lbs_deb_enter(LBS_DEB_CS); 672 lbs_deb_enter(LBS_DEB_CS);
664 673
665 /* TODO: make firmware file configurable */
666 ret = request_firmware(&fw, "libertas_cs.fw",
667 &card->p_dev->dev);
668 if (ret) {
669 lbs_pr_err("can't load firmware\n");
670 ret = -ENODEV;
671 goto done;
672 }
673 lbs_deb_cs("fw size %td\n", fw->size); 674 lbs_deb_cs("fw size %td\n", fw->size);
674 675
675 ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW, 676 ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW,
676 IF_CS_SQ_HELPER_OK); 677 IF_CS_SQ_HELPER_OK);
677 if (ret < 0) { 678 if (ret < 0) {
678 lbs_pr_err("helper firmware doesn't answer\n"); 679 lbs_pr_err("helper firmware doesn't answer\n");
679 goto err_release; 680 goto done;
680 } 681 }
681 682
682 for (sent = 0; sent < fw->size; sent += len) { 683 for (sent = 0; sent < fw->size; sent += len) {
@@ -691,7 +692,7 @@ static int if_cs_prog_real(struct if_cs_card *card)
691 if (retry > 20) { 692 if (retry > 20) {
692 lbs_pr_err("could not download firmware\n"); 693 lbs_pr_err("could not download firmware\n");
693 ret = -ENODEV; 694 ret = -ENODEV;
694 goto err_release; 695 goto done;
695 } 696 }
696 if (retry) { 697 if (retry) {
697 sent -= len; 698 sent -= len;
@@ -710,7 +711,7 @@ static int if_cs_prog_real(struct if_cs_card *card)
710 IF_CS_BIT_COMMAND); 711 IF_CS_BIT_COMMAND);
711 if (ret < 0) { 712 if (ret < 0) {
712 lbs_pr_err("can't download firmware at 0x%x\n", sent); 713 lbs_pr_err("can't download firmware at 0x%x\n", sent);
713 goto err_release; 714 goto done;
714 } 715 }
715 } 716 }
716 717
@@ -718,9 +719,6 @@ static int if_cs_prog_real(struct if_cs_card *card)
718 if (ret < 0) 719 if (ret < 0)
719 lbs_pr_err("firmware download failed\n"); 720 lbs_pr_err("firmware download failed\n");
720 721
721err_release:
722 release_firmware(fw);
723
724done: 722done:
725 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 723 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
726 return ret; 724 return ret;
@@ -824,6 +822,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
824 unsigned int prod_id; 822 unsigned int prod_id;
825 struct lbs_private *priv; 823 struct lbs_private *priv;
826 struct if_cs_card *card; 824 struct if_cs_card *card;
825 const struct firmware *helper = NULL;
826 const struct firmware *mainfw = NULL;
827 827
828 lbs_deb_enter(LBS_DEB_CS); 828 lbs_deb_enter(LBS_DEB_CS);
829 829
@@ -843,7 +843,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
843 goto out1; 843 goto out1;
844 } 844 }
845 845
846
847 /* 846 /*
848 * Allocate an interrupt line. Note that this does not assign 847 * Allocate an interrupt line. Note that this does not assign
849 * a handler to the interrupt, unless the 'Handler' member of 848 * a handler to the interrupt, unless the 'Handler' member of
@@ -881,34 +880,47 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
881 */ 880 */
882 card->align_regs = 0; 881 card->align_regs = 0;
883 882
883 card->model = get_model(p_dev->manf_id, p_dev->card_id);
884 if (card->model == MODEL_UNKNOWN) {
885 lbs_pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n",
886 p_dev->manf_id, p_dev->card_id);
887 goto out2;
888 }
889
884 /* Check if we have a current silicon */ 890 /* Check if we have a current silicon */
885 prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID); 891 prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
886 if (if_cs_hw_is_cf8305(p_dev)) { 892 if (card->model == MODEL_8305) {
887 card->align_regs = 1; 893 card->align_regs = 1;
888 if (prod_id < IF_CS_CF8305_B1_REV) { 894 if (prod_id < IF_CS_CF8305_B1_REV) {
889 lbs_pr_err("old chips like 8305 rev B3 " 895 lbs_pr_err("8305 rev B0 and older are not supported\n");
890 "aren't supported\n");
891 ret = -ENODEV; 896 ret = -ENODEV;
892 goto out2; 897 goto out2;
893 } 898 }
894 } 899 }
895 900
896 if (if_cs_hw_is_cf8381(p_dev) && prod_id < IF_CS_CF8381_B3_REV) { 901 if ((card->model == MODEL_8381) && prod_id < IF_CS_CF8381_B3_REV) {
897 lbs_pr_err("old chips like 8381 rev B3 aren't supported\n"); 902 lbs_pr_err("8381 rev B2 and older are not supported\n");
898 ret = -ENODEV; 903 ret = -ENODEV;
899 goto out2; 904 goto out2;
900 } 905 }
901 906
902 if (if_cs_hw_is_cf8385(p_dev) && prod_id < IF_CS_CF8385_B1_REV) { 907 if ((card->model == MODEL_8385) && prod_id < IF_CS_CF8385_B1_REV) {
903 lbs_pr_err("old chips like 8385 rev B1 aren't supported\n"); 908 lbs_pr_err("8385 rev B0 and older are not supported\n");
904 ret = -ENODEV; 909 ret = -ENODEV;
905 goto out2; 910 goto out2;
906 } 911 }
907 912
913 ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model,
914 &fw_table[0], &helper, &mainfw);
915 if (ret) {
916 lbs_pr_err("failed to find firmware (%d)\n", ret);
917 goto out2;
918 }
919
908 /* Load the firmware early, before calling into libertas.ko */ 920 /* Load the firmware early, before calling into libertas.ko */
909 ret = if_cs_prog_helper(card); 921 ret = if_cs_prog_helper(card, helper);
910 if (ret == 0 && !if_cs_hw_is_cf8305(p_dev)) 922 if (ret == 0 && (card->model != MODEL_8305))
911 ret = if_cs_prog_real(card); 923 ret = if_cs_prog_real(card, mainfw);
912 if (ret) 924 if (ret)
913 goto out2; 925 goto out2;
914 926
@@ -957,6 +969,11 @@ out2:
957out1: 969out1:
958 pcmcia_disable_device(p_dev); 970 pcmcia_disable_device(p_dev);
959out: 971out:
972 if (helper)
973 release_firmware(helper);
974 if (mainfw)
975 release_firmware(mainfw);
976
960 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 977 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
961 return ret; 978 return ret;
962} 979}
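
Because if_cs programs the card synchronously during probe, the images are
needed only inside this function, and releasing them at the common exit
label keeps every error path balanced. The probe-path snippet above,
condensed into one illustrative function:

static int if_cs_load_fw(struct if_cs_card *card)
{
        const struct firmware *helper = NULL, *mainfw = NULL;
        int ret;

        ret = lbs_get_firmware(&card->p_dev->dev, NULL, NULL, card->model,
                               &fw_table[0], &helper, &mainfw);
        if (ret)
                goto out;

        ret = if_cs_prog_helper(card, helper);
        if (ret == 0 && card->model != MODEL_8305)
                ret = if_cs_prog_real(card, mainfw);
out:
        if (helper)
                release_firmware(helper);
        if (mainfw)
                release_firmware(mainfw);
        return ret;
}
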
@@ -993,6 +1010,7 @@ static struct pcmcia_device_id if_cs_ids[] = {
993 PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID), 1010 PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID),
994 PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID), 1011 PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID),
995 PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID), 1012 PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID),
1013 /* NOTE: keep in sync with get_model() */
996 PCMCIA_DEVICE_NULL, 1014 PCMCIA_DEVICE_NULL,
997}; 1015};
998MODULE_DEVICE_TABLE(pcmcia, if_cs_ids); 1016MODULE_DEVICE_TABLE(pcmcia, if_cs_ids);
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 87b634978b35..296fd00a5129 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -76,36 +76,32 @@ static const struct sdio_device_id if_sdio_ids[] = {
76 76
77MODULE_DEVICE_TABLE(sdio, if_sdio_ids); 77MODULE_DEVICE_TABLE(sdio, if_sdio_ids);
78 78
79struct if_sdio_model { 79#define MODEL_8385 0x04
80 int model; 80#define MODEL_8686 0x0b
81 const char *helper; 81#define MODEL_8688 0x10
82 const char *firmware; 82
83}; 83static const struct lbs_fw_table fw_table[] = {
84 84 { MODEL_8385, "libertas/sd8385_helper.bin", "libertas/sd8385.bin" },
85static struct if_sdio_model if_sdio_models[] = { 85 { MODEL_8385, "sd8385_helper.bin", "sd8385.bin" },
86 { 86 { MODEL_8686, "libertas/sd8686_v9_helper.bin", "libertas/sd8686_v9.bin" },
87 /* 8385 */ 87 { MODEL_8686, "libertas/sd8686_v8_helper.bin", "libertas/sd8686_v8.bin" },
88 .model = IF_SDIO_MODEL_8385, 88 { MODEL_8686, "sd8686_helper.bin", "sd8686.bin" },
89 .helper = "sd8385_helper.bin", 89 { MODEL_8688, "libertas/sd8688_helper.bin", "libertas/sd8688.bin" },
90 .firmware = "sd8385.bin", 90 { MODEL_8688, "sd8688_helper.bin", "sd8688.bin" },
91 }, 91 { 0, NULL, NULL }
92 {
93 /* 8686 */
94 .model = IF_SDIO_MODEL_8686,
95 .helper = "sd8686_helper.bin",
96 .firmware = "sd8686.bin",
97 },
98 {
99 /* 8688 */
100 .model = IF_SDIO_MODEL_8688,
101 .helper = "sd8688_helper.bin",
102 .firmware = "sd8688.bin",
103 },
104}; 92};
93MODULE_FIRMWARE("libertas/sd8385_helper.bin");
94MODULE_FIRMWARE("libertas/sd8385.bin");
105MODULE_FIRMWARE("sd8385_helper.bin"); 95MODULE_FIRMWARE("sd8385_helper.bin");
106MODULE_FIRMWARE("sd8385.bin"); 96MODULE_FIRMWARE("sd8385.bin");
97MODULE_FIRMWARE("libertas/sd8686_v9_helper.bin");
98MODULE_FIRMWARE("libertas/sd8686_v9.bin");
99MODULE_FIRMWARE("libertas/sd8686_v8_helper.bin");
100MODULE_FIRMWARE("libertas/sd8686_v8.bin");
107MODULE_FIRMWARE("sd8686_helper.bin"); 101MODULE_FIRMWARE("sd8686_helper.bin");
108MODULE_FIRMWARE("sd8686.bin"); 102MODULE_FIRMWARE("sd8686.bin");
103MODULE_FIRMWARE("libertas/sd8688_helper.bin");
104MODULE_FIRMWARE("libertas/sd8688.bin");
109MODULE_FIRMWARE("sd8688_helper.bin"); 105MODULE_FIRMWARE("sd8688_helper.bin");
110MODULE_FIRMWARE("sd8688.bin"); 106MODULE_FIRMWARE("sd8688.bin");
111 107
@@ -187,11 +183,11 @@ static u16 if_sdio_read_rx_len(struct if_sdio_card *card, int *err)
187 u16 rx_len; 183 u16 rx_len;
188 184
189 switch (card->model) { 185 switch (card->model) {
190 case IF_SDIO_MODEL_8385: 186 case MODEL_8385:
191 case IF_SDIO_MODEL_8686: 187 case MODEL_8686:
192 rx_len = if_sdio_read_scratch(card, &ret); 188 rx_len = if_sdio_read_scratch(card, &ret);
193 break; 189 break;
194 case IF_SDIO_MODEL_8688: 190 case MODEL_8688:
195 default: /* for newer chipsets */ 191 default: /* for newer chipsets */
196 rx_len = sdio_readb(card->func, IF_SDIO_RX_LEN, &ret); 192 rx_len = sdio_readb(card->func, IF_SDIO_RX_LEN, &ret);
197 if (!ret) 193 if (!ret)
@@ -288,7 +284,7 @@ static int if_sdio_handle_event(struct if_sdio_card *card,
288 284
289 lbs_deb_enter(LBS_DEB_SDIO); 285 lbs_deb_enter(LBS_DEB_SDIO);
290 286
291 if (card->model == IF_SDIO_MODEL_8385) { 287 if (card->model == MODEL_8385) {
292 event = sdio_readb(card->func, IF_SDIO_EVENT, &ret); 288 event = sdio_readb(card->func, IF_SDIO_EVENT, &ret);
293 if (ret) 289 if (ret)
294 goto out; 290 goto out;
@@ -466,10 +462,10 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
466 462
467#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY) 463#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY)
468 464
469static int if_sdio_prog_helper(struct if_sdio_card *card) 465static int if_sdio_prog_helper(struct if_sdio_card *card,
466 const struct firmware *fw)
470{ 467{
471 int ret; 468 int ret;
472 const struct firmware *fw;
473 unsigned long timeout; 469 unsigned long timeout;
474 u8 *chunk_buffer; 470 u8 *chunk_buffer;
475 u32 chunk_size; 471 u32 chunk_size;
@@ -478,16 +474,10 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
478 474
479 lbs_deb_enter(LBS_DEB_SDIO); 475 lbs_deb_enter(LBS_DEB_SDIO);
480 476
481 ret = request_firmware(&fw, card->helper, &card->func->dev);
482 if (ret) {
483 lbs_pr_err("can't load helper firmware\n");
484 goto out;
485 }
486
487 chunk_buffer = kzalloc(64, GFP_KERNEL); 477 chunk_buffer = kzalloc(64, GFP_KERNEL);
488 if (!chunk_buffer) { 478 if (!chunk_buffer) {
489 ret = -ENOMEM; 479 ret = -ENOMEM;
490 goto release_fw; 480 goto out;
491 } 481 }
492 482
493 sdio_claim_host(card->func); 483 sdio_claim_host(card->func);
@@ -562,22 +552,19 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
562release: 552release:
563 sdio_release_host(card->func); 553 sdio_release_host(card->func);
564 kfree(chunk_buffer); 554 kfree(chunk_buffer);
565release_fw:
566 release_firmware(fw);
567 555
568out: 556out:
569 if (ret) 557 if (ret)
570 lbs_pr_err("failed to load helper firmware\n"); 558 lbs_pr_err("failed to load helper firmware\n");
571 559
572 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 560 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
573
574 return ret; 561 return ret;
575} 562}
576 563
577static int if_sdio_prog_real(struct if_sdio_card *card) 564static int if_sdio_prog_real(struct if_sdio_card *card,
565 const struct firmware *fw)
578{ 566{
579 int ret; 567 int ret;
580 const struct firmware *fw;
581 unsigned long timeout; 568 unsigned long timeout;
582 u8 *chunk_buffer; 569 u8 *chunk_buffer;
583 u32 chunk_size; 570 u32 chunk_size;
@@ -586,16 +573,10 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
586 573
587 lbs_deb_enter(LBS_DEB_SDIO); 574 lbs_deb_enter(LBS_DEB_SDIO);
588 575
589 ret = request_firmware(&fw, card->firmware, &card->func->dev);
590 if (ret) {
591 lbs_pr_err("can't load firmware\n");
592 goto out;
593 }
594
595 chunk_buffer = kzalloc(512, GFP_KERNEL); 576 chunk_buffer = kzalloc(512, GFP_KERNEL);
596 if (!chunk_buffer) { 577 if (!chunk_buffer) {
597 ret = -ENOMEM; 578 ret = -ENOMEM;
598 goto release_fw; 579 goto out;
599 } 580 }
600 581
601 sdio_claim_host(card->func); 582 sdio_claim_host(card->func);
@@ -685,15 +666,12 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
685release: 666release:
686 sdio_release_host(card->func); 667 sdio_release_host(card->func);
687 kfree(chunk_buffer); 668 kfree(chunk_buffer);
688release_fw:
689 release_firmware(fw);
690 669
691out: 670out:
692 if (ret) 671 if (ret)
693 lbs_pr_err("failed to load firmware\n"); 672 lbs_pr_err("failed to load firmware\n");
694 673
695 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 674 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
696
697 return ret; 675 return ret;
698} 676}
699 677
@@ -701,6 +679,8 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
701{ 679{
702 int ret; 680 int ret;
703 u16 scratch; 681 u16 scratch;
682 const struct firmware *helper = NULL;
683 const struct firmware *mainfw = NULL;
704 684
705 lbs_deb_enter(LBS_DEB_SDIO); 685 lbs_deb_enter(LBS_DEB_SDIO);
706 686
@@ -718,11 +698,18 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
718 goto success; 698 goto success;
719 } 699 }
720 700
721 ret = if_sdio_prog_helper(card); 701 ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
702 card->model, &fw_table[0], &helper, &mainfw);
703 if (ret) {
704 lbs_pr_err("failed to find firmware (%d)\n", ret);
705 goto out;
706 }
707
708 ret = if_sdio_prog_helper(card, helper);
722 if (ret) 709 if (ret)
723 goto out; 710 goto out;
724 711
725 ret = if_sdio_prog_real(card); 712 ret = if_sdio_prog_real(card, mainfw);
726 if (ret) 713 if (ret)
727 goto out; 714 goto out;
728 715
@@ -733,8 +720,12 @@ success:
733 ret = 0; 720 ret = 0;
734 721
735out: 722out:
736 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 723 if (helper)
724 release_firmware(helper);
725 if (mainfw)
726 release_firmware(mainfw);
737 727
728 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
738 return ret; 729 return ret;
739} 730}
740 731
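
The refactor above inverts firmware ownership: if_sdio_prog_firmware() now acquires both images once, the helper/real programming routines only consume them, and the single out: label releases whatever was acquired. A minimal userspace sketch of that pattern, assuming nothing about the kernel API (get_both(), prog_helper() and prog_real() are hypothetical stand-ins for lbs_get_firmware() and the programming steps):

    #include <stdio.h>
    #include <stdlib.h>

    struct firmware { size_t size; unsigned char *data; };

    /* hypothetical stand-in for lbs_get_firmware(): acquire both images */
    static int get_both(struct firmware **helper, struct firmware **mainfw)
    {
        *helper = calloc(1, sizeof(**helper));
        *mainfw = calloc(1, sizeof(**mainfw));
        if (*helper && *mainfw)
            return 0;
        free(*helper);
        free(*mainfw);
        *helper = *mainfw = NULL;
        return -1;
    }

    /* the programming steps only consume; they never request or release */
    static int prog_helper(const struct firmware *fw) { return fw->size ? -1 : 0; }
    static int prog_real(const struct firmware *fw) { return fw->size ? -1 : 0; }

    static int prog_firmware(void)
    {
        struct firmware *helper = NULL, *mainfw = NULL;
        int ret = get_both(&helper, &mainfw);

        if (ret)
            goto out;
        ret = prog_helper(helper);
        if (ret)
            goto out;
        ret = prog_real(mainfw);
    out:
        free(helper);   /* single release point, like the patched out: label */
        free(mainfw);   /* free(NULL) is a no-op */
        return ret;
    }

    int main(void)
    {
        printf("prog_firmware: %d\n", prog_firmware());
        return 0;
    }
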
@@ -938,7 +929,7 @@ static int if_sdio_probe(struct sdio_func *func,
938 "ID: %x", &model) == 1) 929 "ID: %x", &model) == 1)
939 break; 930 break;
940 if (!strcmp(func->card->info[i], "IBIS Wireless SDIO Card")) { 931 if (!strcmp(func->card->info[i], "IBIS Wireless SDIO Card")) {
941 model = IF_SDIO_MODEL_8385; 932 model = MODEL_8385;
942 break; 933 break;
943 } 934 }
944 } 935 }
@@ -956,13 +947,13 @@ static int if_sdio_probe(struct sdio_func *func,
956 card->model = model; 947 card->model = model;
957 948
958 switch (card->model) { 949 switch (card->model) {
959 case IF_SDIO_MODEL_8385: 950 case MODEL_8385:
960 card->scratch_reg = IF_SDIO_SCRATCH_OLD; 951 card->scratch_reg = IF_SDIO_SCRATCH_OLD;
961 break; 952 break;
962 case IF_SDIO_MODEL_8686: 953 case MODEL_8686:
963 card->scratch_reg = IF_SDIO_SCRATCH; 954 card->scratch_reg = IF_SDIO_SCRATCH;
964 break; 955 break;
965 case IF_SDIO_MODEL_8688: 956 case MODEL_8688:
966 default: /* for newer chipsets */ 957 default: /* for newer chipsets */
967 card->scratch_reg = IF_SDIO_FW_STATUS; 958 card->scratch_reg = IF_SDIO_FW_STATUS;
968 break; 959 break;
@@ -972,49 +963,17 @@ static int if_sdio_probe(struct sdio_func *func,
972 card->workqueue = create_workqueue("libertas_sdio"); 963 card->workqueue = create_workqueue("libertas_sdio");
973 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); 964 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
974 965
975 for (i = 0;i < ARRAY_SIZE(if_sdio_models);i++) { 966 /* Check if we support this card */
976 if (card->model == if_sdio_models[i].model) 967 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
968 if (card->model == fw_table[i].model)
977 break; 969 break;
978 } 970 }
979 971 if (i == ARRAY_SIZE(fw_table)) {
980 if (i == ARRAY_SIZE(if_sdio_models)) {
981 lbs_pr_err("unknown card model 0x%x\n", card->model); 972 lbs_pr_err("unknown card model 0x%x\n", card->model);
982 ret = -ENODEV; 973 ret = -ENODEV;
983 goto free; 974 goto free;
984 } 975 }
985 976
986 card->helper = if_sdio_models[i].helper;
987 card->firmware = if_sdio_models[i].firmware;
988
989 kparam_block_sysfs_write(helper_name);
990 if (lbs_helper_name) {
991 char *helper = kstrdup(lbs_helper_name, GFP_KERNEL);
992 if (!helper) {
993 kparam_unblock_sysfs_write(helper_name);
994 ret = -ENOMEM;
995 goto free;
996 }
997 lbs_deb_sdio("overriding helper firmware: %s\n",
998 lbs_helper_name);
999 card->helper = helper;
1000 card->helper_allocated = true;
1001 }
1002 kparam_unblock_sysfs_write(helper_name);
1003
1004 kparam_block_sysfs_write(fw_name);
1005 if (lbs_fw_name) {
1006 char *fw_name = kstrdup(lbs_fw_name, GFP_KERNEL);
1007 if (!fw_name) {
1008 kparam_unblock_sysfs_write(fw_name);
1009 ret = -ENOMEM;
1010 goto free;
1011 }
1012 lbs_deb_sdio("overriding firmware: %s\n", lbs_fw_name);
1013 card->firmware = fw_name;
1014 card->firmware_allocated = true;
1015 }
1016 kparam_unblock_sysfs_write(fw_name);
1017
1018 sdio_claim_host(func); 977 sdio_claim_host(func);
1019 978
1020 ret = sdio_enable_func(func); 979 ret = sdio_enable_func(func);
@@ -1028,7 +987,7 @@ static int if_sdio_probe(struct sdio_func *func,
1028 /* For 1-bit transfers to the 8686 model, we need to enable the 987 /* For 1-bit transfers to the 8686 model, we need to enable the
1029 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0 988 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
1030 * bit to allow access to non-vendor registers. */ 989 * bit to allow access to non-vendor registers. */
1031 if ((card->model == IF_SDIO_MODEL_8686) && 990 if ((card->model == MODEL_8686) &&
1032 (host->caps & MMC_CAP_SDIO_IRQ) && 991 (host->caps & MMC_CAP_SDIO_IRQ) &&
1033 (host->ios.bus_width == MMC_BUS_WIDTH_1)) { 992 (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
1034 u8 reg; 993 u8 reg;
@@ -1091,8 +1050,8 @@ static int if_sdio_probe(struct sdio_func *func,
1091 * Get rx_unit if the chip is SD8688 or newer. 1050 * Get rx_unit if the chip is SD8688 or newer.
1092 * SD8385 & SD8686 do not have rx_unit. 1051 * SD8385 & SD8686 do not have rx_unit.
1093 */ 1052 */
1094 if ((card->model != IF_SDIO_MODEL_8385) 1053 if ((card->model != MODEL_8385)
1095 && (card->model != IF_SDIO_MODEL_8686)) 1054 && (card->model != MODEL_8686))
1096 card->rx_unit = if_sdio_read_rx_unit(card); 1055 card->rx_unit = if_sdio_read_rx_unit(card);
1097 else 1056 else
1098 card->rx_unit = 0; 1057 card->rx_unit = 0;
@@ -1108,7 +1067,7 @@ static int if_sdio_probe(struct sdio_func *func,
1108 /* 1067 /*
1109 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions 1068 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
1110 */ 1069 */
1111 if (card->model == IF_SDIO_MODEL_8688) { 1070 if (card->model == MODEL_8688) {
1112 struct cmd_header cmd; 1071 struct cmd_header cmd;
1113 1072
1114 memset(&cmd, 0, sizeof(cmd)); 1073 memset(&cmd, 0, sizeof(cmd));
@@ -1165,7 +1124,7 @@ static void if_sdio_remove(struct sdio_func *func)
1165 1124
1166 card = sdio_get_drvdata(func); 1125 card = sdio_get_drvdata(func);
1167 1126
1168 if (user_rmmod && (card->model == IF_SDIO_MODEL_8688)) { 1127 if (user_rmmod && (card->model == MODEL_8688)) {
1169 /* 1128 /*
1170 * FUNC_SHUTDOWN is required for SD8688 WLAN/BT 1129 * FUNC_SHUTDOWN is required for SD8688 WLAN/BT
1171 * multiple functions 1130 * multiple functions
diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/libertas/if_sdio.h
index 12179c1dc9c9..62fda3592f67 100644
--- a/drivers/net/wireless/libertas/if_sdio.h
+++ b/drivers/net/wireless/libertas/if_sdio.h
@@ -12,10 +12,6 @@
12#ifndef _LBS_IF_SDIO_H 12#ifndef _LBS_IF_SDIO_H
13#define _LBS_IF_SDIO_H 13#define _LBS_IF_SDIO_H
14 14
15#define IF_SDIO_MODEL_8385 0x04
16#define IF_SDIO_MODEL_8686 0x0b
17#define IF_SDIO_MODEL_8688 0x10
18
19#define IF_SDIO_IOPORT 0x00 15#define IF_SDIO_IOPORT 0x00
20 16
21#define IF_SDIO_H_INT_MASK 0x04 17#define IF_SDIO_H_INT_MASK 0x04
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index fe3f08028eb3..79bcb4e5d2ca 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -39,9 +39,6 @@ struct if_spi_card {
39 struct lbs_private *priv; 39 struct lbs_private *priv;
40 struct libertas_spi_platform_data *pdata; 40 struct libertas_spi_platform_data *pdata;
41 41
42 char helper_fw_name[IF_SPI_FW_NAME_MAX];
43 char main_fw_name[IF_SPI_FW_NAME_MAX];
44
45 /* The card ID and card revision, as reported by the hardware. */ 42 /* The card ID and card revision, as reported by the hardware. */
46 u16 card_id; 43 u16 card_id;
47 u8 card_rev; 44 u8 card_rev;
@@ -70,10 +67,28 @@ static void free_if_spi_card(struct if_spi_card *card)
70 kfree(card); 67 kfree(card);
71} 68}
72 69
73static struct chip_ident chip_id_to_device_name[] = { 70#define MODEL_8385 0x04
74 { .chip_id = 0x04, .name = 8385 }, 71#define MODEL_8686 0x0b
75 { .chip_id = 0x0b, .name = 8686 }, 72#define MODEL_8688 0x10
73
74static const struct lbs_fw_table fw_table[] = {
75 { MODEL_8385, "libertas/gspi8385_helper.bin", "libertas/gspi8385.bin" },
76 { MODEL_8385, "libertas/gspi8385_hlp.bin", "libertas/gspi8385.bin" },
77 { MODEL_8686, "libertas/gspi8686_v9_helper.bin", "libertas/gspi8686_v9.bin" },
78 { MODEL_8686, "libertas/gspi8686_hlp.bin", "libertas/gspi8686.bin" },
79 { MODEL_8688, "libertas/gspi8688_helper.bin", "libertas/gspi8688.bin" },
80 { 0, NULL, NULL }
76}; 81};
82MODULE_FIRMWARE("libertas/gspi8385_helper.bin");
83MODULE_FIRMWARE("libertas/gspi8385_hlp.bin");
84MODULE_FIRMWARE("libertas/gspi8385.bin");
85MODULE_FIRMWARE("libertas/gspi8686_v9_helper.bin");
86MODULE_FIRMWARE("libertas/gspi8686_v9.bin");
87MODULE_FIRMWARE("libertas/gspi8686_hlp.bin");
88MODULE_FIRMWARE("libertas/gspi8686.bin");
89MODULE_FIRMWARE("libertas/gspi8688_helper.bin");
90MODULE_FIRMWARE("libertas/gspi8688.bin");
91
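
The table above replaces the old per-model helper/firmware name fields with a sentinel-terminated list, so one model (here the 8385 and 8686) can carry several candidate image pairs that are tried in order. A sketch of the walk, simplified so that both names of a row must load together; try_load() is a hypothetical request_firmware() stand-in in which only the legacy names "exist":

    #include <stdio.h>
    #include <string.h>

    struct lbs_fw_table {
        int model;
        const char *helper;
        const char *fwname;
    };

    static const struct lbs_fw_table fw_table[] = {
        { 0x0b, "libertas/gspi8686_v9_helper.bin", "libertas/gspi8686_v9.bin" },
        { 0x0b, "libertas/gspi8686_hlp.bin",       "libertas/gspi8686.bin" },
        { 0, NULL, NULL }   /* sentinel: ends the walk without ARRAY_SIZE */
    };

    static int try_load(const char *name)
    {
        /* pretend only the legacy image names are installed */
        return (strstr(name, "_hlp") || strstr(name, "8686.bin")) ? 0 : -1;
    }

    int main(void)
    {
        const struct lbs_fw_table *iter;
        int model = 0x0b;

        for (iter = fw_table; iter->helper; iter++) {
            if (iter->model != model)
                continue;
            if (try_load(iter->helper) == 0 && try_load(iter->fwname) == 0) {
                printf("using %s + %s\n", iter->helper, iter->fwname);
                return 0;
            }
        }
        return 1;   /* no usable pair found */
    }
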
77 92
78/* 93/*
79 * SPI Interface Unit Routines 94 * SPI Interface Unit Routines
@@ -399,26 +414,20 @@ static int spu_init(struct if_spi_card *card, int use_dummy_writes)
399 * Firmware Loading 414 * Firmware Loading
400 */ 415 */
401 416
402static int if_spi_prog_helper_firmware(struct if_spi_card *card) 417static int if_spi_prog_helper_firmware(struct if_spi_card *card,
418 const struct firmware *firmware)
403{ 419{
404 int err = 0; 420 int err = 0;
405 const struct firmware *firmware = NULL;
406 int bytes_remaining; 421 int bytes_remaining;
407 const u8 *fw; 422 const u8 *fw;
408 u8 temp[HELPER_FW_LOAD_CHUNK_SZ]; 423 u8 temp[HELPER_FW_LOAD_CHUNK_SZ];
409 struct spi_device *spi = card->spi;
410 424
411 lbs_deb_enter(LBS_DEB_SPI); 425 lbs_deb_enter(LBS_DEB_SPI);
412 426
413 err = spu_set_interrupt_mode(card, 1, 0); 427 err = spu_set_interrupt_mode(card, 1, 0);
414 if (err) 428 if (err)
415 goto out; 429 goto out;
416 /* Get helper firmware image */ 430
417 err = request_firmware(&firmware, card->helper_fw_name, &spi->dev);
418 if (err) {
419 lbs_pr_err("request_firmware failed with err = %d\n", err);
420 goto out;
421 }
422 bytes_remaining = firmware->size; 431 bytes_remaining = firmware->size;
423 fw = firmware->data; 432 fw = firmware->data;
424 433
@@ -429,13 +438,13 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card)
429 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, 438 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG,
430 HELPER_FW_LOAD_CHUNK_SZ); 439 HELPER_FW_LOAD_CHUNK_SZ);
431 if (err) 440 if (err)
432 goto release_firmware; 441 goto out;
433 442
434 err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG, 443 err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG,
435 IF_SPI_HIST_CMD_DOWNLOAD_RDY, 444 IF_SPI_HIST_CMD_DOWNLOAD_RDY,
436 IF_SPI_HIST_CMD_DOWNLOAD_RDY); 445 IF_SPI_HIST_CMD_DOWNLOAD_RDY);
437 if (err) 446 if (err)
438 goto release_firmware; 447 goto out;
439 448
440 /* Feed the data into the command read/write port reg 449 /* Feed the data into the command read/write port reg
441 * in chunks of 64 bytes */ 450 * in chunks of 64 bytes */
@@ -446,16 +455,16 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card)
446 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, 455 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG,
447 temp, HELPER_FW_LOAD_CHUNK_SZ); 456 temp, HELPER_FW_LOAD_CHUNK_SZ);
448 if (err) 457 if (err)
449 goto release_firmware; 458 goto out;
450 459
451 /* Interrupt the boot code */ 460 /* Interrupt the boot code */
452 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); 461 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
453 if (err) 462 if (err)
454 goto release_firmware; 463 goto out;
455 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, 464 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
456 IF_SPI_CIC_CMD_DOWNLOAD_OVER); 465 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
457 if (err) 466 if (err)
458 goto release_firmware; 467 goto out;
459 bytes_remaining -= HELPER_FW_LOAD_CHUNK_SZ; 468 bytes_remaining -= HELPER_FW_LOAD_CHUNK_SZ;
460 fw += HELPER_FW_LOAD_CHUNK_SZ; 469 fw += HELPER_FW_LOAD_CHUNK_SZ;
461 } 470 }
@@ -465,18 +474,16 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card)
465 * bootloader. This completes the helper download. */ 474 * bootloader. This completes the helper download. */
466 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK); 475 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK);
467 if (err) 476 if (err)
468 goto release_firmware; 477 goto out;
469 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); 478 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
470 if (err) 479 if (err)
471 goto release_firmware; 480 goto out;
472 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, 481 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
473 IF_SPI_CIC_CMD_DOWNLOAD_OVER); 482 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
474 goto release_firmware; 483 goto out;
475 484
476 lbs_deb_spi("waiting for helper to boot...\n"); 485 lbs_deb_spi("waiting for helper to boot...\n");
477 486
478release_firmware:
479 release_firmware(firmware);
480out: 487out:
481 if (err) 488 if (err)
482 lbs_pr_err("failed to load helper firmware (err=%d)\n", err); 489 lbs_pr_err("failed to load helper firmware (err=%d)\n", err);
@@ -523,13 +530,12 @@ static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card,
523 return len; 530 return len;
524} 531}
525 532
526static int if_spi_prog_main_firmware(struct if_spi_card *card) 533static int if_spi_prog_main_firmware(struct if_spi_card *card,
534 const struct firmware *firmware)
527{ 535{
528 int len, prev_len; 536 int len, prev_len;
529 int bytes, crc_err = 0, err = 0; 537 int bytes, crc_err = 0, err = 0;
530 const struct firmware *firmware = NULL;
531 const u8 *fw; 538 const u8 *fw;
532 struct spi_device *spi = card->spi;
533 u16 num_crc_errs; 539 u16 num_crc_errs;
534 540
535 lbs_deb_enter(LBS_DEB_SPI); 541 lbs_deb_enter(LBS_DEB_SPI);
@@ -538,19 +544,11 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
538 if (err) 544 if (err)
539 goto out; 545 goto out;
540 546
541 /* Get firmware image */
542 err = request_firmware(&firmware, card->main_fw_name, &spi->dev);
543 if (err) {
544 lbs_pr_err("%s: can't get firmware '%s' from kernel. "
545 "err = %d\n", __func__, card->main_fw_name, err);
546 goto out;
547 }
548
549 err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0); 547 err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0);
550 if (err) { 548 if (err) {
551 lbs_pr_err("%s: timed out waiting for initial " 549 lbs_pr_err("%s: timed out waiting for initial "
552 "scratch reg = 0\n", __func__); 550 "scratch reg = 0\n", __func__);
553 goto release_firmware; 551 goto out;
554 } 552 }
555 553
556 num_crc_errs = 0; 554 num_crc_errs = 0;
@@ -560,7 +558,7 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
560 while ((len = if_spi_prog_main_firmware_check_len(card, &crc_err))) { 558 while ((len = if_spi_prog_main_firmware_check_len(card, &crc_err))) {
561 if (len < 0) { 559 if (len < 0) {
562 err = len; 560 err = len;
563 goto release_firmware; 561 goto out;
564 } 562 }
565 if (bytes < 0) { 563 if (bytes < 0) {
566 /* If there are no more bytes left, we would normally 564 /* If there are no more bytes left, we would normally
@@ -575,7 +573,7 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
575 lbs_pr_err("Too many CRC errors encountered " 573 lbs_pr_err("Too many CRC errors encountered "
576 "in firmware load.\n"); 574 "in firmware load.\n");
577 err = -EIO; 575 err = -EIO;
578 goto release_firmware; 576 goto out;
579 } 577 }
580 } else { 578 } else {
581 /* Previous transfer succeeded. Advance counters. */ 579 /* Previous transfer succeeded. Advance counters. */
@@ -590,15 +588,15 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
590 588
591 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); 589 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
592 if (err) 590 if (err)
593 goto release_firmware; 591 goto out;
594 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, 592 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG,
595 card->cmd_buffer, len); 593 card->cmd_buffer, len);
596 if (err) 594 if (err)
597 goto release_firmware; 595 goto out;
598 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG , 596 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG ,
599 IF_SPI_CIC_CMD_DOWNLOAD_OVER); 597 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
600 if (err) 598 if (err)
601 goto release_firmware; 599 goto out;
602 prev_len = len; 600 prev_len = len;
603 } 601 }
604 if (bytes > prev_len) { 602 if (bytes > prev_len) {
@@ -611,12 +609,9 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
611 SUCCESSFUL_FW_DOWNLOAD_MAGIC); 609 SUCCESSFUL_FW_DOWNLOAD_MAGIC);
612 if (err) { 610 if (err) {
613 lbs_pr_err("failed to confirm the firmware download\n"); 611 lbs_pr_err("failed to confirm the firmware download\n");
614 goto release_firmware; 612 goto out;
615 } 613 }
616 614
617release_firmware:
618 release_firmware(firmware);
619
620out: 615out:
621 if (err) 616 if (err)
622 lbs_pr_err("failed to load firmware (err=%d)\n", err); 617 lbs_pr_err("failed to load firmware (err=%d)\n", err);
@@ -800,14 +795,16 @@ static int lbs_spi_thread(void *data)
800 goto err; 795 goto err;
801 } 796 }
802 797
803 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) 798 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
804 err = if_spi_c2h_cmd(card); 799 err = if_spi_c2h_cmd(card);
805 if (err) 800 if (err)
806 goto err; 801 goto err;
807 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) 802 }
803 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
808 err = if_spi_c2h_data(card); 804 err = if_spi_c2h_data(card);
809 if (err) 805 if (err)
810 goto err; 806 goto err;
807 }
811 808
812 /* workaround: in PS mode, the card does not set the Command 809 /* workaround: in PS mode, the card does not set the Command
813 * Download Ready bit, but it sets TX Download Ready. */ 810 * Download Ready bit, but it sets TX Download Ready. */
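
The lbs_spi_thread hunk above is a plain missing-braces fix: without braces, only the first statement belonged to each if (hiStatus & ...), and the indented error check ran unconditionally, whatever the indentation suggested. A two-line demonstration of the pitfall (modern compilers flag the first form with -Wmisleading-indentation):

    #include <stdio.h>

    int main(void)
    {
        int ready = 0, err = -5;    /* stale error from an earlier step */

        if (ready)
            err = 0;                /* would clear err if ready */
            if (err)                /* runs even though !ready */
                printf("bails out on a stale err (%d)\n", err);

        if (ready) {                /* braced form: both lines guarded */
            err = 0;
            if (err)
                printf("never reached when !ready\n");
        }
        return 0;
    }
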
@@ -886,37 +883,16 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
886 * SPI callbacks 883 * SPI callbacks
887 */ 884 */
888 885
889static int if_spi_calculate_fw_names(u16 card_id,
890 char *helper_fw, char *main_fw)
891{
892 int i;
893 for (i = 0; i < ARRAY_SIZE(chip_id_to_device_name); ++i) {
894 if (card_id == chip_id_to_device_name[i].chip_id)
895 break;
896 }
897 if (i == ARRAY_SIZE(chip_id_to_device_name)) {
898 lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id);
899 return -EAFNOSUPPORT;
900 }
901 snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d_hlp.bin",
902 chip_id_to_device_name[i].name);
903 snprintf(main_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d.bin",
904 chip_id_to_device_name[i].name);
905 return 0;
906}
907MODULE_FIRMWARE("libertas/gspi8385_hlp.bin");
908MODULE_FIRMWARE("libertas/gspi8385.bin");
909MODULE_FIRMWARE("libertas/gspi8686_hlp.bin");
910MODULE_FIRMWARE("libertas/gspi8686.bin");
911
912static int __devinit if_spi_probe(struct spi_device *spi) 886static int __devinit if_spi_probe(struct spi_device *spi)
913{ 887{
914 struct if_spi_card *card; 888 struct if_spi_card *card;
915 struct lbs_private *priv = NULL; 889 struct lbs_private *priv = NULL;
916 struct libertas_spi_platform_data *pdata = spi->dev.platform_data; 890 struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
917 int err = 0; 891 int err = 0, i;
918 u32 scratch; 892 u32 scratch;
919 struct sched_param param = { .sched_priority = 1 }; 893 struct sched_param param = { .sched_priority = 1 };
894 const struct firmware *helper = NULL;
895 const struct firmware *mainfw = NULL;
920 896
921 lbs_deb_enter(LBS_DEB_SPI); 897 lbs_deb_enter(LBS_DEB_SPI);
922 898
@@ -961,10 +937,25 @@ static int __devinit if_spi_probe(struct spi_device *spi)
961 lbs_deb_spi("Firmware is already loaded for " 937 lbs_deb_spi("Firmware is already loaded for "
962 "Marvell WLAN 802.11 adapter\n"); 938 "Marvell WLAN 802.11 adapter\n");
963 else { 939 else {
964 err = if_spi_calculate_fw_names(card->card_id, 940 /* Check if we support this card */
965 card->helper_fw_name, card->main_fw_name); 941 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
966 if (err) 942 if (card->card_id == fw_table[i].model)
943 break;
944 }
945 if (i == ARRAY_SIZE(fw_table)) {
946 lbs_pr_err("Unsupported chip_id: 0x%02x\n",
947 card->card_id);
948 err = -ENODEV;
967 goto free_card; 949 goto free_card;
950 }
951
952 err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
953 card->card_id, &fw_table[0], &helper,
954 &mainfw);
955 if (err) {
956 lbs_pr_err("failed to find firmware (%d)\n", err);
957 goto free_card;
958 }
968 959
969 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter " 960 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
970 "(chip_id = 0x%04x, chip_rev = 0x%02x) " 961 "(chip_id = 0x%04x, chip_rev = 0x%02x) "
@@ -973,10 +964,10 @@ static int __devinit if_spi_probe(struct spi_device *spi)
973 card->card_id, card->card_rev, 964 card->card_id, card->card_rev,
974 spi->master->bus_num, spi->chip_select, 965 spi->master->bus_num, spi->chip_select,
975 spi->max_speed_hz); 966 spi->max_speed_hz);
976 err = if_spi_prog_helper_firmware(card); 967 err = if_spi_prog_helper_firmware(card, helper);
977 if (err) 968 if (err)
978 goto free_card; 969 goto free_card;
979 err = if_spi_prog_main_firmware(card); 970 err = if_spi_prog_main_firmware(card, mainfw);
980 if (err) 971 if (err)
981 goto free_card; 972 goto free_card;
982 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n"); 973 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
@@ -1044,6 +1035,11 @@ remove_card:
1044free_card: 1035free_card:
1045 free_if_spi_card(card); 1036 free_if_spi_card(card);
1046out: 1037out:
1038 if (helper)
1039 release_firmware(helper);
1040 if (mainfw)
1041 release_firmware(mainfw);
1042
1047 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); 1043 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1048 return err; 1044 return err;
1049} 1045}
diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h
index f87eec410848..8b1417d3b71b 100644
--- a/drivers/net/wireless/libertas/if_spi.h
+++ b/drivers/net/wireless/libertas/if_spi.h
@@ -25,11 +25,6 @@
25 25
26#define IF_SPI_FW_NAME_MAX 30 26#define IF_SPI_FW_NAME_MAX 30
27 27
28struct chip_ident {
29 u16 chip_id;
30 u16 name;
31};
32
33#define MAX_MAIN_FW_LOAD_CRC_ERR 10 28#define MAX_MAIN_FW_LOAD_CRC_ERR 10
34 29
35/* Chunk size when loading the helper firmware */ 30/* Chunk size when loading the helper firmware */
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 3ff61063671a..e906616232a2 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -26,15 +26,25 @@
26 26
27#define MESSAGE_HEADER_LEN 4 27#define MESSAGE_HEADER_LEN 4
28 28
29static char *lbs_fw_name = "usb8388.bin"; 29static char *lbs_fw_name = NULL;
30module_param_named(fw_name, lbs_fw_name, charp, 0644); 30module_param_named(fw_name, lbs_fw_name, charp, 0644);
31 31
32MODULE_FIRMWARE("libertas/usb8388_v9.bin");
33MODULE_FIRMWARE("libertas/usb8388_v5.bin");
34MODULE_FIRMWARE("libertas/usb8388.bin");
35MODULE_FIRMWARE("libertas/usb8682.bin");
32MODULE_FIRMWARE("usb8388.bin"); 36MODULE_FIRMWARE("usb8388.bin");
33 37
38enum {
39 MODEL_UNKNOWN = 0x0,
40 MODEL_8388 = 0x1,
41 MODEL_8682 = 0x2
42};
43
34static struct usb_device_id if_usb_table[] = { 44static struct usb_device_id if_usb_table[] = {
35 /* Enter the device signature inside */ 45 /* Enter the device signature inside */
36 { USB_DEVICE(0x1286, 0x2001) }, 46 { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 },
37 { USB_DEVICE(0x05a3, 0x8388) }, 47 { USB_DEVICE(0x05a3, 0x8388), .driver_info = MODEL_8388 },
38 {} /* Terminating entry */ 48 {} /* Terminating entry */
39}; 49};
40 50
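
The USB ID table now carries the chip model in .driver_info, so if_usb_probe() can read it straight from the entry that matched rather than guessing from descriptors. A sketch of the pattern with plain C stand-ins (struct id below is not the real usb_device_id):

    #include <stdio.h>

    enum { MODEL_UNKNOWN, MODEL_8388, MODEL_8682 };

    struct id { unsigned vendor, product; unsigned long driver_info; };

    static const struct id table[] = {
        { 0x1286, 0x2001, MODEL_8388 },
        { 0x05a3, 0x8388, MODEL_8388 },
        { 0, 0, 0 }                     /* terminating entry */
    };

    static const struct id *match(unsigned v, unsigned p)
    {
        const struct id *i;

        for (i = table; i->vendor; i++)
            if (i->vendor == v && i->product == p)
                return i;
        return NULL;
    }

    int main(void)
    {
        const struct id *id = match(0x1286, 0x2001);

        if (id)     /* probe() would stash this in cardp->model */
            printf("model = %lu\n", id->driver_info);
        return !id;
    }
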
@@ -66,6 +76,8 @@ static ssize_t if_usb_firmware_set(struct device *dev,
66 struct if_usb_card *cardp = priv->card; 76 struct if_usb_card *cardp = priv->card;
67 int ret; 77 int ret;
68 78
79 BUG_ON(buf == NULL);
80
69 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW); 81 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
70 if (ret == 0) 82 if (ret == 0)
71 return count; 83 return count;
@@ -91,6 +103,8 @@ static ssize_t if_usb_boot2_set(struct device *dev,
91 struct if_usb_card *cardp = priv->card; 103 struct if_usb_card *cardp = priv->card;
92 int ret; 104 int ret;
93 105
106 BUG_ON(buf == NULL);
107
94 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2); 108 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
95 if (ret == 0) 109 if (ret == 0)
96 return count; 110 return count;
@@ -244,6 +258,7 @@ static int if_usb_probe(struct usb_interface *intf,
244 init_waitqueue_head(&cardp->fw_wq); 258 init_waitqueue_head(&cardp->fw_wq);
245 259
246 cardp->udev = udev; 260 cardp->udev = udev;
261 cardp->model = (uint32_t) id->driver_info;
247 iface_desc = intf->cur_altsetting; 262 iface_desc = intf->cur_altsetting;
248 263
249 lbs_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X" 264 lbs_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X"
@@ -924,6 +939,38 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp,
924 return ret; 939 return ret;
925} 940}
926 941
942/* table of firmware file names */
943static const struct {
944 u32 model;
945 const char *fwname;
946} fw_table[] = {
947 { MODEL_8388, "libertas/usb8388_v9.bin" },
948 { MODEL_8388, "libertas/usb8388_v5.bin" },
949 { MODEL_8388, "libertas/usb8388.bin" },
950 { MODEL_8388, "usb8388.bin" },
951 { MODEL_8682, "libertas/usb8682.bin" }
952};
953
954static int get_fw(struct if_usb_card *cardp, const char *fwname)
955{
956 int i;
957
958 /* Try user-specified firmware first */
959 if (fwname)
960 return request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
961
962 /* Otherwise search for firmware to use */
963 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
964 if (fw_table[i].model != cardp->model)
965 continue;
966 if (request_firmware(&cardp->fw, fw_table[i].fwname,
967 &cardp->udev->dev) == 0)
968 return 0;
969 }
970
971 return -ENOENT;
972}
973
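
get_fw() above gives an explicit fw_name module parameter absolute precedence, with no table fallback if it fails, and otherwise takes the first entry for the card's model that actually loads. A self-contained sketch of that precedence; load() is a hypothetical request_firmware() stand-in in which only the legacy name "exists":

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct fw_entry { int model; const char *name; };

    static const struct fw_entry names[] = {
        { 1, "libertas/usb8388_v9.bin" },
        { 1, "libertas/usb8388_v5.bin" },
        { 1, "usb8388.bin" },
        { 2, "libertas/usb8682.bin" },
    };

    static int load(const char *name)
    {
        return strcmp(name, "usb8388.bin") == 0 ? 0 : -ENOENT;
    }

    static int get_fw(int model, const char *user_name)
    {
        size_t i;

        if (user_name)      /* explicit fw_name wins; no table fallback */
            return load(user_name);

        for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            if (names[i].model != model)
                continue;
            if (load(names[i].name) == 0)   /* first hit wins */
                return 0;
        }
        return -ENOENT;
    }

    int main(void)
    {
        printf("auto: %d, override: %d\n",
               get_fw(1, NULL), get_fw(1, "does-not-exist.bin"));
        return 0;
    }
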
927static int __if_usb_prog_firmware(struct if_usb_card *cardp, 974static int __if_usb_prog_firmware(struct if_usb_card *cardp,
928 const char *fwname, int cmd) 975 const char *fwname, int cmd)
929{ 976{
@@ -933,10 +980,9 @@ static int __if_usb_prog_firmware(struct if_usb_card *cardp,
933 980
934 lbs_deb_enter(LBS_DEB_USB); 981 lbs_deb_enter(LBS_DEB_USB);
935 982
936 ret = request_firmware(&cardp->fw, fwname, &cardp->udev->dev); 983 ret = get_fw(cardp, fwname);
937 if (ret < 0) { 984 if (ret) {
938 lbs_pr_err("request_firmware() failed with %#x\n", ret); 985 lbs_pr_err("failed to find firmware (%d)\n", ret);
939 lbs_pr_err("firmware %s not found\n", fwname);
940 goto done; 986 goto done;
941 } 987 }
942 988
diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h
index 5ba0aee0eb2f..d819e7e3c9aa 100644
--- a/drivers/net/wireless/libertas/if_usb.h
+++ b/drivers/net/wireless/libertas/if_usb.h
@@ -43,6 +43,7 @@ struct bootcmdresp
43/** USB card description structure*/ 43/** USB card description structure*/
44struct if_usb_card { 44struct if_usb_card {
45 struct usb_device *udev; 45 struct usb_device *udev;
46 uint32_t model; /* MODEL_* */
46 struct urb *rx_urb, *tx_urb; 47 struct urb *rx_urb, *tx_urb;
47 struct lbs_private *priv; 48 struct lbs_private *priv;
48 49
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 24958a86747b..47ce5a6ba120 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -1047,6 +1047,111 @@ void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx)
1047} 1047}
1048EXPORT_SYMBOL_GPL(lbs_notify_command_response); 1048EXPORT_SYMBOL_GPL(lbs_notify_command_response);
1049 1049
1050/**
1051 * @brief Retrieves two-stage firmware
1052 *
1053 * @param dev A pointer to device structure
1054 * @param user_helper User-defined helper firmware file
1055 * @param user_mainfw User-defined main firmware file
1056 * @param card_model Bus-specific card model ID used to filter firmware table
1057 * elements
1058 * @param fw_table Table of firmware file names and device model numbers
1059 * terminated by an entry with a NULL helper name
1060 * @param helper On success, the helper firmware; caller must free
1061 * @param mainfw On success, the main firmware; caller must free
1062 *
1063 * @return 0 on success, non-zero on failure
1064 */
1065int lbs_get_firmware(struct device *dev, const char *user_helper,
1066 const char *user_mainfw, u32 card_model,
1067 const struct lbs_fw_table *fw_table,
1068 const struct firmware **helper,
1069 const struct firmware **mainfw)
1070{
1071 const struct lbs_fw_table *iter;
1072 int ret;
1073
1074 BUG_ON(helper == NULL);
1075 BUG_ON(mainfw == NULL);
1076
1077 /* Try user-specified firmware first */
1078 if (user_helper) {
1079 ret = request_firmware(helper, user_helper, dev);
1080 if (ret) {
1081 lbs_pr_err("couldn't find helper firmware %s",
1082 user_helper);
1083 goto fail;
1084 }
1085 }
1086 if (user_mainfw) {
1087 ret = request_firmware(mainfw, user_mainfw, dev);
1088 if (ret) {
1089 lbs_pr_err("couldn't find main firmware %s",
1090 user_mainfw);
1091 goto fail;
1092 }
1093 }
1094
1095 if (*helper && *mainfw)
1096 return 0;
1097
1098 /* Otherwise search for firmware to use. If neither the helper nor
1099 * the main firmware was specified by the user, then we need to
1100 * make sure that found helper & main are from the same entry in
1101 * fw_table.
1102 */
1103 iter = fw_table;
1104 while (iter && iter->helper) {
1105 if (iter->model != card_model)
1106 goto next;
1107
1108 if (*helper == NULL) {
1109 ret = request_firmware(helper, iter->helper, dev);
1110 if (ret)
1111 goto next;
1112
1113 /* If the device has one-stage firmware (ie cf8305) and
1114 * we've got it then we don't need to bother with the
1115 * main firmware.
1116 */
1117 if (iter->fwname == NULL)
1118 return 0;
1119 }
1120
1121 if (*mainfw == NULL) {
1122 ret = request_firmware(mainfw, iter->fwname, dev);
1123 if (ret && !user_helper) {
1124 /* Clear the helper if it wasn't user-specified
1125 * and the main firmware load failed, to ensure
1126 * we don't have mismatched firmware pairs.
1127 */
1128 release_firmware(*helper);
1129 *helper = NULL;
1130 }
1131 }
1132
1133 if (*helper && *mainfw)
1134 return 0;
1135
1136 next:
1137 iter++;
1138 }
1139
1140 fail:
1141 /* Failed */
1142 if (*helper) {
1143 release_firmware(*helper);
1144 *helper = NULL;
1145 }
1146 if (*mainfw) {
1147 release_firmware(*mainfw);
1148 *mainfw = NULL;
1149 }
1150
1151 return -ENOENT;
1152}
1153EXPORT_SYMBOL_GPL(lbs_get_firmware);
1154
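
The subtle part of lbs_get_firmware() is the matched-pair rule spelled out in its comment: when a row's main image fails and the helper was not user-specified, the helper acquired from that row is dropped before moving on, so helper and main can never be mixed across table rows. A reduced sketch of just that invariant; acquire()/release() are stand-ins, and the first row's main image is made to "fail":

    #include <stdio.h>
    #include <string.h>

    struct row { const char *helper; const char *mainfw; };

    static const struct row table[] = {
        { "hlp_v9.bin", "main_v9.bin" },    /* main_v9 "missing" below */
        { "hlp_v5.bin", "main_v5.bin" },
        { NULL, NULL }
    };

    static int acquire(const char **slot, const char *name)
    {
        if (strcmp(name, "main_v9.bin") == 0)   /* simulate a missing file */
            return -1;
        *slot = name;
        return 0;
    }

    static void release(const char **slot) { *slot = NULL; }

    int main(void)
    {
        const char *helper = NULL, *mainfw = NULL;
        const struct row *iter;

        for (iter = table; iter->helper; iter++) {
            if (!helper && acquire(&helper, iter->helper))
                continue;
            if (!mainfw && acquire(&mainfw, iter->mainfw)) {
                release(&helper);   /* keep the pair from one row */
                continue;
            }
            if (helper && mainfw)
                break;
        }
        printf("helper=%s main=%s\n", helper, mainfw);  /* the v5 pair */
        return !(helper && mainfw);
    }
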
1050static int __init lbs_init_module(void) 1155static int __init lbs_init_module(void)
1051{ 1156{
1052 lbs_deb_enter(LBS_DEB_MAIN); 1157 lbs_deb_enter(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 41a4f214ade1..ba7d96584cb6 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -54,7 +54,7 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
54/** 54/**
55 * if_usb_write_bulk_callback - call back to handle URB status 55 * if_usb_write_bulk_callback - call back to handle URB status
56 * 56 *
57 * @param urb pointer to urb structure 57 * @param urb pointer to urb structure
58 */ 58 */
59static void if_usb_write_bulk_callback(struct urb *urb) 59static void if_usb_write_bulk_callback(struct urb *urb)
60{ 60{
@@ -178,16 +178,19 @@ static int if_usb_probe(struct usb_interface *intf,
178 le16_to_cpu(endpoint->wMaxPacketSize); 178 le16_to_cpu(endpoint->wMaxPacketSize);
179 cardp->ep_in = usb_endpoint_num(endpoint); 179 cardp->ep_in = usb_endpoint_num(endpoint);
180 180
181 lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in); 181 lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n",
182 lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size); 182 cardp->ep_in);
183 lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n",
184 cardp->ep_in_size);
183 } else if (usb_endpoint_is_bulk_out(endpoint)) { 185 } else if (usb_endpoint_is_bulk_out(endpoint)) {
184 cardp->ep_out_size = 186 cardp->ep_out_size =
185 le16_to_cpu(endpoint->wMaxPacketSize); 187 le16_to_cpu(endpoint->wMaxPacketSize);
186 cardp->ep_out = usb_endpoint_num(endpoint); 188 cardp->ep_out = usb_endpoint_num(endpoint);
187 189
188 lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out); 190 lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n",
191 cardp->ep_out);
189 lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n", 192 lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n",
190 cardp->ep_out_size); 193 cardp->ep_out_size);
191 } 194 }
192 } 195 }
193 if (!cardp->ep_out_size || !cardp->ep_in_size) { 196 if (!cardp->ep_out_size || !cardp->ep_in_size) {
@@ -318,10 +321,12 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
318 321
319 if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) { 322 if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) {
320 lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n"); 323 lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n");
321 lbtf_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n", 324 lbtf_deb_usb2(&cardp->udev->dev,
322 cardp->fwseqnum, cardp->totalbytes); 325 "seqnum = %d totalbytes = %d\n",
326 cardp->fwseqnum, cardp->totalbytes);
323 } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) { 327 } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
324 lbtf_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n"); 328 lbtf_deb_usb2(&cardp->udev->dev,
329 "Host has finished FW downloading\n");
325 lbtf_deb_usb2(&cardp->udev->dev, "Downloading FW JUMP BLOCK\n"); 330 lbtf_deb_usb2(&cardp->udev->dev, "Downloading FW JUMP BLOCK\n");
326 331
327 /* Host has finished FW downloading 332 /* Host has finished FW downloading
@@ -367,7 +372,7 @@ EXPORT_SYMBOL_GPL(if_usb_reset_device);
367/** 372/**
368 * usb_tx_block - transfer data to the device 373 * usb_tx_block - transfer data to the device
369 * 374 *
370 * @priv pointer to struct lbtf_private 375 * @priv pointer to struct lbtf_private
371 * @payload pointer to payload data 376 * @payload pointer to payload data
372 * @nb data length 377 * @nb data length
373 * @data non-zero for data, zero for commands 378 * @data non-zero for data, zero for commands
@@ -400,7 +405,8 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
400 urb->transfer_flags |= URB_ZERO_PACKET; 405 urb->transfer_flags |= URB_ZERO_PACKET;
401 406
402 if (usb_submit_urb(urb, GFP_ATOMIC)) { 407 if (usb_submit_urb(urb, GFP_ATOMIC)) {
403 lbtf_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret); 408 lbtf_deb_usbd(&cardp->udev->dev,
409 "usb_submit_urb failed: %d\n", ret);
404 goto tx_ret; 410 goto tx_ret;
405 } 411 }
406 412
@@ -438,10 +444,12 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
438 444
439 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; 445 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
440 446
441 lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); 447 lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
448 cardp->rx_urb);
442 ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC); 449 ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
443 if (ret) { 450 if (ret) {
444 lbtf_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret); 451 lbtf_deb_usbd(&cardp->udev->dev,
452 "Submit Rx URB failed: %d\n", ret);
445 kfree_skb(skb); 453 kfree_skb(skb);
446 cardp->rx_skb = NULL; 454 cardp->rx_skb = NULL;
447 lbtf_deb_leave(LBTF_DEB_USB); 455 lbtf_deb_leave(LBTF_DEB_USB);
@@ -522,14 +530,14 @@ static void if_usb_receive_fwload(struct urb *urb)
522 } 530 }
523 } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) { 531 } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) {
524 pr_info("boot cmd response cmd_tag error (%d)\n", 532 pr_info("boot cmd response cmd_tag error (%d)\n",
525 bcmdresp.cmd); 533 bcmdresp.cmd);
526 } else if (bcmdresp.result != BOOT_CMD_RESP_OK) { 534 } else if (bcmdresp.result != BOOT_CMD_RESP_OK) {
527 pr_info("boot cmd response result error (%d)\n", 535 pr_info("boot cmd response result error (%d)\n",
528 bcmdresp.result); 536 bcmdresp.result);
529 } else { 537 } else {
530 cardp->bootcmdresp = 1; 538 cardp->bootcmdresp = 1;
531 lbtf_deb_usbd(&cardp->udev->dev, 539 lbtf_deb_usbd(&cardp->udev->dev,
532 "Received valid boot command response\n"); 540 "Received valid boot command response\n");
533 } 541 }
534 542
535 kfree_skb(skb); 543 kfree_skb(skb);
@@ -541,19 +549,23 @@ static void if_usb_receive_fwload(struct urb *urb)
541 syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader), 549 syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader),
542 GFP_ATOMIC); 550 GFP_ATOMIC);
543 if (!syncfwheader) { 551 if (!syncfwheader) {
544 lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n"); 552 lbtf_deb_usbd(&cardp->udev->dev,
553 "Failure to allocate syncfwheader\n");
545 kfree_skb(skb); 554 kfree_skb(skb);
546 lbtf_deb_leave(LBTF_DEB_USB); 555 lbtf_deb_leave(LBTF_DEB_USB);
547 return; 556 return;
548 } 557 }
549 558
550 if (!syncfwheader->cmd) { 559 if (!syncfwheader->cmd) {
551 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n"); 560 lbtf_deb_usb2(&cardp->udev->dev,
552 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n", 561 "FW received Blk with correct CRC\n");
553 le32_to_cpu(syncfwheader->seqnum)); 562 lbtf_deb_usb2(&cardp->udev->dev,
563 "FW received Blk seqnum = %d\n",
564 le32_to_cpu(syncfwheader->seqnum));
554 cardp->CRC_OK = 1; 565 cardp->CRC_OK = 1;
555 } else { 566 } else {
556 lbtf_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n"); 567 lbtf_deb_usbd(&cardp->udev->dev,
568 "FW received Blk with CRC error\n");
557 cardp->CRC_OK = 0; 569 cardp->CRC_OK = 0;
558 } 570 }
559 571
@@ -666,7 +678,8 @@ static void if_usb_receive(struct urb *urb)
666 { 678 {
667 /* Event cause handling */ 679 /* Event cause handling */
668 u32 event_cause = le32_to_cpu(pkt[1]); 680 u32 event_cause = le32_to_cpu(pkt[1]);
669 lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event_cause); 681 lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n",
682 event_cause);
670 683
671 /* Icky undocumented magic special case */ 684 /* Icky undocumented magic special case */
672 if (event_cause & 0xffff0000) { 685 if (event_cause & 0xffff0000) {
@@ -689,7 +702,7 @@ static void if_usb_receive(struct urb *urb)
689 } 702 }
690 default: 703 default:
691 lbtf_deb_usbd(&cardp->udev->dev, 704 lbtf_deb_usbd(&cardp->udev->dev,
692 "libertastf: unknown command type 0x%X\n", recvtype); 705 "libertastf: unknown command type 0x%X\n", recvtype);
693 kfree_skb(skb); 706 kfree_skb(skb);
694 break; 707 break;
695 } 708 }
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 86fa8abdd66f..92b486d46eb9 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -9,7 +9,8 @@
9 9
10/* 10/*
11 * TODO: 11 * TODO:
12 * - IBSS mode simulation (Beacon transmission with competition for "air time") 12 * - Add TSF sync and fix IBSS beacon transmission by adding
13 * competition for "air time" at TBTT
13 * - RX filtering based on filter configuration (data->rx_filter) 14 * - RX filtering based on filter configuration (data->rx_filter)
14 */ 15 */
15 16
@@ -600,6 +601,18 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
600} 601}
601 602
602 603
604static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
605 struct ieee80211_vif *vif,
606 enum nl80211_iftype newtype)
607{
608 wiphy_debug(hw->wiphy,
609 "%s (old type=%d, new type=%d, mac_addr=%pM)\n",
610 __func__, vif->type, newtype, vif->addr);
611 hwsim_check_magic(vif);
612
613 return 0;
614}
615
603static void mac80211_hwsim_remove_interface( 616static void mac80211_hwsim_remove_interface(
604 struct ieee80211_hw *hw, struct ieee80211_vif *vif) 617 struct ieee80211_hw *hw, struct ieee80211_vif *vif)
605{ 618{
@@ -620,7 +633,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
620 hwsim_check_magic(vif); 633 hwsim_check_magic(vif);
621 634
622 if (vif->type != NL80211_IFTYPE_AP && 635 if (vif->type != NL80211_IFTYPE_AP &&
623 vif->type != NL80211_IFTYPE_MESH_POINT) 636 vif->type != NL80211_IFTYPE_MESH_POINT &&
637 vif->type != NL80211_IFTYPE_ADHOC)
624 return; 638 return;
625 639
626 skb = ieee80211_beacon_get(hw, vif); 640 skb = ieee80211_beacon_get(hw, vif);
@@ -1025,6 +1039,7 @@ static struct ieee80211_ops mac80211_hwsim_ops =
1025 .start = mac80211_hwsim_start, 1039 .start = mac80211_hwsim_start,
1026 .stop = mac80211_hwsim_stop, 1040 .stop = mac80211_hwsim_stop,
1027 .add_interface = mac80211_hwsim_add_interface, 1041 .add_interface = mac80211_hwsim_add_interface,
1042 .change_interface = mac80211_hwsim_change_interface,
1028 .remove_interface = mac80211_hwsim_remove_interface, 1043 .remove_interface = mac80211_hwsim_remove_interface,
1029 .config = mac80211_hwsim_config, 1044 .config = mac80211_hwsim_config,
1030 .configure_filter = mac80211_hwsim_configure_filter, 1045 .configure_filter = mac80211_hwsim_configure_filter,
@@ -1295,6 +1310,7 @@ static int __init init_mac80211_hwsim(void)
1295 hw->wiphy->interface_modes = 1310 hw->wiphy->interface_modes =
1296 BIT(NL80211_IFTYPE_STATION) | 1311 BIT(NL80211_IFTYPE_STATION) |
1297 BIT(NL80211_IFTYPE_AP) | 1312 BIT(NL80211_IFTYPE_AP) |
1313 BIT(NL80211_IFTYPE_ADHOC) |
1298 BIT(NL80211_IFTYPE_MESH_POINT); 1314 BIT(NL80211_IFTYPE_MESH_POINT);
1299 1315
1300 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1316 hw->flags = IEEE80211_HW_MFP_CAPABLE |
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 077baa86756b..b4772c1c6135 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -762,14 +762,17 @@ int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate)
762 case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */ 762 case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
763 case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */ 763 case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
764 for (i = 0; i < BITRATE_TABLE_SIZE; i++) 764 for (i = 0; i < BITRATE_TABLE_SIZE; i++)
765 if (bitrate_table[i].intersil_txratectrl == val) 765 if (bitrate_table[i].intersil_txratectrl == val) {
766 *bitrate = bitrate_table[i].bitrate * 100000;
766 break; 767 break;
768 }
767 769
768 if (i >= BITRATE_TABLE_SIZE) 770 if (i >= BITRATE_TABLE_SIZE) {
769 printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n", 771 printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
770 priv->ndev->name, val); 772 priv->ndev->name, val);
773 err = -EIO;
774 }
771 775
772 *bitrate = bitrate_table[i].bitrate * 100000;
773 break; 776 break;
774 default: 777 default:
775 BUG(); 778 BUG();
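
The orinoco hunk above fixes an out-of-bounds read: the old code assigned *bitrate from bitrate_table[i] after the loop even when nothing matched, i.e. with i == BITRATE_TABLE_SIZE. The patched logic assigns inside the match and errors out on a miss, roughly:

    #include <stdio.h>

    struct rate { int regval; int bitrate; };

    static const struct rate table[] = { { 0x01, 10 }, { 0x02, 20 }, { 0x03, 55 } };
    #define TABLE_SIZE (sizeof(table) / sizeof(table[0]))

    static int get_bitrate(int regval, int *bitrate)
    {
        size_t i;

        for (i = 0; i < TABLE_SIZE; i++)
            if (table[i].regval == regval) {
                *bitrate = table[i].bitrate * 100000;   /* only on a hit */
                return 0;
            }
        return -1;  /* stands in for -EIO; *bitrate left untouched */
    }

    int main(void)
    {
        int b = 0;

        printf("hit: %d (b=%d)\n", get_bitrate(0x02, &b), b);
        printf("miss: %d (b=%d)\n", get_bitrate(0x7f, &b), b);
        return 0;
    }
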
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index cf7be1eb6124..93505f93bf97 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -589,8 +589,15 @@ static int orinoco_ioctl_getrate(struct net_device *dev,
589 589
590 /* If the interface is running we try to find more about the 590 /* If the interface is running we try to find more about the
591 current mode */ 591 current mode */
592 if (netif_running(dev)) 592 if (netif_running(dev)) {
593 err = orinoco_hw_get_act_bitrate(priv, &bitrate); 593 int act_bitrate;
594 int lerr;
595
596 /* Ignore errors if we can't get the actual bitrate */
597 lerr = orinoco_hw_get_act_bitrate(priv, &act_bitrate);
598 if (!lerr)
599 bitrate = act_bitrate;
600 }
594 601
595 orinoco_unlock(priv, &flags); 602 orinoco_unlock(priv, &flags);
596 603
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index b0342a520bf1..e5f45cb2a7a2 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -2,6 +2,7 @@ config P54_COMMON
2 tristate "Softmac Prism54 support" 2 tristate "Softmac Prism54 support"
3 depends on MAC80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 select CRC_CCITT
5 ---help--- 6 ---help---
6 This is common code for isl38xx/stlc45xx based modules. 7 This is common code for isl38xx/stlc45xx based modules.
7 This module does nothing by itself - the USB/PCI/SPI front-ends 8 This module does nothing by itself - the USB/PCI/SPI front-ends
@@ -48,6 +49,23 @@ config P54_SPI
48 49
49 If you choose to build a module, it'll be called p54spi. 50 If you choose to build a module, it'll be called p54spi.
50 51
52config P54_SPI_DEFAULT_EEPROM
53 bool "Include fallback EEPROM blob"
54 depends on P54_SPI
55 default n
56 ---help---
57 Unlike the PCI or USB devices, the SPI variants don't have
58 a dedicated EEPROM chip to store all device specific values
59 for calibration, country and interface settings.
60
61 The driver will try to load the image "3826.eeprom" if that
62 file is placed in the firmware search path (usually /lib/firmware).
63
64 Only if that request fails will this option provide a
65 backup set of generic values to get the device working.
66
67 Enabling this option adds about 4k to p54spi.
68
51config P54_LEDS 69config P54_LEDS
52 bool 70 bool
53 depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON) 71 depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON)
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 78347041ec40..8c05266d37f4 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -23,6 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24 24
25#include <net/mac80211.h> 25#include <net/mac80211.h>
26#include <linux/crc-ccitt.h>
26 27
27#include "p54.h" 28#include "p54.h"
28#include "eeprom.h" 29#include "eeprom.h"
@@ -540,6 +541,7 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
540 int err; 541 int err;
541 u8 *end = (u8 *)eeprom + len; 542 u8 *end = (u8 *)eeprom + len;
542 u16 synth = 0; 543 u16 synth = 0;
544 u16 crc16 = ~0;
543 545
544 wrap = (struct eeprom_pda_wrap *) eeprom; 546 wrap = (struct eeprom_pda_wrap *) eeprom;
545 entry = (void *)wrap->data + le16_to_cpu(wrap->len); 547 entry = (void *)wrap->data + le16_to_cpu(wrap->len);
@@ -655,16 +657,29 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
655 } 657 }
656 break; 658 break;
657 case PDR_END: 659 case PDR_END:
658 /* make it overrun */ 660 crc16 = ~crc_ccitt(crc16, (u8 *) entry, sizeof(*entry));
659 entry_len = len; 661 if (crc16 != le16_to_cpup((__le16 *)entry->data)) {
662 wiphy_err(dev->wiphy, "eeprom failed checksum "
663 "test!\n");
664 err = -ENOMSG;
665 goto err;
666 } else {
667 goto good_eeprom;
668 }
660 break; 669 break;
661 default: 670 default:
662 break; 671 break;
663 } 672 }
664 673
665 entry = (void *)entry + (entry_len + 1)*2; 674 crc16 = crc_ccitt(crc16, (u8 *)entry, (entry_len + 1) * 2);
675 entry = (void *)entry + (entry_len + 1) * 2;
666 } 676 }
667 677
678 wiphy_err(dev->wiphy, "unexpected end of eeprom data.\n");
679 err = -ENODATA;
680 goto err;
681
682good_eeprom:
668 if (!synth || !priv->iq_autocal || !priv->output_limit || 683 if (!synth || !priv->iq_autocal || !priv->output_limit ||
669 !priv->curve_data) { 684 !priv->curve_data) {
670 wiphy_err(dev->wiphy, 685 wiphy_err(dev->wiphy,
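
The eeprom parser now folds every PDA record into a running CRC-CCITT as it walks and, on reaching PDR_END, folds the end record itself, inverts, and compares against the 16-bit checksum stored in that record (Linux's crc_ccitt uses the bit-reversed polynomial 0x8408 with a caller-supplied seed, here ~0). A self-contained sketch of the same arithmetic over a simplified record layout, not the real eeprom_pda_wrap format:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
    {
        while (len--) {
            int i;

            crc ^= *buf++;
            for (i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
    }

    int main(void)
    {
        const uint8_t rec1[] = { 0x11, 0x22, 0x33, 0x44 };  /* a PDA record */
        const uint8_t end_hdr[] = { 0x02, 0x00 };           /* PDR_END-style header */
        uint16_t crc16 = 0xffff, stored;

        /* what the writer stores: fold records, then the end header, invert */
        crc16 = crc_ccitt(crc16, rec1, sizeof(rec1));
        stored = ~crc_ccitt(crc16, end_hdr, sizeof(end_hdr));

        /* a reader doing the same walk reproduces the stored value */
        crc16 = 0xffff;
        crc16 = crc_ccitt(crc16, rec1, sizeof(rec1));
        crc16 = ~crc_ccitt(crc16, end_hdr, sizeof(end_hdr));
        printf("checksum %s\n", crc16 == stored ? "ok" : "BAD");
        return crc16 != stored;
    }
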
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 15b20c29a604..92b9b1f05fd5 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -123,10 +123,14 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
123 bootrec = (struct bootrec *)&bootrec->data[len]; 123 bootrec = (struct bootrec *)&bootrec->data[len];
124 } 124 }
125 125
126 if (fw_version) 126 if (fw_version) {
127 wiphy_info(priv->hw->wiphy, 127 wiphy_info(priv->hw->wiphy,
128 "FW rev %s - Softmac protocol %x.%x\n", 128 "FW rev %s - Softmac protocol %x.%x\n",
129 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); 129 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
130 snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
131 "%s - %x.%x", fw_version,
132 priv->fw_var >> 8, priv->fw_var & 0xff);
133 }
130 134
131 if (priv->fw_var < 0x500) 135 if (priv->fw_var < 0x500)
132 wiphy_info(priv->hw->wiphy, 136 wiphy_info(priv->hw->wiphy,
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 47db439b63bf..622d27b6d8f2 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -429,8 +429,8 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
429 429
430 mutex_lock(&priv->conf_mutex); 430 mutex_lock(&priv->conf_mutex);
431 if (cmd == SET_KEY) { 431 if (cmd == SET_KEY) {
432 switch (key->alg) { 432 switch (key->cipher) {
433 case ALG_TKIP: 433 case WLAN_CIPHER_SUITE_TKIP:
434 if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL | 434 if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
435 BR_DESC_PRIV_CAP_TKIP))) { 435 BR_DESC_PRIV_CAP_TKIP))) {
436 ret = -EOPNOTSUPP; 436 ret = -EOPNOTSUPP;
@@ -439,7 +439,8 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
439 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 439 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
440 algo = P54_CRYPTO_TKIPMICHAEL; 440 algo = P54_CRYPTO_TKIPMICHAEL;
441 break; 441 break;
442 case ALG_WEP: 442 case WLAN_CIPHER_SUITE_WEP40:
443 case WLAN_CIPHER_SUITE_WEP104:
443 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) { 444 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) {
444 ret = -EOPNOTSUPP; 445 ret = -EOPNOTSUPP;
445 goto out_unlock; 446 goto out_unlock;
@@ -447,7 +448,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
447 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 448 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
448 algo = P54_CRYPTO_WEP; 449 algo = P54_CRYPTO_WEP;
449 break; 450 break;
450 case ALG_CCMP: 451 case WLAN_CIPHER_SUITE_CCMP:
451 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) { 452 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) {
452 ret = -EOPNOTSUPP; 453 ret = -EOPNOTSUPP;
453 goto out_unlock; 454 goto out_unlock;
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 087bf0698a5a..156e57dbd2cf 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -32,11 +32,14 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33 33
34#include "p54spi.h" 34#include "p54spi.h"
35#include "p54spi_eeprom.h"
36#include "p54.h" 35#include "p54.h"
37 36
38#include "lmac.h" 37#include "lmac.h"
39 38
39#ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
40#include "p54spi_eeprom.h"
41#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
42
40MODULE_FIRMWARE("3826.arm"); 43MODULE_FIRMWARE("3826.arm");
41MODULE_ALIAS("stlc45xx"); 44MODULE_ALIAS("stlc45xx");
42 45
@@ -195,9 +198,11 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
195 198
196 ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev); 199 ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
197 if (ret < 0) { 200 if (ret < 0) {
201#ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
198 dev_info(&priv->spi->dev, "loading default eeprom...\n"); 202 dev_info(&priv->spi->dev, "loading default eeprom...\n");
199 ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom, 203 ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom,
200 sizeof(p54spi_eeprom)); 204 sizeof(p54spi_eeprom));
205#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
201 } else { 206 } else {
202 dev_info(&priv->spi->dev, "loading user eeprom...\n"); 207 dev_info(&priv->spi->dev, "loading user eeprom...\n");
203 ret = p54_parse_eeprom(dev, (void *) eeprom->data, 208 ret = p54_parse_eeprom(dev, (void *) eeprom->data,
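
With the new Kconfig symbol, the built-in EEPROM blob and its fallback branch exist only when CONFIG_P54_SPI_DEFAULT_EEPROM is set; otherwise a missing "3826.eeprom" remains a hard error. A compile-time sketch of the same shape (the blob and the error value are placeholders; build with -DCONFIG_P54_SPI_DEFAULT_EEPROM to include the fallback):

    #include <stddef.h>
    #include <stdio.h>

    #ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
    static const unsigned char default_eeprom[] = { 0x00 };    /* placeholder blob */
    #endif

    static int request_eeprom(const unsigned char **blob, size_t *len)
    {
        int ret = -2;   /* pretend "3826.eeprom" was not found */

        *blob = NULL;
        *len = 0;
        if (ret < 0) {
    #ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
            *blob = default_eeprom;     /* built-in generic values */
            *len = sizeof(default_eeprom);
            ret = 0;
    #endif
        }
        return ret;
    }

    int main(void)
    {
        const unsigned char *blob;
        size_t len;

        printf("request_eeprom: %d\n", request_eeprom(&blob, &len));
        return 0;
    }
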
diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h
index 1ea1050911d9..d592cbd34d78 100644
--- a/drivers/net/wireless/p54/p54spi_eeprom.h
+++ b/drivers/net/wireless/p54/p54spi_eeprom.h
@@ -671,7 +671,7 @@ static unsigned char p54spi_eeprom[] = {
671 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, 671 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
672 672
6730x02, 0x00, 0x00, 0x00, /* PDR_END */ 6730x02, 0x00, 0x00, 0x00, /* PDR_END */
674 0xa8, 0xf5 /* bogus data */ 674 0x67, 0x99,
675}; 675};
676 676
677#endif /* P54SPI_EEPROM_H */ 677#endif /* P54SPI_EEPROM_H */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index ad595958b7df..063248b35069 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -930,8 +930,8 @@ static int __devinit p54u_probe(struct usb_interface *intf,
930#ifdef CONFIG_PM 930#ifdef CONFIG_PM
931 /* ISL3887 needs a full reset on resume */ 931 /* ISL3887 needs a full reset on resume */
932 udev->reset_resume = 1; 932 udev->reset_resume = 1;
933#endif /* CONFIG_PM */
933 err = p54u_device_reset(dev); 934 err = p54u_device_reset(dev);
934#endif
935 935
936 priv->hw_type = P54U_3887; 936 priv->hw_type = P54U_3887;
937 dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr); 937 dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 0e937dc0c9c4..76b2318a7dc7 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -275,15 +275,15 @@ static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
275{ 275{
276 int band = priv->hw->conf.channel->band; 276 int band = priv->hw->conf.channel->band;
277 277
278 if (priv->rxhw != 5) 278 if (priv->rxhw != 5) {
279 return ((rssi * priv->rssical_db[band].mul) / 64 + 279 return ((rssi * priv->rssical_db[band].mul) / 64 +
280 priv->rssical_db[band].add) / 4; 280 priv->rssical_db[band].add) / 4;
281 else 281 } else {
282 /* 282 /*
283 * TODO: find the correct formula 283 * TODO: find the correct formula
284 */ 284 */
285 return ((rssi * priv->rssical_db[band].mul) / 64 + 285 return rssi / 2 - 110;
286 priv->rssical_db[band].add) / 4; 286 }
287} 287}
288 288
289/* 289/*
@@ -683,14 +683,15 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
683 } 683 }
684} 684}
685 685
686static u8 p54_convert_algo(enum ieee80211_key_alg alg) 686static u8 p54_convert_algo(u32 cipher)
687{ 687{
688 switch (alg) { 688 switch (cipher) {
689 case ALG_WEP: 689 case WLAN_CIPHER_SUITE_WEP40:
690 case WLAN_CIPHER_SUITE_WEP104:
690 return P54_CRYPTO_WEP; 691 return P54_CRYPTO_WEP;
691 case ALG_TKIP: 692 case WLAN_CIPHER_SUITE_TKIP:
692 return P54_CRYPTO_TKIPMICHAEL; 693 return P54_CRYPTO_TKIPMICHAEL;
693 case ALG_CCMP: 694 case WLAN_CIPHER_SUITE_CCMP:
694 return P54_CRYPTO_AESCCMP; 695 return P54_CRYPTO_AESCCMP;
695 default: 696 default:
696 return 0; 697 return 0;
@@ -731,7 +732,7 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
731 732
732 if (info->control.hw_key) { 733 if (info->control.hw_key) {
733 crypt_offset = ieee80211_get_hdrlen_from_skb(skb); 734 crypt_offset = ieee80211_get_hdrlen_from_skb(skb);
734 if (info->control.hw_key->alg == ALG_TKIP) { 735 if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
735 u8 *iv = (u8 *)(skb->data + crypt_offset); 736 u8 *iv = (u8 *)(skb->data + crypt_offset);
736 /* 737 /*
737 * The firmware expects that the IV has to have 738
@@ -827,10 +828,10 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
827 hdr->tries = ridx; 828 hdr->tries = ridx;
828 txhdr->rts_rate_idx = 0; 829 txhdr->rts_rate_idx = 0;
829 if (info->control.hw_key) { 830 if (info->control.hw_key) {
830 txhdr->key_type = p54_convert_algo(info->control.hw_key->alg); 831 txhdr->key_type = p54_convert_algo(info->control.hw_key->cipher);
831 txhdr->key_len = min((u8)16, info->control.hw_key->keylen); 832 txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
832 memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len); 833 memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
833 if (info->control.hw_key->alg == ALG_TKIP) { 834 if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
834 /* reserve space for the MIC key */ 835 /* reserve space for the MIC key */
835 len += 8; 836 len += 8;
836 memcpy(skb_put(skb, 8), &(info->control.hw_key->key 837 memcpy(skb_put(skb, 8), &(info->control.hw_key->key
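
All of the p54 hunks above track the mac80211 API change from enum ieee80211_key_alg to 32-bit cipher suite selectors (00-0F-AC:n), which is why WEP now appears as two cases: the selector distinguishes 40- and 104-bit keys even though both map to the same crypto engine. A sketch of the mapping; the selector values follow the WLAN_CIPHER_SUITE_* definitions, while the CRYPTO_* numbers below are illustrative rather than the hardware's:

    #include <stdint.h>
    #include <stdio.h>

    #define SUITE(oui, id)  (((oui) << 8) | (id))
    #define CIPHER_WEP40    SUITE(0x000FAC, 1)
    #define CIPHER_TKIP     SUITE(0x000FAC, 2)
    #define CIPHER_CCMP     SUITE(0x000FAC, 4)
    #define CIPHER_WEP104   SUITE(0x000FAC, 5)

    enum { CRYPTO_NONE, CRYPTO_WEP, CRYPTO_TKIPMICHAEL, CRYPTO_AESCCMP };

    static uint8_t convert_algo(uint32_t cipher)
    {
        switch (cipher) {
        case CIPHER_WEP40:
        case CIPHER_WEP104:     /* both WEP lengths feed one engine */
            return CRYPTO_WEP;
        case CIPHER_TKIP:
            return CRYPTO_TKIPMICHAEL;
        case CIPHER_CCMP:
            return CRYPTO_AESCCMP;
        default:
            return CRYPTO_NONE;
        }
    }

    int main(void)
    {
        printf("wep104 -> %u, tkip -> %u\n",
               convert_algo(CIPHER_WEP104), convert_algo(CIPHER_TKIP));
        return 0;
    }
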
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 77cd65db8500..d97a2caf582b 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -3234,7 +3234,7 @@ prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
3234 switch (cmd) { 3234 switch (cmd) {
3235 case PRISM54_HOSTAPD: 3235 case PRISM54_HOSTAPD:
3236 if (!capable(CAP_NET_ADMIN)) 3236 if (!capable(CAP_NET_ADMIN))
3237 return -EPERM; 3237 return -EPERM;
3238 ret = prism54_hostapd(ndev, &wrq->u.data); 3238 ret = prism54_hostapd(ndev, &wrq->u.data);
3239 return ret; 3239 return ret;
3240 } 3240 }
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88560d0ae50a..d91a831a7700 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -43,7 +43,6 @@
43#include <linux/if_arp.h> 43#include <linux/if_arp.h>
44#include <linux/ioport.h> 44#include <linux/ioport.h>
45#include <linux/skbuff.h> 45#include <linux/skbuff.h>
46#include <linux/ethtool.h>
47#include <linux/ieee80211.h> 46#include <linux/ieee80211.h>
48 47
49#include <pcmcia/cs.h> 48#include <pcmcia/cs.h>
@@ -80,8 +79,6 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map);
80static struct net_device_stats *ray_get_stats(struct net_device *dev); 79static struct net_device_stats *ray_get_stats(struct net_device *dev);
81static int ray_dev_init(struct net_device *dev); 80static int ray_dev_init(struct net_device *dev);
82 81
83static const struct ethtool_ops netdev_ethtool_ops;
84
85static int ray_open(struct net_device *dev); 82static int ray_open(struct net_device *dev);
86static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb, 83static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
87 struct net_device *dev); 84 struct net_device *dev);
@@ -333,7 +330,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
333 330
334 /* Raylink entries in the device structure */ 331 /* Raylink entries in the device structure */
335 dev->netdev_ops = &ray_netdev_ops; 332 dev->netdev_ops = &ray_netdev_ops;
336 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
337 dev->wireless_handlers = &ray_handler_def; 333 dev->wireless_handlers = &ray_handler_def;
338#ifdef WIRELESS_SPY 334#ifdef WIRELESS_SPY
339 local->wireless_data.spy_data = &local->spy_data; 335 local->wireless_data.spy_data = &local->spy_data;
@@ -608,7 +604,7 @@ static int dl_startup_params(struct net_device *dev)
608 /* Start kernel timer to wait for dl startup to complete. */ 604 /* Start kernel timer to wait for dl startup to complete. */
609 local->timer.expires = jiffies + HZ / 2; 605 local->timer.expires = jiffies + HZ / 2;
610 local->timer.data = (long)local; 606 local->timer.data = (long)local;
611 local->timer.function = &verify_dl_startup; 607 local->timer.function = verify_dl_startup;
612 add_timer(&local->timer); 608 add_timer(&local->timer);
613 dev_dbg(&link->dev, 609 dev_dbg(&link->dev,
614 "ray_cs dl_startup_params started timer for verify_dl_startup\n"); 610 "ray_cs dl_startup_params started timer for verify_dl_startup\n");
@@ -1062,18 +1058,6 @@ AP to AP 1 1 dest AP src AP dest source
1062 } 1058 }
1063} /* end encapsulate_frame */ 1059} /* end encapsulate_frame */
1064 1060
1065/*===========================================================================*/
1066
1067static void netdev_get_drvinfo(struct net_device *dev,
1068 struct ethtool_drvinfo *info)
1069{
1070 strcpy(info->driver, "ray_cs");
1071}
1072
1073static const struct ethtool_ops netdev_ethtool_ops = {
1074 .get_drvinfo = netdev_get_drvinfo,
1075};
1076
1077/*====================================================================*/ 1061/*====================================================================*/
1078 1062
1079/*------------------------------------------------------------------*/ 1063/*------------------------------------------------------------------*/
@@ -1997,12 +1981,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
1997 dev_dbg(&link->dev, 1981 dev_dbg(&link->dev,
1998 "ray_cs interrupt network \"%s\" start failed\n", 1982 "ray_cs interrupt network \"%s\" start failed\n",
1999 local->sparm.b4.a_current_ess_id); 1983 local->sparm.b4.a_current_ess_id);
2000 local->timer.function = &start_net; 1984 local->timer.function = start_net;
2001 } else { 1985 } else {
2002 dev_dbg(&link->dev, 1986 dev_dbg(&link->dev,
2003 "ray_cs interrupt network \"%s\" join failed\n", 1987 "ray_cs interrupt network \"%s\" join failed\n",
2004 local->sparm.b4.a_current_ess_id); 1988 local->sparm.b4.a_current_ess_id);
2005 local->timer.function = &join_net; 1989 local->timer.function = join_net;
2006 } 1990 }
2007 add_timer(&local->timer); 1991 add_timer(&local->timer);
2008 } 1992 }
@@ -2470,9 +2454,9 @@ static void authenticate(ray_dev_t *local)
2470 2454
2471 del_timer(&local->timer); 2455 del_timer(&local->timer);
2472 if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) { 2456 if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
2473 local->timer.function = &join_net; 2457 local->timer.function = join_net;
2474 } else { 2458 } else {
2475 local->timer.function = &authenticate_timeout; 2459 local->timer.function = authenticate_timeout;
2476 } 2460 }
2477 local->timer.expires = jiffies + HZ * 2; 2461 local->timer.expires = jiffies + HZ * 2;
2478 local->timer.data = (long)local; 2462 local->timer.data = (long)local;
@@ -2557,7 +2541,7 @@ static void associate(ray_dev_t *local)
2557 del_timer(&local->timer); 2541 del_timer(&local->timer);
2558 local->timer.expires = jiffies + HZ * 2; 2542 local->timer.expires = jiffies + HZ * 2;
2559 local->timer.data = (long)local; 2543 local->timer.data = (long)local;
2560 local->timer.function = &join_net; 2544 local->timer.function = join_net;
2561 add_timer(&local->timer); 2545 add_timer(&local->timer);
2562 local->card_status = CARD_ASSOC_FAILED; 2546 local->card_status = CARD_ASSOC_FAILED;
2563 return; 2547 return;
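
All the ray_cs hunks above are the same cosmetic cleanup: dropping the redundant address-of operator when assigning a function to timer.function. In C a function designator decays to a pointer anyway, so both spellings yield the identical value, as this tiny standalone program demonstrates:

#include <stdio.h>

static void cb(unsigned long data)
{
        printf("%lu\n", data);
}

int main(void)
{
        void (*fn1)(unsigned long) = cb;        /* implicit decay */
        void (*fn2)(unsigned long) = &cb;       /* explicit address-of */
        fn1(1);
        fn2(2);
        return fn1 == fn2 ? 0 : 1;              /* identical pointers */
}
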
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5063e01410e5..103c71164f10 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1007,12 +1007,11 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1007/* 1007/*
1008 * TX descriptor initialization 1008 * TX descriptor initialization
1009 */ 1009 */
1010static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1010static void rt2400pci_write_tx_desc(struct queue_entry *entry,
1011 struct sk_buff *skb,
1012 struct txentry_desc *txdesc) 1011 struct txentry_desc *txdesc)
1013{ 1012{
1014 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1013 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1015 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 1014 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1016 __le32 *txd = entry_priv->desc; 1015 __le32 *txd = entry_priv->desc;
1017 u32 word; 1016 u32 word;
1018 1017
@@ -1096,7 +1095,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1096 /* 1095 /*
1097 * Write the TX descriptor for the beacon. 1096 * Write the TX descriptor for the beacon.
1098 */ 1097 */
1099 rt2400pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1098 rt2400pci_write_tx_desc(entry, txdesc);
1100 1099
1101 /* 1100 /*
1102 * Dump beacon to userspace through debugfs. 1101 * Dump beacon to userspace through debugfs.
@@ -1112,24 +1111,24 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1112 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1111 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1113} 1112}
1114 1113
1115static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1114static void rt2400pci_kick_tx_queue(struct data_queue *queue)
1116 const enum data_queue_qid queue)
1117{ 1115{
1116 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1118 u32 reg; 1117 u32 reg;
1119 1118
1120 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1119 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1121 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE)); 1120 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
1122 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK)); 1121 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
1123 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM)); 1122 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
1124 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1123 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1125} 1124}
1126 1125
1127static void rt2400pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 1126static void rt2400pci_kill_tx_queue(struct data_queue *queue)
1128 const enum data_queue_qid qid)
1129{ 1127{
1128 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1130 u32 reg; 1129 u32 reg;
1131 1130
1132 if (qid == QID_BEACON) { 1131 if (queue->qid == QID_BEACON) {
1133 rt2x00pci_register_write(rt2x00dev, CSR14, 0); 1132 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1134 } else { 1133 } else {
1135 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1134 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
@@ -1481,15 +1480,17 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1481 /* 1480 /*
1482 * Create channel information array 1481 * Create channel information array
1483 */ 1482 */
1484 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 1483 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
1485 if (!info) 1484 if (!info)
1486 return -ENOMEM; 1485 return -ENOMEM;
1487 1486
1488 spec->channels_info = info; 1487 spec->channels_info = info;
1489 1488
1490 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); 1489 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1491 for (i = 0; i < 14; i++) 1490 for (i = 0; i < 14; i++) {
1492 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 1491 info[i].max_power = TXPOWER_FROM_DEV(MAX_TXPOWER);
1492 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1493 }
1493 1494
1494 return 0; 1495 return 0;
1495} 1496}
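
The kzalloc(n * size) to kcalloc(n, size) conversions in this and the following hunks are hardening: kcalloc checks the n * size multiplication for overflow and returns NULL instead of handing back an undersized buffer. A userspace analogue using calloc, with a deliberately overflowing count (glibc, like kcalloc, detects the wrap):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t huge = (size_t)-1 / 2;           /* n * size would wrap */
        void *p = calloc(huge, 16);             /* overflow-checked */
        printf(p ? "allocated?!\n" : "calloc refused: overflow\n");
        free(p);
        return 0;
}
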
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index c2a555d5376b..ab0507110e42 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1161,12 +1161,11 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1161/* 1161/*
1162 * TX descriptor initialization 1162 * TX descriptor initialization
1163 */ 1163 */
1164static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1164static void rt2500pci_write_tx_desc(struct queue_entry *entry,
1165 struct sk_buff *skb,
1166 struct txentry_desc *txdesc) 1165 struct txentry_desc *txdesc)
1167{ 1166{
1168 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1167 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1169 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 1168 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1170 __le32 *txd = entry_priv->desc; 1169 __le32 *txd = entry_priv->desc;
1171 u32 word; 1170 u32 word;
1172 1171
@@ -1249,7 +1248,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1249 /* 1248 /*
1250 * Write the TX descriptor for the beacon. 1249 * Write the TX descriptor for the beacon.
1251 */ 1250 */
1252 rt2500pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1251 rt2500pci_write_tx_desc(entry, txdesc);
1253 1252
1254 /* 1253 /*
1255 * Dump beacon to userspace through debugfs. 1254 * Dump beacon to userspace through debugfs.
@@ -1265,24 +1264,24 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1265 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1264 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1266} 1265}
1267 1266
1268static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1267static void rt2500pci_kick_tx_queue(struct data_queue *queue)
1269 const enum data_queue_qid queue)
1270{ 1268{
1269 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1271 u32 reg; 1270 u32 reg;
1272 1271
1273 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1272 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1274 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE)); 1273 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
1275 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK)); 1274 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
1276 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM)); 1275 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
1277 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1276 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1278} 1277}
1279 1278
1280static void rt2500pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 1279static void rt2500pci_kill_tx_queue(struct data_queue *queue)
1281 const enum data_queue_qid qid)
1282{ 1280{
1281 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1283 u32 reg; 1282 u32 reg;
1284 1283
1285 if (qid == QID_BEACON) { 1284 if (queue->qid == QID_BEACON) {
1286 rt2x00pci_register_write(rt2x00dev, CSR14, 0); 1285 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1287 } else { 1286 } else {
1288 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1287 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
@@ -1795,19 +1794,23 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1795 /* 1794 /*
1796 * Create channel information array 1795 * Create channel information array
1797 */ 1796 */
1798 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 1797 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
1799 if (!info) 1798 if (!info)
1800 return -ENOMEM; 1799 return -ENOMEM;
1801 1800
1802 spec->channels_info = info; 1801 spec->channels_info = info;
1803 1802
1804 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); 1803 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1805 for (i = 0; i < 14; i++) 1804 for (i = 0; i < 14; i++) {
1806 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 1805 info[i].max_power = MAX_TXPOWER;
1806 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1807 }
1807 1808
1808 if (spec->num_channels > 14) { 1809 if (spec->num_channels > 14) {
1809 for (i = 14; i < spec->num_channels; i++) 1810 for (i = 14; i < spec->num_channels; i++) {
1810 info[i].tx_power1 = DEFAULT_TXPOWER; 1811 info[i].max_power = MAX_TXPOWER;
1812 info[i].default_power1 = DEFAULT_TXPOWER;
1813 }
1811 } 1814 }
1812 1815
1813 return 0; 1816 return 0;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index cdaf93f48263..db64df4267d8 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -355,7 +355,9 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
355 * it is known not to work on at least some hardware. 355
356 * SW crypto will be used in that case. 356 * SW crypto will be used in that case.
357 */ 357 */
358 if (key->alg == ALG_WEP && key->keyidx != 0) 358 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
359 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
360 key->keyidx != 0)
359 return -EOPNOTSUPP; 361 return -EOPNOTSUPP;
360 362
361 /* 363 /*
@@ -1039,12 +1041,11 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1039/* 1041/*
1040 * TX descriptor initialization 1042 * TX descriptor initialization
1041 */ 1043 */
1042static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1044static void rt2500usb_write_tx_desc(struct queue_entry *entry,
1043 struct sk_buff *skb,
1044 struct txentry_desc *txdesc) 1045 struct txentry_desc *txdesc)
1045{ 1046{
1046 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1047 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1047 __le32 *txd = (__le32 *) skb->data; 1048 __le32 *txd = (__le32 *) entry->skb->data;
1048 u32 word; 1049 u32 word;
1049 1050
1050 /* 1051 /*
@@ -1127,7 +1128,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry,
1127 /* 1128 /*
1128 * Write the TX descriptor for the beacon. 1129 * Write the TX descriptor for the beacon.
1129 */ 1130 */
1130 rt2500usb_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1131 rt2500usb_write_tx_desc(entry, txdesc);
1131 1132
1132 /* 1133 /*
1133 * Dump beacon to userspace through debugfs. 1134 * Dump beacon to userspace through debugfs.
@@ -1195,6 +1196,14 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
1195 return length; 1196 return length;
1196} 1197}
1197 1198
1199static void rt2500usb_kill_tx_queue(struct data_queue *queue)
1200{
1201 if (queue->qid == QID_BEACON)
1202 rt2500usb_register_write(queue->rt2x00dev, TXRX_CSR19, 0);
1203
1204 rt2x00usb_kill_tx_queue(queue);
1205}
1206
1198/* 1207/*
1199 * RX control handlers 1208 * RX control handlers
1200 */ 1209 */
@@ -1698,19 +1707,23 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1698 /* 1707 /*
1699 * Create channel information array 1708 * Create channel information array
1700 */ 1709 */
1701 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 1710 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
1702 if (!info) 1711 if (!info)
1703 return -ENOMEM; 1712 return -ENOMEM;
1704 1713
1705 spec->channels_info = info; 1714 spec->channels_info = info;
1706 1715
1707 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); 1716 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1708 for (i = 0; i < 14; i++) 1717 for (i = 0; i < 14; i++) {
1709 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 1718 info[i].max_power = MAX_TXPOWER;
1719 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1720 }
1710 1721
1711 if (spec->num_channels > 14) { 1722 if (spec->num_channels > 14) {
1712 for (i = 14; i < spec->num_channels; i++) 1723 for (i = 14; i < spec->num_channels; i++) {
1713 info[i].tx_power1 = DEFAULT_TXPOWER; 1724 info[i].max_power = MAX_TXPOWER;
1725 info[i].default_power1 = DEFAULT_TXPOWER;
1726 }
1714 } 1727 }
1715 1728
1716 return 0; 1729 return 0;
@@ -1789,7 +1802,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1789 .write_beacon = rt2500usb_write_beacon, 1802 .write_beacon = rt2500usb_write_beacon,
1790 .get_tx_data_len = rt2500usb_get_tx_data_len, 1803 .get_tx_data_len = rt2500usb_get_tx_data_len,
1791 .kick_tx_queue = rt2x00usb_kick_tx_queue, 1804 .kick_tx_queue = rt2x00usb_kick_tx_queue,
1792 .kill_tx_queue = rt2x00usb_kill_tx_queue, 1805 .kill_tx_queue = rt2500usb_kill_tx_queue,
1793 .fill_rxdone = rt2500usb_fill_rxdone, 1806 .fill_rxdone = rt2500usb_fill_rxdone,
1794 .config_shared_key = rt2500usb_config_key, 1807 .config_shared_key = rt2500usb_config_key,
1795 .config_pairwise_key = rt2500usb_config_key, 1808 .config_pairwise_key = rt2500usb_config_key,
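
The ops-table swap on this line is the interesting bit: rt2500usb keeps the generic kick hook but overrides kill with a wrapper that first stops the beacon timer (TXRX_CSR19) and then falls through to the shared USB path. The same specialize-one-hook pattern in miniature, standalone; all names below are invented for the sketch:

#include <stdio.h>

struct queue;                                   /* opaque for the sketch */

struct ops {
        void (*kill)(struct queue *);
};

static void generic_kill(struct queue *q)
{
        (void)q;
        puts("generic URB kill");               /* shared bus-level path */
}

static void kill_with_beacon_off(struct queue *q)
{
        puts("beacon timer off");               /* device-specific prologue */
        generic_kill(q);                        /* then delegate */
}

static const struct ops usb_like_ops = { .kill = kill_with_beacon_off };

int main(void)
{
        usb_like_ops.kill(NULL);
        return 0;
}
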
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index ed4ebcdde7c9..70a5cb86405b 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1318,7 +1318,25 @@
1318#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000) 1318#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000)
1319 1319
1320/* 1320/*
1321 * TX_STA_FIFO: TX Result for specific PID status fifo register 1321 * TX_STA_FIFO: TX Result for specific PID status fifo register.
1322 *
1323 * This register is implemented as FIFO with 16 entries in the HW. Each
1324 * register read fetches the next tx result. If the FIFO is full because
1325 * it wasn't read fast enough after the corresponding interrupt (TX_FIFO_STATUS)
1326 * triggered, the hw seems to simply drop further tx results.
1327 *
1328 * VALID: 1: this tx result is valid
1329 * 0: no valid tx result -> driver should stop reading
1330 * PID_TYPE: The PID latched from the PID field in the TXWI, can be used
1331 * to match a frame with its tx result (even though the PID is
1332 * only 4 bits wide).
1333 * TX_SUCCESS: Indicates tx success (1) or failure (0)
1334 * TX_AGGRE: Indicates if the frame was part of an aggregate (1) or not (0)
1335 * TX_ACK_REQUIRED: Indicates if the frame needed to get ack'ed (1) or not (0)
1336 * WCID: The wireless client ID.
1337 * MCS: The tx rate used during the last transmission of this frame, be it
1338 * successful or not.
1339 * PHYMODE: The phymode used for the transmission.
1322 */ 1340 */
1323#define TX_STA_FIFO 0x1718 1341#define TX_STA_FIFO 0x1718
1324#define TX_STA_FIFO_VALID FIELD32(0x00000001) 1342#define TX_STA_FIFO_VALID FIELD32(0x00000001)
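
A userspace model of the drain loop this documentation implies: read entries until VALID is clear, and bound the loop so a wedged FIFO cannot spin forever. read_fifo() is a stand-in for rt2800_register_read(..., TX_STA_FIFO, ...), and the queued-result count is invented:

#include <stdint.h>
#include <stdio.h>

#define FIFO_VALID 0x00000001u
#define RING_SIZE  16

static uint32_t read_fifo(void)
{
        static int left = 3;            /* pretend 3 results are queued */
        return left-- > 0 ? (FIFO_VALID | 0x10) : 0;
}

int main(void)
{
        for (int i = 0; i < RING_SIZE; i++) {
                uint32_t reg = read_fifo();
                if (!(reg & FIFO_VALID))
                        break;          /* no more results -> stop reading */
                printf("tx result %d: 0x%08x\n", i, reg);
        }
        return 0;
}
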
@@ -1841,6 +1859,13 @@ struct mac_iveiv_entry {
1841#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) 1859#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
1842 1860
1843/* 1861/*
1862 * EEPROM Maximum TX power values
1863 */
1864#define EEPROM_MAX_TX_POWER 0x0027
1865#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff)
1866#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00)
1867
1868/*
1844 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power. 1869 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
1845 * This is delta in 40MHZ. 1870 * This is delta in 40MHZ.
1846 * VALUE: Tx Power delta value (MAX=4) 1871
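
For reference, the two FIELD16 masks above split one EEPROM word into a 2.4 GHz limit (low byte) and a 5 GHz limit (high byte); a byte of 0xff means unprogrammed and gets patched to a driver default in the rt2800_validate_eeprom hunk further down. A standalone sketch with an invented sample word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t word = 0x11ff;                 /* made-up EEPROM word */
        uint8_t max_24ghz = word & 0x00ff;      /* 0xff -> use default */
        uint8_t max_5ghz  = (word & 0xff00) >> 8;       /* 0x11 */
        printf("2.4GHz %u, 5GHz %u\n", max_24ghz, max_5ghz);
        return 0;
}
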
@@ -1928,6 +1953,8 @@ struct mac_iveiv_entry {
1928 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs 1953 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs
1929 * BW: Channel bandwidth 20MHz or 40 MHz 1954 * BW: Channel bandwidth 20MHz or 40 MHz
1930 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED 1955 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
1956 * AMPDU: 1: this frame is eligible for AMPDU aggregation; the hw will
1957 * aggregate consecutive frames with the same RA and QoS TID.
1931 */ 1958 */
1932#define TXWI_W0_FRAG FIELD32(0x00000001) 1959#define TXWI_W0_FRAG FIELD32(0x00000001)
1933#define TXWI_W0_MIMO_PS FIELD32(0x00000002) 1960#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
@@ -1945,6 +1972,15 @@ struct mac_iveiv_entry {
1945 1972
1946/* 1973/*
1947 * Word1 1974 * Word1
1975 * ACK: 0: No Ack needed, 1: Ack needed
1976 * NSEQ: 0: Don't assign hw sequence number, 1: Assign hw sequence number
1977 * BW_WIN_SIZE: BA windows size of the recipient
1978 * WIRELESS_CLI_ID: Client ID for WCID table access
1979 * MPDU_TOTAL_BYTE_COUNT: Length of 802.11 frame
1980 * PACKETID: Will be latched into the TX_STA_FIFO register once the corresponding
1981 * frame was processed. If multiple frames are aggregated together
1982 * (AMPDU==1) the reported tx status will always contain the packet
1983 * id of the first frame. 0: Don't report tx status for this frame.
1948 */ 1984 */
1949#define TXWI_W1_ACK FIELD32(0x00000001) 1985#define TXWI_W1_ACK FIELD32(0x00000001)
1950#define TXWI_W1_NSEQ FIELD32(0x00000002) 1986#define TXWI_W1_NSEQ FIELD32(0x00000002)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b66e0fd8f0fa..27a6e225083c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1,4 +1,5 @@
1/* 1/*
2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
2 Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com> 3 Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> 4 Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
4 Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com> 5 Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
@@ -254,6 +255,23 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
254} 255}
255EXPORT_SYMBOL_GPL(rt2800_mcu_request); 256EXPORT_SYMBOL_GPL(rt2800_mcu_request);
256 257
258int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev)
259{
260 unsigned int i = 0;
261 u32 reg;
262
263 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
264 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
265 if (reg && reg != ~0)
266 return 0;
267 msleep(1);
268 }
269
270 ERROR(rt2x00dev, "Unstable hardware.\n");
271 return -EBUSY;
272}
273EXPORT_SYMBOL_GPL(rt2800_wait_csr_ready);
274
257int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev) 275int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
258{ 276{
259 unsigned int i; 277 unsigned int i;
@@ -367,19 +385,16 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
367 u32 reg; 385 u32 reg;
368 386
369 /* 387 /*
370 * Wait for stable hardware. 388 * If the driver doesn't wake up the firmware here,
389 * rt2800_load_firmware will hang forever when interface is up again.
371 */ 390 */
372 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 391 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
373 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
374 if (reg && reg != ~0)
375 break;
376 msleep(1);
377 }
378 392
379 if (i == REGISTER_BUSY_COUNT) { 393 /*
380 ERROR(rt2x00dev, "Unstable hardware.\n"); 394 * Wait for stable hardware.
395 */
396 if (rt2800_wait_csr_ready(rt2x00dev))
381 return -EBUSY; 397 return -EBUSY;
382 }
383 398
384 if (rt2x00_is_pci(rt2x00dev)) 399 if (rt2x00_is_pci(rt2x00dev))
385 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); 400 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
@@ -427,8 +442,10 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
427} 442}
428EXPORT_SYMBOL_GPL(rt2800_load_firmware); 443EXPORT_SYMBOL_GPL(rt2800_load_firmware);
429 444
430void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc) 445void rt2800_write_tx_data(struct queue_entry *entry,
446 struct txentry_desc *txdesc)
431{ 447{
448 __le32 *txwi = rt2800_drv_get_txwi(entry);
432 u32 word; 449 u32 word;
433 450
434 /* 451 /*
@@ -437,7 +454,8 @@ void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
437 rt2x00_desc_read(txwi, 0, &word); 454 rt2x00_desc_read(txwi, 0, &word);
438 rt2x00_set_field32(&word, TXWI_W0_FRAG, 455 rt2x00_set_field32(&word, TXWI_W0_FRAG,
439 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 456 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
440 rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0); 457 rt2x00_set_field32(&word, TXWI_W0_MIMO_PS,
458 test_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags));
441 rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0); 459 rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
442 rt2x00_set_field32(&word, TXWI_W0_TS, 460 rt2x00_set_field32(&word, TXWI_W0_TS,
443 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 461 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
@@ -465,7 +483,7 @@ void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
465 txdesc->key_idx : 0xff); 483 txdesc->key_idx : 0xff);
466 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 484 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
467 txdesc->length); 485 txdesc->length);
468 rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->queue + 1); 486 rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->qid + 1);
469 rt2x00_desc_write(txwi, 1, word); 487 rt2x00_desc_write(txwi, 1, word);
470 488
471 /* 489 /*
@@ -478,7 +496,7 @@ void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
478 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */); 496 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
479 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */); 497 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
480} 498}
481EXPORT_SYMBOL_GPL(rt2800_write_txwi); 499EXPORT_SYMBOL_GPL(rt2800_write_tx_data);
482 500
483static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2) 501static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2)
484{ 502{
@@ -490,7 +508,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2)
490 u8 offset1; 508 u8 offset1;
491 u8 offset2; 509 u8 offset2;
492 510
493 if (rt2x00dev->rx_status.band == IEEE80211_BAND_2GHZ) { 511 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
494 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom); 512 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
495 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0); 513 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
496 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1); 514 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
@@ -569,6 +587,148 @@ void rt2800_process_rxwi(struct queue_entry *entry,
569} 587}
570EXPORT_SYMBOL_GPL(rt2800_process_rxwi); 588EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
571 589
590static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
591{
592 __le32 *txwi;
593 u32 word;
594 int wcid, ack, pid;
595 int tx_wcid, tx_ack, tx_pid;
596
597 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
598 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
599 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
600
601 /*
602 * This frame has returned with an IO error,
603 * so the status report is not intended for this
604 * frame.
605 */
606 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) {
607 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
608 return false;
609 }
610
611 /*
612 * Validate if this TX status report is intended for
613 * this entry by comparing the WCID/ACK/PID fields.
614 */
615 txwi = rt2800_drv_get_txwi(entry);
616
617 rt2x00_desc_read(txwi, 1, &word);
618 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
619 tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
620 tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
621
622 if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid)) {
623 WARNING(entry->queue->rt2x00dev,
624 "TX status report missed for queue %d entry %d\n",
625 entry->queue->qid, entry->entry_idx);
626 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
627 return false;
628 }
629
630 return true;
631}
632
633void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
634{
635 struct data_queue *queue;
636 struct queue_entry *entry;
637 __le32 *txwi;
638 struct txdone_entry_desc txdesc;
639 u32 word;
640 u32 reg;
641 u16 mcs, real_mcs;
642 u8 pid;
643 int i;
644
645 /*
646 * TX_STA_FIFO is a FIFO of X entries, hence read TX_STA_FIFO
647 * at most X times and also stop processing once the TX_STA_FIFO_VALID
648 * flag is not set anymore.
649 *
650 * The legacy drivers use X=TX_RING_SIZE but state in a comment
651 * that the TX_STA_FIFO has a size of 16. We stick to our
652 * tx ring size for now.
653 */
654 for (i = 0; i < TX_ENTRIES; i++) {
655 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
656 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
657 break;
658
659 /*
660 * Skip this entry when it contains an invalid
661 * queue identification number.
662 */
663 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
664 if (pid >= QID_RX)
665 continue;
666
667 queue = rt2x00queue_get_queue(rt2x00dev, pid);
668 if (unlikely(!queue))
669 continue;
670
671 /*
672 * Inside each queue, we process each entry in a chronological
673 * order. We first check that the queue is not empty.
674 */
675 entry = NULL;
676 txwi = NULL;
677 while (!rt2x00queue_empty(queue)) {
678 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
679 if (rt2800_txdone_entry_check(entry, reg))
680 break;
681 }
682
683 if (!entry || rt2x00queue_empty(queue))
684 break;
685
686
687 /*
688 * Obtain the status about this packet.
689 */
690 txdesc.flags = 0;
691 txwi = rt2800_drv_get_txwi(entry);
692 rt2x00_desc_read(txwi, 0, &word);
693 mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
694 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
695
696 /*
697 * Ralink has a retry mechanism using a global fallback
698 * table. We setup this fallback table to try the immediate
699 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
700 * always contains the MCS used for the last transmission, be
701 * it successful or not.
702 */
703 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
704 /*
705 * Transmission succeeded. The number of retries is
706 * mcs - real_mcs
707 */
708 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
709 txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
710 } else {
711 /*
712 * Transmission failed. The number of retries is
713 * always 7 in this case (for a total number of 8
714 * frames sent).
715 */
716 __set_bit(TXDONE_FAILURE, &txdesc.flags);
717 txdesc.retry = rt2x00dev->long_retry;
718 }
719
720 /*
721 * the frame was retried at least once
722 * -> hw used fallback rates
723 */
724 if (txdesc.retry)
725 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
726
727 rt2x00lib_txdone(entry, &txdesc);
728 }
729}
730EXPORT_SYMBOL_GPL(rt2800_txdone);
731
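
Worth spelling out the PACKETID convention that rt2800_txdone relies on: the TXWI stores qid + 1 so that a field value of 0 can mean "no tx status requested", and the decode side subtracts 1 and skips out-of-range results (the kernel code uses a u8, so 0 - 1 wraps to 255 and falls out of range). A standalone round trip; the QID values are illustrative stand-ins for the rt2x00 enum:

#include <stdio.h>

enum { QID_AC_BE, QID_AC_BK, QID_AC_VI, QID_AC_VO, QID_RX };

int main(void)
{
        for (unsigned field = 0; field <= 4; field++) {
                unsigned pid = field - 1;       /* 0 wraps to UINT_MAX */
                if (pid >= QID_RX) {
                        printf("field %u: no/invalid status, skipped\n", field);
                        continue;
                }
                printf("field %u -> qid %u\n", field, pid);
        }
        return 0;
}
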
572void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) 732void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
573{ 733{
574 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 734 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -600,7 +760,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
600 /* 760 /*
601 * Add the TXWI for the beacon to the skb. 761 * Add the TXWI for the beacon to the skb.
602 */ 762 */
603 rt2800_write_txwi((__le32 *)entry->skb->data, txdesc); 763 rt2800_write_tx_data(entry, txdesc);
604 764
605 /* 765 /*
606 * Dump beacon to userspace through debugfs. 766 * Dump beacon to userspace through debugfs.
@@ -975,19 +1135,23 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
975 } 1135 }
976 1136
977 if (flags & CONFIG_UPDATE_MAC) { 1137 if (flags & CONFIG_UPDATE_MAC) {
978 reg = le32_to_cpu(conf->mac[1]); 1138 if (!is_zero_ether_addr((const u8 *)conf->mac)) {
979 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff); 1139 reg = le32_to_cpu(conf->mac[1]);
980 conf->mac[1] = cpu_to_le32(reg); 1140 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
1141 conf->mac[1] = cpu_to_le32(reg);
1142 }
981 1143
982 rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0, 1144 rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
983 conf->mac, sizeof(conf->mac)); 1145 conf->mac, sizeof(conf->mac));
984 } 1146 }
985 1147
986 if (flags & CONFIG_UPDATE_BSSID) { 1148 if (flags & CONFIG_UPDATE_BSSID) {
987 reg = le32_to_cpu(conf->bssid[1]); 1149 if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
988 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3); 1150 reg = le32_to_cpu(conf->bssid[1]);
989 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7); 1151 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
990 conf->bssid[1] = cpu_to_le32(reg); 1152 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7);
1153 conf->bssid[1] = cpu_to_le32(reg);
1154 }
991 1155
992 rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0, 1156 rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
993 conf->bssid, sizeof(conf->bssid)); 1157 conf->bssid, sizeof(conf->bssid));
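
The guard added above means that clearing the MAC or BSSID writes genuinely all-zero words to the hardware instead of zero addresses with the unicast/BSS mask bits still ORed in. A miniature of the check, standalone:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same test is_zero_ether_addr() performs in the kernel. */
static bool is_zero_ether_addr(const unsigned char *a)
{
        static const unsigned char zero[6];
        return memcmp(a, zero, 6) == 0;
}

int main(void)
{
        unsigned char mac[6] = {0};
        printf(is_zero_ether_addr(mac) ? "skip mask bits\n" : "apply mask bits\n");
        return 0;
}
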
@@ -1120,27 +1284,23 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
1120 * double meaning, and we should set a 7DBm boost flag. 1284 * double meaning, and we should set a 7DBm boost flag.
1121 */ 1285 */
1122 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST, 1286 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
1123 (info->tx_power1 >= 0)); 1287 (info->default_power1 >= 0));
1124 1288
1125 if (info->tx_power1 < 0) 1289 if (info->default_power1 < 0)
1126 info->tx_power1 += 7; 1290 info->default_power1 += 7;
1127 1291
1128 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, 1292 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, info->default_power1);
1129 TXPOWER_A_TO_DEV(info->tx_power1));
1130 1293
1131 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST, 1294 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
1132 (info->tx_power2 >= 0)); 1295 (info->default_power2 >= 0));
1133 1296
1134 if (info->tx_power2 < 0) 1297 if (info->default_power2 < 0)
1135 info->tx_power2 += 7; 1298 info->default_power2 += 7;
1136 1299
1137 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, 1300 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, info->default_power2);
1138 TXPOWER_A_TO_DEV(info->tx_power2));
1139 } else { 1301 } else {
1140 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, 1302 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, info->default_power1);
1141 TXPOWER_G_TO_DEV(info->tx_power1)); 1303 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G, info->default_power2);
1142 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
1143 TXPOWER_G_TO_DEV(info->tx_power2));
1144 } 1304 }
1145 1305
1146 rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf)); 1306 rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
@@ -1180,13 +1340,11 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
1180 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); 1340 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
1181 1341
1182 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr); 1342 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
1183 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, 1343 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1);
1184 TXPOWER_G_TO_DEV(info->tx_power1));
1185 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); 1344 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
1186 1345
1187 rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr); 1346 rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
1188 rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, 1347 rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
1189 TXPOWER_G_TO_DEV(info->tx_power2));
1190 rt2800_rfcsr_write(rt2x00dev, 13, rfcsr); 1348 rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
1191 1349
1192 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); 1350 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
@@ -1210,10 +1368,19 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1210 unsigned int tx_pin; 1368 unsigned int tx_pin;
1211 u8 bbp; 1369 u8 bbp;
1212 1370
1371 if (rf->channel <= 14) {
1372 info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
1373 info->default_power2 = TXPOWER_G_TO_DEV(info->default_power2);
1374 } else {
1375 info->default_power1 = TXPOWER_A_TO_DEV(info->default_power1);
1376 info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
1377 }
1378
1213 if (rt2x00_rf(rt2x00dev, RF2020) || 1379 if (rt2x00_rf(rt2x00dev, RF2020) ||
1214 rt2x00_rf(rt2x00dev, RF3020) || 1380 rt2x00_rf(rt2x00dev, RF3020) ||
1215 rt2x00_rf(rt2x00dev, RF3021) || 1381 rt2x00_rf(rt2x00dev, RF3021) ||
1216 rt2x00_rf(rt2x00dev, RF3022)) 1382 rt2x00_rf(rt2x00dev, RF3022) ||
1383 rt2x00_rf(rt2x00dev, RF3052))
1217 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); 1384 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
1218 else 1385 else
1219 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 1386 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@@ -1536,7 +1703,7 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
1536/* 1703/*
1537 * Initialization functions. 1704 * Initialization functions.
1538 */ 1705 */
1539int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) 1706static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1540{ 1707{
1541 u32 reg; 1708 u32 reg;
1542 u16 eeprom; 1709 u16 eeprom;
@@ -1906,7 +2073,6 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1906 2073
1907 return 0; 2074 return 0;
1908} 2075}
1909EXPORT_SYMBOL_GPL(rt2800_init_registers);
1910 2076
1911static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev) 2077static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
1912{ 2078{
@@ -1949,7 +2115,7 @@ static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1949 return -EACCES; 2115 return -EACCES;
1950} 2116}
1951 2117
1952int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) 2118static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1953{ 2119{
1954 unsigned int i; 2120 unsigned int i;
1955 u16 eeprom; 2121 u16 eeprom;
@@ -2044,7 +2210,6 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2044 2210
2045 return 0; 2211 return 0;
2046} 2212}
2047EXPORT_SYMBOL_GPL(rt2800_init_bbp);
2048 2213
2049static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, 2214static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
2050 bool bw40, u8 rfcsr24, u8 filter_target) 2215 bool bw40, u8 rfcsr24, u8 filter_target)
@@ -2106,7 +2271,7 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
2106 return rfcsr24; 2271 return rfcsr24;
2107} 2272}
2108 2273
2109int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) 2274static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2110{ 2275{
2111 u8 rfcsr; 2276 u8 rfcsr;
2112 u8 bbp; 2277 u8 bbp;
@@ -2360,7 +2525,100 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2360 2525
2361 return 0; 2526 return 0;
2362} 2527}
2363EXPORT_SYMBOL_GPL(rt2800_init_rfcsr); 2528
2529int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
2530{
2531 u32 reg;
2532 u16 word;
2533
2534 /*
2535 * Initialize all registers.
2536 */
2537 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
2538 rt2800_init_registers(rt2x00dev) ||
2539 rt2800_init_bbp(rt2x00dev) ||
2540 rt2800_init_rfcsr(rt2x00dev)))
2541 return -EIO;
2542
2543 /*
2544 * Send signal to firmware during boot time.
2545 */
2546 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
2547
2548 if (rt2x00_is_usb(rt2x00dev) &&
2549 (rt2x00_rt(rt2x00dev, RT3070) ||
2550 rt2x00_rt(rt2x00dev, RT3071) ||
2551 rt2x00_rt(rt2x00dev, RT3572))) {
2552 udelay(200);
2553 rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
2554 udelay(10);
2555 }
2556
2557 /*
2558 * Enable RX.
2559 */
2560 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
2561 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
2562 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
2563 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
2564
2565 udelay(50);
2566
2567 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
2568 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
2569 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
2570 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
2571 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
2572 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2573
2574 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
2575 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
2576 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
2577 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
2578
2579 /*
2580 * Initialize LED control
2581 */
2582 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
2583 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
2584 word & 0xff, (word >> 8) & 0xff);
2585
2586 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
2587 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
2588 word & 0xff, (word >> 8) & 0xff);
2589
2590 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
2591 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
2592 word & 0xff, (word >> 8) & 0xff);
2593
2594 return 0;
2595}
2596EXPORT_SYMBOL_GPL(rt2800_enable_radio);
2597
2598void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
2599{
2600 u32 reg;
2601
2602 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
2603 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
2604 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
2605 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
2606 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
2607 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
2608 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2609
2610 /* Wait for DMA, ignore error */
2611 rt2800_wait_wpdma_ready(rt2x00dev);
2612
2613 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
2614 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
2615 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
2616 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
2617
2618 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
2619 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
2620}
2621EXPORT_SYMBOL_GPL(rt2800_disable_radio);
2364 2622
2365int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev) 2623int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
2366{ 2624{
@@ -2516,6 +2774,13 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2516 default_lna_gain); 2774 default_lna_gain);
2517 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); 2775 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
2518 2776
2777 rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
2778 if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
2779 rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
2780 if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
2781 rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
2782 rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
2783
2519 return 0; 2784 return 0;
2520} 2785}
2521EXPORT_SYMBOL_GPL(rt2800_validate_eeprom); 2786EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
@@ -2755,9 +3020,10 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2755{ 3020{
2756 struct hw_mode_spec *spec = &rt2x00dev->spec; 3021 struct hw_mode_spec *spec = &rt2x00dev->spec;
2757 struct channel_info *info; 3022 struct channel_info *info;
2758 char *tx_power1; 3023 char *default_power1;
2759 char *tx_power2; 3024 char *default_power2;
2760 unsigned int i; 3025 unsigned int i;
3026 unsigned short max_power;
2761 u16 eeprom; 3027 u16 eeprom;
2762 3028
2763 /* 3029 /*
@@ -2865,27 +3131,32 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2865 /* 3131 /*
2866 * Create channel information array 3132 * Create channel information array
2867 */ 3133 */
2868 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 3134 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
2869 if (!info) 3135 if (!info)
2870 return -ENOMEM; 3136 return -ENOMEM;
2871 3137
2872 spec->channels_info = info; 3138 spec->channels_info = info;
2873 3139
2874 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); 3140 rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
2875 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); 3141 max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
3142 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
3143 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
2876 3144
2877 for (i = 0; i < 14; i++) { 3145 for (i = 0; i < 14; i++) {
2878 info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]); 3146 info[i].max_power = max_power;
2879 info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]); 3147 info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
3148 info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
2880 } 3149 }
2881 3150
2882 if (spec->num_channels > 14) { 3151 if (spec->num_channels > 14) {
2883 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); 3152 max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
2884 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); 3153 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
3154 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
2885 3155
2886 for (i = 14; i < spec->num_channels; i++) { 3156 for (i = 14; i < spec->num_channels; i++) {
2887 info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]); 3157 info[i].max_power = max_power;
2888 info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]); 3158 info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
3159 info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
2889 } 3160 }
2890 } 3161 }
2891 3162
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 091641e3c5e2..986229c06c19 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -1,4 +1,6 @@
1/* 1/*
2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
2 Copyright (C) 2009 Bartlomiej Zolnierkiewicz 4 Copyright (C) 2009 Bartlomiej Zolnierkiewicz
3 5
4 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -44,6 +46,7 @@ struct rt2800_ops {
44 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev, 46 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
45 const u8 *data, const size_t len); 47 const u8 *data, const size_t len);
46 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev); 48 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
49 __le32 *(*drv_get_txwi)(struct queue_entry *entry);
47}; 50};
48 51
49static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev, 52static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
@@ -126,18 +129,31 @@ static inline int rt2800_drv_init_registers(struct rt2x00_dev *rt2x00dev)
126 return rt2800ops->drv_init_registers(rt2x00dev); 129 return rt2800ops->drv_init_registers(rt2x00dev);
127} 130}
128 131
132static inline __le32 *rt2800_drv_get_txwi(struct queue_entry *entry)
133{
134 const struct rt2800_ops *rt2800ops = entry->queue->rt2x00dev->ops->drv;
135
136 return rt2800ops->drv_get_txwi(entry);
137}
138
129void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, 139void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
130 const u8 command, const u8 token, 140 const u8 command, const u8 token,
131 const u8 arg0, const u8 arg1); 141 const u8 arg0, const u8 arg1);
132 142
143int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev);
144int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
145
133int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev, 146int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
134 const u8 *data, const size_t len); 147 const u8 *data, const size_t len);
135int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, 148int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
136 const u8 *data, const size_t len); 149 const u8 *data, const size_t len);
137 150
138void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc); 151void rt2800_write_tx_data(struct queue_entry *entry,
152 struct txentry_desc *txdesc);
139void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc); 153void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
140 154
155void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
156
141void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); 157void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
142 158
143extern const struct rt2x00debug rt2800_rt2x00debug; 159extern const struct rt2x00debug rt2800_rt2x00debug;
@@ -163,10 +179,8 @@ void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
163void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 179void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
164 const u32 count); 180 const u32 count);
165 181
166int rt2800_init_registers(struct rt2x00_dev *rt2x00dev); 182int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev);
167int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev); 183void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
168int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
169int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
170 184
171int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); 185int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
172void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); 186void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 39b3846fa340..2bcb1507e3ac 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com> 3 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> 4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com> 5 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
@@ -196,8 +196,6 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
196{ 196{
197 u32 reg; 197 u32 reg;
198 198
199 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
200
201 /* 199 /*
202 * enable Host program ram write selection 200 * enable Host program ram write selection
203 */ 201 */
@@ -399,78 +397,18 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
399 397
400static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) 398static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
401{ 399{
402 u32 reg;
403 u16 word;
404
405 /*
406 * Initialize all registers.
407 */
408 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || 400 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
409 rt2800pci_init_queues(rt2x00dev) || 401 rt2800pci_init_queues(rt2x00dev)))
410 rt2800_init_registers(rt2x00dev) ||
411 rt2800_wait_wpdma_ready(rt2x00dev) ||
412 rt2800_init_bbp(rt2x00dev) ||
413 rt2800_init_rfcsr(rt2x00dev)))
414 return -EIO; 402 return -EIO;
415 403
416 /* 404 return rt2800_enable_radio(rt2x00dev);
417 * Send signal to firmware during boot time.
418 */
419 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
420
421 /*
422 * Enable RX.
423 */
424 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
425 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
426 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
427 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
428
429 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
430 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
431 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
432 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
433 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
434 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
435
436 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
437 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
438 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
439 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
440
441 /*
442 * Initialize LED control
443 */
444 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
445 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
446 word & 0xff, (word >> 8) & 0xff);
447
448 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
449 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
450 word & 0xff, (word >> 8) & 0xff);
451
452 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
453 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
454 word & 0xff, (word >> 8) & 0xff);
455
456 return 0;
457} 405}
458 406
459static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) 407static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
460{ 408{
461 u32 reg; 409 u32 reg;
462 410
463 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 411 rt2800_disable_radio(rt2x00dev);
464 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
465 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
466 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
467 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
468 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
469 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
470
471 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
472 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
473 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
474 412
475 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); 413 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
476 414
@@ -486,9 +424,6 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
486 424
487 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); 425 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
488 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 426 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
489
490 /* Wait for DMA, ignore error */
491 rt2800_wait_wpdma_ready(rt2x00dev);
492} 427}
493 428
494static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, 429static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -566,21 +501,16 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
566/* 501/*
567 * TX descriptor initialization 502 * TX descriptor initialization
568 */ 503 */
569static void rt2800pci_write_tx_data(struct queue_entry* entry, 504static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
570 struct txentry_desc *txdesc)
571{ 505{
572 __le32 *txwi = (__le32 *) entry->skb->data; 506 return (__le32 *) entry->skb->data;
573
574 rt2800_write_txwi(txwi, txdesc);
575} 507}
576 508
577 509static void rt2800pci_write_tx_desc(struct queue_entry *entry,
578static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
579 struct sk_buff *skb,
580 struct txentry_desc *txdesc) 510 struct txentry_desc *txdesc)
581{ 511{
582 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 512 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
583 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 513 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
584 __le32 *txd = entry_priv->desc; 514 __le32 *txd = entry_priv->desc;
585 u32 word; 515 u32 word;
586 516
@@ -600,7 +530,7 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
600 rt2x00_desc_write(txd, 0, word); 530 rt2x00_desc_write(txd, 0, word);
601 531
602 rt2x00_desc_read(txd, 1, &word); 532 rt2x00_desc_read(txd, 1, &word);
603 rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len); 533 rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
604 rt2x00_set_field32(&word, TXD_W1_LAST_SEC1, 534 rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
605 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 535 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
606 rt2x00_set_field32(&word, TXD_W1_BURST, 536 rt2x00_set_field32(&word, TXD_W1_BURST,
@@ -631,41 +561,35 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
631/* 561/*
632 * TX data initialization 562 * TX data initialization
633 */ 563 */
634static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 564static void rt2800pci_kick_tx_queue(struct data_queue *queue)
635 const enum data_queue_qid queue_idx)
636{ 565{
637 struct data_queue *queue; 566 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
638 unsigned int idx, qidx = 0; 567 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
639 568 unsigned int qidx = 0;
640 if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
641 return;
642
643 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
644 idx = queue->index[Q_INDEX];
645 569
646 if (queue_idx == QID_MGMT) 570 if (queue->qid == QID_MGMT)
647 qidx = 5; 571 qidx = 5;
648 else 572 else
649 qidx = queue_idx; 573 qidx = queue->qid;
650 574
651 rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx); 575 rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), entry->entry_idx);
652} 576}
653 577
654static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 578static void rt2800pci_kill_tx_queue(struct data_queue *queue)
655 const enum data_queue_qid qid)
656{ 579{
580 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
657 u32 reg; 581 u32 reg;
658 582
659 if (qid == QID_BEACON) { 583 if (queue->qid == QID_BEACON) {
660 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0); 584 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0);
661 return; 585 return;
662 } 586 }
663 587
664 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg); 588 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
665 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE)); 589 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (queue->qid == QID_AC_BE));
666 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK)); 590 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (queue->qid == QID_AC_BK));
667 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI)); 591 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (queue->qid == QID_AC_VI));
668 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO)); 592 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (queue->qid == QID_AC_VO));
669 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); 593 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
670} 594}
671 595
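Review note on the qid-to-register mapping above: the hardware exposes TX context index registers 0-3 for the four access-category queues plus index 5 for management, and the driver reuses the qid value directly for the AC queues. A minimal sketch of the mapping, assuming the enum values QID_AC_BE..QID_AC_VO are 0..3 as in this driver (the helper itself is hypothetical):

        /* Sketch only: management frames go to hardware queue 5; the four
         * access-category queues map straight through their enum values. */
        static unsigned int qid_to_ctx_idx(enum data_queue_qid qid)
        {
                return (qid == QID_MGMT) ? 5 : (unsigned int)qid;
        }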
@@ -728,110 +652,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
728/* 652/*
729 * Interrupt functions. 653 * Interrupt functions.
730 */ 654 */
731static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
732{
733 struct data_queue *queue;
734 struct queue_entry *entry;
735 __le32 *txwi;
736 struct txdone_entry_desc txdesc;
737 u32 word;
738 u32 reg;
739 int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
740 u16 mcs, real_mcs;
741 int i;
742
743 /*
744 * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
745 * at most X times and also stop processing once the TX_STA_FIFO_VALID
746 * flag is not set anymore.
747 *
748 * The legacy drivers use X=TX_RING_SIZE but state in a comment
749 * that the TX_STA_FIFO stack has a size of 16. We stick to our
750 * tx ring size for now.
751 */
752 for (i = 0; i < TX_ENTRIES; i++) {
753 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
754 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
755 break;
756
757 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
758 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
759 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
760
761 /*
762 * Skip this entry when it contains an invalid
 763 * queue identification number.
764 */
765 if (pid <= 0 || pid > QID_RX)
766 continue;
767
768 queue = rt2x00queue_get_queue(rt2x00dev, pid - 1);
769 if (unlikely(!queue))
770 continue;
771
772 /*
773 * Inside each queue, we process each entry in a chronological
774 * order. We first check that the queue is not empty.
775 */
776 if (rt2x00queue_empty(queue))
777 continue;
778 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
779
780 /* Check if we got a match by looking at WCID/ACK/PID
781 * fields */
782 txwi = (__le32 *) entry->skb->data;
783
784 rt2x00_desc_read(txwi, 1, &word);
785 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
786 tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
787 tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
788
789 if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid))
790 WARNING(rt2x00dev, "invalid TX_STA_FIFO content\n");
791
792 /*
793 * Obtain the status about this packet.
794 */
795 txdesc.flags = 0;
796 rt2x00_desc_read(txwi, 0, &word);
797 mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
798 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
799
800 /*
801 * Ralink has a retry mechanism using a global fallback
802 * table. We setup this fallback table to try the immediate
803 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
804 * always contains the MCS used for the last transmission, be
805 * it successful or not.
806 */
807 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
808 /*
809 * Transmission succeeded. The number of retries is
810 * mcs - real_mcs
811 */
812 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
813 txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
814 } else {
815 /*
816 * Transmission failed. The number of retries is
817 * always 7 in this case (for a total number of 8
818 * frames sent).
819 */
820 __set_bit(TXDONE_FAILURE, &txdesc.flags);
821 txdesc.retry = 7;
822 }
823
824 /*
825 * the frame was retried at least once
826 * -> hw used fallback rates
827 */
828 if (txdesc.retry)
829 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
830
831 rt2x00lib_txdone(entry, &txdesc);
832 }
833}
834
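The retry accounting in the removed rt2800pci_txdone (now inherited by the shared rt2800_txdone) is worth spelling out. A compilable sketch with hypothetical plain-int parameters, assuming the one-step-down fallback table the comment above describes:

        /* Sketch: with a fallback table that drops one MCS per retry, the
         * distance between the requested MCS and the MCS of the last
         * reported transmission is the retry count. */
        static int tx_retry_count(int mcs, int real_mcs, int success)
        {
                if (success)
                        return (mcs > real_mcs) ? mcs - real_mcs : 0;
                /* Failure means all 8 transmissions were used: 7 retries. */
                return 7;
        }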
835static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev) 655static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
836{ 656{
837 struct ieee80211_conf conf = { .flags = 0 }; 657 struct ieee80211_conf conf = { .flags = 0 };
@@ -867,7 +687,7 @@ static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
867 * 4 - Tx done interrupt. 687 * 4 - Tx done interrupt.
868 */ 688 */
869 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) 689 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
870 rt2800pci_txdone(rt2x00dev); 690 rt2800_txdone(rt2x00dev);
871 691
872 /* 692 /*
873 * 5 - Auto wakeup interrupt. 693 * 5 - Auto wakeup interrupt.
@@ -1011,6 +831,7 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
1011 .regbusy_read = rt2x00pci_regbusy_read, 831 .regbusy_read = rt2x00pci_regbusy_read,
1012 .drv_write_firmware = rt2800pci_write_firmware, 832 .drv_write_firmware = rt2800pci_write_firmware,
1013 .drv_init_registers = rt2800pci_init_registers, 833 .drv_init_registers = rt2800pci_init_registers,
834 .drv_get_txwi = rt2800pci_get_txwi,
1014}; 835};
1015 836
1016static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { 837static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
@@ -1030,7 +851,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1030 .reset_tuner = rt2800_reset_tuner, 851 .reset_tuner = rt2800_reset_tuner,
1031 .link_tuner = rt2800_link_tuner, 852 .link_tuner = rt2800_link_tuner,
1032 .write_tx_desc = rt2800pci_write_tx_desc, 853 .write_tx_desc = rt2800pci_write_tx_desc,
1033 .write_tx_data = rt2800pci_write_tx_data, 854 .write_tx_data = rt2800_write_tx_data,
1034 .write_beacon = rt2800_write_beacon, 855 .write_beacon = rt2800_write_beacon,
1035 .kick_tx_queue = rt2800pci_kick_tx_queue, 856 .kick_tx_queue = rt2800pci_kick_tx_queue,
1036 .kill_tx_queue = rt2800pci_kill_tx_queue, 857 .kill_tx_queue = rt2800pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 5a2dfe87c6b6..3dff56ec195a 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de> 4 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> 5 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com> 6 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
@@ -100,19 +101,6 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
100 msleep(10); 101 msleep(10);
101 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 102 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
102 103
103 /*
104 * Send signal to firmware during boot time.
105 */
106 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
107
108 if (rt2x00_rt(rt2x00dev, RT3070) ||
109 rt2x00_rt(rt2x00dev, RT3071) ||
110 rt2x00_rt(rt2x00dev, RT3572)) {
111 udelay(200);
112 rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
113 udelay(10);
114 }
115
116 return 0; 104 return 0;
117} 105}
118 106
@@ -134,26 +122,18 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
134static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev) 122static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
135{ 123{
136 u32 reg; 124 u32 reg;
137 int i;
138 125
139 /* 126 /*
140 * Wait until BBP and RF are ready. 127 * Wait until BBP and RF are ready.
141 */ 128 */
142 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 129 if (rt2800_wait_csr_ready(rt2x00dev))
143 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
144 if (reg && reg != ~0)
145 break;
146 msleep(1);
147 }
148
149 if (i == REGISTER_BUSY_COUNT) {
150 ERROR(rt2x00dev, "Unstable hardware.\n");
151 return -EBUSY; 130 return -EBUSY;
152 }
153 131
154 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg); 132 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
155 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000); 133 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
156 134
135 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
136
157 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 137 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
158 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); 138 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
159 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); 139 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
@@ -172,30 +152,10 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
172static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) 152static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
173{ 153{
174 u32 reg; 154 u32 reg;
175 u16 word;
176 155
177 /* 156 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev)))
178 * Initialize all registers.
179 */
180 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
181 rt2800_init_registers(rt2x00dev) ||
182 rt2800_init_bbp(rt2x00dev) ||
183 rt2800_init_rfcsr(rt2x00dev)))
184 return -EIO; 157 return -EIO;
185 158
186 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
187 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
188 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
189
190 udelay(50);
191
192 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
193 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
194 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
195 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
196 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
197
198
199 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg); 159 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
200 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); 160 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
201 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0); 161 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
@@ -210,45 +170,12 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
210 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); 170 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
211 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg); 171 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
212 172
213 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 173 return rt2800_enable_radio(rt2x00dev);
214 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
215 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
216 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
217
218 /*
219 * Initialize LED control
220 */
221 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
222 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
223 word & 0xff, (word >> 8) & 0xff);
224
225 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
226 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
227 word & 0xff, (word >> 8) & 0xff);
228
229 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
230 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
231 word & 0xff, (word >> 8) & 0xff);
232
233 return 0;
234} 174}
235 175
236static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev) 176static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
237{ 177{
238 u32 reg; 178 rt2800_disable_radio(rt2x00dev);
239
240 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
241 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
242 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
243 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
244
245 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
246 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
247 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
248
249 /* Wait for DMA, ignore error */
250 rt2800_wait_wpdma_ready(rt2x00dev);
251
252 rt2x00usb_disable_radio(rt2x00dev); 179 rt2x00usb_disable_radio(rt2x00dev);
253} 180}
254 181
@@ -320,21 +247,19 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
320/* 247/*
321 * TX descriptor initialization 248 * TX descriptor initialization
322 */ 249 */
323static void rt2800usb_write_tx_data(struct queue_entry* entry, 250static __le32 *rt2800usb_get_txwi(struct queue_entry *entry)
324 struct txentry_desc *txdesc)
325{ 251{
326 __le32 *txwi = (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE); 252 if (entry->queue->qid == QID_BEACON)
327 253 return (__le32 *) (entry->skb->data);
328 rt2800_write_txwi(txwi, txdesc); 254 else
255 return (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE);
329} 256}
330 257
331 258static void rt2800usb_write_tx_desc(struct queue_entry *entry,
332static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
333 struct sk_buff *skb,
334 struct txentry_desc *txdesc) 259 struct txentry_desc *txdesc)
335{ 260{
336 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 261 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
337 __le32 *txi = (__le32 *) skb->data; 262 __le32 *txi = (__le32 *) entry->skb->data;
338 u32 word; 263 u32 word;
339 264
340 /* 265 /*
@@ -342,7 +267,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
342 */ 267 */
343 rt2x00_desc_read(txi, 0, &word); 268 rt2x00_desc_read(txi, 0, &word);
344 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 269 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
345 skb->len - TXINFO_DESC_SIZE); 270 entry->skb->len - TXINFO_DESC_SIZE);
346 rt2x00_set_field32(&word, TXINFO_W0_WIV, 271 rt2x00_set_field32(&word, TXINFO_W0_WIV,
347 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 272 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
348 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); 273 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -379,6 +304,46 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
379} 304}
380 305
381/* 306/*
307 * TX control handlers
308 */
309static void rt2800usb_work_txdone(struct work_struct *work)
310{
311 struct rt2x00_dev *rt2x00dev =
312 container_of(work, struct rt2x00_dev, txdone_work);
313 struct data_queue *queue;
314 struct queue_entry *entry;
315
316 rt2800_txdone(rt2x00dev);
317
318 /*
 319 * Process any trailing TX status reports for IO failures;
320 * we loop until we find the first non-IO error entry. This
321 * can either be a frame which is free, is being uploaded,
322 * or has completed the upload but didn't have an entry
323 * in the TX_STAT_FIFO register yet.
324 */
325 tx_queue_for_each(rt2x00dev, queue) {
326 while (!rt2x00queue_empty(queue)) {
327 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
328
329 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
330 !test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
331 break;
332
333 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
334 }
335 }
336}
337
338static void rt2800usb_kill_tx_queue(struct data_queue *queue)
339{
340 if (queue->qid == QID_BEACON)
341 rt2x00usb_register_write(queue->rt2x00dev, BCN_TIME_CFG, 0);
342
343 rt2x00usb_kill_tx_queue(queue);
344}
345
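The reaping condition inside rt2800usb_work_txdone is compact enough to misread; restated as a hypothetical predicate (the flag names come from this patch, the helper itself does not):

        /* Sketch: an entry may be reaped here only when the device has
         * released it (owner bit clear) and the upload itself failed. */
        static bool entry_reapable_as_failed(unsigned long flags)
        {
                return !test_bit(ENTRY_OWNER_DEVICE_DATA, &flags) &&
                       test_bit(ENTRY_DATA_IO_FAILED, &flags);
        }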
346/*
382 * RX control handlers 347 * RX control handlers
383 */ 348 */
384static void rt2800usb_fill_rxdone(struct queue_entry *entry, 349static void rt2800usb_fill_rxdone(struct queue_entry *entry,
@@ -514,6 +479,11 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
514 */ 479 */
515 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; 480 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
516 481
482 /*
483 * Overwrite TX done handler
484 */
485 PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
486
517 return 0; 487 return 0;
518} 488}
519 489
@@ -549,6 +519,7 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
549 .regbusy_read = rt2x00usb_regbusy_read, 519 .regbusy_read = rt2x00usb_regbusy_read,
550 .drv_write_firmware = rt2800usb_write_firmware, 520 .drv_write_firmware = rt2800usb_write_firmware,
551 .drv_init_registers = rt2800usb_init_registers, 521 .drv_init_registers = rt2800usb_init_registers,
522 .drv_get_txwi = rt2800usb_get_txwi,
552}; 523};
553 524
554static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { 525static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
@@ -566,11 +537,11 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
566 .link_tuner = rt2800_link_tuner, 537 .link_tuner = rt2800_link_tuner,
567 .watchdog = rt2x00usb_watchdog, 538 .watchdog = rt2x00usb_watchdog,
568 .write_tx_desc = rt2800usb_write_tx_desc, 539 .write_tx_desc = rt2800usb_write_tx_desc,
569 .write_tx_data = rt2800usb_write_tx_data, 540 .write_tx_data = rt2800_write_tx_data,
570 .write_beacon = rt2800_write_beacon, 541 .write_beacon = rt2800_write_beacon,
571 .get_tx_data_len = rt2800usb_get_tx_data_len, 542 .get_tx_data_len = rt2800usb_get_tx_data_len,
572 .kick_tx_queue = rt2x00usb_kick_tx_queue, 543 .kick_tx_queue = rt2x00usb_kick_tx_queue,
573 .kill_tx_queue = rt2x00usb_kill_tx_queue, 544 .kill_tx_queue = rt2800usb_kill_tx_queue,
574 .fill_rxdone = rt2800usb_fill_rxdone, 545 .fill_rxdone = rt2800usb_fill_rxdone,
575 .config_shared_key = rt2800_config_shared_key, 546 .config_shared_key = rt2800_config_shared_key,
576 .config_pairwise_key = rt2800_config_pairwise_key, 547 .config_pairwise_key = rt2800_config_pairwise_key,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index c21af38cc5af..0ae942cb66df 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> 4 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
4 <http://rt2x00.serialmonkey.com> 5 <http://rt2x00.serialmonkey.com>
5 6
@@ -212,8 +213,9 @@ struct channel_info {
212 unsigned int flags; 213 unsigned int flags;
213#define GEOGRAPHY_ALLOWED 0x00000001 214#define GEOGRAPHY_ALLOWED 0x00000001
214 215
215 short tx_power1; 216 short max_power;
216 short tx_power2; 217 short default_power1;
218 short default_power2;
217}; 219};
218 220
219/* 221/*
@@ -558,18 +560,15 @@ struct rt2x00lib_ops {
558 /* 560 /*
559 * TX control handlers 561 * TX control handlers
560 */ 562 */
561 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, 563 void (*write_tx_desc) (struct queue_entry *entry,
562 struct sk_buff *skb,
563 struct txentry_desc *txdesc); 564 struct txentry_desc *txdesc);
564 void (*write_tx_data) (struct queue_entry *entry, 565 void (*write_tx_data) (struct queue_entry *entry,
565 struct txentry_desc *txdesc); 566 struct txentry_desc *txdesc);
566 void (*write_beacon) (struct queue_entry *entry, 567 void (*write_beacon) (struct queue_entry *entry,
567 struct txentry_desc *txdesc); 568 struct txentry_desc *txdesc);
568 int (*get_tx_data_len) (struct queue_entry *entry); 569 int (*get_tx_data_len) (struct queue_entry *entry);
569 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 570 void (*kick_tx_queue) (struct data_queue *queue);
570 const enum data_queue_qid queue); 571 void (*kill_tx_queue) (struct data_queue *queue);
571 void (*kill_tx_queue) (struct rt2x00_dev *rt2x00dev,
572 const enum data_queue_qid queue);
573 572
574 /* 573 /*
575 * RX control handlers 574 * RX control handlers
@@ -698,6 +697,7 @@ struct rt2x00_dev {
698 struct ieee80211_hw *hw; 697 struct ieee80211_hw *hw;
699 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 698 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
700 enum ieee80211_band curr_band; 699 enum ieee80211_band curr_band;
700 int curr_freq;
701 701
702 /* 702 /*
703 * If enabled, the debugfs interface structures 703 * If enabled, the debugfs interface structures
@@ -850,11 +850,6 @@ struct rt2x00_dev {
850 struct ieee80211_low_level_stats low_level_stats; 850 struct ieee80211_low_level_stats low_level_stats;
851 851
852 /* 852 /*
853 * RX configuration information.
854 */
855 struct ieee80211_rx_status rx_status;
856
857 /*
858 * Scheduled work. 853 * Scheduled work.
859 * NOTE: intf_work will use ieee80211_iterate_active_interfaces() 854 * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
860 * which means it cannot be placed on the hw->workqueue 855 * which means it cannot be placed on the hw->workqueue
@@ -862,6 +857,12 @@ struct rt2x00_dev {
862 */ 857 */
863 struct work_struct intf_work; 858 struct work_struct intf_work;
864 859
860 /**
861 * Scheduled work for TX/RX done handling (USB devices)
862 */
863 struct work_struct rxdone_work;
864 struct work_struct txdone_work;
865
865 /* 866 /*
866 * Data queue arrays for RX, TX and Beacon. 867 * Data queue arrays for RX, TX and Beacon.
867 * The Beacon array also contains the Atim queue 868 * The Beacon array also contains the Atim queue
@@ -1069,8 +1070,10 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
1069 */ 1070 */
1070void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); 1071void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
1071void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev); 1072void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev);
1073void rt2x00lib_dmadone(struct queue_entry *entry);
1072void rt2x00lib_txdone(struct queue_entry *entry, 1074void rt2x00lib_txdone(struct queue_entry *entry,
1073 struct txdone_entry_desc *txdesc); 1075 struct txdone_entry_desc *txdesc);
1076void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
1074void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, 1077void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
1075 struct queue_entry *entry); 1078 struct queue_entry *entry);
1076 1079
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 953dc4f2c6af..34f34fa7f53a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -126,11 +126,6 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
126 * ANTENNA_SW_DIVERSITY state to the driver. 126 * ANTENNA_SW_DIVERSITY state to the driver.
127 * If that happens, fallback to hardware defaults, 127 * If that happens, fallback to hardware defaults,
128 * or our own default. 128 * or our own default.
129 * If diversity handling is active for a particular antenna,
130 * we shouldn't overwrite that antenna.
131 * The calls to rt2x00lib_config_antenna_check()
132 * might have caused that we restore back to the already
133 * active setting. If that has happened we can quit.
134 */ 129 */
135 if (!(ant->flags & ANTENNA_RX_DIVERSITY)) 130 if (!(ant->flags & ANTENNA_RX_DIVERSITY))
136 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx); 131 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
@@ -142,9 +137,6 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
142 else 137 else
143 config.tx = active->tx; 138 config.tx = active->tx;
144 139
145 if (config.rx == active->rx && config.tx == active->tx)
146 return;
147
148 /* 140 /*
149 * Antenna setup changes require the RX to be disabled, 141 * Antenna setup changes require the RX to be disabled,
150 * else the changes will be ignored by the device. 142 * else the changes will be ignored by the device.
@@ -209,10 +201,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
209 rt2x00link_reset_tuner(rt2x00dev, false); 201 rt2x00link_reset_tuner(rt2x00dev, false);
210 202
211 rt2x00dev->curr_band = conf->channel->band; 203 rt2x00dev->curr_band = conf->channel->band;
204 rt2x00dev->curr_freq = conf->channel->center_freq;
212 rt2x00dev->tx_power = conf->power_level; 205 rt2x00dev->tx_power = conf->power_level;
213 rt2x00dev->short_retry = conf->short_frame_max_tx_count; 206 rt2x00dev->short_retry = conf->short_frame_max_tx_count;
214 rt2x00dev->long_retry = conf->long_frame_max_tx_count; 207 rt2x00dev->long_retry = conf->long_frame_max_tx_count;
215
216 rt2x00dev->rx_status.band = conf->channel->band;
217 rt2x00dev->rx_status.freq = conf->channel->center_freq;
218} 208}
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 583dacd8d241..5e9074bf2b8e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -31,15 +31,14 @@
31 31
32enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key) 32enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
33{ 33{
34 switch (key->alg) { 34 switch (key->cipher) {
35 case ALG_WEP: 35 case WLAN_CIPHER_SUITE_WEP40:
36 if (key->keylen == WLAN_KEY_LEN_WEP40) 36 return CIPHER_WEP64;
37 return CIPHER_WEP64; 37 case WLAN_CIPHER_SUITE_WEP104:
38 else 38 return CIPHER_WEP128;
39 return CIPHER_WEP128; 39 case WLAN_CIPHER_SUITE_TKIP:
40 case ALG_TKIP:
41 return CIPHER_TKIP; 40 return CIPHER_TKIP;
42 case ALG_CCMP: 41 case WLAN_CIPHER_SUITE_CCMP:
43 return CIPHER_AES; 42 return CIPHER_AES;
44 default: 43 default:
45 return CIPHER_NONE; 44 return CIPHER_NONE;
@@ -95,7 +94,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
95 overhead += key->iv_len; 94 overhead += key->iv_len;
96 95
97 if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { 96 if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
98 if (key->alg == ALG_TKIP) 97 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
99 overhead += 8; 98 overhead += 8;
100 } 99 }
101 100
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index b0498e7e7aae..b8cf45c4e9f5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -333,12 +333,12 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
333 if (*offset) 333 if (*offset)
334 return 0; 334 return 0;
335 335
336 data = kzalloc(lines * MAX_LINE_LENGTH, GFP_KERNEL); 336 data = kcalloc(lines, MAX_LINE_LENGTH, GFP_KERNEL);
337 if (!data) 337 if (!data)
338 return -ENOMEM; 338 return -ENOMEM;
339 339
340 temp = data + 340 temp = data +
341 sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdone\tcrypto\n"); 341 sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
342 342
343 queue_for_each(intf->rt2x00dev, queue) { 343 queue_for_each(intf->rt2x00dev, queue) {
344 spin_lock_irqsave(&queue->lock, irqflags); 344 spin_lock_irqsave(&queue->lock, irqflags);
@@ -346,8 +346,8 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
346 temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid, 346 temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
347 queue->count, queue->limit, queue->length, 347 queue->count, queue->limit, queue->length,
348 queue->index[Q_INDEX], 348 queue->index[Q_INDEX],
349 queue->index[Q_INDEX_DONE], 349 queue->index[Q_INDEX_DMA_DONE],
350 queue->index[Q_INDEX_CRYPTO]); 350 queue->index[Q_INDEX_DONE]);
351 351
352 spin_unlock_irqrestore(&queue->lock, irqflags); 352 spin_unlock_irqrestore(&queue->lock, irqflags);
353 } 353 }
@@ -481,6 +481,9 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
481 if (index >= debug->__name.word_count) \ 481 if (index >= debug->__name.word_count) \
482 return -EINVAL; \ 482 return -EINVAL; \
483 \ 483 \
484 if (length > sizeof(line)) \
485 return -EINVAL; \
486 \
484 if (copy_from_user(line, buf, length)) \ 487 if (copy_from_user(line, buf, length)) \
485 return -EFAULT; \ 488 return -EFAULT; \
486 \ 489 \
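Two hardening changes in this file are easy to gloss over: kcalloc refuses multiplications that overflow, where the open-coded lines * MAX_LINE_LENGTH did not, and the new length check stops copy_from_user from writing past the stack buffer. A userspace analogue of the allocator check, as a sketch (calloc performs the same test internally in modern C libraries; the explicit guard just makes it visible):

        #include <stdint.h>
        #include <stdlib.h>

        /* Sketch: a two-argument allocator can reject n * size overflow
         * before it silently wraps. */
        static void *alloc_array(size_t n, size_t size)
        {
                if (size != 0 && n > SIZE_MAX / size)
                        return NULL;    /* would overflow */
                return calloc(n, size);
        }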
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 585e8166f22a..053fdd3bd720 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 4 <http://rt2x00.serialmonkey.com>
4 5
5 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -250,6 +251,12 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
250} 251}
251EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); 252EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
252 253
254void rt2x00lib_dmadone(struct queue_entry *entry)
255{
256 rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE);
257}
258EXPORT_SYMBOL_GPL(rt2x00lib_dmadone);
259
253void rt2x00lib_txdone(struct queue_entry *entry, 260void rt2x00lib_txdone(struct queue_entry *entry,
254 struct txdone_entry_desc *txdesc) 261 struct txdone_entry_desc *txdesc)
255{ 262{
@@ -383,15 +390,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
383 * send the status report back. 390 * send the status report back.
384 */ 391 */
385 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) 392 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
386 /* 393 ieee80211_tx_status(rt2x00dev->hw, entry->skb);
387 * Only PCI and SOC devices process the tx status in process
388 * context. Hence use ieee80211_tx_status for PCI and SOC
389 * devices and stick to ieee80211_tx_status_irqsafe for USB.
390 */
391 if (rt2x00_is_usb(rt2x00dev))
392 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
393 else
394 ieee80211_tx_status(rt2x00dev->hw, entry->skb);
395 else 394 else
396 dev_kfree_skb_any(entry->skb); 395 dev_kfree_skb_any(entry->skb);
397 396
@@ -403,7 +402,6 @@ void rt2x00lib_txdone(struct queue_entry *entry,
403 402
404 rt2x00dev->ops->lib->clear_entry(entry); 403 rt2x00dev->ops->lib->clear_entry(entry);
405 404
406 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
407 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 405 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
408 406
409 /* 407 /*
@@ -416,6 +414,18 @@ void rt2x00lib_txdone(struct queue_entry *entry,
416} 414}
417EXPORT_SYMBOL_GPL(rt2x00lib_txdone); 415EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
418 416
417void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status)
418{
419 struct txdone_entry_desc txdesc;
420
421 txdesc.flags = 0;
422 __set_bit(status, &txdesc.flags);
423 txdesc.retry = 0;
424
425 rt2x00lib_txdone(entry, &txdesc);
426}
427EXPORT_SYMBOL_GPL(rt2x00lib_txdone_noinfo);
428
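A caller's-eye view of the new helper, matching how the USB path uses it elsewhere in this patch (a sketch; the surrounding locking and queue walk are omitted):

        /* Sketch: complete the oldest entry without a hardware status
         * report, e.g. after the transfer itself failed. */
        static void fail_oldest_entry(struct data_queue *queue)
        {
                struct queue_entry *entry =
                        rt2x00queue_get_entry(queue, Q_INDEX_DONE);

                rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
        }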
419static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, 429static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
420 struct rxdone_entry_desc *rxdesc) 430 struct rxdone_entry_desc *rxdesc)
421{ 431{
@@ -460,9 +470,13 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
460{ 470{
461 struct rxdone_entry_desc rxdesc; 471 struct rxdone_entry_desc rxdesc;
462 struct sk_buff *skb; 472 struct sk_buff *skb;
463 struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status; 473 struct ieee80211_rx_status *rx_status;
464 unsigned int header_length; 474 unsigned int header_length;
465 int rate_idx; 475 int rate_idx;
476
477 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
478 goto submit_entry;
479
466 /* 480 /*
467 * Allocate a new sk_buffer. If no new buffer available, drop the 481 * Allocate a new sk_buffer. If no new buffer available, drop the
468 * received frame and reuse the existing buffer. 482 * received frame and reuse the existing buffer.
@@ -527,39 +541,32 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
527 */ 541 */
528 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); 542 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
529 rt2x00debug_update_crypto(rt2x00dev, &rxdesc); 543 rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
544 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
530 545
546 /*
547 * Initialize RX status information, and send frame
548 * to mac80211.
549 */
550 rx_status = IEEE80211_SKB_RXCB(entry->skb);
531 rx_status->mactime = rxdesc.timestamp; 551 rx_status->mactime = rxdesc.timestamp;
552 rx_status->band = rt2x00dev->curr_band;
553 rx_status->freq = rt2x00dev->curr_freq;
532 rx_status->rate_idx = rate_idx; 554 rx_status->rate_idx = rate_idx;
533 rx_status->signal = rxdesc.rssi; 555 rx_status->signal = rxdesc.rssi;
534 rx_status->flag = rxdesc.flags; 556 rx_status->flag = rxdesc.flags;
535 rx_status->antenna = rt2x00dev->link.ant.active.rx; 557 rx_status->antenna = rt2x00dev->link.ant.active.rx;
536 558
537 /* 559 ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
538 * Send frame to mac80211 & debugfs.
539 * mac80211 will clean up the skb structure.
540 */
541 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
542 memcpy(IEEE80211_SKB_RXCB(entry->skb), rx_status, sizeof(*rx_status));
543
544 /*
545 * Currently only PCI and SOC devices handle rx interrupts in process
546 * context. Hence, use ieee80211_rx_irqsafe for USB and ieee80211_rx_ni
547 * for PCI and SOC devices.
548 */
549 if (rt2x00_is_usb(rt2x00dev))
550 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb);
551 else
552 ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
553 560
554 /* 561 /*
555 * Replace the skb with the freshly allocated one. 562 * Replace the skb with the freshly allocated one.
556 */ 563 */
557 entry->skb = skb; 564 entry->skb = skb;
558 entry->flags = 0;
559 565
566submit_entry:
560 rt2x00dev->ops->lib->clear_entry(entry); 567 rt2x00dev->ops->lib->clear_entry(entry);
561
562 rt2x00queue_index_inc(entry->queue, Q_INDEX); 568 rt2x00queue_index_inc(entry->queue, Q_INDEX);
569 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
563} 570}
564EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); 571EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
565 572
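The rewritten tail leans on two mac80211 conventions worth restating: the RX status lives in the skb control buffer via IEEE80211_SKB_RXCB (a real mac80211 accessor), and ieee80211_rx_ni both delivers and consumes the skb, so the driver must not touch it afterwards. A sketch of the fill-and-deliver pattern, with the local variables hypothetical:

        struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);

        rx_status->band = curr_band;    /* cached when the channel was set */
        rx_status->freq = curr_freq;
        rx_status->signal = rssi;
        rx_status->flag = flags;
        ieee80211_rx_ni(hw, skb);       /* delivers and consumes the skb */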
@@ -710,7 +717,7 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
710 for (i = 0; i < spec->num_channels; i++) { 717 for (i = 0; i < spec->num_channels; i++) {
711 rt2x00lib_channel(&channels[i], 718 rt2x00lib_channel(&channels[i],
712 spec->channels[i].channel, 719 spec->channels[i].channel,
713 spec->channels_info[i].tx_power1, i); 720 spec->channels_info[i].max_power, i);
714 } 721 }
715 722
716 /* 723 /*
@@ -1017,6 +1024,8 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1017 * Stop all work. 1024 * Stop all work.
1018 */ 1025 */
1019 cancel_work_sync(&rt2x00dev->intf_work); 1026 cancel_work_sync(&rt2x00dev->intf_work);
1027 cancel_work_sync(&rt2x00dev->rxdone_work);
1028 cancel_work_sync(&rt2x00dev->txdone_work);
1020 1029
1021 /* 1030 /*
1022 * Uninitialize device. 1031 * Uninitialize device.
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index b818a43c4672..f0e1eb72befc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -63,6 +63,9 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
63 63
64 INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n", 64 INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n",
65 fw->data[fw->size - 4], fw->data[fw->size - 3]); 65 fw->data[fw->size - 4], fw->data[fw->size - 3]);
66 snprintf(rt2x00dev->hw->wiphy->fw_version,
67 sizeof(rt2x00dev->hw->wiphy->fw_version), "%d.%d",
68 fw->data[fw->size - 4], fw->data[fw->size - 3]);
66 69
67 retval = rt2x00dev->ops->lib->check_firmware(rt2x00dev, fw->data, fw->size); 70 retval = rt2x00dev->ops->lib->check_firmware(rt2x00dev, fw->data, fw->size);
68 switch (retval) { 71 switch (retval) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index c004cd3a8847..ad3c7ff4837b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -54,6 +54,16 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
54 */ 54 */
55 if (txrate->flags & IEEE80211_TX_RC_MCS) { 55 if (txrate->flags & IEEE80211_TX_RC_MCS) {
56 txdesc->mcs = txrate->idx; 56 txdesc->mcs = txrate->idx;
57
58 /*
 59 * MIMO PS should be set to 1 for STAs using dynamic SM PS
 60 * when using more than one TX stream (>MCS7).
61 */
62 if (tx_info->control.sta && txdesc->mcs > 7 &&
63 (tx_info->control.sta->ht_cap.cap &
64 (WLAN_HT_CAP_SM_PS_DYNAMIC <<
65 IEEE80211_HT_CAP_SM_PS_SHIFT)))
66 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
57 } else { 67 } else {
58 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); 68 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
59 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 69 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
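The SM PS test added above packs three conditions into one expression; unpacked as a hypothetical helper (IEEE80211_HT_CAP_SM_PS, its shift, and WLAN_HT_CAP_SM_PS_DYNAMIC are the standard ieee80211.h definitions):

        /* Sketch: MCS 0-7 are single-stream; only higher rates light up a
         * second TX chain, which a peer in dynamic SM power save must be
         * warned about (hence the MIMO PS descriptor bit). */
        static bool peer_needs_mimo_ps(u16 ht_cap, u8 mcs)
        {
                u8 sm_ps = (ht_cap & IEEE80211_HT_CAP_SM_PS) >>
                           IEEE80211_HT_CAP_SM_PS_SHIFT;

                return mcs > 7 && sm_ps == WLAN_HT_CAP_SM_PS_DYNAMIC;
        }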
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a3401d301058..eede99939db9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> 4 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
4 <http://rt2x00.serialmonkey.com> 5 <http://rt2x00.serialmonkey.com>
5 6
@@ -311,7 +312,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
311 /* 312 /*
312 * Initialize information from queue 313 * Initialize information from queue
313 */ 314 */
314 txdesc->queue = entry->queue->qid; 315 txdesc->qid = entry->queue->qid;
315 txdesc->cw_min = entry->queue->cw_min; 316 txdesc->cw_min = entry->queue->cw_min;
316 txdesc->cw_max = entry->queue->cw_max; 317 txdesc->cw_max = entry->queue->cw_max;
317 txdesc->aifs = entry->queue->aifs; 318 txdesc->aifs = entry->queue->aifs;
@@ -448,15 +449,14 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
448 struct txentry_desc *txdesc) 449 struct txentry_desc *txdesc)
449{ 450{
450 struct data_queue *queue = entry->queue; 451 struct data_queue *queue = entry->queue;
451 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
452 452
453 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc); 453 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
454 454
455 /* 455 /*
456 * All processing on the frame has been completed, this means 456 * All processing on the frame has been completed, this means
457 * it is now ready to be dumped to userspace through debugfs. 457 * it is now ready to be dumped to userspace through debugfs.
458 */ 458 */
459 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb); 459 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
460} 460}
461 461
462static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, 462static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
@@ -476,7 +476,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
476 */ 476 */
477 if (rt2x00queue_threshold(queue) || 477 if (rt2x00queue_threshold(queue) ||
478 !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) 478 !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
479 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid); 479 rt2x00dev->ops->lib->kick_tx_queue(queue);
480} 480}
481 481
482int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, 482int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@@ -590,7 +590,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
590 intf->beacon->skb = NULL; 590 intf->beacon->skb = NULL;
591 591
592 if (!enable_beacon) { 592 if (!enable_beacon) {
593 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON); 593 rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
594 mutex_unlock(&intf->beacon_skb_mutex); 594 mutex_unlock(&intf->beacon_skb_mutex);
595 return 0; 595 return 0;
596 } 596 }
@@ -625,6 +625,51 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
625 return 0; 625 return 0;
626} 626}
627 627
628void rt2x00queue_for_each_entry(struct data_queue *queue,
629 enum queue_index start,
630 enum queue_index end,
631 void (*fn)(struct queue_entry *entry))
632{
633 unsigned long irqflags;
634 unsigned int index_start;
635 unsigned int index_end;
636 unsigned int i;
637
638 if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
639 ERROR(queue->rt2x00dev,
640 "Entry requested from invalid index range (%d - %d)\n",
641 start, end);
642 return;
643 }
644
645 /*
646 * Only protect the range we are going to loop over,
 647 * if during our loop an extra entry is set to pending,
648 * it should not be kicked during this run, since it
649 * is part of another TX operation.
650 */
651 spin_lock_irqsave(&queue->lock, irqflags);
652 index_start = queue->index[start];
653 index_end = queue->index[end];
654 spin_unlock_irqrestore(&queue->lock, irqflags);
655
656 /*
 657 * Start from the TX done pointer; this guarantees that we will
658 * send out all frames in the correct order.
659 */
660 if (index_start < index_end) {
661 for (i = index_start; i < index_end; i++)
662 fn(&queue->entries[i]);
663 } else {
664 for (i = index_start; i < queue->limit; i++)
665 fn(&queue->entries[i]);
666
667 for (i = 0; i < index_end; i++)
668 fn(&queue->entries[i]);
669 }
670}
671EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
672
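Intended usage of the new iterator, sketched (rt2x00usb_kick_tx_entry is the per-entry kick helper in rt2x00usb.c; wiring it up this way is where the series is headed, not something this hunk does yet):

        /* Sketch: submit every pending entry between the done pointer and
         * the current index, preserving submission order across wraps. */
        static void kick_pending(struct data_queue *queue)
        {
                rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
                                           rt2x00usb_kick_tx_entry);
        }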
628struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 673struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
629 const enum data_queue_qid queue) 674 const enum data_queue_qid queue)
630{ 675{
@@ -686,13 +731,13 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
686 if (queue->index[index] >= queue->limit) 731 if (queue->index[index] >= queue->limit)
687 queue->index[index] = 0; 732 queue->index[index] = 0;
688 733
734 queue->last_action[index] = jiffies;
735
689 if (index == Q_INDEX) { 736 if (index == Q_INDEX) {
690 queue->length++; 737 queue->length++;
691 queue->last_index = jiffies;
692 } else if (index == Q_INDEX_DONE) { 738 } else if (index == Q_INDEX_DONE) {
693 queue->length--; 739 queue->length--;
694 queue->count++; 740 queue->count++;
695 queue->last_index_done = jiffies;
696 } 741 }
697 742
698 spin_unlock_irqrestore(&queue->lock, irqflags); 743 spin_unlock_irqrestore(&queue->lock, irqflags);
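For reference, the ring arithmetic the locked section above maintains, reduced to a compilable sketch:

        /* Sketch: indexes advance modulo the queue limit; the distance from
         * Q_INDEX_DONE to Q_INDEX (mod limit) is queue->length. */
        static unsigned int ring_next(unsigned int idx, unsigned int limit)
        {
                return (idx + 1 < limit) ? idx + 1 : 0;
        }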
@@ -701,14 +746,17 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
701static void rt2x00queue_reset(struct data_queue *queue) 746static void rt2x00queue_reset(struct data_queue *queue)
702{ 747{
703 unsigned long irqflags; 748 unsigned long irqflags;
749 unsigned int i;
704 750
705 spin_lock_irqsave(&queue->lock, irqflags); 751 spin_lock_irqsave(&queue->lock, irqflags);
706 752
707 queue->count = 0; 753 queue->count = 0;
708 queue->length = 0; 754 queue->length = 0;
709 queue->last_index = jiffies; 755
710 queue->last_index_done = jiffies; 756 for (i = 0; i < Q_INDEX_MAX; i++) {
711 memset(queue->index, 0, sizeof(queue->index)); 757 queue->index[i] = 0;
758 queue->last_action[i] = jiffies;
759 }
712 760
713 spin_unlock_irqrestore(&queue->lock, irqflags); 761 spin_unlock_irqrestore(&queue->lock, irqflags);
714} 762}
@@ -718,7 +766,7 @@ void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
718 struct data_queue *queue; 766 struct data_queue *queue;
719 767
720 txall_queue_for_each(rt2x00dev, queue) 768 txall_queue_for_each(rt2x00dev, queue)
721 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid); 769 rt2x00dev->ops->lib->kill_tx_queue(queue);
722} 770}
723 771
724void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) 772void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
@@ -730,9 +778,9 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
730 rt2x00queue_reset(queue); 778 rt2x00queue_reset(queue);
731 779
732 for (i = 0; i < queue->limit; i++) { 780 for (i = 0; i < queue->limit; i++) {
733 queue->entries[i].flags = 0;
734
735 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); 781 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
782 if (queue->qid == QID_RX)
783 rt2x00queue_index_inc(queue, Q_INDEX);
736 } 784 }
737 } 785 }
738} 786}
@@ -755,7 +803,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
755 * Allocate all queue entries. 803 * Allocate all queue entries.
756 */ 804 */
757 entry_size = sizeof(*entries) + qdesc->priv_size; 805 entry_size = sizeof(*entries) + qdesc->priv_size;
758 entries = kzalloc(queue->limit * entry_size, GFP_KERNEL); 806 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
759 if (!entries) 807 if (!entries)
760 return -ENOMEM; 808 return -ENOMEM;
761 809
@@ -891,7 +939,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
891 */ 939 */
892 rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; 940 rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
893 941
894 queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL); 942 queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
895 if (!queue) { 943 if (!queue) {
896 ERROR(rt2x00dev, "Queue allocation failed.\n"); 944 ERROR(rt2x00dev, "Queue allocation failed.\n");
897 return -ENOMEM; 945 return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 191e7775a9c0..d81d85f34866 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -268,6 +268,7 @@ struct txdone_entry_desc {
268 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU. 268 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
269 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth. 269 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
270 * @ENTRY_TXD_HT_SHORT_GI: Use short GI. 270 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
271 * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
271 */ 272 */
272enum txentry_desc_flags { 273enum txentry_desc_flags {
273 ENTRY_TXD_RTS_FRAME, 274 ENTRY_TXD_RTS_FRAME,
@@ -286,6 +287,7 @@ enum txentry_desc_flags {
286 ENTRY_TXD_HT_AMPDU, 287 ENTRY_TXD_HT_AMPDU,
287 ENTRY_TXD_HT_BW_40, 288 ENTRY_TXD_HT_BW_40,
288 ENTRY_TXD_HT_SHORT_GI, 289 ENTRY_TXD_HT_SHORT_GI,
290 ENTRY_TXD_HT_MIMO_PS,
289}; 291};
290 292
291/** 293/**
@@ -294,7 +296,7 @@ enum txentry_desc_flags {
294 * Summary of information for the frame descriptor before sending a TX frame. 296 * Summary of information for the frame descriptor before sending a TX frame.
295 * 297 *
296 * @flags: Descriptor flags (See &enum queue_entry_flags). 298 * @flags: Descriptor flags (See &enum queue_entry_flags).
297 * @queue: Queue identification (See &enum data_queue_qid). 299 * @qid: Queue identification (See &enum data_queue_qid).
298 * @length: Length of the entire frame. 300 * @length: Length of the entire frame.
299 * @header_length: Length of 802.11 header. 301 * @header_length: Length of 802.11 header.
300 * @length_high: PLCP length high word. 302 * @length_high: PLCP length high word.
@@ -320,7 +322,7 @@ enum txentry_desc_flags {
320struct txentry_desc { 322struct txentry_desc {
321 unsigned long flags; 323 unsigned long flags;
322 324
323 enum data_queue_qid queue; 325 enum data_queue_qid qid;
324 326
325 u16 length; 327 u16 length;
326 u16 header_length; 328 u16 header_length;
@@ -358,17 +360,17 @@ struct txentry_desc {
358 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data 360 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
359 * transfer (either TX or RX depending on the queue). The entry should 361 * transfer (either TX or RX depending on the queue). The entry should
360 * only be touched after the device has signaled it is done with it. 362 * only be touched after the device has signaled it is done with it.
361 * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
362 * encryption or decryption. The entry should only be touched after
363 * the device has signaled it is done with it.
364 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting 363 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
365 * for the signal to start sending. 364 * for the signal to start sending.
 365 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 366 * while transferring the data to the hardware. No TX status report will
367 * be expected from the hardware.
366 */ 368 */
367enum queue_entry_flags { 369enum queue_entry_flags {
368 ENTRY_BCN_ASSIGNED, 370 ENTRY_BCN_ASSIGNED,
369 ENTRY_OWNER_DEVICE_DATA, 371 ENTRY_OWNER_DEVICE_DATA,
370 ENTRY_OWNER_DEVICE_CRYPTO,
371 ENTRY_DATA_PENDING, 372 ENTRY_DATA_PENDING,
373 ENTRY_DATA_IO_FAILED
372}; 374};
373 375
374/** 376/**
@@ -399,18 +401,18 @@ struct queue_entry {
399 * 401 *
400 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is 402 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
401 * owned by the hardware then the queue is considered to be full. 403 * owned by the hardware then the queue is considered to be full.
404 * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
 405 * transferred to the hardware.
402 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by 406 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
403 * the hardware and for which we need to run the txdone handler. If this 407 * the hardware and for which we need to run the txdone handler. If this
404 * entry is not owned by the hardware the queue is considered to be empty. 408 * entry is not owned by the hardware the queue is considered to be empty.
 405 * @Q_INDEX_CRYPTO: Index pointer to the next entry whose encryption/decryption
406 * will be completed by the hardware next.
407 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size 409 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
408 * of the index array. 410 * of the index array.
409 */ 411 */
410enum queue_index { 412enum queue_index {
411 Q_INDEX, 413 Q_INDEX,
414 Q_INDEX_DMA_DONE,
412 Q_INDEX_DONE, 415 Q_INDEX_DONE,
413 Q_INDEX_CRYPTO,
414 Q_INDEX_MAX, 416 Q_INDEX_MAX,
415}; 417};
416 418
@@ -446,13 +448,12 @@ struct data_queue {
446 enum data_queue_qid qid; 448 enum data_queue_qid qid;
447 449
448 spinlock_t lock; 450 spinlock_t lock;
449 unsigned long last_index;
450 unsigned long last_index_done;
451 unsigned int count; 451 unsigned int count;
452 unsigned short limit; 452 unsigned short limit;
453 unsigned short threshold; 453 unsigned short threshold;
454 unsigned short length; 454 unsigned short length;
455 unsigned short index[Q_INDEX_MAX]; 455 unsigned short index[Q_INDEX_MAX];
456 unsigned long last_action[Q_INDEX_MAX];
456 457
457 unsigned short txop; 458 unsigned short txop;
458 unsigned short aifs; 459 unsigned short aifs;
@@ -565,6 +566,22 @@ struct data_queue_desc {
565 queue_loop(__entry, (__dev)->tx, queue_end(__dev)) 566 queue_loop(__entry, (__dev)->tx, queue_end(__dev))
566 567
567/** 568/**
569 * rt2x00queue_for_each_entry - Loop through all entries in the queue
570 * @queue: Pointer to @data_queue
571 * @start: &enum queue_index Pointer to start index
572 * @end: &enum queue_index Pointer to end index
573 * @fn: The function to call for each &struct queue_entry
574 *
575 * This will walk through all entries in the queue, in chronological
576 * order. This means it will start at the current @start pointer
577 * and will walk through the queue until it reaches the @end pointer.
578 */
579void rt2x00queue_for_each_entry(struct data_queue *queue,
580 enum queue_index start,
581 enum queue_index end,
582 void (*fn)(struct queue_entry *entry));
583
584/**
568 * rt2x00queue_empty - Check if the queue is empty. 585 * rt2x00queue_empty - Check if the queue is empty.
569 * @queue: Queue to check if empty. 586 * @queue: Queue to check if empty.
570 */ 587 */
@@ -601,12 +618,23 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
601} 618}
602 619
603/** 620/**
 604 * rt2x00queue_timeout - Check if a timeout occurred for this queue 621 * rt2x00queue_timeout - Check if a timeout occurred for status reports
605 * @queue: Queue to check. 622 * @queue: Queue to check.
606 */ 623 */
607static inline int rt2x00queue_timeout(struct data_queue *queue) 624static inline int rt2x00queue_timeout(struct data_queue *queue)
608{ 625{
609 return time_after(queue->last_index, queue->last_index_done + (HZ / 10)); 626 return time_after(queue->last_action[Q_INDEX_DMA_DONE],
627 queue->last_action[Q_INDEX_DONE] + (HZ / 10));
628}
629
630/**
631 * rt2x00queue_timeout - Check if a timeout occured for DMA transfers
632 * @queue: Queue to check.
633 */
634static inline int rt2x00queue_dma_timeout(struct data_queue *queue)
635{
636 return time_after(queue->last_action[Q_INDEX],
637 queue->last_action[Q_INDEX_DMA_DONE] + (HZ / 10));
610} 638}
611 639
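How the two predicates divide watchdog duty, as a hypothetical condensation (the real consumer is the rt2x00usb_watchdog hook already wired into the ops tables above):

        /* Sketch: DMA timeout = frames handed over but no transfer
         * completion within 100ms; status timeout = transfers done but no
         * TX status within 100ms of the last DMA completion. */
        static bool queue_is_stuck(struct data_queue *queue)
        {
                return rt2x00queue_dma_timeout(queue) ||
                       rt2x00queue_timeout(queue);
        }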
612/** 640/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index ff3a36622d1b..4c5ae3d45625 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 4 <http://rt2x00.serialmonkey.com>
4 5
5 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -167,137 +168,142 @@ EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);
167/* 168/*
168 * TX data handlers. 169 * TX data handlers.
169 */ 170 */
170static void rt2x00usb_interrupt_txdone(struct urb *urb) 171static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
171{ 172{
172 struct queue_entry *entry = (struct queue_entry *)urb->context;
173 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
174 struct txdone_entry_desc txdesc;
175
176 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
177 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
178 return;
179
180 /* 173 /*
181 * Obtain the status about this packet. 174 * If the transfer to hardware succeeded, it does not mean the
182 * Note that when the status is 0 it does not mean the
183 * frame was send out correctly. It only means the frame 175 * frame was send out correctly. It only means the frame
184 * was succesfully pushed to the hardware, we have no 176 * was succesfully pushed to the hardware, we have no
185 * way to determine the transmission status right now. 177 * way to determine the transmission status right now.
186 * (Only indirectly by looking at the failed TX counters 178 * (Only indirectly by looking at the failed TX counters
187 * in the register). 179 * in the register).
188 */ 180 */
189 txdesc.flags = 0; 181 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
190 if (!urb->status) 182 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
191 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
192 else 183 else
193 __set_bit(TXDONE_FAILURE, &txdesc.flags); 184 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
194 txdesc.retry = 0;
195
196 rt2x00lib_txdone(entry, &txdesc);
197} 185}
198 186
199static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry) 187static void rt2x00usb_work_txdone(struct work_struct *work)
200{ 188{
201 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 189 struct rt2x00_dev *rt2x00dev =
202 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 190 container_of(work, struct rt2x00_dev, txdone_work);
203 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 191 struct data_queue *queue;
204 u32 length; 192 struct queue_entry *entry;
205 193
206 if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) { 194 tx_queue_for_each(rt2x00dev, queue) {
207 /* 195 while (!rt2x00queue_empty(queue)) {
208 * USB devices cannot blindly pass the skb->len as the 196 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
209 * length of the data to usb_fill_bulk_urb. Pass the skb
210 * to the driver to determine what the length should be.
211 */
212 length = rt2x00dev->ops->lib->get_tx_data_len(entry);
213 197
214 usb_fill_bulk_urb(entry_priv->urb, usb_dev, 198 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
215 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), 199 break;
216 entry->skb->data, length,
217 rt2x00usb_interrupt_txdone, entry);
218 200
219 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 201 rt2x00usb_work_txdone_entry(entry);
202 }
220 } 203 }
221} 204}
222 205
223void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 206static void rt2x00usb_interrupt_txdone(struct urb *urb)
224 const enum data_queue_qid qid)
225{ 207{
226 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid); 208 struct queue_entry *entry = (struct queue_entry *)urb->context;
227 unsigned long irqflags; 209 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
228 unsigned int index; 210
229 unsigned int index_done; 211 if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
230 unsigned int i; 212 return;
231 213
232 /* 214 /*
 233 * Only protect the range we are going to loop over; 215 * Report the frame as DMA done
 234 * if during our loop an extra entry is set to pending
235 * it should not be kicked during this run, since it
236 * is part of another TX operation.
237 */ 216 */
238 spin_lock_irqsave(&queue->lock, irqflags); 217 rt2x00lib_dmadone(entry);
239 index = queue->index[Q_INDEX];
240 index_done = queue->index[Q_INDEX_DONE];
241 spin_unlock_irqrestore(&queue->lock, irqflags);
242 218
243 /* 219 /*
 244 * Start from the TX done pointer; this guarantees that we will 220 * Check if the frame was correctly uploaded
245 * send out all frames in the correct order.
246 */ 221 */
247 if (index_done < index) { 222 if (urb->status)
248 for (i = index_done; i < index; i++) 223 __set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
249 rt2x00usb_kick_tx_entry(&queue->entries[i]);
250 } else {
251 for (i = index_done; i < queue->limit; i++)
252 rt2x00usb_kick_tx_entry(&queue->entries[i]);
253 224
254 for (i = 0; i < index; i++) 225 /*
255 rt2x00usb_kick_tx_entry(&queue->entries[i]); 226 * Schedule the delayed work for reading the TX status
256 } 227 * from the device.
228 */
229 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
230 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
231 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
232}
233
234static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
235{
236 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
237 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
238 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
239 u32 length;
240
241 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
242 return;
243
244 /*
245 * USB devices cannot blindly pass the skb->len as the
246 * length of the data to usb_fill_bulk_urb. Pass the skb
247 * to the driver to determine what the length should be.
248 */
249 length = rt2x00dev->ops->lib->get_tx_data_len(entry);
250
251 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
252 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
253 entry->skb->data, length,
254 rt2x00usb_interrupt_txdone, entry);
255
256 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
257}
258
259void rt2x00usb_kick_tx_queue(struct data_queue *queue)
260{
261 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
262 rt2x00usb_kick_tx_entry);
257} 263}
258EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue); 264EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
259 265
260void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 266static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
261 const enum data_queue_qid qid)
262{ 267{
263 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid); 268 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
264 struct queue_entry_priv_usb *entry_priv; 269 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
265 struct queue_entry_priv_usb_bcn *bcn_priv; 270 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
266 unsigned int i; 271
267 bool kill_guard; 272 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
273 return;
274
275 usb_kill_urb(entry_priv->urb);
268 276
269 /* 277 /*
270 * When killing the beacon queue, we must also kill 278 * Kill guardian urb (if required by driver).
271 * the beacon guard byte.
272 */ 279 */
273 kill_guard = 280 if ((entry->queue->qid == QID_BEACON) &&
274 (qid == QID_BEACON) && 281 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
275 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)); 282 usb_kill_urb(bcn_priv->guardian_urb);
276 283
277 /* 284 /*
278 * Cancel all entries. 285 * We need a short delay here to wait for
286 * the URB to be canceled
279 */ 287 */
280 for (i = 0; i < queue->limit; i++) { 288 do {
281 entry_priv = queue->entries[i].priv_data; 289 udelay(100);
282 usb_kill_urb(entry_priv->urb); 290 } while (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags));
291}
283 292
284 /* 293void rt2x00usb_kill_tx_queue(struct data_queue *queue)
285 * Kill guardian urb (if required by driver). 294{
286 */ 295 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
287 if (kill_guard) { 296 rt2x00usb_kill_tx_entry);
288 bcn_priv = queue->entries[i].priv_data;
289 usb_kill_urb(bcn_priv->guardian_urb);
290 }
291 }
292} 297}
293EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue); 298EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
294 299
295static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue) 300static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
296{ 301{
297 struct queue_entry_priv_usb *entry_priv; 302 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
298 unsigned short threshold = queue->threshold; 303 unsigned short threshold = queue->threshold;
299 304
300 WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid); 305 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
306 " invoke forced forced reset", queue->qid);
301 307
302 /* 308 /*
303 * Temporarily disable the TX queue, this will force mac80211 309 * Temporarily disable the TX queue, this will force mac80211
@@ -307,20 +313,33 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
307 * queue from being enabled during the txdone handler. 313 * queue from being enabled during the txdone handler.
308 */ 314 */
309 queue->threshold = queue->limit; 315 queue->threshold = queue->limit;
310 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); 316 ieee80211_stop_queue(rt2x00dev->hw, queue->qid);
311 317
312 /* 318 /*
 313 * Reset all currently uploaded TX frames. 319 * Kill all entries in the queue; afterwards we need to
 320 * wait a bit for all URBs to be canceled.
314 */ 321 */
315 while (!rt2x00queue_empty(queue)) { 322 rt2x00usb_kill_tx_queue(queue);
316 entry_priv = rt2x00queue_get_entry(queue, Q_INDEX_DONE)->priv_data;
317 usb_kill_urb(entry_priv->urb);
318 323
319 /* 324 /*
 320 * We need a short delay here to wait for 325 * In case a driver has overridden the txdone_work
 321 * the URB to be canceled and invoked the tx_done handler. 326 * function, we invoke the TX done handling through it.
322 */ 327 */
323 udelay(200); 328 rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
329
330 /*
 331 * Security measure: if the driver overrode the
 332 * txdone_work function and the hardware arrived
 333 * in a state which causes it to malfunction, it is
 334 * possible that the driver could not handle the txdone
 335 * event correctly. So after giving the driver the
 336 * chance to clean up, we now force a cleanup of any
 337 * leftovers.
338 */
339 if (!rt2x00queue_empty(queue)) {
340 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
341 " status handling failed, invoke hard reset", queue->qid);
342 rt2x00usb_work_txdone(&rt2x00dev->txdone_work);
324 } 343 }
325 344
326 /* 345 /*
@@ -328,7 +347,15 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
328 * queue again. 347 * queue again.
329 */ 348 */
330 queue->threshold = threshold; 349 queue->threshold = threshold;
331 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); 350 ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
351}
352
353static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
354{
355 WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
356 " invoke forced tx handler", queue->qid);
357
358 ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
332} 359}
333 360
334void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) 361void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -336,8 +363,10 @@ void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
336 struct data_queue *queue; 363 struct data_queue *queue;
337 364
338 tx_queue_for_each(rt2x00dev, queue) { 365 tx_queue_for_each(rt2x00dev, queue) {
366 if (rt2x00queue_dma_timeout(queue))
367 rt2x00usb_watchdog_tx_dma(queue);
339 if (rt2x00queue_timeout(queue)) 368 if (rt2x00queue_timeout(queue))
340 rt2x00usb_watchdog_reset_tx(queue); 369 rt2x00usb_watchdog_tx_status(queue);
341 } 370 }
342} 371}
343EXPORT_SYMBOL_GPL(rt2x00usb_watchdog); 372EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
@@ -345,38 +374,62 @@ EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
345/* 374/*
346 * RX data handlers. 375 * RX data handlers.
347 */ 376 */
377static void rt2x00usb_work_rxdone(struct work_struct *work)
378{
379 struct rt2x00_dev *rt2x00dev =
380 container_of(work, struct rt2x00_dev, rxdone_work);
381 struct queue_entry *entry;
382 struct skb_frame_desc *skbdesc;
383 u8 rxd[32];
384
385 while (!rt2x00queue_empty(rt2x00dev->rx)) {
386 entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);
387
388 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
389 break;
390
391 /*
392 * Fill in desc fields of the skb descriptor
393 */
394 skbdesc = get_skb_frame_desc(entry->skb);
395 skbdesc->desc = rxd;
396 skbdesc->desc_len = entry->queue->desc_size;
397
398 /*
399 * Send the frame to rt2x00lib for further processing.
400 */
401 rt2x00lib_rxdone(rt2x00dev, entry);
402 }
403}
404
348static void rt2x00usb_interrupt_rxdone(struct urb *urb) 405static void rt2x00usb_interrupt_rxdone(struct urb *urb)
349{ 406{
350 struct queue_entry *entry = (struct queue_entry *)urb->context; 407 struct queue_entry *entry = (struct queue_entry *)urb->context;
351 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 408 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
352 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
353 u8 rxd[32];
354 409
355 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || 410 if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
356 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
357 return; 411 return;
358 412
359 /* 413 /*
360 * Check if the received data is simply too small 414 * Report the frame as DMA done
361 * to be actually valid, or if the urb is signaling
362 * a problem.
363 */ 415 */
364 if (urb->actual_length < entry->queue->desc_size || urb->status) { 416 rt2x00lib_dmadone(entry);
365 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
366 usb_submit_urb(urb, GFP_ATOMIC);
367 return;
368 }
369 417
370 /* 418 /*
371 * Fill in desc fields of the skb descriptor 419 * Check if the received data is simply too small
420 * to be actually valid, or if the urb is signaling
421 * a problem.
372 */ 422 */
373 skbdesc->desc = rxd; 423 if (urb->actual_length < entry->queue->desc_size || urb->status)
374 skbdesc->desc_len = entry->queue->desc_size; 424 __set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
375 425
376 /* 426 /*
377 * Send the frame to rt2x00lib for further processing. 427 * Schedule the delayed work for reading the RX status
428 * from the device.
378 */ 429 */
379 rt2x00lib_rxdone(rt2x00dev, entry); 430 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
431 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
432 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
380} 433}
381 434
382/* 435/*
@@ -391,7 +444,7 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
391 * The USB version of kill_tx_queue also works 444 * The USB version of kill_tx_queue also works
392 * on the RX queue. 445 * on the RX queue.
393 */ 446 */
394 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_RX); 447 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev->rx);
395} 448}
396EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); 449EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
397 450
@@ -405,6 +458,8 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
405 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 458 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
406 int pipe; 459 int pipe;
407 460
461 entry->flags = 0;
462
408 if (entry->queue->qid == QID_RX) { 463 if (entry->queue->qid == QID_RX) {
409 pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint); 464 pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint);
410 usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe, 465 usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe,
@@ -413,8 +468,6 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
413 468
414 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 469 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
415 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 470 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
416 } else {
417 entry->flags = 0;
418 } 471 }
419} 472}
420EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); 473EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
@@ -659,6 +712,9 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
659 712
660 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB); 713 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
661 714
715 INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
716 INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
717
662 retval = rt2x00usb_alloc_reg(rt2x00dev); 718 retval = rt2x00usb_alloc_reg(rt2x00dev);
663 if (retval) 719 if (retval)
664 goto exit_free_device; 720 goto exit_free_device;
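
These defaults give USB devices a generic way to defer completion handling to process context; since the watchdog above also calls txdone_work.func directly, a driver with real TX status registers may install its own handler instead. A hypothetical sketch (the mydrv_* names are made up; the loop shape mirrors rt2x00usb_work_txdone):

static void mydrv_work_txdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
		container_of(work, struct rt2x00_dev, txdone_work);
	struct data_queue *queue;
	struct queue_entry *entry;

	tx_queue_for_each(rt2x00dev, queue) {
		while (!rt2x00queue_empty(queue)) {
			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
			if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
				break;
			/* A real driver would read per-frame TX status
			 * from the hardware here instead of guessing. */
			rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
		}
	}
}

/* Installed from the driver's probe path, after rt2x00usb_probe():
 *	INIT_WORK(&rt2x00dev->txdone_work, mydrv_work_txdone);
 */
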
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index d3d3ddc40875..c2d997f67b3e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -379,25 +379,21 @@ struct queue_entry_priv_usb_bcn {
379 379
380/** 380/**
381 * rt2x00usb_kick_tx_queue - Kick data queue 381 * rt2x00usb_kick_tx_queue - Kick data queue
382 * @rt2x00dev: Pointer to &struct rt2x00_dev 382 * @queue: Data queue to kick
383 * @qid: Data queue to kick
384 * 383 *
385 * This will walk through all entries of the queue and push all pending 384 * This will walk through all entries of the queue and push all pending
386 * frames to the hardware as a single burst. 385 * frames to the hardware as a single burst.
387 */ 386 */
388void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 387void rt2x00usb_kick_tx_queue(struct data_queue *queue);
389 const enum data_queue_qid qid);
390 388
391/** 389/**
392 * rt2x00usb_kill_tx_queue - Kill data queue 390 * rt2x00usb_kill_tx_queue - Kill data queue
393 * @rt2x00dev: Pointer to &struct rt2x00_dev 391 * @queue: Data queue to kill
394 * @qid: Data queue to kill
395 * 392 *
396 * This will walk through all entries of the queue and kill all 393 * This will walk through all entries of the queue and kill all
 397 * previously kicked frames before they can be sent. 394 * previously kicked frames before they can be sent.
398 */ 395 */
399void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 396void rt2x00usb_kill_tx_queue(struct data_queue *queue);
400 const enum data_queue_qid qid);
401 397
402/** 398/**
403 * rt2x00usb_watchdog - Watchdog for USB communication 399 * rt2x00usb_watchdog - Watchdog for USB communication
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index e539c6cb636f..3a7759929190 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1050,7 +1050,7 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
1050 /* 1050 /*
1051 * Determine r17 bounds. 1051 * Determine r17 bounds.
1052 */ 1052 */
1053 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 1053 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1054 low_bound = 0x28; 1054 low_bound = 0x28;
1055 up_bound = 0x48; 1055 up_bound = 0x48;
1056 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 1056 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) {
@@ -1766,12 +1766,11 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1766/* 1766/*
1767 * TX descriptor initialization 1767 * TX descriptor initialization
1768 */ 1768 */
1769static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1769static void rt61pci_write_tx_desc(struct queue_entry *entry,
1770 struct sk_buff *skb,
1771 struct txentry_desc *txdesc) 1770 struct txentry_desc *txdesc)
1772{ 1771{
1773 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1772 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1774 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 1773 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1775 __le32 *txd = entry_priv->desc; 1774 __le32 *txd = entry_priv->desc;
1776 u32 word; 1775 u32 word;
1777 1776
@@ -1779,7 +1778,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1779 * Start writing the descriptor words. 1778 * Start writing the descriptor words.
1780 */ 1779 */
1781 rt2x00_desc_read(txd, 1, &word); 1780 rt2x00_desc_read(txd, 1, &word);
1782 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue); 1781 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
1783 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1782 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1784 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1783 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1785 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1784 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
@@ -1802,15 +1801,15 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1802 } 1801 }
1803 1802
1804 rt2x00_desc_read(txd, 5, &word); 1803 rt2x00_desc_read(txd, 5, &word);
1805 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid); 1804 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
1806 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, 1805 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
1807 skbdesc->entry->entry_idx); 1806 skbdesc->entry->entry_idx);
1808 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1807 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1809 TXPOWER_TO_DEV(rt2x00dev->tx_power)); 1808 TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
1810 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1809 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1811 rt2x00_desc_write(txd, 5, word); 1810 rt2x00_desc_write(txd, 5, word);
1812 1811
1813 if (txdesc->queue != QID_BEACON) { 1812 if (txdesc->qid != QID_BEACON) {
1814 rt2x00_desc_read(txd, 6, &word); 1813 rt2x00_desc_read(txd, 6, &word);
1815 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, 1814 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1816 skbdesc->skb_dma); 1815 skbdesc->skb_dma);
@@ -1857,7 +1856,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1857 */ 1856 */
1858 skbdesc->desc = txd; 1857 skbdesc->desc = txd;
1859 skbdesc->desc_len = 1858 skbdesc->desc_len =
1860 (txdesc->queue == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE; 1859 (txdesc->qid == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE;
1861} 1860}
1862 1861
1863/* 1862/*
@@ -1882,7 +1881,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1882 /* 1881 /*
1883 * Write the TX descriptor for the beacon. 1882 * Write the TX descriptor for the beacon.
1884 */ 1883 */
1885 rt61pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1884 rt61pci_write_tx_desc(entry, txdesc);
1886 1885
1887 /* 1886 /*
1888 * Dump beacon to userspace through debugfs. 1887 * Dump beacon to userspace through debugfs.
@@ -1918,34 +1917,34 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1918 entry->skb = NULL; 1917 entry->skb = NULL;
1919} 1918}
1920 1919
1921static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1920static void rt61pci_kick_tx_queue(struct data_queue *queue)
1922 const enum data_queue_qid queue)
1923{ 1921{
1922 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1924 u32 reg; 1923 u32 reg;
1925 1924
1926 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1925 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1927 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE)); 1926 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue->qid == QID_AC_BE));
1928 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK)); 1927 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue->qid == QID_AC_BK));
1929 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue == QID_AC_VI)); 1928 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue->qid == QID_AC_VI));
1930 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue == QID_AC_VO)); 1929 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue->qid == QID_AC_VO));
1931 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1930 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1932} 1931}
1933 1932
1934static void rt61pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 1933static void rt61pci_kill_tx_queue(struct data_queue *queue)
1935 const enum data_queue_qid qid)
1936{ 1934{
1935 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1937 u32 reg; 1936 u32 reg;
1938 1937
1939 if (qid == QID_BEACON) { 1938 if (queue->qid == QID_BEACON) {
1940 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0); 1939 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0);
1941 return; 1940 return;
1942 } 1941 }
1943 1942
1944 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1943 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1945 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (qid == QID_AC_BE)); 1944 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (queue->qid == QID_AC_BE));
1946 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (qid == QID_AC_BK)); 1945 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (queue->qid == QID_AC_BK));
1947 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (qid == QID_AC_VI)); 1946 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (queue->qid == QID_AC_VI));
1948 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (qid == QID_AC_VO)); 1947 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (queue->qid == QID_AC_VO));
1949 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1948 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1950} 1949}
1951 1950
@@ -1972,7 +1971,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1972 return 0; 1971 return 0;
1973 } 1972 }
1974 1973
1975 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 1974 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1976 if (lna == 3 || lna == 2) 1975 if (lna == 3 || lna == 2)
1977 offset += 10; 1976 offset += 10;
1978 } 1977 }
@@ -2107,11 +2106,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2107 "TX status report missed for entry %d\n", 2106 "TX status report missed for entry %d\n",
2108 entry_done->entry_idx); 2107 entry_done->entry_idx);
2109 2108
2110 txdesc.flags = 0; 2109 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
2111 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
2112 txdesc.retry = 0;
2113
2114 rt2x00lib_txdone(entry_done, &txdesc);
2115 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 2110 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
2116 } 2111 }
2117 2112
@@ -2654,20 +2649,24 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2654 /* 2649 /*
2655 * Create channel information array 2650 * Create channel information array
2656 */ 2651 */
2657 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 2652 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
2658 if (!info) 2653 if (!info)
2659 return -ENOMEM; 2654 return -ENOMEM;
2660 2655
2661 spec->channels_info = info; 2656 spec->channels_info = info;
2662 2657
2663 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); 2658 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2664 for (i = 0; i < 14; i++) 2659 for (i = 0; i < 14; i++) {
2665 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2660 info[i].max_power = MAX_TXPOWER;
2661 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2662 }
2666 2663
2667 if (spec->num_channels > 14) { 2664 if (spec->num_channels > 14) {
2668 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2665 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2669 for (i = 14; i < spec->num_channels; i++) 2666 for (i = 14; i < spec->num_channels; i++) {
2670 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2667 info[i].max_power = MAX_TXPOWER;
2668 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2669 }
2671 } 2670 }
2672 2671
2673 return 0; 2672 return 0;
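
The kzalloc(n * size) to kcalloc(n, size) conversion here (and in the matching rt73usb hunk below) is overflow hardening: kcalloc() verifies the multiplication cannot wrap before allocating. num_channels is small in practice, so this is defensive; a worked illustration with made-up numbers:

	size_t n = 0x20000001;	/* illustrative, hostile element count */
	size_t elem = 8;	/* e.g. sizeof(*info) */

	/* With a 32-bit size_t, n * elem == 0x100000008 truncates to 8,
	 * so kzalloc() would hand back an 8-byte buffer that the caller
	 * then indexes far out of bounds: */
	void *bad = kzalloc(n * elem, GFP_KERNEL);

	/* kcalloc() detects the wrap and returns NULL instead: */
	void *good = kcalloc(n, elem, GFP_KERNEL);
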
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index aa9de18fd410..87fb2201537b 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -929,7 +929,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
929 /* 929 /*
930 * Determine r17 bounds. 930 * Determine r17 bounds.
931 */ 931 */
932 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 932 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
933 low_bound = 0x28; 933 low_bound = 0x28;
934 up_bound = 0x48; 934 up_bound = 0x48;
935 935
@@ -1426,12 +1426,11 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1426/* 1426/*
1427 * TX descriptor initialization 1427 * TX descriptor initialization
1428 */ 1428 */
1429static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1429static void rt73usb_write_tx_desc(struct queue_entry *entry,
1430 struct sk_buff *skb,
1431 struct txentry_desc *txdesc) 1430 struct txentry_desc *txdesc)
1432{ 1431{
1433 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1432 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1434 __le32 *txd = (__le32 *) skb->data; 1433 __le32 *txd = (__le32 *) entry->skb->data;
1435 u32 word; 1434 u32 word;
1436 1435
1437 /* 1436 /*
@@ -1464,7 +1463,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1464 rt2x00_desc_write(txd, 0, word); 1463 rt2x00_desc_write(txd, 0, word);
1465 1464
1466 rt2x00_desc_read(txd, 1, &word); 1465 rt2x00_desc_read(txd, 1, &word);
1467 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue); 1466 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
1468 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1467 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1469 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1468 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1470 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1469 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
@@ -1487,7 +1486,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1487 1486
1488 rt2x00_desc_read(txd, 5, &word); 1487 rt2x00_desc_read(txd, 5, &word);
1489 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1488 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1490 TXPOWER_TO_DEV(rt2x00dev->tx_power)); 1489 TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
1491 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1490 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1492 rt2x00_desc_write(txd, 5, word); 1491 rt2x00_desc_write(txd, 5, word);
1493 1492
@@ -1526,7 +1525,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1526 /* 1525 /*
1527 * Write the TX descriptor for the beacon. 1526 * Write the TX descriptor for the beacon.
1528 */ 1527 */
1529 rt73usb_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1528 rt73usb_write_tx_desc(entry, txdesc);
1530 1529
1531 /* 1530 /*
1532 * Dump beacon to userspace through debugfs. 1531 * Dump beacon to userspace through debugfs.
@@ -1574,6 +1573,14 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
1574 return length; 1573 return length;
1575} 1574}
1576 1575
1576static void rt73usb_kill_tx_queue(struct data_queue *queue)
1577{
1578 if (queue->qid == QID_BEACON)
1579 rt2x00usb_register_write(queue->rt2x00dev, TXRX_CSR9, 0);
1580
1581 rt2x00usb_kill_tx_queue(queue);
1582}
1583
1577/* 1584/*
1578 * RX control handlers 1585 * RX control handlers
1579 */ 1586 */
@@ -1597,7 +1604,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1597 return 0; 1604 return 0;
1598 } 1605 }
1599 1606
1600 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 1607 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1601 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 1608 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) {
1602 if (lna == 3 || lna == 2) 1609 if (lna == 3 || lna == 2)
1603 offset += 10; 1610 offset += 10;
@@ -2084,20 +2091,24 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2084 /* 2091 /*
2085 * Create channel information array 2092 * Create channel information array
2086 */ 2093 */
2087 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 2094 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
2088 if (!info) 2095 if (!info)
2089 return -ENOMEM; 2096 return -ENOMEM;
2090 2097
2091 spec->channels_info = info; 2098 spec->channels_info = info;
2092 2099
2093 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); 2100 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2094 for (i = 0; i < 14; i++) 2101 for (i = 0; i < 14; i++) {
2095 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2102 info[i].max_power = MAX_TXPOWER;
2103 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2104 }
2096 2105
2097 if (spec->num_channels > 14) { 2106 if (spec->num_channels > 14) {
2098 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2107 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2099 for (i = 14; i < spec->num_channels; i++) 2108 for (i = 14; i < spec->num_channels; i++) {
2100 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2109 info[i].max_power = MAX_TXPOWER;
2110 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2111 }
2101 } 2112 }
2102 2113
2103 return 0; 2114 return 0;
@@ -2259,7 +2270,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2259 .write_beacon = rt73usb_write_beacon, 2270 .write_beacon = rt73usb_write_beacon,
2260 .get_tx_data_len = rt73usb_get_tx_data_len, 2271 .get_tx_data_len = rt73usb_get_tx_data_len,
2261 .kick_tx_queue = rt2x00usb_kick_tx_queue, 2272 .kick_tx_queue = rt2x00usb_kick_tx_queue,
2262 .kill_tx_queue = rt2x00usb_kill_tx_queue, 2273 .kill_tx_queue = rt73usb_kill_tx_queue,
2263 .fill_rxdone = rt73usb_fill_rxdone, 2274 .fill_rxdone = rt73usb_fill_rxdone,
2264 .config_shared_key = rt73usb_config_shared_key, 2275 .config_shared_key = rt73usb_config_shared_key,
2265 .config_pairwise_key = rt73usb_config_pairwise_key, 2276 .config_pairwise_key = rt73usb_config_pairwise_key,
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 30107ce78dfb..05c6badbe201 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -99,19 +99,66 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
99 } 99 }
100} 100}
101 101
102static void rtl8180_handle_rx(struct ieee80211_hw *dev) 102static void rtl8180_handle_tx(struct ieee80211_hw *dev)
103{ 103{
104 struct rtl8180_priv *priv = dev->priv; 104 struct rtl8180_priv *priv = dev->priv;
105 unsigned int count = 32; 105 struct rtl8180_tx_ring *ring;
106 int prio;
107
108 spin_lock(&priv->lock);
109
110 for (prio = 3; prio >= 0; prio--) {
111 ring = &priv->tx_ring[prio];
112
113 while (skb_queue_len(&ring->queue)) {
114 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
115 struct sk_buff *skb;
116 struct ieee80211_tx_info *info;
117 u32 flags = le32_to_cpu(entry->flags);
118
119 if (flags & RTL818X_TX_DESC_FLAG_OWN)
120 break;
121
122 ring->idx = (ring->idx + 1) % ring->entries;
123 skb = __skb_dequeue(&ring->queue);
124 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
125 skb->len, PCI_DMA_TODEVICE);
126
127 info = IEEE80211_SKB_CB(skb);
128 ieee80211_tx_info_clear_status(info);
129
130 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
131 (flags & RTL818X_TX_DESC_FLAG_TX_OK))
132 info->flags |= IEEE80211_TX_STAT_ACK;
133
134 info->status.rates[0].count = (flags & 0xFF) + 1;
135 info->status.rates[1].idx = -1;
136
137 ieee80211_tx_status(dev, skb);
138 if (ring->entries - skb_queue_len(&ring->queue) == 2)
139 ieee80211_wake_queue(dev, prio);
140 }
141 }
142
143 spin_unlock(&priv->lock);
144}
145
146static int rtl8180_poll(struct ieee80211_hw *dev, int budget)
147{
148 struct rtl8180_priv *priv = dev->priv;
149 unsigned int count = 0;
106 u8 signal, agc, sq; 150 u8 signal, agc, sq;
107 151
108 while (count--) { 152 /* handle pending Tx queue cleanup */
153 rtl8180_handle_tx(dev);
154
155 while (count++ < budget) {
109 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx]; 156 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
110 struct sk_buff *skb = priv->rx_buf[priv->rx_idx]; 157 struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
111 u32 flags = le32_to_cpu(entry->flags); 158 u32 flags = le32_to_cpu(entry->flags);
112 159
113 if (flags & RTL818X_RX_DESC_FLAG_OWN) 160 if (flags & RTL818X_RX_DESC_FLAG_OWN)
114 return; 161 break;
115 162
116 if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL | 163 if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL |
117 RTL818X_RX_DESC_FLAG_FOF | 164 RTL818X_RX_DESC_FLAG_FOF |
@@ -151,7 +198,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
151 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 198 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
152 199
153 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 200 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
154 ieee80211_rx_irqsafe(dev, skb); 201 ieee80211_rx(dev, skb);
155 202
156 skb = new_skb; 203 skb = new_skb;
157 priv->rx_buf[priv->rx_idx] = skb; 204 priv->rx_buf[priv->rx_idx] = skb;
@@ -168,41 +215,16 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
168 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR); 215 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
169 priv->rx_idx = (priv->rx_idx + 1) % 32; 216 priv->rx_idx = (priv->rx_idx + 1) % 32;
170 } 217 }
171}
172 218
173static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio) 219 if (count < budget) {
174{ 220 /* disable polling */
175 struct rtl8180_priv *priv = dev->priv; 221 ieee80211_napi_complete(dev);
176 struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];
177 222
178 while (skb_queue_len(&ring->queue)) { 223 /* enable interrupts */
179 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; 224 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
180 struct sk_buff *skb;
181 struct ieee80211_tx_info *info;
182 u32 flags = le32_to_cpu(entry->flags);
183
184 if (flags & RTL818X_TX_DESC_FLAG_OWN)
185 return;
186
187 ring->idx = (ring->idx + 1) % ring->entries;
188 skb = __skb_dequeue(&ring->queue);
189 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
190 skb->len, PCI_DMA_TODEVICE);
191
192 info = IEEE80211_SKB_CB(skb);
193 ieee80211_tx_info_clear_status(info);
194
195 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
196 (flags & RTL818X_TX_DESC_FLAG_TX_OK))
197 info->flags |= IEEE80211_TX_STAT_ACK;
198
199 info->status.rates[0].count = (flags & 0xFF) + 1;
200 info->status.rates[1].idx = -1;
201
202 ieee80211_tx_status_irqsafe(dev, skb);
203 if (ring->entries - skb_queue_len(&ring->queue) == 2)
204 ieee80211_wake_queue(dev, prio);
205 } 225 }
226
227 return count;
206} 228}
207 229
208static irqreturn_t rtl8180_interrupt(int irq, void *dev_id) 230static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
@@ -211,31 +233,17 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
211 struct rtl8180_priv *priv = dev->priv; 233 struct rtl8180_priv *priv = dev->priv;
212 u16 reg; 234 u16 reg;
213 235
214 spin_lock(&priv->lock);
215 reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS); 236 reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS);
216 if (unlikely(reg == 0xFFFF)) { 237 if (unlikely(reg == 0xFFFF))
217 spin_unlock(&priv->lock);
218 return IRQ_HANDLED; 238 return IRQ_HANDLED;
219 }
220 239
221 rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg); 240 rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg);
222 241
223 if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR)) 242 /* disable interrupts */
224 rtl8180_handle_tx(dev, 3); 243 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
225
226 if (reg & (RTL818X_INT_TXH_OK | RTL818X_INT_TXH_ERR))
227 rtl8180_handle_tx(dev, 2);
228
229 if (reg & (RTL818X_INT_TXN_OK | RTL818X_INT_TXN_ERR))
230 rtl8180_handle_tx(dev, 1);
231
232 if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR))
233 rtl8180_handle_tx(dev, 0);
234
235 if (reg & (RTL818X_INT_RX_OK | RTL818X_INT_RX_ERR))
236 rtl8180_handle_rx(dev);
237 244
238 spin_unlock(&priv->lock); 245 /* enable polling */
246 ieee80211_napi_schedule(dev);
239 247
240 return IRQ_HANDLED; 248 return IRQ_HANDLED;
241} 249}
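
The interrupt handler now follows the standard NAPI contract: mask device interrupts, schedule the poll, and let rtl8180_poll() process at most budget frames; only a poll that finishes under budget completes NAPI and unmasks the interrupt, otherwise the core polls again. A minimal skeleton of that contract, with illustrative mydev_* helpers standing in for the driver specifics:

static int mydev_poll_skeleton(struct ieee80211_hw *dev, int budget)
{
	int done = 0;

	/* Consume completed frames, never exceeding the budget. */
	while (done < budget && mydev_frame_ready(dev))	/* hypothetical */
		done += mydev_process_frame(dev);	/* hypothetical */

	if (done < budget) {
		/* Under budget: leave polling mode, unmask the IRQ. */
		ieee80211_napi_complete(dev);
		mydev_enable_interrupts(dev);		/* hypothetical */
	}

	return done;
}
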
@@ -247,7 +255,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
247 struct rtl8180_priv *priv = dev->priv; 255 struct rtl8180_priv *priv = dev->priv;
248 struct rtl8180_tx_ring *ring; 256 struct rtl8180_tx_ring *ring;
249 struct rtl8180_tx_desc *entry; 257 struct rtl8180_tx_desc *entry;
250 unsigned long flags;
251 unsigned int idx, prio; 258 unsigned int idx, prio;
252 dma_addr_t mapping; 259 dma_addr_t mapping;
253 u32 tx_flags; 260 u32 tx_flags;
@@ -294,7 +301,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
294 plcp_len |= 1 << 15; 301 plcp_len |= 1 << 15;
295 } 302 }
296 303
297 spin_lock_irqsave(&priv->lock, flags); 304 spin_lock(&priv->lock);
298 305
299 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 306 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
300 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 307 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
@@ -318,7 +325,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
318 if (ring->entries - skb_queue_len(&ring->queue) < 2) 325 if (ring->entries - skb_queue_len(&ring->queue) < 2)
319 ieee80211_stop_queue(dev, prio); 326 ieee80211_stop_queue(dev, prio);
320 327
321 spin_unlock_irqrestore(&priv->lock, flags); 328 spin_unlock(&priv->lock);
322 329
323 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 330 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
324 331
@@ -783,6 +790,7 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
783 struct rtl8180_priv *priv = dev->priv; 790 struct rtl8180_priv *priv = dev->priv;
784 struct rtl8180_vif *vif_priv; 791 struct rtl8180_vif *vif_priv;
785 int i; 792 int i;
793 u8 reg;
786 794
787 vif_priv = (struct rtl8180_vif *)&vif->drv_priv; 795 vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
788 796
@@ -791,12 +799,14 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
791 rtl818x_iowrite8(priv, &priv->map->BSSID[i], 799 rtl818x_iowrite8(priv, &priv->map->BSSID[i],
792 info->bssid[i]); 800 info->bssid[i]);
793 801
794 if (is_valid_ether_addr(info->bssid)) 802 if (is_valid_ether_addr(info->bssid)) {
795 rtl818x_iowrite8(priv, &priv->map->MSR, 803 if (vif->type == NL80211_IFTYPE_ADHOC)
796 RTL818X_MSR_INFRA); 804 reg = RTL818X_MSR_ADHOC;
797 else 805 else
798 rtl818x_iowrite8(priv, &priv->map->MSR, 806 reg = RTL818X_MSR_INFRA;
799 RTL818X_MSR_NO_LINK); 807 } else
808 reg = RTL818X_MSR_NO_LINK;
809 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
800 } 810 }
801 811
802 if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp) 812 if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
@@ -861,6 +871,7 @@ static const struct ieee80211_ops rtl8180_ops = {
861 .prepare_multicast = rtl8180_prepare_multicast, 871 .prepare_multicast = rtl8180_prepare_multicast,
862 .configure_filter = rtl8180_configure_filter, 872 .configure_filter = rtl8180_configure_filter,
863 .get_tsf = rtl8180_get_tsf, 873 .get_tsf = rtl8180_get_tsf,
874 .napi_poll = rtl8180_poll,
864}; 875};
865 876
866static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom) 877static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
@@ -992,6 +1003,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
992 dev->queues = 1; 1003 dev->queues = 1;
993 dev->max_signal = 65; 1004 dev->max_signal = 65;
994 1005
1006 dev->napi_weight = 64;
1007
995 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); 1008 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
996 reg &= RTL818X_TX_CONF_HWVER_MASK; 1009 reg &= RTL818X_TX_CONF_HWVER_MASK;
997 switch (reg) { 1010 switch (reg) {
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 98e0351c1dd6..38fa8244cc96 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1176,13 +1176,12 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
1176 else 1176 else
1177 reg = 0; 1177 reg = 0;
1178 1178
1179 if (is_valid_ether_addr(info->bssid)) { 1179 if (is_valid_ether_addr(info->bssid))
1180 reg |= RTL818X_MSR_INFRA; 1180 reg |= RTL818X_MSR_INFRA;
1181 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 1181 else
1182 } else {
1183 reg |= RTL818X_MSR_NO_LINK; 1182 reg |= RTL818X_MSR_NO_LINK;
1184 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 1183
1185 } 1184 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
1186 1185
1187 mutex_unlock(&priv->conf_mutex); 1186 mutex_unlock(&priv->conf_mutex);
1188 } 1187 }
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 6b942a28e6a5..e113d4c1fb35 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008-2009 Nokia Corporation 5 * Copyright (C) 2008-2009 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -274,6 +272,8 @@ struct wl1251 {
274 int irq; 272 int irq;
275 bool use_eeprom; 273 bool use_eeprom;
276 274
275 spinlock_t wl_lock;
276
277 enum wl1251_state state; 277 enum wl1251_state state;
278 struct mutex mutex; 278 struct mutex mutex;
279 279
@@ -401,7 +401,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
401 401
402#define WL1251_DEFAULT_POWER_LEVEL 20 402#define WL1251_DEFAULT_POWER_LEVEL 20
403 403
404#define WL1251_TX_QUEUE_MAX_LENGTH 20 404#define WL1251_TX_QUEUE_LOW_WATERMARK 10
405#define WL1251_TX_QUEUE_HIGH_WATERMARK 25
405 406
406#define WL1251_DEFAULT_BEACON_INT 100 407#define WL1251_DEFAULT_BEACON_INT 100
407#define WL1251_DEFAULT_DTIM_PERIOD 1 408#define WL1251_DEFAULT_DTIM_PERIOD 1
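
Replacing the single queue-length cap with low/high watermarks is a flow-control change: mac80211 queues are stopped once the driver backlog climbs past the high watermark and only woken again after it drains below the low one, so the queues do not flap on every frame. A hedged sketch of the pattern, assuming the backlog lives in wl->tx_queue (a field of struct wl1251):

	/* On the TX path: */
	if (skb_queue_len(&wl->tx_queue) > WL1251_TX_QUEUE_HIGH_WATERMARK)
		ieee80211_stop_queues(wl->hw);

	/* After the TX-completion path drains some frames: */
	if (skb_queue_len(&wl->tx_queue) < WL1251_TX_QUEUE_LOW_WATERMARK)
		ieee80211_wake_queues(wl->hw);
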
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index 91891f928070..2f8a2ba744dc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -380,7 +380,7 @@ int wl1251_acx_pd_threshold(struct wl1251 *wl)
380 380
381out: 381out:
382 kfree(pd); 382 kfree(pd);
383 return 0; 383 return ret;
384} 384}
385 385
386int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time) 386int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time)
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 842df310d92a..c7cc5c1e8a75 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -37,7 +35,7 @@ struct acx_header {
37 35
 38 /* payload length (not including headers) */ 36 /* payload length (not including headers) */
39 u16 len; 37 u16 len;
40}; 38} __packed;
41 39
42struct acx_error_counter { 40struct acx_error_counter {
43 struct acx_header header; 41 struct acx_header header;
@@ -459,8 +457,8 @@ struct acx_beacon_filter_ie_table {
459 struct acx_header header; 457 struct acx_header header;
460 458
461 u8 num_ie; 459 u8 num_ie;
462 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
463 u8 pad[3]; 460 u8 pad[3];
461 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
464} __packed; 462} __packed;
465 463
466#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ 464#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */
@@ -471,7 +469,7 @@ struct acx_conn_monit_params {
471 469
472 u32 synch_fail_thold; /* number of beacons missed */ 470 u32 synch_fail_thold; /* number of beacons missed */
473 u32 bss_lose_timeout; /* number of TU's from synch fail */ 471 u32 bss_lose_timeout; /* number of TU's from synch fail */
474}; 472} __packed;
475 473
476enum { 474enum {
477 SG_ENABLE = 0, 475 SG_ENABLE = 0,
@@ -1056,7 +1054,7 @@ struct acx_rate_class {
1056 u8 long_retry_limit; 1054 u8 long_retry_limit;
1057 u8 aflags; 1055 u8 aflags;
1058 u8 reserved; 1056 u8 reserved;
1059}; 1057} __packed;
1060 1058
1061struct acx_rate_policy { 1059struct acx_rate_policy {
1062 struct acx_header header; 1060 struct acx_header header;
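
These structures are exchanged with the firmware byte-for-byte, so the compiler must not insert its own padding; __packed pins the layout (and the pad[] reorder above makes the wire padding explicit instead of accidental). A small self-contained illustration of what __packed changes:

struct unpacked_example {
	u8  num_ie;
	u32 timeout;	/* compiler inserts 3 hidden pad bytes before this */
};			/* sizeof == 8 on common ABIs */

struct packed_example {
	u8  num_ie;
	u32 timeout;	/* no hidden padding; offset is exactly 1 */
} __packed;		/* sizeof == 5 */
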
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 65e0416be5b6..468b47b0328a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
@@ -302,7 +300,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
302 ROAMING_TRIGGER_LOW_RSSI_EVENT_ID | 300 ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
303 ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID | 301 ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
304 REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID | 302 REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
305 BT_PTA_PREDICTION_EVENT_ID; 303 BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID;
306 304
307 ret = wl1251_event_unmask(wl); 305 ret = wl1251_event_unmask(wl);
308 if (ret < 0) { 306 if (ret < 0) {
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.h b/drivers/net/wireless/wl12xx/wl1251_boot.h
index 90063697e8f2..7661bc5e4662 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.h
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index ce3722f4c3e3..15fb68c6b542 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -200,7 +200,7 @@ int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
200 200
201out: 201out:
202 kfree(vbm); 202 kfree(vbm);
203 return 0; 203 return ret;
204} 204}
205 205
206int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable) 206int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index a9e4991369be..e5c74c631374 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -111,7 +109,7 @@ struct wl1251_cmd_header {
111struct wl1251_command { 109struct wl1251_command {
112 struct wl1251_cmd_header header; 110 struct wl1251_cmd_header header;
113 u8 parameters[MAX_CMD_PARAMS]; 111 u8 parameters[MAX_CMD_PARAMS];
114}; 112} __packed;
115 113
116enum { 114enum {
117 CMD_MAILBOX_IDLE = 0, 115 CMD_MAILBOX_IDLE = 0,
@@ -164,7 +162,7 @@ struct cmd_read_write_memory {
164 of this field is the Host in WRITE command or the Wilink in READ 162 of this field is the Host in WRITE command or the Wilink in READ
165 command. */ 163 command. */
166 u8 value[MAX_READ_SIZE]; 164 u8 value[MAX_READ_SIZE];
167}; 165} __packed;
168 166
169#define CMDMBOX_HEADER_LEN 4 167#define CMDMBOX_HEADER_LEN 4
170#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 168#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -339,7 +337,7 @@ struct wl1251_cmd_trigger_scan_to {
339 struct wl1251_cmd_header header; 337 struct wl1251_cmd_header header;
340 338
341 u32 timeout; 339 u32 timeout;
342}; 340} __packed;
343 341
344/* HW encryption keys */ 342/* HW encryption keys */
345#define NUM_ACCESS_CATEGORIES_COPY 4 343#define NUM_ACCESS_CATEGORIES_COPY 4
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index 5e4465ac08fa..6ffe4cd58561 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.h b/drivers/net/wireless/wl12xx/wl1251_debugfs.h
index 6dc3d080853c..b3417c02a218 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.h
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.h
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.c b/drivers/net/wireless/wl12xx/wl1251_event.c
index 020d764f9c13..54223556b308 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.c
+++ b/drivers/net/wireless/wl12xx/wl1251_event.c
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -36,9 +34,7 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
36 mbox->scheduled_scan_channels); 34 mbox->scheduled_scan_channels);
37 35
38 if (wl->scanning) { 36 if (wl->scanning) {
39 mutex_unlock(&wl->mutex);
40 ieee80211_scan_completed(wl->hw, false); 37 ieee80211_scan_completed(wl->hw, false);
41 mutex_lock(&wl->mutex);
42 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed"); 38 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
43 wl->scanning = false; 39 wl->scanning = false;
44 } 40 }
@@ -97,6 +93,35 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
97 return 0; 93 return 0;
98} 94}
99 95
96/*
 97 * Poll the mailbox event field until any of the bits in the mask is set
 98 * or the timeout expires (timeout_ms, in milliseconds)
99 */
100int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms)
101{
102 u32 events_vector, event;
103 unsigned long timeout;
104
105 timeout = jiffies + msecs_to_jiffies(timeout_ms);
106
107 do {
108 if (time_after(jiffies, timeout))
109 return -ETIMEDOUT;
110
111 msleep(1);
112
113 /* read from both event fields */
114 wl1251_mem_read(wl, wl->mbox_ptr[0], &events_vector,
115 sizeof(events_vector));
116 event = events_vector & mask;
117 wl1251_mem_read(wl, wl->mbox_ptr[1], &events_vector,
118 sizeof(events_vector));
119 event |= events_vector & mask;
120 } while (!event);
121
122 return 0;
123}
124
100int wl1251_event_unmask(struct wl1251 *wl) 125int wl1251_event_unmask(struct wl1251 *wl)
101{ 126{
102 int ret; 127 int ret;
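
The new wl1251_event_wait() is a bounded busy-wait: it sleeps in 1 ms slices and re-reads both mailbox event fields until one of the masked bits appears or the jiffies deadline expires. A minimal sketch of the same deadline pattern, with the hypothetical read_hw_events() standing in for the driver's two wl1251_mem_read() calls:

/*
 * Sketch of the bounded-poll pattern behind wl1251_event_wait().
 * read_hw_events() is a made-up stand-in for the two wl1251_mem_read()
 * calls; the rest is standard kernel API.
 */
static int poll_event(struct wl1251 *wl, u32 mask, int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!(read_hw_events(wl) & mask)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* no event before the deadline */
		msleep(1);			/* sleep, don't spin, between reads */
	}
	return 0;
}
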
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h
index f48a2b66bc5a..30eb5d150bf7 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.h
+++ b/drivers/net/wireless/wl12xx/wl1251_event.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -117,5 +115,6 @@ struct event_mailbox {
117int wl1251_event_unmask(struct wl1251 *wl); 115int wl1251_event_unmask(struct wl1251 *wl);
118void wl1251_event_mbox_config(struct wl1251 *wl); 116void wl1251_event_mbox_config(struct wl1251 *wl);
119int wl1251_event_handle(struct wl1251 *wl, u8 mbox); 117int wl1251_event_handle(struct wl1251 *wl, u8 mbox);
118int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms);
120 119
121#endif 120#endif
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index b538bdd7b320..c5daec05d9ee 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index 269cefb3e7d4..543f17582ead 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.c b/drivers/net/wireless/wl12xx/wl1251_io.c
index f1c232e0887f..ad6ca68b303f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.c
+++ b/drivers/net/wireless/wl12xx/wl1251_io.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 861a5f33761e..faf221ca3f41 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008-2009 Nokia Corporation 4 * Copyright (C) 2008-2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
@@ -293,14 +291,14 @@ static void wl1251_irq_work(struct work_struct *work)
293 wl1251_tx_complete(wl); 291 wl1251_tx_complete(wl);
294 } 292 }
295 293
296 if (intr & (WL1251_ACX_INTR_EVENT_A | 294 if (intr & WL1251_ACX_INTR_EVENT_A) {
297 WL1251_ACX_INTR_EVENT_B)) { 295 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_A");
298 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)", 296 wl1251_event_handle(wl, 0);
299 intr); 297 }
300 if (intr & WL1251_ACX_INTR_EVENT_A) 298
301 wl1251_event_handle(wl, 0); 299 if (intr & WL1251_ACX_INTR_EVENT_B) {
302 else 300 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_B");
303 wl1251_event_handle(wl, 1); 301 wl1251_event_handle(wl, 1);
304 } 302 }
305 303
306 if (intr & WL1251_ACX_INTR_INIT_COMPLETE) 304 if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
@@ -339,11 +337,9 @@ static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
339 if (ret < 0) 337 if (ret < 0)
340 goto out; 338 goto out;
341 339
342 /* 340 ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
343 * FIXME: we should wait for JOIN_EVENT_COMPLETE_ID but to simplify 341 if (ret < 0)
344 * locking we just sleep instead, for now 342 wl1251_warning("join timeout");
345 */
346 msleep(10);
347 343
348out: 344out:
349 return ret; 345 return ret;
@@ -379,6 +375,7 @@ out:
379static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 375static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
380{ 376{
381 struct wl1251 *wl = hw->priv; 377 struct wl1251 *wl = hw->priv;
378 unsigned long flags;
382 379
383 skb_queue_tail(&wl->tx_queue, skb); 380 skb_queue_tail(&wl->tx_queue, skb);
384 381
@@ -393,16 +390,13 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
393 * The workqueue is slow to process the tx_queue and we need stop 390 * The workqueue is slow to process the tx_queue and we need stop
394 * the queue here, otherwise the queue will get too long. 391 * the queue here, otherwise the queue will get too long.
395 */ 392 */
396 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) { 393 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_HIGH_WATERMARK) {
397 wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues"); 394 wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
398 ieee80211_stop_queues(wl->hw);
399 395
400 /* 396 spin_lock_irqsave(&wl->wl_lock, flags);
401 * FIXME: this is racy, the variable is not properly 397 ieee80211_stop_queues(wl->hw);
402 * protected. Maybe fix this by removing the stupid
403 * variable altogether and checking the real queue state?
404 */
405 wl->tx_queue_stopped = true; 398 wl->tx_queue_stopped = true;
399 spin_unlock_irqrestore(&wl->wl_lock, flags);
406 } 400 }
407 401
408 return NETDEV_TX_OK; 402 return NETDEV_TX_OK;
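
This hunk closes the race the old FIXME complained about: ieee80211_stop_queues() and the tx_queue_stopped flag are now updated together under wl_lock, so the completion path cannot observe one without the other. The stop side, as a minimal sketch (tx_maybe_stop() is a made-up name; the fields and constants are the driver's own):

/* Sketch: throttle mac80211 once the backlog hits the high watermark. */
static void tx_maybe_stop(struct wl1251 *wl)
{
	unsigned long flags;

	if (skb_queue_len(&wl->tx_queue) < WL1251_TX_QUEUE_HIGH_WATERMARK)
		return;

	spin_lock_irqsave(&wl->wl_lock, flags);
	ieee80211_stop_queues(wl->hw);	/* stop mac80211 feeding us */
	wl->tx_queue_stopped = true;	/* flag read by the tx work path */
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
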
@@ -471,9 +465,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
471 WARN_ON(wl->state != WL1251_STATE_ON); 465 WARN_ON(wl->state != WL1251_STATE_ON);
472 466
473 if (wl->scanning) { 467 if (wl->scanning) {
474 mutex_unlock(&wl->mutex);
475 ieee80211_scan_completed(wl->hw, true); 468 ieee80211_scan_completed(wl->hw, true);
476 mutex_lock(&wl->mutex);
477 wl->scanning = false; 469 wl->scanning = false;
478 } 470 }
479 471
@@ -725,8 +717,9 @@ static int wl1251_set_key_type(struct wl1251 *wl,
725 struct ieee80211_key_conf *mac80211_key, 717 struct ieee80211_key_conf *mac80211_key,
726 const u8 *addr) 718 const u8 *addr)
727{ 719{
728 switch (mac80211_key->alg) { 720 switch (mac80211_key->cipher) {
729 case ALG_WEP: 721 case WLAN_CIPHER_SUITE_WEP40:
722 case WLAN_CIPHER_SUITE_WEP104:
730 if (is_broadcast_ether_addr(addr)) 723 if (is_broadcast_ether_addr(addr))
731 key->key_type = KEY_WEP_DEFAULT; 724 key->key_type = KEY_WEP_DEFAULT;
732 else 725 else
@@ -734,7 +727,7 @@ static int wl1251_set_key_type(struct wl1251 *wl,
734 727
735 mac80211_key->hw_key_idx = mac80211_key->keyidx; 728 mac80211_key->hw_key_idx = mac80211_key->keyidx;
736 break; 729 break;
737 case ALG_TKIP: 730 case WLAN_CIPHER_SUITE_TKIP:
738 if (is_broadcast_ether_addr(addr)) 731 if (is_broadcast_ether_addr(addr))
739 key->key_type = KEY_TKIP_MIC_GROUP; 732 key->key_type = KEY_TKIP_MIC_GROUP;
740 else 733 else
@@ -742,7 +735,7 @@ static int wl1251_set_key_type(struct wl1251 *wl,
742 735
743 mac80211_key->hw_key_idx = mac80211_key->keyidx; 736 mac80211_key->hw_key_idx = mac80211_key->keyidx;
744 break; 737 break;
745 case ALG_CCMP: 738 case WLAN_CIPHER_SUITE_CCMP:
746 if (is_broadcast_ether_addr(addr)) 739 if (is_broadcast_ether_addr(addr))
747 key->key_type = KEY_AES_GROUP; 740 key->key_type = KEY_AES_GROUP;
748 else 741 else
@@ -750,7 +743,7 @@ static int wl1251_set_key_type(struct wl1251 *wl,
750 mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 743 mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
751 break; 744 break;
752 default: 745 default:
753 wl1251_error("Unknown key algo 0x%x", mac80211_key->alg); 746 wl1251_error("Unknown key cipher 0x%x", mac80211_key->cipher);
754 return -EOPNOTSUPP; 747 return -EOPNOTSUPP;
755 } 748 }
756 749
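
This hunk tracks the mac80211 API change that replaced the enum key field (ALG_WEP/ALG_TKIP/ALG_CCMP) with the raw IEEE cipher suite selector, which is why ALG_WEP splits into 40- and 104-bit cases. Drivers on this API usually also advertise the suites they accept; a hedged sketch using the standard struct wiphy fields (the array and function names are illustrative):

/* Sketch: tell cfg80211 which suites the switch above can handle, so
 * userspace never requests one that would hit the -EOPNOTSUPP branch. */
static const u32 wl12xx_cipher_suites[] = {
	WLAN_CIPHER_SUITE_WEP40,
	WLAN_CIPHER_SUITE_WEP104,
	WLAN_CIPHER_SUITE_TKIP,
	WLAN_CIPHER_SUITE_CCMP,
};

static void wl12xx_setup_ciphers(struct ieee80211_hw *hw)
{
	hw->wiphy->cipher_suites = wl12xx_cipher_suites;
	hw->wiphy->n_cipher_suites = ARRAY_SIZE(wl12xx_cipher_suites);
}
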
@@ -783,7 +776,7 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
783 wl1251_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); 776 wl1251_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
784 wl1251_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); 777 wl1251_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
785 wl1251_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", 778 wl1251_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
786 key->alg, key->keyidx, key->keylen, key->flags); 779 key->cipher, key->keyidx, key->keylen, key->flags);
787 wl1251_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen); 780 wl1251_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen);
788 781
789 if (is_zero_ether_addr(addr)) { 782 if (is_zero_ether_addr(addr)) {
@@ -1438,5 +1431,5 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
1438 1431
1439MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core"); 1432MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core");
1440MODULE_LICENSE("GPL"); 1433MODULE_LICENSE("GPL");
1441MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 1434MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
1442MODULE_FIRMWARE(WL1251_FW_NAME); 1435MODULE_FIRMWARE(WL1251_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index b55cb2bd459a..0b997bdfec09 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.h b/drivers/net/wireless/wl12xx/wl1251_ps.h
index c688ac57aee4..e5db81fc1dfc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.h
@@ -1,14 +1,9 @@
1#ifndef __WL1251_PS_H__
2#define __WL1251_PS_H__
3
4/* 1/*
5 * This file is part of wl1251 2 * This file is part of wl1251
6 * 3 *
7 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
8 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
9 * 6 *
10 * Contact: Kalle Valo <kalle.valo@nokia.com>
11 *
12 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -25,6 +20,9 @@
25 * 20 *
26 */ 21 */
27 22
23#ifndef __WL1251_PS_H__
24#define __WL1251_PS_H__
25
28#include "wl1251.h" 26#include "wl1251.h"
29#include "wl1251_acx.h" 27#include "wl1251_acx.h"
30 28
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index d16edd9bf06c..a5809019c5c1 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 1b6294b3b996..25764592a596 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h
index da4e53406a0e..4448f635a4d8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index b901b6135654..c0b68b0a9aa8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -339,4 +339,4 @@ module_init(wl1251_sdio_init);
339module_exit(wl1251_sdio_exit); 339module_exit(wl1251_sdio_exit);
340 340
341MODULE_LICENSE("GPL"); 341MODULE_LICENSE("GPL");
342MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 342MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 27fdfaaeb074..334ded9881c0 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
@@ -344,5 +342,5 @@ module_init(wl1251_spi_init);
344module_exit(wl1251_spi_exit); 342module_exit(wl1251_spi_exit);
345 343
346MODULE_LICENSE("GPL"); 344MODULE_LICENSE("GPL");
347MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 345MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
348MODULE_ALIAS("spi:wl1251"); 346MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.h b/drivers/net/wireless/wl12xx/wl1251_spi.h
index 2e273a97e7f3..7dcf3cf7ae40 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index a38ec199187a..388492a7f41f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -189,7 +187,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
189 tx_hdr = (struct tx_double_buffer_desc *) skb->data; 187 tx_hdr = (struct tx_double_buffer_desc *) skb->data;
190 188
191 if (control->control.hw_key && 189 if (control->control.hw_key &&
192 control->control.hw_key->alg == ALG_TKIP) { 190 control->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
193 int hdrlen; 191 int hdrlen;
194 __le16 fc; 192 __le16 fc;
195 u16 length; 193 u16 length;
@@ -322,11 +320,6 @@ void wl1251_tx_work(struct work_struct *work)
322 320
323 ret = wl1251_tx_frame(wl, skb); 321 ret = wl1251_tx_frame(wl, skb);
324 if (ret == -EBUSY) { 322 if (ret == -EBUSY) {
325 /* firmware buffer is full, stop queues */
326 wl1251_debug(DEBUG_TX, "tx_work: fw buffer full, "
327 "stop queues");
328 ieee80211_stop_queues(wl->hw);
329 wl->tx_queue_stopped = true;
330 skb_queue_head(&wl->tx_queue, skb); 323 skb_queue_head(&wl->tx_queue, skb);
331 goto out; 324 goto out;
332 } else if (ret < 0) { 325 } else if (ret < 0) {
@@ -399,7 +392,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
399 */ 392 */
400 frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc)); 393 frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
401 if (info->control.hw_key && 394 if (info->control.hw_key &&
402 info->control.hw_key->alg == ALG_TKIP) { 395 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
403 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 396 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
404 memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen); 397 memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen);
405 skb_pull(skb, WL1251_TKIP_IV_SPACE); 398 skb_pull(skb, WL1251_TKIP_IV_SPACE);
@@ -449,6 +442,7 @@ void wl1251_tx_complete(struct wl1251 *wl)
449{ 442{
450 int i, result_index, num_complete = 0; 443 int i, result_index, num_complete = 0;
451 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr; 444 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
445 unsigned long flags;
452 446
453 if (unlikely(wl->state != WL1251_STATE_ON)) 447 if (unlikely(wl->state != WL1251_STATE_ON))
454 return; 448 return;
@@ -477,6 +471,20 @@ void wl1251_tx_complete(struct wl1251 *wl)
477 } 471 }
478 } 472 }
479 473
 474 if (wl->tx_queue_stopped &&
 475     skb_queue_len(&wl->tx_queue) <=
 476     WL1251_TX_QUEUE_LOW_WATERMARK) {
477
478 /* firmware buffer has space, restart queues */
479 wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
480 spin_lock_irqsave(&wl->wl_lock, flags);
481 ieee80211_wake_queues(wl->hw);
482 wl->tx_queue_stopped = false;
483 spin_unlock_irqrestore(&wl->wl_lock, flags);
484 ieee80211_queue_work(wl->hw, &wl->tx_work);
485
486 }
487
480 /* Every completed frame needs to be acknowledged */ 488 /* Every completed frame needs to be acknowledged */
481 if (num_complete) { 489 if (num_complete) {
482 /* 490 /*
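
Paired with the op_tx hunk above, this gives the queue a two-watermark hysteresis: traffic stops at WL1251_TX_QUEUE_HIGH_WATERMARK and resumes only once the backlog drains below WL1251_TX_QUEUE_LOW_WATERMARK, so a queue hovering near a single threshold does not stop and wake on every frame. The wake side, under the same assumptions as the earlier sketch (tx_maybe_wake() is a made-up name):

/* Sketch: wake mac80211 once the backlog drains below the low
 * watermark, then re-kick tx_work to flush what is still queued. */
static void tx_maybe_wake(struct wl1251 *wl)
{
	unsigned long flags;

	if (!wl->tx_queue_stopped ||
	    skb_queue_len(&wl->tx_queue) > WL1251_TX_QUEUE_LOW_WATERMARK)
		return;

	spin_lock_irqsave(&wl->wl_lock, flags);
	ieee80211_wake_queues(wl->hw);
	wl->tx_queue_stopped = false;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	ieee80211_queue_work(wl->hw, &wl->tx_work);
}
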
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index f40eeb37f5aa..96011e78cd5a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index bb245f05af49..f03ad088db8b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -269,7 +269,7 @@ int wl1271_acx_pd_threshold(struct wl1271 *wl)
269 269
270out: 270out:
271 kfree(pd); 271 kfree(pd);
272 return 0; 272 return ret;
273} 273}
274 274
275int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time) 275int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 9d68f0012f05..8e55cf8d509d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -948,9 +948,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
948 ieee80211_enable_dyn_ps(wl->vif); 948 ieee80211_enable_dyn_ps(wl->vif);
949 949
950 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { 950 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
951 mutex_unlock(&wl->mutex);
952 ieee80211_scan_completed(wl->hw, true); 951 ieee80211_scan_completed(wl->hw, true);
953 mutex_lock(&wl->mutex);
954 wl->scan.state = WL1271_SCAN_STATE_IDLE; 952 wl->scan.state = WL1271_SCAN_STATE_IDLE;
955 kfree(wl->scan.scanned_ch); 953 kfree(wl->scan.scanned_ch);
956 wl->scan.scanned_ch = NULL; 954 wl->scan.scanned_ch = NULL;
@@ -1439,7 +1437,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1439 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); 1437 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
1440 wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); 1438 wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
1441 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", 1439 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
1442 key_conf->alg, key_conf->keyidx, 1440 key_conf->cipher, key_conf->keyidx,
1443 key_conf->keylen, key_conf->flags); 1441 key_conf->keylen, key_conf->flags);
1444 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 1442 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
1445 1443
@@ -1455,20 +1453,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1455 if (ret < 0) 1453 if (ret < 0)
1456 goto out_unlock; 1454 goto out_unlock;
1457 1455
1458 switch (key_conf->alg) { 1456 switch (key_conf->cipher) {
1459 case ALG_WEP: 1457 case WLAN_CIPHER_SUITE_WEP40:
1458 case WLAN_CIPHER_SUITE_WEP104:
1460 key_type = KEY_WEP; 1459 key_type = KEY_WEP;
1461 1460
1462 key_conf->hw_key_idx = key_conf->keyidx; 1461 key_conf->hw_key_idx = key_conf->keyidx;
1463 break; 1462 break;
1464 case ALG_TKIP: 1463 case WLAN_CIPHER_SUITE_TKIP:
1465 key_type = KEY_TKIP; 1464 key_type = KEY_TKIP;
1466 1465
1467 key_conf->hw_key_idx = key_conf->keyidx; 1466 key_conf->hw_key_idx = key_conf->keyidx;
1468 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); 1467 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
1469 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); 1468 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1470 break; 1469 break;
1471 case ALG_CCMP: 1470 case WLAN_CIPHER_SUITE_CCMP:
1472 key_type = KEY_AES; 1471 key_type = KEY_AES;
1473 1472
1474 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1473 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -1476,7 +1475,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1476 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); 1475 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1477 break; 1476 break;
1478 default: 1477 default:
 1479 wl1271_error("Unknown key algo 0x%x", key_conf->alg); 1478 wl1271_error("Unknown key cipher 0x%x", key_conf->cipher);
1480 1479
1481 ret = -EOPNOTSUPP; 1480 ret = -EOPNOTSUPP;
1482 goto out_sleep; 1481 goto out_sleep;
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/wl1271_scan.c
index fec43eed8c55..e4950c8e396e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.c
+++ b/drivers/net/wireless/wl12xx/wl1271_scan.c
@@ -215,9 +215,7 @@ void wl1271_scan_stm(struct wl1271 *wl)
215 break; 215 break;
216 216
217 case WL1271_SCAN_STATE_DONE: 217 case WL1271_SCAN_STATE_DONE:
218 mutex_unlock(&wl->mutex);
219 ieee80211_scan_completed(wl->hw, false); 218 ieee80211_scan_completed(wl->hw, false);
220 mutex_lock(&wl->mutex);
221 219
222 kfree(wl->scan.scanned_ch); 220 kfree(wl->scan.scanned_ch);
223 wl->scan.scanned_ch = NULL; 221 wl->scan.scanned_ch = NULL;
@@ -248,7 +246,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
248 246
249 wl->scan.req = req; 247 wl->scan.req = req;
250 248
251 wl->scan.scanned_ch = kzalloc(req->n_channels * 249 wl->scan.scanned_ch = kcalloc(req->n_channels,
252 sizeof(*wl->scan.scanned_ch), 250 sizeof(*wl->scan.scanned_ch),
253 GFP_KERNEL); 251 GFP_KERNEL);
254 wl1271_scan_stm(wl); 252 wl1271_scan_stm(wl);
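
kcalloc() is the overflow-checked spelling of a zeroed array allocation: it behaves like kzalloc(n * size, ...) but returns NULL if n * size would wrap, instead of quietly handing back a too-small buffer when the element count comes from the hardware or from userspace, as req->n_channels does here. With ptr and n standing for any element pointer and count:

ptr = kzalloc(n * sizeof(*ptr), GFP_KERNEL); /* n * size may overflow */
ptr = kcalloc(n, sizeof(*ptr), GFP_KERNEL);  /* fails cleanly on overflow */
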
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index c592cc2e9fe8..dc0b46c93c4b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -193,7 +193,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
193 info = IEEE80211_SKB_CB(skb); 193 info = IEEE80211_SKB_CB(skb);
194 194
195 if (info->control.hw_key && 195 if (info->control.hw_key &&
196 info->control.hw_key->alg == ALG_TKIP) 196 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
197 extra = WL1271_TKIP_IV_SPACE; 197 extra = WL1271_TKIP_IV_SPACE;
198 198
199 if (info->control.hw_key) { 199 if (info->control.hw_key) {
@@ -347,7 +347,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
347 347
348 /* remove TKIP header space if present */ 348 /* remove TKIP header space if present */
349 if (info->control.hw_key && 349 if (info->control.hw_key &&
350 info->control.hw_key->alg == ALG_TKIP) { 350 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
351 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 351 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
352 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen); 352 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
353 skb_pull(skb, WL1271_TKIP_IV_SPACE); 353 skb_pull(skb, WL1271_TKIP_IV_SPACE);
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index a1cc2d498a1c..420e9e986a18 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -29,7 +29,6 @@
29 29
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/ethtool.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/interrupt.h> 33#include <linux/interrupt.h>
35#include <linux/in.h> 34#include <linux/in.h>
@@ -1411,15 +1410,6 @@ static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
1411 return wstats; 1410 return wstats;
1412} 1411}
1413 1412
1414static void wl3501_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1415{
1416 strlcpy(info->driver, "wl3501_cs", sizeof(info->driver));
1417}
1418
1419static const struct ethtool_ops ops = {
1420 .get_drvinfo = wl3501_get_drvinfo
1421};
1422
1423/** 1413/**
1424 * wl3501_detach - deletes a driver "instance" 1414 * wl3501_detach - deletes a driver "instance"
1425 * @link - FILL_IN 1415 * @link - FILL_IN
@@ -1905,7 +1895,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1905 this->p_dev = p_dev; 1895 this->p_dev = p_dev;
1906 dev->wireless_data = &this->wireless_data; 1896 dev->wireless_data = &this->wireless_data;
1907 dev->wireless_handlers = &wl3501_handler_def; 1897 dev->wireless_handlers = &wl3501_handler_def;
1908 SET_ETHTOOL_OPS(dev, &ops);
1909 netif_stop_queue(dev); 1898 netif_stop_queue(dev);
1910 p_dev->priv = dev; 1899 p_dev->priv = dev;
1911 1900
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index b2af3c549bb3..87a95bcfee57 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -973,6 +973,7 @@ static void dump_fw_registers(struct zd_chip *chip)
973 973
974static int print_fw_version(struct zd_chip *chip) 974static int print_fw_version(struct zd_chip *chip)
975{ 975{
976 struct wiphy *wiphy = zd_chip_to_mac(chip)->hw->wiphy;
976 int r; 977 int r;
977 u16 version; 978 u16 version;
978 979
@@ -982,6 +983,10 @@ static int print_fw_version(struct zd_chip *chip)
982 return r; 983 return r;
983 984
984 dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version); 985 dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version);
986
987 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version),
988 "%04hx", version);
989
985 return 0; 990 return 0;
986} 991}
987 992
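
Filling wiphy->fw_version lets cfg80211 report the firmware revision through the standard driver-info path, so after this change a query like "ethtool -i wlan0" should show the %04hx string in its firmware-version field rather than leaving it empty (the interface name is only an example).
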
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b50fedcef8ac..788a9bc1dbac 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1395,7 +1395,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1395} 1395}
1396 1396
1397/* Common code used when first setting up, and when resuming. */ 1397/* Common code used when first setting up, and when resuming. */
1398static int talk_to_backend(struct xenbus_device *dev, 1398static int talk_to_netback(struct xenbus_device *dev,
1399 struct netfront_info *info) 1399 struct netfront_info *info)
1400{ 1400{
1401 const char *message; 1401 const char *message;
@@ -1545,7 +1545,7 @@ static int xennet_connect(struct net_device *dev)
1545 return -ENODEV; 1545 return -ENODEV;
1546 } 1546 }
1547 1547
1548 err = talk_to_backend(np->xbdev, np); 1548 err = talk_to_netback(np->xbdev, np);
1549 if (err) 1549 if (err)
1550 return err; 1550 return err;
1551 1551
@@ -1599,7 +1599,7 @@ static int xennet_connect(struct net_device *dev)
1599/** 1599/**
1600 * Callback received when the backend's state changes. 1600 * Callback received when the backend's state changes.
1601 */ 1601 */
1602static void backend_changed(struct xenbus_device *dev, 1602static void netback_changed(struct xenbus_device *dev,
1603 enum xenbus_state backend_state) 1603 enum xenbus_state backend_state)
1604{ 1604{
1605 struct netfront_info *np = dev_get_drvdata(&dev->dev); 1605 struct netfront_info *np = dev_get_drvdata(&dev->dev);
@@ -1801,7 +1801,7 @@ static struct xenbus_driver netfront_driver = {
1801 .probe = netfront_probe, 1801 .probe = netfront_probe,
1802 .remove = __devexit_p(xennet_remove), 1802 .remove = __devexit_p(xennet_remove),
1803 .resume = netfront_resume, 1803 .resume = netfront_resume,
1804 .otherend_changed = backend_changed, 1804 .otherend_changed = netback_changed,
1805}; 1805};
1806 1806
1807static int __init netif_init(void) 1807static int __init netif_init(void)
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index ecbbb688eba0..f3f8be5a35fa 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -641,7 +641,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
641 skb_put(skb, len); /* Tell the skb how much data we got */ 641 skb_put(skb, len); /* Tell the skb how much data we got */
642 642
643 skb->protocol = eth_type_trans(skb, dev); 643 skb->protocol = eth_type_trans(skb, dev);
644 skb->ip_summed = CHECKSUM_NONE; 644 skb_checksum_none_assert(skb);
645 645
646 dev->stats.rx_packets++; 646 dev->stats.rx_packets++;
647 dev->stats.rx_bytes += len; 647 dev->stats.rx_bytes += len;
@@ -1269,6 +1269,16 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
1269 return 0; 1269 return 0;
1270} 1270}
1271 1271
1272#ifdef CONFIG_NET_POLL_CONTROLLER
1273static void
1274xemaclite_poll_controller(struct net_device *ndev)
1275{
1276 disable_irq(ndev->irq);
1277 xemaclite_interrupt(ndev->irq, ndev);
1278 enable_irq(ndev->irq);
1279}
1280#endif
1281
1272static struct net_device_ops xemaclite_netdev_ops = { 1282static struct net_device_ops xemaclite_netdev_ops = {
1273 .ndo_open = xemaclite_open, 1283 .ndo_open = xemaclite_open,
1274 .ndo_stop = xemaclite_close, 1284 .ndo_stop = xemaclite_close,
@@ -1276,6 +1286,9 @@ static struct net_device_ops xemaclite_netdev_ops = {
1276 .ndo_set_mac_address = xemaclite_set_mac_address, 1286 .ndo_set_mac_address = xemaclite_set_mac_address,
1277 .ndo_tx_timeout = xemaclite_tx_timeout, 1287 .ndo_tx_timeout = xemaclite_tx_timeout,
1278 .ndo_get_stats = xemaclite_get_stats, 1288 .ndo_get_stats = xemaclite_get_stats,
1289#ifdef CONFIG_NET_POLL_CONTROLLER
1290 .ndo_poll_controller = xemaclite_poll_controller,
1291#endif
1279}; 1292};
1280 1293
1281/* Match table for OF platform binding */ 1294/* Match table for OF platform binding */
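
The new handler follows the usual ndo_poll_controller pattern: netpoll clients such as netconsole call it to pump the RX path when interrupts cannot be relied on, so it masks the device IRQ and runs the ISR synchronously. A quick way to exercise it is loading netconsole against this interface, e.g. "modprobe netconsole netconsole=@/eth0,@10.0.0.2/" (interface name and target address are placeholders).
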
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 4eb67aed68dd..cd1b3dcd61db 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -646,7 +646,7 @@ static int yellowfin_open(struct net_device *dev)
646 init_timer(&yp->timer); 646 init_timer(&yp->timer);
647 yp->timer.expires = jiffies + 3*HZ; 647 yp->timer.expires = jiffies + 3*HZ;
648 yp->timer.data = (unsigned long)dev; 648 yp->timer.data = (unsigned long)dev;
649 yp->timer.function = &yellowfin_timer; /* timer handler */ 649 yp->timer.function = yellowfin_timer; /* timer handler */
650 add_timer(&yp->timer); 650 add_timer(&yp->timer);
651 651
652 return 0; 652 return 0;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f0037eefd44e..0f4ef8769a3d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,6 +208,7 @@ struct qdio_dev_perf_stat {
208 unsigned int eqbs_partial; 208 unsigned int eqbs_partial;
209 unsigned int sqbs; 209 unsigned int sqbs;
210 unsigned int sqbs_partial; 210 unsigned int sqbs_partial;
211 unsigned int int_discarded;
211} ____cacheline_aligned; 212} ____cacheline_aligned;
212 213
213struct qdio_queue_perf_stat { 214struct qdio_queue_perf_stat {
@@ -222,6 +223,10 @@ struct qdio_queue_perf_stat {
222 unsigned int nr_sbal_total; 223 unsigned int nr_sbal_total;
223}; 224};
224 225
226enum qdio_queue_irq_states {
227 QDIO_QUEUE_IRQS_DISABLED,
228};
229
225struct qdio_input_q { 230struct qdio_input_q {
226 /* input buffer acknowledgement flag */ 231 /* input buffer acknowledgement flag */
227 int polling; 232 int polling;
@@ -231,6 +236,10 @@ struct qdio_input_q {
231 int ack_count; 236 int ack_count;
232 /* last time of noticing incoming data */ 237 /* last time of noticing incoming data */
233 u64 timestamp; 238 u64 timestamp;
239 /* upper-layer polling flag */
240 unsigned long queue_irq_state;
241 /* callback to start upper-layer polling */
242 void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
234}; 243};
235 244
236struct qdio_output_q { 245struct qdio_output_q {
@@ -399,6 +408,26 @@ static inline int multicast_outbound(struct qdio_q *q)
399#define sub_buf(bufnr, dec) \ 408#define sub_buf(bufnr, dec) \
400 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) 409 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
401 410
411#define queue_irqs_enabled(q) \
412 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
413#define queue_irqs_disabled(q) \
414 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
415
416#define TIQDIO_SHARED_IND 63
417
418/* device state change indicators */
419struct indicator_t {
420 u32 ind; /* u32 because of compare-and-swap performance */
421 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
422};
423
424extern struct indicator_t *q_indicators;
425
426static inline int shared_ind(struct qdio_irq *irq_ptr)
427{
428 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
429}
430
402/* prototypes for thin interrupt */ 431/* prototypes for thin interrupt */
403void qdio_setup_thinint(struct qdio_irq *irq_ptr); 432void qdio_setup_thinint(struct qdio_irq *irq_ptr);
404int qdio_establish_thinint(struct qdio_irq *irq_ptr); 433int qdio_establish_thinint(struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 6ce83f56d537..28868e7471a5 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v)
56 56
57 seq_printf(m, "DSCI: %d nr_used: %d\n", 57 seq_printf(m, "DSCI: %d nr_used: %d\n",
58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); 58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
59 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); 59 seq_printf(m, "ftc: %d last_move: %d\n",
60 seq_printf(m, "polling: %d ack start: %d ack count: %d\n", 60 q->first_to_check, q->last_move);
61 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); 61 if (q->is_input_q) {
62 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
63 q->u.in.polling, q->u.in.ack_start,
64 q->u.in.ack_count);
65 seq_printf(m, "IRQs disabled: %u\n",
66 test_bit(QDIO_QUEUE_IRQS_DISABLED,
67 &q->u.in.queue_irq_state));
68 }
62 seq_printf(m, "SBAL states:\n"); 69 seq_printf(m, "SBAL states:\n");
63 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 70 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
64 71
@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v)
113 return 0; 120 return 0;
114} 121}
115 122
116static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
117 size_t count, loff_t *off)
118{
119 struct seq_file *seq = file->private_data;
120 struct qdio_q *q = seq->private;
121
122 if (!q)
123 return 0;
124 if (q->is_input_q)
125 xchg(q->irq_ptr->dsci, 1);
126 local_bh_disable();
127 tasklet_schedule(&q->tasklet);
128 local_bh_enable();
129 return count;
130}
131
132static int qstat_seq_open(struct inode *inode, struct file *filp) 123static int qstat_seq_open(struct inode *inode, struct file *filp)
133{ 124{
134 return single_open(filp, qstat_show, 125 return single_open(filp, qstat_show,
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = {
139 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
140 .open = qstat_seq_open, 131 .open = qstat_seq_open,
141 .read = seq_read, 132 .read = seq_read,
142 .write = qstat_seq_write,
143 .llseek = seq_lseek, 133 .llseek = seq_lseek,
144 .release = single_release, 134 .release = single_release,
145}; 135};
@@ -166,7 +156,8 @@ static char *qperf_names[] = {
166 "QEBSM eqbs", 156 "QEBSM eqbs",
167 "QEBSM eqbs partial", 157 "QEBSM eqbs partial",
168 "QEBSM sqbs", 158 "QEBSM sqbs",
169 "QEBSM sqbs partial" 159 "QEBSM sqbs partial",
160 "Discarded interrupts"
170}; 161};
171 162
172static int qperf_show(struct seq_file *m, void *v) 163static int qperf_show(struct seq_file *m, void *v)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 00520f9a7a8e..5fcfa7f9e9ef 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -884,8 +884,19 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
884 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 884 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
885 return; 885 return;
886 886
887 for_each_input_queue(irq_ptr, q, i) 887 for_each_input_queue(irq_ptr, q, i) {
888 tasklet_schedule(&q->tasklet); 888 if (q->u.in.queue_start_poll) {
889 /* skip if polling is enabled or already in work */
890 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
891 &q->u.in.queue_irq_state)) {
892 qperf_inc(q, int_discarded);
893 continue;
894 }
895 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
896 q->irq_ptr->int_parm);
897 } else
898 tasklet_schedule(&q->tasklet);
899 }
889 900
890 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 901 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
891 return; 902 return;
@@ -1519,6 +1530,129 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1519} 1530}
1520EXPORT_SYMBOL_GPL(do_QDIO); 1531EXPORT_SYMBOL_GPL(do_QDIO);
1521 1532
1533/**
1534 * qdio_start_irq - process input buffers
1535 * @cdev: associated ccw_device for the qdio subchannel
1536 * @nr: input queue number
1537 *
1538 * Return codes
1539 * 0 - success
1540 * 1 - irqs not started since new data is available
1541 */
1542int qdio_start_irq(struct ccw_device *cdev, int nr)
1543{
1544 struct qdio_q *q;
1545 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1546
1547 if (!irq_ptr)
1548 return -ENODEV;
1549 q = irq_ptr->input_qs[nr];
1550
1551 WARN_ON(queue_irqs_enabled(q));
1552
1553 if (!shared_ind(q->irq_ptr))
1554 xchg(q->irq_ptr->dsci, 0);
1555
1556 qdio_stop_polling(q);
1557 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1558
1559 /*
1560 * We need to check again to not lose initiative after
1561 * resetting the ACK state.
1562 */
1563 if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
1564 goto rescan;
1565 if (!qdio_inbound_q_done(q))
1566 goto rescan;
1567 return 0;
1568
1569rescan:
1570 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1571 &q->u.in.queue_irq_state))
1572 return 0;
1573 else
1574 return 1;
1575
1576}
1577EXPORT_SYMBOL(qdio_start_irq);
1578
1579/**
1580 * qdio_get_next_buffers - process input buffers
1581 * @cdev: associated ccw_device for the qdio subchannel
1582 * @nr: input queue number
1583 * @bufnr: first filled buffer number
1584 * @error: buffers are in error state
1585 *
1586 * Return codes
1587 * < 0 - error
1588 * = 0 - no new buffers found
1589 * > 0 - number of processed buffers
1590 */
1591int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1592 int *error)
1593{
1594 struct qdio_q *q;
1595 int start, end;
1596 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1597
1598 if (!irq_ptr)
1599 return -ENODEV;
1600 q = irq_ptr->input_qs[nr];
1601 WARN_ON(queue_irqs_enabled(q));
1602
1603 qdio_sync_after_thinint(q);
1604
1605 /*
1606 * The interrupt could be caused by a PCI request. Check the
1607 * PCI capable outbound queues.
1608 */
1609 qdio_check_outbound_after_thinint(q);
1610
1611 if (!qdio_inbound_q_moved(q))
1612 return 0;
1613
1614 /* Note: upper-layer MUST stop processing immediately here ... */
1615 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1616 return -EIO;
1617
1618 start = q->first_to_kick;
1619 end = q->first_to_check;
1620 *bufnr = start;
1621 *error = q->qdio_error;
1622
1623 /* for the next time */
1624 q->first_to_kick = end;
1625 q->qdio_error = 0;
1626 return sub_buf(end, start);
1627}
1628EXPORT_SYMBOL(qdio_get_next_buffers);
1629
1630/**
1631 * qdio_stop_irq - disable interrupt processing for the device
1632 * @cdev: associated ccw_device for the qdio subchannel
1633 * @nr: input queue number
1634 *
1635 * Return codes
1636 * 0 - interrupts were already disabled
1637 * 1 - interrupts successfully disabled
1638 */
1639int qdio_stop_irq(struct ccw_device *cdev, int nr)
1640{
1641 struct qdio_q *q;
1642 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1643
1644 if (!irq_ptr)
1645 return -ENODEV;
1646 q = irq_ptr->input_qs[nr];
1647
1648 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1649 &q->u.in.queue_irq_state))
1650 return 0;
1651 else
1652 return 1;
1653}
1654EXPORT_SYMBOL(qdio_stop_irq);
1655
1522static int __init init_QDIO(void) 1656static int __init init_QDIO(void)
1523{ 1657{
1524 int rc; 1658 int rc;
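
These three exports define the polling contract that qeth's NAPI code further down builds on: queue_start_poll fires once and leaves the queue interrupt logically disabled, the upper layer drains buffers with qdio_get_next_buffers(), and qdio_start_irq() re-arms the interrupt only when the queue is idle, rescanning if data raced in meanwhile. A condensed sketch of a conforming NAPI poll loop (struct my_card and process_buffers() are made up; the qdio calls use the signatures introduced here):

/* Sketch of an upper-layer NAPI poll on top of the new qdio polling
 * API; budget accounting and error handling are trimmed for brevity. */
static int sketch_poll(struct napi_struct *napi, int budget)
{
	struct my_card *card = container_of(napi, struct my_card, napi);
	int work_done = 0, bufnr, error, n;

	while (work_done < budget) {
		n = qdio_get_next_buffers(card->cdev, 0, &bufnr, &error);
		if (n <= 0)
			break;		/* no new buffers (or an error code) */
		work_done += process_buffers(card, bufnr, n, error);
	}

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable adapter interrupts; reschedule if new data
		 * arrived while we were re-arming */
		if (qdio_start_irq(card->cdev, 0))
			napi_schedule(napi);
	}
	return work_done;
}
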
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 34c7e4046df4..a13cf7ec64b2 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
161 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); 161 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
162 162
163 q->is_input_q = 1; 163 q->is_input_q = 1;
164 q->u.in.queue_start_poll = qdio_init->queue_start_poll;
164 setup_storage_lists(q, irq_ptr, input_sbal_array, i); 165 setup_storage_lists(q, irq_ptr, input_sbal_array, i);
165 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; 166 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
166 167
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8daf1b99f153..752dbee06af5 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -25,24 +25,20 @@
25 */ 25 */
26#define TIQDIO_NR_NONSHARED_IND 63 26#define TIQDIO_NR_NONSHARED_IND 63
27#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) 27#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
28#define TIQDIO_SHARED_IND 63
29 28
30/* list of thin interrupt input queues */ 29/* list of thin interrupt input queues */
31static LIST_HEAD(tiq_list); 30static LIST_HEAD(tiq_list);
32DEFINE_MUTEX(tiq_list_lock); 31DEFINE_MUTEX(tiq_list_lock);
33 32
34/* adapter local summary indicator */ 33/* adapter local summary indicator */
35static unsigned char *tiqdio_alsi; 34static u8 *tiqdio_alsi;
36 35
37/* device state change indicators */ 36struct indicator_t *q_indicators;
38struct indicator_t {
39 u32 ind; /* u32 because of compare-and-swap performance */
40 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
41};
42static struct indicator_t *q_indicators;
43 37
44static int css_qdio_omit_svs; 38static int css_qdio_omit_svs;
45 39
40static u64 last_ai_time;
41
46static inline unsigned long do_clear_global_summary(void) 42static inline unsigned long do_clear_global_summary(void)
47{ 43{
48 register unsigned long __fn asm("1") = 3; 44 register unsigned long __fn asm("1") = 3;
@@ -116,59 +112,73 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
116 } 112 }
117} 113}
118 114
119static inline int shared_ind(struct qdio_irq *irq_ptr) 115static inline int shared_ind_used(void)
120{ 116{
121 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; 117 return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count);
122} 118}
123 119
124/** 120/**
125 * tiqdio_thinint_handler - thin interrupt handler for qdio 121 * tiqdio_thinint_handler - thin interrupt handler for qdio
126 * @ind: pointer to adapter local summary indicator 122 * @alsi: pointer to adapter local summary indicator
127 * @drv_data: NULL 123 * @data: NULL
128 */ 124 */
129static void tiqdio_thinint_handler(void *ind, void *drv_data) 125static void tiqdio_thinint_handler(void *alsi, void *data)
130{ 126{
131 struct qdio_q *q; 127 struct qdio_q *q;
132 128
129 last_ai_time = S390_lowcore.int_clock;
130
133 /* 131 /*
134 * SVS only when needed: issue SVS to benefit from iqdio interrupt 132 * SVS only when needed: issue SVS to benefit from iqdio interrupt
135 * avoidance (SVS clears adapter interrupt suppression overwrite) 133 * avoidance (SVS clears adapter interrupt suppression overwrite).
136 */ 134 */
137 if (!css_qdio_omit_svs) 135 if (!css_qdio_omit_svs)
138 do_clear_global_summary(); 136 do_clear_global_summary();
139 137
140 /* 138 /* reset local summary indicator */
141 * reset local summary indicator (tiqdio_alsi) to stop adapter 139 if (shared_ind_used())
142 * interrupts for now 140 xchg(tiqdio_alsi, 0);
143 */
144 xchg((u8 *)ind, 0);
145 141
146 /* protect tiq_list entries, only changed in activate or shutdown */ 142 /* protect tiq_list entries, only changed in activate or shutdown */
147 rcu_read_lock(); 143 rcu_read_lock();
148 144
149 /* check for work on all inbound thinint queues */ 145 /* check for work on all inbound thinint queues */
150 list_for_each_entry_rcu(q, &tiq_list, entry) 146 list_for_each_entry_rcu(q, &tiq_list, entry) {
147
151 /* only process queues from changed sets */ 148 /* only process queues from changed sets */
152 if (*q->irq_ptr->dsci) { 149 if (!*q->irq_ptr->dsci)
153 qperf_inc(q, adapter_int); 150 continue;
154 151
152 if (q->u.in.queue_start_poll) {
153 /* skip if polling is enabled or already in work */
154 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
155 &q->u.in.queue_irq_state)) {
156 qperf_inc(q, int_discarded);
157 continue;
158 }
159
160 /* avoid dsci clear here, done after processing */
161 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
162 q->irq_ptr->int_parm);
163 } else {
155 /* only clear it if the indicator is non-shared */ 164 /* only clear it if the indicator is non-shared */
156 if (!shared_ind(q->irq_ptr)) 165 if (!shared_ind(q->irq_ptr))
157 xchg(q->irq_ptr->dsci, 0); 166 xchg(q->irq_ptr->dsci, 0);
158 /* 167 /*
159 * don't call inbound processing directly since 168 * Call inbound processing but not directly
160 * that could starve other thinint queues 169 * since that could starve other thinint queues.
161 */ 170 */
162 tasklet_schedule(&q->tasklet); 171 tasklet_schedule(&q->tasklet);
163 } 172 }
164 173 qperf_inc(q, adapter_int);
174 }
165 rcu_read_unlock(); 175 rcu_read_unlock();
166 176
167 /* 177 /*
168 * if we used the shared indicator clear it now after all queues 178 * If the shared indicator was used clear it now after all queues
169 * were processed 179 * were processed.
170 */ 180 */
171 if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { 181 if (shared_ind_used()) {
172 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); 182 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
173 183
174 /* prevent racing */ 184 /* prevent racing */
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 977bb4d4ed15..456b18743397 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -100,6 +100,6 @@ config QETH_IPV6
100 100
101config CCWGROUP 101config CCWGROUP
102 tristate 102 tristate
103 default (LCS || CTCM || QETH) 103 default (LCS || CTCM || QETH || CLAW)
104 104
105endmenu 105endmenu
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d1257768be90..6be43eb126b4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -676,6 +676,7 @@ enum qeth_discipline_id {
676}; 676};
677 677
678struct qeth_discipline { 678struct qeth_discipline {
679 void (*start_poll)(struct ccw_device *, int, unsigned long);
679 qdio_handler_t *input_handler; 680 qdio_handler_t *input_handler;
680 qdio_handler_t *output_handler; 681 qdio_handler_t *output_handler;
681 int (*recover)(void *ptr); 682 int (*recover)(void *ptr);
@@ -702,6 +703,16 @@ struct qeth_skb_data {
702#define QETH_SKB_MAGIC 0x71657468 703#define QETH_SKB_MAGIC 0x71657468
703#define QETH_SIGA_CC2_RETRIES 3 704#define QETH_SIGA_CC2_RETRIES 3
704 705
706struct qeth_rx {
707 int b_count;
708 int b_index;
709 struct qdio_buffer_element *b_element;
710 int e_offset;
711 int qdio_err;
712};
713
714#define QETH_NAPI_WEIGHT 128
715
705struct qeth_card { 716struct qeth_card {
706 struct list_head list; 717 struct list_head list;
707 enum qeth_card_states state; 718 enum qeth_card_states state;
@@ -749,6 +760,8 @@ struct qeth_card {
749 debug_info_t *debug; 760 debug_info_t *debug;
750 struct mutex conf_mutex; 761 struct mutex conf_mutex;
751 struct mutex discipline_mutex; 762 struct mutex discipline_mutex;
763 struct napi_struct napi;
764 struct qeth_rx rx;
752}; 765};
753 766
754struct qeth_card_list_struct { 767struct qeth_card_list_struct {
@@ -831,6 +844,10 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
831 struct qdio_buffer *, struct qdio_buffer_element **, int *, 844 struct qdio_buffer *, struct qdio_buffer_element **, int *,
832 struct qeth_hdr **); 845 struct qeth_hdr **);
833void qeth_schedule_recovery(struct qeth_card *); 846void qeth_schedule_recovery(struct qeth_card *);
847void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
848void qeth_qdio_input_handler(struct ccw_device *,
849 unsigned int, unsigned int, int,
850 int, unsigned long);
834void qeth_qdio_output_handler(struct ccw_device *, unsigned int, 851void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
835 int, int, int, unsigned long); 852 int, int, int, unsigned long);
836void qeth_clear_ipacmd_list(struct qeth_card *); 853void qeth_clear_ipacmd_list(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3a5a18a0fc28..764267062601 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2911,6 +2911,27 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2911 } 2911 }
2912} 2912}
2913 2913
2914void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
2915 unsigned long card_ptr)
2916{
2917 struct qeth_card *card = (struct qeth_card *)card_ptr;
2918
2919 if (card->dev)
2920 napi_schedule(&card->napi);
2921}
2922EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
2923
2924void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
2925 unsigned int queue, int first_element, int count,
2926 unsigned long card_ptr)
2927{
2928 struct qeth_card *card = (struct qeth_card *)card_ptr;
2929
2930 if (qdio_err)
2931 qeth_schedule_recovery(card);
2932}
2933EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
2934
2914void qeth_qdio_output_handler(struct ccw_device *ccwdev, 2935void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2915 unsigned int qdio_error, int __queue, int first_element, 2936 unsigned int qdio_error, int __queue, int first_element,
2916 int count, unsigned long card_ptr) 2937 int count, unsigned long card_ptr)
@@ -3843,6 +3864,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
3843 init_data.no_output_qs = card->qdio.no_out_queues; 3864 init_data.no_output_qs = card->qdio.no_out_queues;
3844 init_data.input_handler = card->discipline.input_handler; 3865 init_data.input_handler = card->discipline.input_handler;
3845 init_data.output_handler = card->discipline.output_handler; 3866 init_data.output_handler = card->discipline.output_handler;
3867 init_data.queue_start_poll = card->discipline.start_poll;
3846 init_data.int_parm = (unsigned long) card; 3868 init_data.int_parm = (unsigned long) card;
3847 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; 3869 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3848 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; 3870 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
@@ -4513,8 +4535,8 @@ static struct {
4513/* 20 */{"queue 1 buffer usage"}, 4535/* 20 */{"queue 1 buffer usage"},
4514 {"queue 2 buffer usage"}, 4536 {"queue 2 buffer usage"},
4515 {"queue 3 buffer usage"}, 4537 {"queue 3 buffer usage"},
4516 {"rx handler time"}, 4538 {"rx poll time"},
4517 {"rx handler count"}, 4539 {"rx poll count"},
4518 {"rx do_QDIO time"}, 4540 {"rx do_QDIO time"},
4519 {"rx do_QDIO count"}, 4541 {"rx do_QDIO count"},
4520 {"tx handler time"}, 4542 {"tx handler time"},
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 830d63524d61..01c3c1f77879 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -407,29 +407,25 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
407 return rc; 407 return rc;
408} 408}
409 409
410static void qeth_l2_process_inbound_buffer(struct qeth_card *card, 410static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
411 struct qeth_qdio_buffer *buf, int index) 411 int budget, int *done)
412{ 412{
413 struct qdio_buffer_element *element; 413 int work_done = 0;
414 struct sk_buff *skb; 414 struct sk_buff *skb;
415 struct qeth_hdr *hdr; 415 struct qeth_hdr *hdr;
416 int offset;
417 unsigned int len; 416 unsigned int len;
418 417
419 /* get first element of current buffer */ 418 *done = 0;
420 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 419 BUG_ON(!budget);
421 offset = 0; 420 while (budget) {
422 if (card->options.performance_stats) 421 skb = qeth_core_get_next_skb(card,
423 card->perf_stats.bufs_rec++; 422 card->qdio.in_q->bufs[card->rx.b_index].buffer,
424 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, 423 &card->rx.b_element, &card->rx.e_offset, &hdr);
425 &offset, &hdr))) { 424 if (!skb) {
426 skb->dev = card->dev; 425 *done = 1;
427 /* is device UP ? */ 426 break;
428 if (!(card->dev->flags & IFF_UP)) {
429 dev_kfree_skb_any(skb);
430 continue;
431 } 427 }
432 428 skb->dev = card->dev;
433 switch (hdr->hdr.l2.id) { 429 switch (hdr->hdr.l2.id) {
434 case QETH_HEADER_TYPE_LAYER2: 430 case QETH_HEADER_TYPE_LAYER2:
435 skb->pkt_type = PACKET_HOST; 431 skb->pkt_type = PACKET_HOST;
@@ -441,7 +437,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
441 if (skb->protocol == htons(ETH_P_802_2)) 437 if (skb->protocol == htons(ETH_P_802_2))
442 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 438 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
443 len = skb->len; 439 len = skb->len;
444 netif_rx(skb); 440 netif_receive_skb(skb);
445 break; 441 break;
446 case QETH_HEADER_TYPE_OSN: 442 case QETH_HEADER_TYPE_OSN:
447 if (card->info.type == QETH_CARD_TYPE_OSN) { 443 if (card->info.type == QETH_CARD_TYPE_OSN) {
@@ -459,9 +455,87 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
459 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 455 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
460 continue; 456 continue;
461 } 457 }
458 work_done++;
459 budget--;
462 card->stats.rx_packets++; 460 card->stats.rx_packets++;
463 card->stats.rx_bytes += len; 461 card->stats.rx_bytes += len;
464 } 462 }
463 return work_done;
464}
465
466static int qeth_l2_poll(struct napi_struct *napi, int budget)
467{
468 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
469 int work_done = 0;
470 struct qeth_qdio_buffer *buffer;
471 int done;
472 int new_budget = budget;
473
474 if (card->options.performance_stats) {
475 card->perf_stats.inbound_cnt++;
476 card->perf_stats.inbound_start_time = qeth_get_micros();
477 }
478
479 while (1) {
480 if (!card->rx.b_count) {
481 card->rx.qdio_err = 0;
482 card->rx.b_count = qdio_get_next_buffers(
483 card->data.ccwdev, 0, &card->rx.b_index,
484 &card->rx.qdio_err);
485 if (card->rx.b_count <= 0) {
486 card->rx.b_count = 0;
487 break;
488 }
489 card->rx.b_element =
490 &card->qdio.in_q->bufs[card->rx.b_index]
491 .buffer->element[0];
492 card->rx.e_offset = 0;
493 }
494
495 while (card->rx.b_count) {
496 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
497 if (!(card->rx.qdio_err &&
498 qeth_check_qdio_errors(card, buffer->buffer,
499 card->rx.qdio_err, "qinerr")))
500 work_done += qeth_l2_process_inbound_buffer(
501 card, new_budget, &done);
502 else
503 done = 1;
504
505 if (done) {
506 if (card->options.performance_stats)
507 card->perf_stats.bufs_rec++;
508 qeth_put_buffer_pool_entry(card,
509 buffer->pool_entry);
510 qeth_queue_input_buffer(card, card->rx.b_index);
511 card->rx.b_count--;
512 if (card->rx.b_count) {
513 card->rx.b_index =
514 (card->rx.b_index + 1) %
515 QDIO_MAX_BUFFERS_PER_Q;
516 card->rx.b_element =
517 &card->qdio.in_q
518 ->bufs[card->rx.b_index]
519 .buffer->element[0];
520 card->rx.e_offset = 0;
521 }
522 }
523
524 if (work_done >= budget)
525 goto out;
526 else
527 new_budget = budget - work_done;
528 }
529 }
530
531 napi_complete(napi);
532 if (qdio_start_irq(card->data.ccwdev, 0))
533 napi_schedule(&card->napi);
534out:
535 if (card->options.performance_stats)
536 card->perf_stats.inbound_time += qeth_get_micros() -
537 card->perf_stats.inbound_start_time;
538 return work_done;
465} 539}
466 540
467static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, 541static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
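
qeth_l2_poll() above walks the inbound ring by keeping b_index, b_count and the current element/offset in card->rx, advancing with modular arithmetic. The wrap-around in isolation (QDIO_MAX_BUFFERS_PER_Q is 128 per asm/qdio.h; it is hard-coded here only so the demo compiles standalone):

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

int main(void)
{
        int b_index = 126;      /* current buffer slot */
        int b_count = 4;        /* slots the hardware just handed us */

        while (b_count--) {
                printf("process buffer %d\n", b_index);
                b_index = (b_index + 1) % QDIO_MAX_BUFFERS_PER_Q; /* 127 -> 0 */
        }
        return 0;
}
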
@@ -755,49 +829,10 @@ tx_drop:
755 return NETDEV_TX_OK; 829 return NETDEV_TX_OK;
756} 830}
757 831
758static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
759 unsigned int qdio_err, unsigned int queue,
760 int first_element, int count, unsigned long card_ptr)
761{
762 struct net_device *net_dev;
763 struct qeth_card *card;
764 struct qeth_qdio_buffer *buffer;
765 int index;
766 int i;
767
768 card = (struct qeth_card *) card_ptr;
769 net_dev = card->dev;
770 if (card->options.performance_stats) {
771 card->perf_stats.inbound_cnt++;
772 card->perf_stats.inbound_start_time = qeth_get_micros();
773 }
774 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
775 QETH_CARD_TEXT(card, 1, "qdinchk");
776 QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
777 count);
778 QETH_CARD_TEXT_(card, 1, "%04X", queue);
779 qeth_schedule_recovery(card);
780 return;
781 }
782 for (i = first_element; i < (first_element + count); ++i) {
783 index = i % QDIO_MAX_BUFFERS_PER_Q;
784 buffer = &card->qdio.in_q->bufs[index];
785 if (!(qdio_err &&
786 qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
787 "qinerr")))
788 qeth_l2_process_inbound_buffer(card, buffer, index);
789 /* clear buffer and give back to hardware */
790 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
791 qeth_queue_input_buffer(card, index);
792 }
793 if (card->options.performance_stats)
794 card->perf_stats.inbound_time += qeth_get_micros() -
795 card->perf_stats.inbound_start_time;
796}
797
798static int qeth_l2_open(struct net_device *dev) 832static int qeth_l2_open(struct net_device *dev)
799{ 833{
800 struct qeth_card *card = dev->ml_priv; 834 struct qeth_card *card = dev->ml_priv;
835 int rc = 0;
801 836
802 QETH_CARD_TEXT(card, 4, "qethopen"); 837 QETH_CARD_TEXT(card, 4, "qethopen");
803 if (card->state != CARD_STATE_SOFTSETUP) 838 if (card->state != CARD_STATE_SOFTSETUP)
@@ -814,18 +849,24 @@ static int qeth_l2_open(struct net_device *dev)
814 849
815 if (!card->lan_online && netif_carrier_ok(dev)) 850 if (!card->lan_online && netif_carrier_ok(dev))
816 netif_carrier_off(dev); 851 netif_carrier_off(dev);
817 return 0; 852 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
853 napi_enable(&card->napi);
854 napi_schedule(&card->napi);
855 } else
856 rc = -EIO;
857 return rc;
818} 858}
819 859
820
821static int qeth_l2_stop(struct net_device *dev) 860static int qeth_l2_stop(struct net_device *dev)
822{ 861{
823 struct qeth_card *card = dev->ml_priv; 862 struct qeth_card *card = dev->ml_priv;
824 863
825 QETH_CARD_TEXT(card, 4, "qethstop"); 864 QETH_CARD_TEXT(card, 4, "qethstop");
826 netif_tx_disable(dev); 865 netif_tx_disable(dev);
827 if (card->state == CARD_STATE_UP) 866 if (card->state == CARD_STATE_UP) {
828 card->state = CARD_STATE_SOFTSETUP; 867 card->state = CARD_STATE_SOFTSETUP;
868 napi_disable(&card->napi);
869 }
829 return 0; 870 return 0;
830} 871}
831 872
@@ -836,8 +877,9 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
836 INIT_LIST_HEAD(&card->vid_list); 877 INIT_LIST_HEAD(&card->vid_list);
837 INIT_LIST_HEAD(&card->mc_list); 878 INIT_LIST_HEAD(&card->mc_list);
838 card->options.layer2 = 1; 879 card->options.layer2 = 1;
880 card->discipline.start_poll = qeth_qdio_start_poll;
839 card->discipline.input_handler = (qdio_handler_t *) 881 card->discipline.input_handler = (qdio_handler_t *)
840 qeth_l2_qdio_input_handler; 882 qeth_qdio_input_handler;
841 card->discipline.output_handler = (qdio_handler_t *) 883 card->discipline.output_handler = (qdio_handler_t *)
842 qeth_qdio_output_handler; 884 qeth_qdio_output_handler;
843 card->discipline.recover = qeth_l2_recover; 885 card->discipline.recover = qeth_l2_recover;
@@ -923,6 +965,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
923 card->info.broadcast_capable = 1; 965 card->info.broadcast_capable = 1;
924 qeth_l2_request_initial_mac(card); 966 qeth_l2_request_initial_mac(card);
925 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 967 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
968 netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
926 return register_netdev(card->dev); 969 return register_netdev(card->dev);
927} 970}
928 971
@@ -955,6 +998,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
955 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 998 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
956 999
957 card->state = CARD_STATE_HARDSETUP; 1000 card->state = CARD_STATE_HARDSETUP;
1001 memset(&card->rx, 0, sizeof(struct qeth_rx));
958 qeth_print_status_message(card); 1002 qeth_print_status_message(card);
959 1003
960 /* softsetup */ 1004 /* softsetup */
@@ -1086,9 +1130,6 @@ static int qeth_l2_recover(void *ptr)
1086 card->use_hard_stop = 1; 1130 card->use_hard_stop = 1;
1087 __qeth_l2_set_offline(card->gdev, 1); 1131 __qeth_l2_set_offline(card->gdev, 1);
1088 rc = __qeth_l2_set_online(card->gdev, 1); 1132 rc = __qeth_l2_set_online(card->gdev, 1);
1089 /* don't run another scheduled recovery */
1090 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1091 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
1092 if (!rc) 1133 if (!rc)
1093 dev_info(&card->gdev->dev, 1134 dev_info(&card->gdev->dev,
1094 "Device successfully recovered!\n"); 1135 "Device successfully recovered!\n");
@@ -1099,6 +1140,8 @@ static int qeth_l2_recover(void *ptr)
1099 dev_warn(&card->gdev->dev, "The qeth device driver " 1140 dev_warn(&card->gdev->dev, "The qeth device driver "
1100 "failed to recover an error on the device\n"); 1141 "failed to recover an error on the device\n");
1101 } 1142 }
1143 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1144 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
1102 return 0; 1145 return 0;
1103} 1146}
1104 1147
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e22ae248f613..5b79f573bd93 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -103,12 +103,7 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
103 103
104void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) 104void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
105{ 105{
106 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x" 106 sprintf(buf, "%pI6", addr);
107 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
108 addr[0], addr[1], addr[2], addr[3],
109 addr[4], addr[5], addr[6], addr[7],
110 addr[8], addr[9], addr[10], addr[11],
111 addr[12], addr[13], addr[14], addr[15]);
112} 107}
113 108
114int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) 109int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
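
The hunk above replaces the open-coded formatter with the kernel's %pI6 printf extension, which emits all eight 16-bit groups uncompressed — the same output the deleted sprintf produced. A userspace sketch contrasting that format with inet_ntop()'s RFC 5952 compressed form (the address bytes are invented for the demo):

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
        unsigned char addr[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 };
        char full[40], compact[INET6_ADDRSTRLEN];
        int i, n = 0;

        /* what %pI6 emits: eight colon-separated groups, no "::" folding */
        for (i = 0; i < 16; i += 2)
                n += sprintf(full + n, "%02x%02x%s", addr[i], addr[i + 1],
                             i < 14 ? ":" : "");
        printf("%s\n", full);       /* 2001:0db8:0000:0000:0000:0000:0000:0001 */

        /* userspace equivalent with RFC 5952 compression */
        inet_ntop(AF_INET6, addr, compact, sizeof(compact));
        printf("%s\n", compact);    /* 2001:db8::1 */
        return 0;
}
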
@@ -2112,51 +2107,44 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
2112 return vlan_id; 2107 return vlan_id;
2113} 2108}
2114 2109
2115static void qeth_l3_process_inbound_buffer(struct qeth_card *card, 2110static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
2116 struct qeth_qdio_buffer *buf, int index) 2111 int budget, int *done)
2117{ 2112{
2118 struct qdio_buffer_element *element; 2113 int work_done = 0;
2119 struct sk_buff *skb; 2114 struct sk_buff *skb;
2120 struct qeth_hdr *hdr; 2115 struct qeth_hdr *hdr;
2121 int offset;
2122 __u16 vlan_tag = 0; 2116 __u16 vlan_tag = 0;
2123 unsigned int len; 2117 unsigned int len;
2124 /* get first element of current buffer */
2125 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2126 offset = 0;
2127 if (card->options.performance_stats)
2128 card->perf_stats.bufs_rec++;
2129 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
2130 &offset, &hdr))) {
2131 skb->dev = card->dev;
2132 /* is device UP ? */
2133 if (!(card->dev->flags & IFF_UP)) {
2134 dev_kfree_skb_any(skb);
2135 continue;
2136 }
2137 2118
2119 *done = 0;
2120 BUG_ON(!budget);
2121 while (budget) {
2122 skb = qeth_core_get_next_skb(card,
2123 card->qdio.in_q->bufs[card->rx.b_index].buffer,
2124 &card->rx.b_element, &card->rx.e_offset, &hdr);
2125 if (!skb) {
2126 *done = 1;
2127 break;
2128 }
2129 skb->dev = card->dev;
2138 switch (hdr->hdr.l3.id) { 2130 switch (hdr->hdr.l3.id) {
2139 case QETH_HEADER_TYPE_LAYER3: 2131 case QETH_HEADER_TYPE_LAYER3:
2140 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); 2132 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
2141 len = skb->len; 2133 len = skb->len;
2142 if (vlan_tag && !card->options.sniffer) 2134 if (vlan_tag && !card->options.sniffer)
2143 if (card->vlangrp) 2135 if (card->vlangrp)
2144 vlan_hwaccel_rx(skb, card->vlangrp, 2136 vlan_gro_receive(&card->napi,
2145 vlan_tag); 2137 card->vlangrp, vlan_tag, skb);
2146 else { 2138 else {
2147 dev_kfree_skb_any(skb); 2139 dev_kfree_skb_any(skb);
2148 continue; 2140 continue;
2149 } 2141 }
2150 else 2142 else
2151 netif_rx(skb); 2143 napi_gro_receive(&card->napi, skb);
2152 break; 2144 break;
2153 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ 2145 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
2154 skb->pkt_type = PACKET_HOST; 2146 skb->pkt_type = PACKET_HOST;
2155 skb->protocol = eth_type_trans(skb, skb->dev); 2147 skb->protocol = eth_type_trans(skb, skb->dev);
2156 if (card->options.checksum_type == NO_CHECKSUMMING)
2157 skb->ip_summed = CHECKSUM_UNNECESSARY;
2158 else
2159 skb->ip_summed = CHECKSUM_NONE;
2160 len = skb->len; 2148 len = skb->len;
2161 netif_receive_skb(skb); 2149 netif_receive_skb(skb);
2162 break; 2150 break;
@@ -2166,10 +2154,87 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2166 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 2154 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
2167 continue; 2155 continue;
2168 } 2156 }
2169 2157 work_done++;
2158 budget--;
2170 card->stats.rx_packets++; 2159 card->stats.rx_packets++;
2171 card->stats.rx_bytes += len; 2160 card->stats.rx_bytes += len;
2172 } 2161 }
2162 return work_done;
2163}
2164
2165static int qeth_l3_poll(struct napi_struct *napi, int budget)
2166{
2167 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
2168 int work_done = 0;
2169 struct qeth_qdio_buffer *buffer;
2170 int done;
2171 int new_budget = budget;
2172
2173 if (card->options.performance_stats) {
2174 card->perf_stats.inbound_cnt++;
2175 card->perf_stats.inbound_start_time = qeth_get_micros();
2176 }
2177
2178 while (1) {
2179 if (!card->rx.b_count) {
2180 card->rx.qdio_err = 0;
2181 card->rx.b_count = qdio_get_next_buffers(
2182 card->data.ccwdev, 0, &card->rx.b_index,
2183 &card->rx.qdio_err);
2184 if (card->rx.b_count <= 0) {
2185 card->rx.b_count = 0;
2186 break;
2187 }
2188 card->rx.b_element =
2189 &card->qdio.in_q->bufs[card->rx.b_index]
2190 .buffer->element[0];
2191 card->rx.e_offset = 0;
2192 }
2193
2194 while (card->rx.b_count) {
2195 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
2196 if (!(card->rx.qdio_err &&
2197 qeth_check_qdio_errors(card, buffer->buffer,
2198 card->rx.qdio_err, "qinerr")))
2199 work_done += qeth_l3_process_inbound_buffer(
2200 card, new_budget, &done);
2201 else
2202 done = 1;
2203
2204 if (done) {
2205 if (card->options.performance_stats)
2206 card->perf_stats.bufs_rec++;
2207 qeth_put_buffer_pool_entry(card,
2208 buffer->pool_entry);
2209 qeth_queue_input_buffer(card, card->rx.b_index);
2210 card->rx.b_count--;
2211 if (card->rx.b_count) {
2212 card->rx.b_index =
2213 (card->rx.b_index + 1) %
2214 QDIO_MAX_BUFFERS_PER_Q;
2215 card->rx.b_element =
2216 &card->qdio.in_q
2217 ->bufs[card->rx.b_index]
2218 .buffer->element[0];
2219 card->rx.e_offset = 0;
2220 }
2221 }
2222
2223 if (work_done >= budget)
2224 goto out;
2225 else
2226 new_budget = budget - work_done;
2227 }
2228 }
2229
2230 napi_complete(napi);
2231 if (qdio_start_irq(card->data.ccwdev, 0))
2232 napi_schedule(&card->napi);
2233out:
2234 if (card->options.performance_stats)
2235 card->perf_stats.inbound_time += qeth_get_micros() -
2236 card->perf_stats.inbound_start_time;
2237 return work_done;
2173} 2238}
2174 2239
2175static int qeth_l3_verify_vlan_dev(struct net_device *dev, 2240static int qeth_l3_verify_vlan_dev(struct net_device *dev,
@@ -3103,6 +3168,7 @@ tx_drop:
3103static int qeth_l3_open(struct net_device *dev) 3168static int qeth_l3_open(struct net_device *dev)
3104{ 3169{
3105 struct qeth_card *card = dev->ml_priv; 3170 struct qeth_card *card = dev->ml_priv;
3171 int rc = 0;
3106 3172
3107 QETH_CARD_TEXT(card, 4, "qethopen"); 3173 QETH_CARD_TEXT(card, 4, "qethopen");
3108 if (card->state != CARD_STATE_SOFTSETUP) 3174 if (card->state != CARD_STATE_SOFTSETUP)
@@ -3113,7 +3179,12 @@ static int qeth_l3_open(struct net_device *dev)
3113 3179
3114 if (!card->lan_online && netif_carrier_ok(dev)) 3180 if (!card->lan_online && netif_carrier_ok(dev))
3115 netif_carrier_off(dev); 3181 netif_carrier_off(dev);
3116 return 0; 3182 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
3183 napi_enable(&card->napi);
3184 napi_schedule(&card->napi);
3185 } else
3186 rc = -EIO;
3187 return rc;
3117} 3188}
3118 3189
3119static int qeth_l3_stop(struct net_device *dev) 3190static int qeth_l3_stop(struct net_device *dev)
@@ -3122,8 +3193,10 @@ static int qeth_l3_stop(struct net_device *dev)
3122 3193
3123 QETH_CARD_TEXT(card, 4, "qethstop"); 3194 QETH_CARD_TEXT(card, 4, "qethstop");
3124 netif_tx_disable(dev); 3195 netif_tx_disable(dev);
3125 if (card->state == CARD_STATE_UP) 3196 if (card->state == CARD_STATE_UP) {
3126 card->state = CARD_STATE_SOFTSETUP; 3197 card->state = CARD_STATE_SOFTSETUP;
3198 napi_disable(&card->napi);
3199 }
3127 return 0; 3200 return 0;
3128} 3201}
3129 3202
@@ -3293,57 +3366,19 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3293 card->dev->gso_max_size = 15 * PAGE_SIZE; 3366 card->dev->gso_max_size = 15 * PAGE_SIZE;
3294 3367
3295 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3368 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3369 netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
3296 return register_netdev(card->dev); 3370 return register_netdev(card->dev);
3297} 3371}
3298 3372
3299static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
3300 unsigned int qdio_err, unsigned int queue, int first_element,
3301 int count, unsigned long card_ptr)
3302{
3303 struct net_device *net_dev;
3304 struct qeth_card *card;
3305 struct qeth_qdio_buffer *buffer;
3306 int index;
3307 int i;
3308
3309 card = (struct qeth_card *) card_ptr;
3310 net_dev = card->dev;
3311 if (card->options.performance_stats) {
3312 card->perf_stats.inbound_cnt++;
3313 card->perf_stats.inbound_start_time = qeth_get_micros();
3314 }
3315 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
3316 QETH_CARD_TEXT(card, 1, "qdinchk");
3317 QETH_CARD_TEXT_(card, 1, "%04X%04X",
3318 first_element, count);
3319 QETH_CARD_TEXT_(card, 1, "%04X", queue);
3320 qeth_schedule_recovery(card);
3321 return;
3322 }
3323 for (i = first_element; i < (first_element + count); ++i) {
3324 index = i % QDIO_MAX_BUFFERS_PER_Q;
3325 buffer = &card->qdio.in_q->bufs[index];
3326 if (!(qdio_err &&
3327 qeth_check_qdio_errors(card, buffer->buffer,
3328 qdio_err, "qinerr")))
3329 qeth_l3_process_inbound_buffer(card, buffer, index);
3330 /* clear buffer and give back to hardware */
3331 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
3332 qeth_queue_input_buffer(card, index);
3333 }
3334 if (card->options.performance_stats)
3335 card->perf_stats.inbound_time += qeth_get_micros() -
3336 card->perf_stats.inbound_start_time;
3337}
3338
3339static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3373static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3340{ 3374{
3341 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3375 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3342 3376
3343 qeth_l3_create_device_attributes(&gdev->dev); 3377 qeth_l3_create_device_attributes(&gdev->dev);
3344 card->options.layer2 = 0; 3378 card->options.layer2 = 0;
3379 card->discipline.start_poll = qeth_qdio_start_poll;
3345 card->discipline.input_handler = (qdio_handler_t *) 3380 card->discipline.input_handler = (qdio_handler_t *)
3346 qeth_l3_qdio_input_handler; 3381 qeth_qdio_input_handler;
3347 card->discipline.output_handler = (qdio_handler_t *) 3382 card->discipline.output_handler = (qdio_handler_t *)
3348 qeth_qdio_output_handler; 3383 qeth_qdio_output_handler;
3349 card->discipline.recover = qeth_l3_recover; 3384 card->discipline.recover = qeth_l3_recover;
@@ -3402,6 +3437,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3402 } 3437 }
3403 3438
3404 card->state = CARD_STATE_HARDSETUP; 3439 card->state = CARD_STATE_HARDSETUP;
3440 memset(&card->rx, 0, sizeof(struct qeth_rx));
3405 qeth_print_status_message(card); 3441 qeth_print_status_message(card);
3406 3442
3407 /* softsetup */ 3443 /* softsetup */
@@ -3538,9 +3574,6 @@ static int qeth_l3_recover(void *ptr)
3538 card->use_hard_stop = 1; 3574 card->use_hard_stop = 1;
3539 __qeth_l3_set_offline(card->gdev, 1); 3575 __qeth_l3_set_offline(card->gdev, 1);
3540 rc = __qeth_l3_set_online(card->gdev, 1); 3576 rc = __qeth_l3_set_online(card->gdev, 1);
3541 /* don't run another scheduled recovery */
3542 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3543 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3544 if (!rc) 3577 if (!rc)
3545 dev_info(&card->gdev->dev, 3578 dev_info(&card->gdev->dev,
3546 "Device successfully recovered!\n"); 3579 "Device successfully recovered!\n");
@@ -3551,6 +3584,8 @@ static int qeth_l3_recover(void *ptr)
3551 dev_warn(&card->gdev->dev, "The qeth device driver " 3584 dev_warn(&card->gdev->dev, "The qeth device driver "
3552 "failed to recover an error on the device\n"); 3585 "failed to recover an error on the device\n");
3553 } 3586 }
3587 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3588 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3554 return 0; 3589 return 0;
3555} 3590}
3556 3591
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b2635759721c..da54a28a1b87 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -277,16 +277,12 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
277static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, 277static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
278 struct zfcp_qdio *qdio) 278 struct zfcp_qdio *qdio)
279{ 279{
280 280 memset(id, 0, sizeof(*id));
281 id->cdev = qdio->adapter->ccw_device; 281 id->cdev = qdio->adapter->ccw_device;
282 id->q_format = QDIO_ZFCP_QFMT; 282 id->q_format = QDIO_ZFCP_QFMT;
283 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); 283 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
284 ASCEBC(id->adapter_name, 8); 284 ASCEBC(id->adapter_name, 8);
285 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; 285 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
286 id->qib_param_field_format = 0;
287 id->qib_param_field = NULL;
288 id->input_slib_elements = NULL;
289 id->output_slib_elements = NULL;
290 id->no_input_qs = 1; 286 id->no_input_qs = 1;
291 id->no_output_qs = 1; 287 id->no_output_qs = 1;
292 id->input_handler = zfcp_qdio_int_resp; 288 id->input_handler = zfcp_qdio_int_resp;
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 5af23cc5ea9f..f383cb42b1d7 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1344,8 +1344,24 @@ static struct usbatm_driver cxacru_driver = {
1344 .tx_padding = 11, 1344 .tx_padding = 11,
1345}; 1345};
1346 1346
1347static int cxacru_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) 1347static int cxacru_usb_probe(struct usb_interface *intf,
1348 const struct usb_device_id *id)
1348{ 1349{
1350 struct usb_device *usb_dev = interface_to_usbdev(intf);
1351 char buf[15];
1352
1353 /* Avoid ADSL routers (cx82310_eth).
1354 * Abort if bDeviceClass is 0xff and iProduct is "USB NET CARD".
1355 */
1356 if (usb_dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC
1357 && usb_string(usb_dev, usb_dev->descriptor.iProduct,
1358 buf, sizeof(buf)) > 0) {
1359 if (!strcmp(buf, "USB NET CARD")) {
1360 dev_info(&intf->dev, "ignoring cx82310_eth device\n");
1361 return -ENODEV;
1362 }
1363 }
1364
1349 return usbatm_usb_probe(intf, id, &cxacru_driver); 1365 return usbatm_usb_probe(intf, id, &cxacru_driver);
1350} 1366}
1351 1367
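
The probe hook above declines devices whose vendor-specific class and iProduct string identify them as cx82310_eth Ethernet devices rather than cxacru modems. For illustration only, the same check expressed against libusb-1.0 from userspace (this fragment is not part of the driver):

#include <libusb-1.0/libusb.h>
#include <string.h>

static int looks_like_cx82310(libusb_device_handle *h,
                              const struct libusb_device_descriptor *desc)
{
        unsigned char buf[15];  /* same 15-byte window the driver uses */

        if (desc->bDeviceClass != 0xff || !desc->iProduct)
                return 0;
        if (libusb_get_string_descriptor_ascii(h, desc->iProduct,
                                               buf, sizeof(buf)) <= 0)
                return 0;
        return !strcmp((char *)buf, "USB NET CARD");
}
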
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 626b629429ff..c7fbf298ad68 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -302,6 +302,7 @@ header-y += quota.h
302header-y += radeonfb.h 302header-y += radeonfb.h
303header-y += random.h 303header-y += random.h
304header-y += raw.h 304header-y += raw.h
305header-y += rds.h
305header-y += reboot.h 306header-y += reboot.h
306header-y += reiserfs_fs.h 307header-y += reiserfs_fs.h
307header-y += reiserfs_xattr.h 308header-y += reiserfs_xattr.h
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2308fbb4523a..fb6aa6070921 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -237,13 +237,29 @@ static inline bool is_etherdev_addr(const struct net_device *dev,
237 * entry points. 237 * entry points.
238 */ 238 */
239 239
240static inline int compare_ether_header(const void *a, const void *b) 240static inline unsigned long compare_ether_header(const void *a, const void *b)
241{ 241{
242#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
243 unsigned long fold;
244
245 /*
246 * We want to compare 14 bytes:
247 * [a0 ... a13] ^ [b0 ... b13]
 248 * Use two long XORs, ORed together, with an overlap of two bytes.
249 * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] |
250 * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13]
 251 * This means the [a6 a7] ^ [b6 b7] part is done twice.
252 */
253 fold = *(unsigned long *)a ^ *(unsigned long *)b;
254 fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
255 return fold;
256#else
242 u32 *a32 = (u32 *)((u8 *)a + 2); 257 u32 *a32 = (u32 *)((u8 *)a + 2);
243 u32 *b32 = (u32 *)((u8 *)b + 2); 258 u32 *b32 = (u32 *)((u8 *)b + 2);
244 259
245 return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | 260 return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
246 (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); 261 (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
262#endif
247} 263}
248 264
249#endif /* _LINUX_ETHERDEVICE_H */ 265#endif /* _LINUX_ETHERDEVICE_H */
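
The new 64-bit branch covers the 14-byte Ethernet header with two unaligned 8-byte loads that overlap on bytes 6..7; the return value is zero iff the headers match, as before. A self-contained check that the fold agrees with memcmp() — memcpy() loads stand in for the raw dereferences the kernel may do under CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint64_t load64(const void *p)
{
        uint64_t v;

        memcpy(&v, p, 8);       /* portable unaligned load */
        return v;
}

static uint64_t cmp_eth_hdr(const void *a, const void *b)
{
        /* windows [0..7] and [6..13] cover all 14 bytes; 6..7 twice */
        return (load64(a) ^ load64(b)) |
               (load64((const char *)a + 6) ^ load64((const char *)b + 6));
}

int main(void)
{
        unsigned char h1[14] = "\x00\x11\x22\x33\x44\x55\xaa\xbb\xcc\xdd\xee\xff\x08";
        unsigned char h2[14];

        memcpy(h2, h1, sizeof(h1));
        printf("equal: %d\n", cmp_eth_hdr(h1, h2) == 0);   /* 1 */
        h2[13] ^= 1;                                       /* corrupt last byte */
        printf("equal: %d\n", cmp_eth_hdr(h1, h2) == 0);   /* 0 */
        printf("memcmp agrees: %d\n",
               (cmp_eth_hdr(h1, h2) != 0) == (memcmp(h1, h2, 14) != 0));
        return 0;
}
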
diff --git a/include/linux/if.h b/include/linux/if.h
index 53558ec59e1b..123959927745 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -75,6 +75,8 @@
75#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */ 75#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
76#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */ 76#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */
77#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */ 77#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */
78#define IFF_OVS_DATAPATH 0x10000 /* device used as Open vSwitch
79 * datapath port */
78 80
79#define IF_GET_IFACE 0x0001 /* for querying only */ 81#define IF_GET_IFACE 0x0001 /* for querying only */
80#define IF_GET_PROTO 0x0002 82#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index bed7a4682b90..f9c3df03db0f 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -137,8 +137,6 @@ extern struct ctl_table ether_table[];
137 137
138extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); 138extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
139 139
140#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
141
142#endif 140#endif
143 141
144#endif /* _LINUX_IF_ETHER_H */ 142#endif /* _LINUX_IF_ETHER_H */
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 35280b302290..8a2fd66a8b5f 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -40,6 +40,12 @@ struct macvlan_rx_stats {
40 unsigned long rx_errors; 40 unsigned long rx_errors;
41}; 41};
42 42
43/*
 44 * Maximum number of times a macvtap device can be opened. This can be used
 45 * to configure the number of receive queues, e.g. for multiqueue virtio.
46 */
47#define MAX_MACVTAP_QUEUES (NR_CPUS < 16 ? NR_CPUS : 16)
48
43struct macvlan_dev { 49struct macvlan_dev {
44 struct net_device *dev; 50 struct net_device *dev;
45 struct list_head list; 51 struct list_head list;
@@ -50,7 +56,8 @@ struct macvlan_dev {
50 enum macvlan_mode mode; 56 enum macvlan_mode mode;
51 int (*receive)(struct sk_buff *skb); 57 int (*receive)(struct sk_buff *skb);
52 int (*forward)(struct net_device *dev, struct sk_buff *skb); 58 int (*forward)(struct net_device *dev, struct sk_buff *skb);
53 struct macvtap_queue *tap; 59 struct macvtap_queue *taps[MAX_MACVTAP_QUEUES];
60 int numvtaps;
54}; 61};
55 62
56static inline void macvlan_count_rx(const struct macvlan_dev *vlan, 63static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
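
The taps[] array plus numvtaps turns the single macvtap queue into a small fan-out table, sized by MAX_MACVTAP_QUEUES above. A hedged sketch of the selection step — the kernel derives the flow hash from the skb itself; flow_hash here is simply an assumed input:

static unsigned int pick_queue(unsigned int flow_hash, unsigned int numvtaps)
{
        /* keep packets of one flow on one queue; guard against zero taps */
        return numvtaps ? flow_hash % numvtaps : 0;
}
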
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 27741e05446f..29bcd55851eb 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -40,25 +40,35 @@
40 * PPPoE addressing definition 40 * PPPoE addressing definition
41 */ 41 */
42typedef __be16 sid_t; 42typedef __be16 sid_t;
43struct pppoe_addr{ 43struct pppoe_addr {
44 sid_t sid; /* Session identifier */ 44 sid_t sid; /* Session identifier */
45 unsigned char remote[ETH_ALEN]; /* Remote address */ 45 unsigned char remote[ETH_ALEN]; /* Remote address */
46 char dev[IFNAMSIZ]; /* Local device to use */ 46 char dev[IFNAMSIZ]; /* Local device to use */
47}; 47};
48 48
49/************************************************************************ 49/************************************************************************
50 * Protocols supported by AF_PPPOX 50 * PPTP addressing definition
51 */ 51 */
52struct pptp_addr {
53 __be16 call_id;
54 struct in_addr sin_addr;
55};
56
57/************************************************************************
58 * Protocols supported by AF_PPPOX
59 */
52#define PX_PROTO_OE 0 /* Currently just PPPoE */ 60#define PX_PROTO_OE 0 /* Currently just PPPoE */
53#define PX_PROTO_OL2TP 1 /* Now L2TP also */ 61#define PX_PROTO_OL2TP 1 /* Now L2TP also */
54#define PX_MAX_PROTO 2 62#define PX_PROTO_PPTP 2
55 63#define PX_MAX_PROTO 3
56struct sockaddr_pppox { 64
57 sa_family_t sa_family; /* address family, AF_PPPOX */ 65struct sockaddr_pppox {
58 unsigned int sa_protocol; /* protocol identifier */ 66 sa_family_t sa_family; /* address family, AF_PPPOX */
59 union{ 67 unsigned int sa_protocol; /* protocol identifier */
60 struct pppoe_addr pppoe; 68 union {
61 }sa_addr; 69 struct pppoe_addr pppoe;
70 struct pptp_addr pptp;
71 } sa_addr;
62} __attribute__((packed)); 72} __attribute__((packed));
63 73
64/* The use of the above union isn't viable because the size of this 74/* The use of the above union isn't viable because the size of this
@@ -150,15 +160,23 @@ struct pppoe_opt {
150 relayed to (PPPoE relaying) */ 160 relayed to (PPPoE relaying) */
151}; 161};
152 162
163struct pptp_opt {
164 struct pptp_addr src_addr;
165 struct pptp_addr dst_addr;
166 u32 ack_sent, ack_recv;
167 u32 seq_sent, seq_recv;
168 int ppp_flags;
169};
153#include <net/sock.h> 170#include <net/sock.h>
154 171
155struct pppox_sock { 172struct pppox_sock {
156 /* struct sock must be the first member of pppox_sock */ 173 /* struct sock must be the first member of pppox_sock */
157 struct sock sk; 174 struct sock sk;
158 struct ppp_channel chan; 175 struct ppp_channel chan;
159 struct pppox_sock *next; /* for hash table */ 176 struct pppox_sock *next; /* for hash table */
160 union { 177 union {
161 struct pppoe_opt pppoe; 178 struct pppoe_opt pppoe;
179 struct pptp_opt pptp;
162 } proto; 180 } proto;
163 __be16 num; 181 __be16 num;
164}; 182};
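
With PX_PROTO_PPTP, struct pptp_addr and the union members above in place, a PPTP client (e.g. a pppd plugin) can open the channel roughly as follows. This is an illustrative sketch of the new ABI, not a complete client: the matching bind() with the local call id and the GRE/ppp plumbing are elided.

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/if_pppox.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>

static int pptp_open(uint16_t peer_call_id, uint32_t peer_ip_be)
{
        struct sockaddr_pppox sa;
        int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP);

        if (fd < 0)
                return -1;
        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_PPPOX;
        sa.sa_protocol = PX_PROTO_PPTP;
        sa.sa_addr.pptp.call_id = htons(peer_call_id);
        sa.sa_addr.pptp.sin_addr.s_addr = peer_ip_be;
        if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* channel fd, usable with the ppp channel ioctls */
}
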
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 3d870fda8c4f..a52320751bfc 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -119,7 +119,7 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
119 119
120extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 120extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
121 u16 vlan_tci, int polling); 121 u16 vlan_tci, int polling);
122extern int vlan_hwaccel_do_receive(struct sk_buff *skb); 122extern void vlan_hwaccel_do_receive(struct sk_buff *skb);
123extern gro_result_t 123extern gro_result_t
124vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, 124vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
125 unsigned int vlan_tci, struct sk_buff *skb); 125 unsigned int vlan_tci, struct sk_buff *skb);
@@ -147,9 +147,8 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
147 return NET_XMIT_SUCCESS; 147 return NET_XMIT_SUCCESS;
148} 148}
149 149
150static inline int vlan_hwaccel_do_receive(struct sk_buff *skb) 150static inline void vlan_hwaccel_do_receive(struct sk_buff *skb)
151{ 151{
152 return 0;
153} 152}
154 153
155static inline gro_result_t 154static inline gro_result_t
diff --git a/include/linux/in.h b/include/linux/in.h
index 41d88a4689af..beeb6dee2b49 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -250,6 +250,25 @@ struct sockaddr_in {
250 250
251#ifdef __KERNEL__ 251#ifdef __KERNEL__
252 252
253#include <linux/errno.h>
254
255static inline int proto_ports_offset(int proto)
256{
257 switch (proto) {
258 case IPPROTO_TCP:
259 case IPPROTO_UDP:
260 case IPPROTO_DCCP:
261 case IPPROTO_ESP: /* SPI */
262 case IPPROTO_SCTP:
263 case IPPROTO_UDPLITE:
264 return 0;
265 case IPPROTO_AH: /* SPI */
266 return 4;
267 default:
268 return -EINVAL;
269 }
270}
271
253static inline bool ipv4_is_loopback(__be32 addr) 272static inline bool ipv4_is_loopback(__be32 addr)
254{ 273{
255 return (addr & htonl(0xff000000)) == htonl(0x7f000000); 274 return (addr & htonl(0xff000000)) == htonl(0x7f000000);
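
proto_ports_offset() exists so flow-dissection code can find the 16-bit port pair (or the SPI) at a protocol-dependent offset past the network header. A userspace sketch of that lookup on a raw IPv4 packet — the helper is copied inline with only the TCP/UDP/AH cases to keep the demo short:

#include <stdint.h>
#include <stddef.h>

/* local copy of the helper added above, trimmed to three cases */
static int proto_ports_offset(int proto)
{
        switch (proto) {
        case 6:         /* TCP */
        case 17:        /* UDP */
                return 0;
        case 51:        /* AH: the SPI sits 4 bytes in */
                return 4;
        default:
                return -1;
        }
}

static int extract_ports(const uint8_t *pkt, size_t len,
                         uint16_t *sport, uint16_t *dport)
{
        size_t ihl = (pkt[0] & 0x0f) * 4;       /* IPv4 header length */
        int off = proto_ports_offset(pkt[9]);   /* protocol byte */

        if (off < 0 || len < ihl + off + 4)
                return -1;
        *sport = (uint16_t)(pkt[ihl + off] << 8 | pkt[ihl + off + 1]);
        *dport = (uint16_t)(pkt[ihl + off + 2] << 8 | pkt[ihl + off + 3]);
        return 0;
}
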
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 0f82293a82ed..78a1b9671752 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -56,6 +56,7 @@ enum {
56 MLX4_CMD_QUERY_HCA = 0xb, 56 MLX4_CMD_QUERY_HCA = 0xb,
57 MLX4_CMD_QUERY_PORT = 0x43, 57 MLX4_CMD_QUERY_PORT = 0x43,
58 MLX4_CMD_SENSE_PORT = 0x4d, 58 MLX4_CMD_SENSE_PORT = 0x4d,
59 MLX4_CMD_HW_HEALTH_CHECK = 0x50,
59 MLX4_CMD_SET_PORT = 0xc, 60 MLX4_CMD_SET_PORT = 0xc,
60 MLX4_CMD_ACCESS_DDR = 0x2e, 61 MLX4_CMD_ACCESS_DDR = 0x2e,
61 MLX4_CMD_MAP_ICM = 0xffa, 62 MLX4_CMD_MAP_ICM = 0xffa,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7a7f9c1e679a..7338654c02b4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -186,6 +186,10 @@ struct mlx4_caps {
186 int eth_mtu_cap[MLX4_MAX_PORTS + 1]; 186 int eth_mtu_cap[MLX4_MAX_PORTS + 1];
187 int gid_table_len[MLX4_MAX_PORTS + 1]; 187 int gid_table_len[MLX4_MAX_PORTS + 1];
188 int pkey_table_len[MLX4_MAX_PORTS + 1]; 188 int pkey_table_len[MLX4_MAX_PORTS + 1];
189 int trans_type[MLX4_MAX_PORTS + 1];
190 int vendor_oui[MLX4_MAX_PORTS + 1];
191 int wavelength[MLX4_MAX_PORTS + 1];
192 u64 trans_code[MLX4_MAX_PORTS + 1];
189 int local_ca_ack_delay; 193 int local_ca_ack_delay;
190 int num_uars; 194 int num_uars;
191 int bf_reg_size; 195 int bf_reg_size;
@@ -229,6 +233,8 @@ struct mlx4_caps {
229 u32 bmme_flags; 233 u32 bmme_flags;
230 u32 reserved_lkey; 234 u32 reserved_lkey;
231 u16 stat_rate_support; 235 u16 stat_rate_support;
236 int udp_rss;
237 int loopback_support;
232 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 238 u8 port_width_cap[MLX4_MAX_PORTS + 1];
233 int max_gso_sz; 239 int max_gso_sz;
234 int reserved_qps_cnt[MLX4_NUM_QP_REGION]; 240 int reserved_qps_cnt[MLX4_NUM_QP_REGION];
@@ -480,5 +486,6 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
480 u32 *lkey, u32 *rkey); 486 u32 *lkey, u32 *rkey);
481int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); 487int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
482int mlx4_SYNC_TPT(struct mlx4_dev *dev); 488int mlx4_SYNC_TPT(struct mlx4_dev *dev);
489int mlx4_test_interrupts(struct mlx4_dev *dev);
483 490
484#endif /* MLX4_DEVICE_H */ 491#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 46c36ffe20ee..af05186d5b36 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -901,7 +901,7 @@ struct net_device {
901 901
902 unsigned int flags; /* interface flags (a la BSD) */ 902 unsigned int flags; /* interface flags (a la BSD) */
903 unsigned short gflags; 903 unsigned short gflags;
904 unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ 904 unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */
905 unsigned short padded; /* How much padding added by alloc_netdev() */ 905 unsigned short padded; /* How much padding added by alloc_netdev() */
906 906
907 unsigned char operstate; /* RFC2863 operstate */ 907 unsigned char operstate; /* RFC2863 operstate */
@@ -953,7 +953,14 @@ struct net_device {
953/* 953/*
954 * Cache line mostly used on receive path (including eth_type_trans()) 954 * Cache line mostly used on receive path (including eth_type_trans())
955 */ 955 */
956 unsigned long last_rx; /* Time of last Rx */ 956 unsigned long last_rx; /* Time of last Rx
957 * This should not be set in
958 * drivers, unless really needed,
 959 * because the network stack
 960 * (bonding) uses it if/when
961 * avoid dirtying this cache line.
962 */
963
957 /* Interface address info used in eth_type_trans() */ 964 /* Interface address info used in eth_type_trans() */
958 unsigned char *dev_addr; /* hw address, (before bcast 965 unsigned char *dev_addr; /* hw address, (before bcast
959 because most packets are 966 because most packets are
@@ -1695,6 +1702,7 @@ extern gro_result_t dev_gro_receive(struct napi_struct *napi,
1695extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); 1702extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
1696extern gro_result_t napi_gro_receive(struct napi_struct *napi, 1703extern gro_result_t napi_gro_receive(struct napi_struct *napi,
1697 struct sk_buff *skb); 1704 struct sk_buff *skb);
1705extern void napi_gro_flush(struct napi_struct *napi);
1698extern void napi_reuse_skb(struct napi_struct *napi, 1706extern void napi_reuse_skb(struct napi_struct *napi,
1699 struct sk_buff *skb); 1707 struct sk_buff *skb);
1700extern struct sk_buff * napi_get_frags(struct napi_struct *napi); 1708extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
@@ -2171,6 +2179,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
2171extern int netdev_class_create_file(struct class_attribute *class_attr); 2179extern int netdev_class_create_file(struct class_attribute *class_attr);
2172extern void netdev_class_remove_file(struct class_attribute *class_attr); 2180extern void netdev_class_remove_file(struct class_attribute *class_attr);
2173 2181
2182extern struct kobj_ns_type_operations net_ns_type_operations;
2183
2174extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); 2184extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
2175 2185
2176extern void linkwatch_run_queue(void); 2186extern void linkwatch_run_queue(void);
@@ -2191,7 +2201,7 @@ static inline int net_gso_ok(int features, int gso_type)
2191static inline int skb_gso_ok(struct sk_buff *skb, int features) 2201static inline int skb_gso_ok(struct sk_buff *skb, int features)
2192{ 2202{
2193 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 2203 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
2194 (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST)); 2204 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
2195} 2205}
2196 2206
2197static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 2207static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2c8701687336..31603e8b5581 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -40,6 +40,43 @@
40 */ 40 */
41 41
42/** 42/**
43 * DOC: Frame transmission/registration support
44 *
45 * Frame transmission and registration support exists to allow userspace
46 * management entities such as wpa_supplicant react to management frames
47 * that are not being handled by the kernel. This includes, for example,
48 * certain classes of action frames that cannot be handled in the kernel
49 * for various reasons.
50 *
51 * Frame registration is done on a per-interface basis and registrations
52 * cannot be removed other than by closing the socket. It is possible to
53 * specify a registration filter to register, for example, only for a
54 * certain type of action frame. In particular with action frames, those
55 * that userspace registers for will not be returned as unhandled by the
 56 * driver, so the registered application has to take responsibility
 57 * for handling them.
58 *
59 * The type of frame that can be registered for is also dependent on the
60 * driver and interface type. The frame types are advertised in wiphy
61 * attributes so applications know what to expect.
62 *
63 * NOTE: When an interface changes type while registrations are active,
64 * these registrations are ignored until the interface type is
65 * changed again. This means that changing the interface type can
66 * lead to a situation that couldn't otherwise be produced, but
67 * any such registrations will be dormant in the sense that they
68 * will not be serviced, i.e. they will not receive any frames.
69 *
 70 * Frame transmission allows userspace to send, for example, the required
 71 * responses to action frames. It is subject to some sanity checking,
 72 * but many frames can be transmitted. When a frame has been transmitted, its
73 * status is indicated to the sending socket.
74 *
75 * For more technical details, see the corresponding command descriptions
76 * below.
77 */
78
79/**
43 * enum nl80211_commands - supported nl80211 commands 80 * enum nl80211_commands - supported nl80211 commands
44 * 81 *
45 * @NL80211_CMD_UNSPEC: unspecified command to catch errors 82 * @NL80211_CMD_UNSPEC: unspecified command to catch errors
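
The DOC block above maps onto only a few userspace calls. A hedged libnl-3 sketch of the registration step it describes — family is the resolved "nl80211" generic-netlink id; allocation failures, message cleanup and ACK handling are elided:

#include <stdint.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int register_for_frames(struct nl_sock *sk, int family, int ifindex,
                               uint16_t frame_type,
                               const void *match, int match_len)
{
        struct nl_msg *msg = nlmsg_alloc();

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NL80211_CMD_REGISTER_FRAME, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
        nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, frame_type);
        nla_put(msg, NL80211_ATTR_FRAME_MATCH, match_len, match);
        return nl_send_auto(sk, msg);   /* matching frames then arrive on sk */
}

As the text notes, the registration lives and dies with the socket: there is no unregister command, and closing sk drops it.
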
@@ -258,7 +295,9 @@
258 * auth and assoc steps. For this, you need to specify the SSID in a 295 * auth and assoc steps. For this, you need to specify the SSID in a
259 * %NL80211_ATTR_SSID attribute, and can optionally specify the association 296 * %NL80211_ATTR_SSID attribute, and can optionally specify the association
260 * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, 297 * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC,
261 * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_CONTROL_PORT. 298 * %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
299 * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
300 * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
262 * It is also sent as an event, with the BSSID and response IEs when the 301 * It is also sent as an event, with the BSSID and response IEs when the
263 * connection is established or failed to be established. This can be 302 * connection is established or failed to be established. This can be
264 * determined by the STATUS_CODE attribute. 303 * determined by the STATUS_CODE attribute.
@@ -301,16 +340,20 @@
301 * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface 340 * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface
302 * and @NL80211_ATTR_TX_RATES the set of allowed rates. 341 * and @NL80211_ATTR_TX_RATES the set of allowed rates.
303 * 342 *
304 * @NL80211_CMD_REGISTER_ACTION: Register for receiving certain action frames 343 * @NL80211_CMD_REGISTER_FRAME: Register for receiving certain mgmt frames
305 * (via @NL80211_CMD_ACTION) for processing in userspace. This command 344 * (via @NL80211_CMD_FRAME) for processing in userspace. This command
306 * requires an interface index and a match attribute containing the first 345 * requires an interface index, a frame type attribute (optional for
307 * few bytes of the frame that should match, e.g. a single byte for only 346 * backward compatibility reasons, if not given assumes action frames)
308 * a category match or four bytes for vendor frames including the OUI. 347 * and a match attribute containing the first few bytes of the frame
309 * The registration cannot be dropped, but is removed automatically 348 * that should match, e.g. a single byte for only a category match or
310 * when the netlink socket is closed. Multiple registrations can be made. 349 * four bytes for vendor frames including the OUI. The registration
311 * @NL80211_CMD_ACTION: Action frame TX request and RX notification. This 350 * cannot be dropped, but is removed automatically when the netlink
312 * command is used both as a request to transmit an Action frame and as an 351 * socket is closed. Multiple registrations can be made.
313 * event indicating reception of an Action frame that was not processed in 352 * @NL80211_CMD_REGISTER_ACTION: Alias for @NL80211_CMD_REGISTER_FRAME for
353 * backward compatibility
354 * @NL80211_CMD_FRAME: Management frame TX request and RX notification. This
355 * command is used both as a request to transmit a management frame and
356 * as an event indicating reception of a frame that was not processed in
314 * kernel code, but is for us (i.e., which may need to be processed in a 357 * kernel code, but is for us (i.e., which may need to be processed in a
315 * user space application). %NL80211_ATTR_FRAME is used to specify the 358 * user space application). %NL80211_ATTR_FRAME is used to specify the
316 * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and 359 * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and
@@ -320,11 +363,14 @@
320 * operational channel). When called, this operation returns a cookie 363 * operational channel). When called, this operation returns a cookie
321 * (%NL80211_ATTR_COOKIE) that will be included with the TX status event 364 * (%NL80211_ATTR_COOKIE) that will be included with the TX status event
322 * pertaining to the TX request. 365 * pertaining to the TX request.
323 * @NL80211_CMD_ACTION_TX_STATUS: Report TX status of an Action frame 366 * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility.
324 * transmitted with %NL80211_CMD_ACTION. %NL80211_ATTR_COOKIE identifies 367 * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame
368 * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies
325 * the TX command and %NL80211_ATTR_FRAME includes the contents of the 369 * the TX command and %NL80211_ATTR_FRAME includes the contents of the
326 * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged 370 * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged
327 * the frame. 371 * the frame.
372 * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for
373 * backward compatibility.
328 * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command 374 * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command
329 * is used to configure connection quality monitoring notification trigger 375 * is used to configure connection quality monitoring notification trigger
330 * levels. 376 * levels.
@@ -429,9 +475,12 @@ enum nl80211_commands {
429 475
430 NL80211_CMD_SET_TX_BITRATE_MASK, 476 NL80211_CMD_SET_TX_BITRATE_MASK,
431 477
432 NL80211_CMD_REGISTER_ACTION, 478 NL80211_CMD_REGISTER_FRAME,
433 NL80211_CMD_ACTION, 479 NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME,
434 NL80211_CMD_ACTION_TX_STATUS, 480 NL80211_CMD_FRAME,
481 NL80211_CMD_ACTION = NL80211_CMD_FRAME,
482 NL80211_CMD_FRAME_TX_STATUS,
483 NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS,
435 484
436 NL80211_CMD_SET_POWER_SAVE, 485 NL80211_CMD_SET_POWER_SAVE,
437 NL80211_CMD_GET_POWER_SAVE, 486 NL80211_CMD_GET_POWER_SAVE,
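
The rename block above keeps the wire protocol stable by aliasing: the new names take over the enum slots, the old names become aliases, and implicit numbering resumes after each alias. The trick in miniature:

enum demo_cmds {
        DEMO_CMD_REGISTER_FRAME = 7,
        DEMO_CMD_REGISTER_ACTION = DEMO_CMD_REGISTER_FRAME, /* legacy name */
        DEMO_CMD_FRAME,                         /* = 8; numbering continues */
        DEMO_CMD_ACTION = DEMO_CMD_FRAME,       /* legacy name, also 8 */
        DEMO_CMD_FRAME_TX_STATUS,               /* = 9 */
};

Old source keeps compiling and old binaries keep working, because the legacy identifiers still encode the same command numbers.
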
@@ -639,6 +688,15 @@ enum nl80211_commands {
639 * request, the driver will assume that the port is unauthorized until 688 * request, the driver will assume that the port is unauthorized until
640 * authorized by user space. Otherwise, port is marked authorized by 689 * authorized by user space. Otherwise, port is marked authorized by
641 * default in station mode. 690 * default in station mode.
691 * @NL80211_ATTR_CONTROL_PORT_ETHERTYPE: A 16-bit value indicating the
692 * ethertype that will be used for key negotiation. It can be
693 * specified with the associate and connect commands. If it is not
694 * specified, the value defaults to 0x888E (PAE, 802.1X). This
695 * attribute is also used as a flag in the wiphy information to
696 * indicate that protocols other than PAE are supported.
697 * @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with
698 * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom
699 * ethertype frames used for key negotiation must not be encrypted.
642 * 700 *
643 * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. 701 * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
644 * We recommend using nested, driver-specific attributes within this. 702 * We recommend using nested, driver-specific attributes within this.
@@ -708,7 +766,16 @@ enum nl80211_commands {
708 * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. 766 * is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
709 * 767 *
710 * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain 768 * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
711 * at least one byte, currently used with @NL80211_CMD_REGISTER_ACTION. 769 * at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
770 * @NL80211_ATTR_FRAME_TYPE: A u16 indicating the frame type/subtype for the
771 * @NL80211_CMD_REGISTER_FRAME command.
772 * @NL80211_ATTR_TX_FRAME_TYPES: wiphy capability attribute, which is a
773 * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing
774 * information about which frame types can be transmitted with
775 * %NL80211_CMD_FRAME.
776 * @NL80211_ATTR_RX_FRAME_TYPES: wiphy capability attribute, which is a
777 * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing
778 * information about which frame types can be registered for RX.
712 * 779 *
713 * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was 780 * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was
714 * acknowledged by the recipient. 781 * acknowledged by the recipient.
@@ -891,6 +958,13 @@ enum nl80211_attrs {
891 NL80211_ATTR_WIPHY_TX_POWER_SETTING, 958 NL80211_ATTR_WIPHY_TX_POWER_SETTING,
892 NL80211_ATTR_WIPHY_TX_POWER_LEVEL, 959 NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
893 960
961 NL80211_ATTR_TX_FRAME_TYPES,
962 NL80211_ATTR_RX_FRAME_TYPES,
963 NL80211_ATTR_FRAME_TYPE,
964
965 NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
966 NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT,
967
894 /* add attributes here, update the policy in nl80211.c */ 968 /* add attributes here, update the policy in nl80211.c */
895 969
896 __NL80211_ATTR_AFTER_LAST, 970 __NL80211_ATTR_AFTER_LAST,
@@ -947,7 +1021,7 @@ enum nl80211_attrs {
947 * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames 1021 * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames
948 * @NL80211_IFTYPE_MESH_POINT: mesh point 1022 * @NL80211_IFTYPE_MESH_POINT: mesh point
949 * @NL80211_IFTYPE_MAX: highest interface type number currently defined 1023 * @NL80211_IFTYPE_MAX: highest interface type number currently defined
950 * @__NL80211_IFTYPE_AFTER_LAST: internal use 1024 * @NUM_NL80211_IFTYPES: number of defined interface types
951 * 1025 *
952 * These values are used with the %NL80211_ATTR_IFTYPE 1026 * These values are used with the %NL80211_ATTR_IFTYPE
953 * to set the type of an interface. 1027 * to set the type of an interface.
@@ -964,8 +1038,8 @@ enum nl80211_iftype {
964 NL80211_IFTYPE_MESH_POINT, 1038 NL80211_IFTYPE_MESH_POINT,
965 1039
966 /* keep last */ 1040 /* keep last */
967 __NL80211_IFTYPE_AFTER_LAST, 1041 NUM_NL80211_IFTYPES,
968 NL80211_IFTYPE_MAX = __NL80211_IFTYPE_AFTER_LAST - 1 1042 NL80211_IFTYPE_MAX = NUM_NL80211_IFTYPES - 1
969}; 1043};
970 1044
971/** 1045/**
@@ -974,11 +1048,14 @@ enum nl80211_iftype {
974 * Station flags. When a station is added to an AP interface, it is 1048 * Station flags. When a station is added to an AP interface, it is
975 * assumed to be already associated (and hence authenticated.) 1049 * assumed to be already associated (and hence authenticated.)
976 * 1050 *
1051 * @__NL80211_STA_FLAG_INVALID: attribute number 0 is reserved
977 * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) 1052 * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X)
978 * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames 1053 * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames
979 * with short barker preamble 1054 * with short barker preamble
980 * @NL80211_STA_FLAG_WME: station is WME/QoS capable 1055 * @NL80211_STA_FLAG_WME: station is WME/QoS capable
981 * @NL80211_STA_FLAG_MFP: station uses management frame protection 1056 * @NL80211_STA_FLAG_MFP: station uses management frame protection
1057 * @NL80211_STA_FLAG_MAX: highest station flag number currently defined
1058 * @__NL80211_STA_FLAG_AFTER_LAST: internal use
982 */ 1059 */
983enum nl80211_sta_flags { 1060enum nl80211_sta_flags {
984 __NL80211_STA_FLAG_INVALID, 1061 __NL80211_STA_FLAG_INVALID,
@@ -1091,14 +1168,17 @@ enum nl80211_mpath_flags {
1091 * information about a mesh path. 1168 * information about a mesh path.
1092 * 1169 *
1093 * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved 1170 * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved
1094 * @NL80211_ATTR_MPATH_FRAME_QLEN: number of queued frames for this destination 1171 * @NL80211_MPATH_INFO_FRAME_QLEN: number of queued frames for this destination
1095 * @NL80211_ATTR_MPATH_SN: destination sequence number 1172 * @NL80211_MPATH_INFO_SN: destination sequence number
1096 * @NL80211_ATTR_MPATH_METRIC: metric (cost) of this mesh path 1173 * @NL80211_MPATH_INFO_METRIC: metric (cost) of this mesh path
1097 * @NL80211_ATTR_MPATH_EXPTIME: expiration time for the path, in msec from now 1174 * @NL80211_MPATH_INFO_EXPTIME: expiration time for the path, in msec from now
1098 * @NL80211_ATTR_MPATH_FLAGS: mesh path flags, enumerated in 1175 * @NL80211_MPATH_INFO_FLAGS: mesh path flags, enumerated in
1099 * &enum nl80211_mpath_flags; 1176 * &enum nl80211_mpath_flags;
1100 * @NL80211_ATTR_MPATH_DISCOVERY_TIMEOUT: total path discovery timeout, in msec 1177 * @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec
1101 * @NL80211_ATTR_MPATH_DISCOVERY_RETRIES: mesh path discovery retries 1178 * @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries
1179 * @NL80211_MPATH_INFO_MAX: highest mesh path information attribute number
1180 * currently defind
1181 * @__NL80211_MPATH_INFO_AFTER_LAST: internal use
1102 */ 1182 */
1103enum nl80211_mpath_info { 1183enum nl80211_mpath_info {
1104 __NL80211_MPATH_INFO_INVALID, 1184 __NL80211_MPATH_INFO_INVALID,
@@ -1127,6 +1207,8 @@ enum nl80211_mpath_info {
1127 * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE 1207 * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE
1128 * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n 1208 * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n
1129 * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n 1209 * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n
1210 * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
1211 * @__NL80211_BAND_ATTR_AFTER_LAST: internal use
1130 */ 1212 */
1131enum nl80211_band_attr { 1213enum nl80211_band_attr {
1132 __NL80211_BAND_ATTR_INVALID, 1214 __NL80211_BAND_ATTR_INVALID,
@@ -1147,6 +1229,7 @@ enum nl80211_band_attr {
1147 1229
1148/** 1230/**
1149 * enum nl80211_frequency_attr - frequency attributes 1231 * enum nl80211_frequency_attr - frequency attributes
1232 * @__NL80211_FREQUENCY_ATTR_INVALID: attribute number 0 is reserved
1150 * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz 1233 * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz
1151 * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current 1234 * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current
1152 * regulatory domain. 1235 * regulatory domain.
@@ -1158,6 +1241,9 @@ enum nl80211_band_attr {
1158 * on this channel in current regulatory domain. 1241 * on this channel in current regulatory domain.
1159 * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm 1242 * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm
1160 * (100 * dBm). 1243 * (100 * dBm).
1244 * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
1245 * currently defined
1246 * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
1161 */ 1247 */
1162enum nl80211_frequency_attr { 1248enum nl80211_frequency_attr {
1163 __NL80211_FREQUENCY_ATTR_INVALID, 1249 __NL80211_FREQUENCY_ATTR_INVALID,
@@ -1177,9 +1263,13 @@ enum nl80211_frequency_attr {
1177 1263
1178/** 1264/**
1179 * enum nl80211_bitrate_attr - bitrate attributes 1265 * enum nl80211_bitrate_attr - bitrate attributes
1266 * @__NL80211_BITRATE_ATTR_INVALID: attribute number 0 is reserved
1180 * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps 1267 * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps
1181 * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported 1268 * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported
 1182 * in the 2.4 GHz band. 1269 * in the 2.4 GHz band.
1270 * @NL80211_BITRATE_ATTR_MAX: highest bitrate attribute number
1271 * currently defined
1272 * @__NL80211_BITRATE_ATTR_AFTER_LAST: internal use
1183 */ 1273 */
1184enum nl80211_bitrate_attr { 1274enum nl80211_bitrate_attr {
1185 __NL80211_BITRATE_ATTR_INVALID, 1275 __NL80211_BITRATE_ATTR_INVALID,
@@ -1235,6 +1325,7 @@ enum nl80211_reg_type {
1235 1325
1236/** 1326/**
1237 * enum nl80211_reg_rule_attr - regulatory rule attributes 1327 * enum nl80211_reg_rule_attr - regulatory rule attributes
1328 * @__NL80211_REG_RULE_ATTR_INVALID: attribute number 0 is reserved
1238 * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional 1329 * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional
1239 * considerations for a given frequency range. These are the 1330 * considerations for a given frequency range. These are the
1240 * &enum nl80211_reg_rule_flags. 1331 * &enum nl80211_reg_rule_flags.
@@ -1251,6 +1342,9 @@ enum nl80211_reg_type {
1251 * If you don't have one then don't send this. 1342 * If you don't have one then don't send this.
1252 * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for 1343 * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for
1253 * a given frequency range. The value is in mBm (100 * dBm). 1344 * a given frequency range. The value is in mBm (100 * dBm).
1345 * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number
1346 * currently defined
1347 * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use
1254 */ 1348 */
1255enum nl80211_reg_rule_attr { 1349enum nl80211_reg_rule_attr {
1256 __NL80211_REG_RULE_ATTR_INVALID, 1350 __NL80211_REG_RULE_ATTR_INVALID,
@@ -1302,6 +1396,9 @@ enum nl80211_reg_rule_flags {
1302 * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved 1396 * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved
1303 * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel 1397 * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel
1304 * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm) 1398 * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm)
1399 * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
1400 * currently defined
1401 * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
1305 */ 1402 */
1306enum nl80211_survey_info { 1403enum nl80211_survey_info {
1307 __NL80211_SURVEY_INFO_INVALID, 1404 __NL80211_SURVEY_INFO_INVALID,
@@ -1466,6 +1563,7 @@ enum nl80211_channel_type {
1466 * enum nl80211_bss - netlink attributes for a BSS 1563 * enum nl80211_bss - netlink attributes for a BSS
1467 * 1564 *
1468 * @__NL80211_BSS_INVALID: invalid 1565 * @__NL80211_BSS_INVALID: invalid
1566 * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets)
1469 * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) 1567 * @NL80211_BSS_FREQUENCY: frequency in MHz (u32)
1470 * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) 1568 * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64)
1471 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) 1569 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
@@ -1509,6 +1607,12 @@ enum nl80211_bss {
1509 1607
1510/** 1608/**
1511 * enum nl80211_bss_status - BSS "status" 1609 * enum nl80211_bss_status - BSS "status"
1610 * @NL80211_BSS_STATUS_AUTHENTICATED: Authenticated with this BSS.
1611 * @NL80211_BSS_STATUS_ASSOCIATED: Associated with this BSS.
1612 * @NL80211_BSS_STATUS_IBSS_JOINED: Joined to this IBSS.
1613 *
1614 * The BSS status is a BSS attribute in scan dumps, which
 1615 * indicates the status the interface has with respect to this BSS.
1512 */ 1616 */
1513enum nl80211_bss_status { 1617enum nl80211_bss_status {
1514 NL80211_BSS_STATUS_AUTHENTICATED, 1618 NL80211_BSS_STATUS_AUTHENTICATED,
@@ -1619,8 +1723,8 @@ enum nl80211_tx_rate_attributes {
1619 1723
1620/** 1724/**
1621 * enum nl80211_band - Frequency band 1725 * enum nl80211_band - Frequency band
1622 * @NL80211_BAND_2GHZ - 2.4 GHz ISM band 1726 * @NL80211_BAND_2GHZ: 2.4 GHz ISM band
1623 * @NL80211_BAND_5GHZ - around 5 GHz band (4.9 - 5.7 GHz) 1727 * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
1624 */ 1728 */
1625enum nl80211_band { 1729enum nl80211_band {
1626 NL80211_BAND_2GHZ, 1730 NL80211_BAND_2GHZ,
@@ -1658,9 +1762,9 @@ enum nl80211_attr_cqm {
1658 1762
1659/** 1763/**
1660 * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event 1764 * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event
1661 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW - The RSSI level is lower than the 1765 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW: The RSSI level is lower than the
1662 * configured threshold 1766 * configured threshold
1663 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH - The RSSI is higher than the 1767 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the
1664 * configured threshold 1768 * configured threshold
1665 */ 1769 */
1666enum nl80211_cqm_rssi_threshold_event { 1770enum nl80211_cqm_rssi_threshold_event {
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 10d33309e9a6..9438660b46ea 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2189,6 +2189,9 @@
2189#define PCI_VENDOR_ID_ARIMA 0x161f 2189#define PCI_VENDOR_ID_ARIMA 0x161f
2190 2190
2191#define PCI_VENDOR_ID_BROCADE 0x1657 2191#define PCI_VENDOR_ID_BROCADE 0x1657
2192#define PCI_DEVICE_ID_BROCADE_CT 0x0014
2193#define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017
2194#define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021
2192 2195
2193#define PCI_VENDOR_ID_SIBYTE 0x166d 2196#define PCI_VENDOR_ID_SIBYTE 0x166d
2194#define PCI_DEVICE_ID_BCM1250_PCI 0x0001 2197#define PCI_DEVICE_ID_BCM1250_PCI 0x0001
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6b0a782c6224..a6e047a04f79 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -116,7 +116,7 @@ struct mii_bus {
116 /* list of all PHYs on bus */ 116 /* list of all PHYs on bus */
117 struct phy_device *phy_map[PHY_MAX_ADDR]; 117 struct phy_device *phy_map[PHY_MAX_ADDR];
118 118
119 /* Phy addresses to be ignored when probing */ 119 /* PHY addresses to be ignored when probing */
120 u32 phy_mask; 120 u32 phy_mask;
121 121
122 /* 122 /*
@@ -283,7 +283,7 @@ struct phy_device {
283 283
284 phy_interface_t interface; 284 phy_interface_t interface;
285 285
286 /* Bus address of the PHY (0-32) */ 286 /* Bus address of the PHY (0-31) */
287 int addr; 287 int addr;
288 288
289 /* 289 /*
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index 7f6ba8658abe..defbde203d07 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -332,6 +332,7 @@ enum {
332 FLOW_KEY_SKUID, 332 FLOW_KEY_SKUID,
333 FLOW_KEY_SKGID, 333 FLOW_KEY_SKGID,
334 FLOW_KEY_VLAN_TAG, 334 FLOW_KEY_VLAN_TAG,
335 FLOW_KEY_RXHASH,
335 __FLOW_KEY_MAX, 336 __FLOW_KEY_MAX,
336}; 337};
337 338
diff --git a/include/linux/rds.h b/include/linux/rds.h
index 24bce3ded9ea..91950950aa59 100644
--- a/include/linux/rds.h
+++ b/include/linux/rds.h
@@ -36,15 +36,6 @@
36 36
37#include <linux/types.h> 37#include <linux/types.h>
38 38
39/* These sparse annotated types shouldn't be in any user
40 * visible header file. We should clean this up rather
41 * than kludging around them. */
42#ifndef __KERNEL__
43#define __be16 u_int16_t
44#define __be32 u_int32_t
45#define __be64 u_int64_t
46#endif
47
48#define RDS_IB_ABI_VERSION 0x301 39#define RDS_IB_ABI_VERSION 0x301
49 40
50/* 41/*
@@ -82,6 +73,10 @@
82#define RDS_CMSG_RDMA_MAP 3 73#define RDS_CMSG_RDMA_MAP 3
83#define RDS_CMSG_RDMA_STATUS 4 74#define RDS_CMSG_RDMA_STATUS 4
84#define RDS_CMSG_CONG_UPDATE 5 75#define RDS_CMSG_CONG_UPDATE 5
76#define RDS_CMSG_ATOMIC_FADD 6
77#define RDS_CMSG_ATOMIC_CSWP 7
78#define RDS_CMSG_MASKED_ATOMIC_FADD 8
79#define RDS_CMSG_MASKED_ATOMIC_CSWP 9
85 80
86#define RDS_INFO_FIRST 10000 81#define RDS_INFO_FIRST 10000
87#define RDS_INFO_COUNTERS 10000 82#define RDS_INFO_COUNTERS 10000
@@ -98,9 +93,9 @@
98#define RDS_INFO_LAST 10010 93#define RDS_INFO_LAST 10010
99 94
100struct rds_info_counter { 95struct rds_info_counter {
101 u_int8_t name[32]; 96 uint8_t name[32];
102 u_int64_t value; 97 uint64_t value;
103} __packed; 98} __attribute__((packed));
104 99
105#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 100#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01
106#define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 101#define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02
@@ -109,56 +104,48 @@ struct rds_info_counter {
109#define TRANSNAMSIZ 16 104#define TRANSNAMSIZ 16
110 105
111struct rds_info_connection { 106struct rds_info_connection {
112 u_int64_t next_tx_seq; 107 uint64_t next_tx_seq;
113 u_int64_t next_rx_seq; 108 uint64_t next_rx_seq;
114 __be32 laddr;
115 __be32 faddr;
116 u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */
117 u_int8_t flags;
118} __packed;
119
120struct rds_info_flow {
121 __be32 laddr; 109 __be32 laddr;
122 __be32 faddr; 110 __be32 faddr;
123 u_int32_t bytes; 111 uint8_t transport[TRANSNAMSIZ]; /* null term ascii */
124 __be16 lport; 112 uint8_t flags;
125 __be16 fport; 113} __attribute__((packed));
126} __packed;
127 114
128#define RDS_INFO_MESSAGE_FLAG_ACK 0x01 115#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
129#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 116#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
130 117
131struct rds_info_message { 118struct rds_info_message {
132 u_int64_t seq; 119 uint64_t seq;
133 u_int32_t len; 120 uint32_t len;
134 __be32 laddr; 121 __be32 laddr;
135 __be32 faddr; 122 __be32 faddr;
136 __be16 lport; 123 __be16 lport;
137 __be16 fport; 124 __be16 fport;
138 u_int8_t flags; 125 uint8_t flags;
139} __packed; 126} __attribute__((packed));
140 127
141struct rds_info_socket { 128struct rds_info_socket {
142 u_int32_t sndbuf; 129 uint32_t sndbuf;
143 __be32 bound_addr; 130 __be32 bound_addr;
144 __be32 connected_addr; 131 __be32 connected_addr;
145 __be16 bound_port; 132 __be16 bound_port;
146 __be16 connected_port; 133 __be16 connected_port;
147 u_int32_t rcvbuf; 134 uint32_t rcvbuf;
148 u_int64_t inum; 135 uint64_t inum;
149} __packed; 136} __attribute__((packed));
150 137
151struct rds_info_tcp_socket { 138struct rds_info_tcp_socket {
152 __be32 local_addr; 139 __be32 local_addr;
153 __be16 local_port; 140 __be16 local_port;
154 __be32 peer_addr; 141 __be32 peer_addr;
155 __be16 peer_port; 142 __be16 peer_port;
156 u_int64_t hdr_rem; 143 uint64_t hdr_rem;
157 u_int64_t data_rem; 144 uint64_t data_rem;
158 u_int32_t last_sent_nxt; 145 uint32_t last_sent_nxt;
159 u_int32_t last_expected_una; 146 uint32_t last_expected_una;
160 u_int32_t last_seen_una; 147 uint32_t last_seen_una;
161} __packed; 148} __attribute__((packed));
162 149
163#define RDS_IB_GID_LEN 16 150#define RDS_IB_GID_LEN 16
164struct rds_info_rdma_connection { 151struct rds_info_rdma_connection {
@@ -212,42 +199,69 @@ struct rds_info_rdma_connection {
212 * (so that the application does not have to worry about 199 * (so that the application does not have to worry about
213 * alignment). 200 * alignment).
214 */ 201 */
215typedef u_int64_t rds_rdma_cookie_t; 202typedef uint64_t rds_rdma_cookie_t;
216 203
217struct rds_iovec { 204struct rds_iovec {
218 u_int64_t addr; 205 uint64_t addr;
219 u_int64_t bytes; 206 uint64_t bytes;
220}; 207};
221 208
222struct rds_get_mr_args { 209struct rds_get_mr_args {
223 struct rds_iovec vec; 210 struct rds_iovec vec;
224 u_int64_t cookie_addr; 211 uint64_t cookie_addr;
225 uint64_t flags; 212 uint64_t flags;
226}; 213};
227 214
228struct rds_get_mr_for_dest_args { 215struct rds_get_mr_for_dest_args {
229 struct sockaddr_storage dest_addr; 216 struct sockaddr_storage dest_addr;
230 struct rds_iovec vec; 217 struct rds_iovec vec;
231 u_int64_t cookie_addr; 218 uint64_t cookie_addr;
232 uint64_t flags; 219 uint64_t flags;
233}; 220};
234 221
235struct rds_free_mr_args { 222struct rds_free_mr_args {
236 rds_rdma_cookie_t cookie; 223 rds_rdma_cookie_t cookie;
237 u_int64_t flags; 224 uint64_t flags;
238}; 225};
239 226
240struct rds_rdma_args { 227struct rds_rdma_args {
241 rds_rdma_cookie_t cookie; 228 rds_rdma_cookie_t cookie;
242 struct rds_iovec remote_vec; 229 struct rds_iovec remote_vec;
243 u_int64_t local_vec_addr; 230 uint64_t local_vec_addr;
244 u_int64_t nr_local; 231 uint64_t nr_local;
245 u_int64_t flags; 232 uint64_t flags;
246 u_int64_t user_token; 233 uint64_t user_token;
234};
235
236struct rds_atomic_args {
237 rds_rdma_cookie_t cookie;
238 uint64_t local_addr;
239 uint64_t remote_addr;
240 union {
241 struct {
242 uint64_t compare;
243 uint64_t swap;
244 } cswp;
245 struct {
246 uint64_t add;
247 } fadd;
248 struct {
249 uint64_t compare;
250 uint64_t swap;
251 uint64_t compare_mask;
252 uint64_t swap_mask;
253 } m_cswp;
254 struct {
255 uint64_t add;
256 uint64_t nocarry_mask;
257 } m_fadd;
258 };
259 uint64_t flags;
260 uint64_t user_token;
247}; 261};
248 262
249struct rds_rdma_notify { 263struct rds_rdma_notify {
250 u_int64_t user_token; 264 uint64_t user_token;
251 int32_t status; 265 int32_t status;
252}; 266};
253 267
@@ -266,5 +280,6 @@ struct rds_rdma_notify {
266#define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */ 280#define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */
267#define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */ 281#define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */
268#define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */ 282#define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */
283#define RDS_RDMA_SILENT 0x0040 /* Do not interrupt remote */
269 284
270#endif /* IB_RDS_H */ 285#endif /* IB_RDS_H */
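
The new rds_atomic_args structure rides on sendmsg() as a control message, in the same way as the existing RDMA args. Below is a minimal userspace sketch of a remote fetch-and-add; it assumes a connected RDS socket fd, an MR cookie obtained earlier via RDS_GET_MR, and a remote address, and the SOL_RDS cmsg plumbing follows the pattern of the existing RDS_CMSG_RDMA_ARGS interface (treat it as an illustration, not a reference):

	#include <stdint.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/rds.h>

	static int rds_fetch_add_one(int fd, rds_rdma_cookie_t mr_cookie,
				     uint64_t remote_addr, uint64_t *result)
	{
		struct rds_atomic_args args;
		char cbuf[CMSG_SPACE(sizeof(args))];
		struct msghdr msg;
		struct cmsghdr *cmsg;

		memset(&args, 0, sizeof(args));
		args.cookie = mr_cookie;	/* from RDS_GET_MR */
		args.local_addr = (uint64_t)(unsigned long)result;
		args.remote_addr = remote_addr;
		args.fadd.add = 1;		/* fetch-and-add of 1 */
		args.flags = RDS_RDMA_NOTIFY_ME;

		memset(&msg, 0, sizeof(msg));
		msg.msg_control = cbuf;
		msg.msg_controllen = sizeof(cbuf);
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_RDS;
		cmsg->cmsg_type = RDS_CMSG_ATOMIC_FADD;
		cmsg->cmsg_len = CMSG_LEN(sizeof(args));
		memcpy(CMSG_DATA(cmsg), &args, sizeof(args));

		return (int)sendmsg(fd, &msg, 0);
	}
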
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 58d44491880f..263690d991a8 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -749,6 +749,17 @@ extern int rtnl_is_locked(void);
749extern int lockdep_rtnl_is_held(void); 749extern int lockdep_rtnl_is_held(void);
750#endif /* #ifdef CONFIG_PROVE_LOCKING */ 750#endif /* #ifdef CONFIG_PROVE_LOCKING */
751 751
752/**
753 * rcu_dereference_rtnl - rcu_dereference with debug checking
754 * @p: The pointer to read, prior to dereferencing
755 *
756 * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
757 * or RTNL
758 */
759#define rcu_dereference_rtnl(p) \
760 rcu_dereference_check(p, rcu_read_lock_held() || \
761 lockdep_rtnl_is_held())
762
752extern void rtnetlink_init(void); 763extern void rtnetlink_init(void);
753extern void __rtnl_unlock(void); 764extern void __rtnl_unlock(void);
754 765
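
To illustrate where the new helper fits, a hedged sketch (the my_cfg structure and its publication site are hypothetical): a pointer written under RTNL and read under either RCU or RTNL can now be fetched through one lockdep-checked accessor instead of a bare rcu_dereference().

	struct my_cfg {
		int mtu;
	};

	static struct my_cfg __rcu *my_cfg_ptr;	/* hypothetical, published
						 * with rcu_assign_pointer()
						 * under RTNL */

	/* Legal from rcu_read_lock() or with RTNL held; lockdep
	 * complains in any other context. */
	static int my_cfg_get_mtu(void)
	{
		struct my_cfg *cfg = rcu_dereference_rtnl(my_cfg_ptr);

		return cfg ? cfg->mtu : 0;
	}
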
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 77eb60d2b496..9e8085a89589 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -163,26 +163,19 @@ struct skb_shared_hwtstamps {
163 ktime_t syststamp; 163 ktime_t syststamp;
164}; 164};
165 165
166/** 166/* Definitions for tx_flags in struct skb_shared_info */
167 * struct skb_shared_tx - instructions for time stamping of outgoing packets 167enum {
168 * @hardware: generate hardware time stamp 168 /* generate hardware time stamp */
169 * @software: generate software time stamp 169 SKBTX_HW_TSTAMP = 1 << 0,
170 * @in_progress: device driver is going to provide 170
171 * hardware time stamp 171 /* generate software time stamp */
172 * @prevent_sk_orphan: make sk reference available on driver level 172 SKBTX_SW_TSTAMP = 1 << 1,
173 * @flags: all shared_tx flags 173
174 * 174 /* device driver is going to provide hardware time stamp */
175 * These flags are attached to packets as part of the 175 SKBTX_IN_PROGRESS = 1 << 2,
176 * &skb_shared_info. Use skb_tx() to get a pointer. 176
177 */ 177 /* ensure the originating sk reference is available on driver level */
178union skb_shared_tx { 178 SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
179 struct {
180 __u8 hardware:1,
181 software:1,
182 in_progress:1,
183 prevent_sk_orphan:1;
184 };
185 __u8 flags;
186}; 179};
187 180
188/* This data is invariant across clones and lives at 181/* This data is invariant across clones and lives at
@@ -195,7 +188,7 @@ struct skb_shared_info {
195 unsigned short gso_segs; 188 unsigned short gso_segs;
196 unsigned short gso_type; 189 unsigned short gso_type;
197 __be32 ip6_frag_id; 190 __be32 ip6_frag_id;
198 union skb_shared_tx tx_flags; 191 __u8 tx_flags;
199 struct sk_buff *frag_list; 192 struct sk_buff *frag_list;
200 struct skb_shared_hwtstamps hwtstamps; 193 struct skb_shared_hwtstamps hwtstamps;
201 194
@@ -558,6 +551,15 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
558 unsigned int to, struct ts_config *config, 551 unsigned int to, struct ts_config *config,
559 struct ts_state *state); 552 struct ts_state *state);
560 553
554extern __u32 __skb_get_rxhash(struct sk_buff *skb);
555static inline __u32 skb_get_rxhash(struct sk_buff *skb)
556{
557 if (!skb->rxhash)
558 skb->rxhash = __skb_get_rxhash(skb);
559
560 return skb->rxhash;
561}
562
561#ifdef NET_SKBUFF_DATA_USES_OFFSET 563#ifdef NET_SKBUFF_DATA_USES_OFFSET
562static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 564static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
563{ 565{
@@ -578,11 +580,6 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
578 return &skb_shinfo(skb)->hwtstamps; 580 return &skb_shinfo(skb)->hwtstamps;
579} 581}
580 582
581static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
582{
583 return &skb_shinfo(skb)->tx_flags;
584}
585
586/** 583/**
587 * skb_queue_empty - check if a queue is empty 584 * skb_queue_empty - check if a queue is empty
588 * @list: queue head 585 * @list: queue head
@@ -1123,7 +1120,7 @@ extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1123 int off, int size); 1120 int off, int size);
1124 1121
1125#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) 1122#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1126#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb)) 1123#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1127#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) 1124#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1128 1125
1129#ifdef NET_SKBUFF_DATA_USES_OFFSET 1126#ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -1787,7 +1784,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1787 skb = skb->prev) 1784 skb = skb->prev)
1788 1785
1789 1786
1790static inline bool skb_has_frags(const struct sk_buff *skb) 1787static inline bool skb_has_frag_list(const struct sk_buff *skb)
1791{ 1788{
1792 return skb_shinfo(skb)->frag_list != NULL; 1789 return skb_shinfo(skb)->frag_list != NULL;
1793} 1790}
@@ -1987,8 +1984,8 @@ extern void skb_tstamp_tx(struct sk_buff *orig_skb,
1987 1984
1988static inline void sw_tx_timestamp(struct sk_buff *skb) 1985static inline void sw_tx_timestamp(struct sk_buff *skb)
1989{ 1986{
1990 union skb_shared_tx *shtx = skb_tx(skb); 1987 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
1991 if (shtx->software && !shtx->in_progress) 1988 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
1992 skb_tstamp_tx(skb, NULL); 1989 skb_tstamp_tx(skb, NULL);
1993} 1990}
1994 1991
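
For drivers, converting from the removed union to the flag byte is mechanical. A sketch of the new idiom in a hypothetical transmit path that offers hardware timestamps:

	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* Claim the timestamp so sw_tx_timestamp() stays out of
		 * the way; the hardware completion path then reports it
		 * via skb_tstamp_tx(). */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		return my_hw_queue(skb, dev);	/* hypothetical */
	}
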
@@ -2209,6 +2206,21 @@ static inline void skb_forward_csum(struct sk_buff *skb)
2209 skb->ip_summed = CHECKSUM_NONE; 2206 skb->ip_summed = CHECKSUM_NONE;
2210} 2207}
2211 2208
2209/**
2210 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
2211 * @skb: skb to check
2212 *
2213 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
2214 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
2215 * use this helper, to document places where we make this assertion.
2216 */
2217static inline void skb_checksum_none_assert(struct sk_buff *skb)
2218{
2219#ifdef DEBUG
2220 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2221#endif
2222}
2223
2212bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2224bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2213#endif /* __KERNEL__ */ 2225#endif /* __KERNEL__ */
2214#endif /* _LINUX_SKBUFF_H */ 2226#endif /* _LINUX_SKBUFF_H */
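
The intended use of the new assertion, sketched for a hypothetical receive routine: instead of defensively re-storing CHECKSUM_NONE on a fresh skb, the driver documents the invariant and only upgrades ip_summed when the hardware actually verified the checksum.

	skb_checksum_none_assert(skb);		/* fresh skb: already CHECKSUM_NONE */
	if (my_rx_desc_csum_ok(desc))		/* hypothetical */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
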
diff --git a/include/linux/spi/wl12xx.h b/include/linux/spi/wl12xx.h
index a223ecbc71ef..a20bccf0b5c2 100644
--- a/include/linux/spi/wl12xx.h
+++ b/include/linux/spi/wl12xx.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com> 6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index a6d5225b9275..11daf9c140e7 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -97,6 +97,7 @@
97#define SSB_TMSLOW_RESET 0x00000001 /* Reset */ 97#define SSB_TMSLOW_RESET 0x00000001 /* Reset */
98#define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */ 98#define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */
99#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ 99#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */
100#define SSB_TMSLOW_PHYCLK 0x00000010 /* MAC PHY Clock Control Enable */
100#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ 101#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */
101#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ 102#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
102#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ 103#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 632ff7c03280..a4adf0de6ed6 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -35,7 +35,7 @@ struct plat_stmmacenet_data {
35 int has_gmac; 35 int has_gmac;
36 int enh_desc; 36 int enh_desc;
37 void (*fix_mac_speed)(void *priv, unsigned int speed); 37 void (*fix_mac_speed)(void *priv, unsigned int speed);
38 void (*bus_setup)(unsigned long ioaddr); 38 void (*bus_setup)(void __iomem *ioaddr);
39#ifdef CONFIG_STM_DRIVERS 39#ifdef CONFIG_STM_DRIVERS
40 struct stm_pad_config *pad_config; 40 struct stm_pad_config *pad_config;
41#endif 41#endif
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild
index 76990937f4c9..67b501c302b2 100644
--- a/include/linux/tc_act/Kbuild
+++ b/include/linux/tc_act/Kbuild
@@ -4,3 +4,4 @@ header-y += tc_mirred.h
4header-y += tc_pedit.h 4header-y += tc_pedit.h
5header-y += tc_nat.h 5header-y += tc_nat.h
6header-y += tc_skbedit.h 6header-y += tc_skbedit.h
7header-y += tc_csum.h
diff --git a/include/linux/tc_act/tc_csum.h b/include/linux/tc_act/tc_csum.h
new file mode 100644
index 000000000000..a047c49a3153
--- /dev/null
+++ b/include/linux/tc_act/tc_csum.h
@@ -0,0 +1,32 @@
1#ifndef __LINUX_TC_CSUM_H
2#define __LINUX_TC_CSUM_H
3
4#include <linux/types.h>
5#include <linux/pkt_cls.h>
6
7#define TCA_ACT_CSUM 16
8
9enum {
10 TCA_CSUM_UNSPEC,
11 TCA_CSUM_PARMS,
12 TCA_CSUM_TM,
13 __TCA_CSUM_MAX
14};
15#define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1)
16
17enum {
18 TCA_CSUM_UPDATE_FLAG_IPV4HDR = 1,
19 TCA_CSUM_UPDATE_FLAG_ICMP = 2,
20 TCA_CSUM_UPDATE_FLAG_IGMP = 4,
21 TCA_CSUM_UPDATE_FLAG_TCP = 8,
22 TCA_CSUM_UPDATE_FLAG_UDP = 16,
23 TCA_CSUM_UPDATE_FLAG_UDPLITE = 32
24};
25
26struct tc_csum {
27 tc_gen;
28
29 __u32 update_flags;
30};
31
32#endif /* __LINUX_TC_CSUM_H */
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index 0864206ec1a3..7138962664f8 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -79,6 +79,7 @@ enum {
79 TCF_META_ID_SK_SENDMSG_OFF, 79 TCF_META_ID_SK_SENDMSG_OFF,
80 TCF_META_ID_SK_WRITE_PENDING, 80 TCF_META_ID_SK_WRITE_PENDING,
81 TCF_META_ID_VLAN_TAG, 81 TCF_META_ID_VLAN_TAG,
82 TCF_META_ID_RXHASH,
82 __TCF_META_ID_MAX 83 __TCF_META_ID_MAX
83}; 84};
84#define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) 85#define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a778ee024590..e64f4c67d0ef 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -105,6 +105,7 @@ enum {
105#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */ 105#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */
106#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ 106#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
107#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ 107#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
108#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
108 109
109/* for TCP_INFO socket option */ 110/* for TCP_INFO socket option */
110#define TCPI_OPT_TIMESTAMPS 1 111#define TCPI_OPT_TIMESTAMPS 1
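
From userspace the new option takes an unsigned int of milliseconds; once transmitted data has remained unacknowledged for that long, the connection is aborted (the RFC 5482 UTO semantics). A minimal sketch, assuming a connected TCP socket fd and a toolchain that has picked up the new constant:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	static int set_user_timeout(int fd, unsigned int timeout_ms)
	{
		return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
				  &timeout_ms, sizeof(timeout_ms));
	}

	/* set_user_timeout(fd, 30000): give up after 30 s of unacked data */
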
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 2fd06c60ffbb..4c8c727d0cca 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -25,6 +25,43 @@
25#include <linux/wireless.h> 25#include <linux/wireless.h>
26 26
27 27
28/**
29 * DOC: Introduction
30 *
31 * cfg80211 is the configuration API for 802.11 devices in Linux. It bridges
32 * userspace and drivers, and offers some utility functionality associated
33 * with 802.11. cfg80211 must, directly or indirectly via mac80211, be used
34 * by all modern wireless drivers in Linux, so that they offer a consistent
35 * API through nl80211. For backward compatibility, cfg80211 also offers
36 * wireless extensions to userspace, but hides them from drivers completely.
37 *
38 * Additionally, cfg80211 contains code to help enforce regulatory spectrum
39 * use restrictions.
40 */
41
42
43/**
44 * DOC: Device registration
45 *
46 * In order for a driver to use cfg80211, it must register the hardware device
47 * with cfg80211. This happens through a number of hardware capability structs
48 * described below.
49 *
50 * The fundamental structure for each device is the 'wiphy', of which each
51 * instance describes a physical wireless device connected to the system. Each
52 * such wiphy can have zero, one, or many virtual interfaces associated with
53 * it, which need to be identified as such by pointing the network interface's
54 * @ieee80211_ptr pointer to a &struct wireless_dev which further describes
55 * the wireless part of the interface, normally this struct is embedded in the
56 * network interface's private data area. Drivers can optionally allow creating
57 * or destroying virtual interfaces on the fly, but without at least one or the
58 * ability to create some the wireless device isn't useful.
59 *
60 * Each wiphy structure contains device capability information, and also has
61 * a pointer to the various operations the driver offers. The definitions and
62 * structures here describe these capabilities in detail.
63 */
64
28/* 65/*
29 * wireless hardware capability structures 66 * wireless hardware capability structures
30 */ 67 */
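
As a concrete illustration of the registration flow the new DOC section describes, a driver probe path typically looks like the following sketch; the cfg80211 calls are real, while my_cfg80211_ops, struct my_priv and my_band_2ghz are hypothetical driver pieces:

	static int my_probe(void)
	{
		struct wiphy *wiphy;
		int err;

		wiphy = wiphy_new(&my_cfg80211_ops, sizeof(struct my_priv));
		if (!wiphy)
			return -ENOMEM;

		wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
		wiphy->bands[IEEE80211_BAND_2GHZ] = &my_band_2ghz;

		err = wiphy_register(wiphy);
		if (err)
			wiphy_free(wiphy);
		return err;
	}
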
@@ -205,6 +242,21 @@ struct ieee80211_supported_band {
205 */ 242 */
206 243
207/** 244/**
245 * DOC: Actions and configuration
246 *
247 * Each wireless device and each virtual interface offer a set of configuration
248 * operations and other actions that are invoked by userspace. Each of these
249 * actions is described in the operations structure, and the parameters these
250 * operations use are described separately.
251 *
252 * Additionally, some operations are asynchronous and expect to get status
253 * information via some functions that drivers need to call.
254 *
255 * Scanning and BSS list handling with its associated functionality is described
256 * in a separate chapter.
257 */
258
259/**
208 * struct vif_params - describes virtual interface parameters 260 * struct vif_params - describes virtual interface parameters
209 * @mesh_id: mesh ID to use 261 * @mesh_id: mesh ID to use
210 * @mesh_id_len: length of the mesh ID 262 * @mesh_id_len: length of the mesh ID
@@ -570,8 +622,28 @@ struct ieee80211_txq_params {
570/* from net/wireless.h */ 622/* from net/wireless.h */
571struct wiphy; 623struct wiphy;
572 624
573/* from net/ieee80211.h */ 625/**
574struct ieee80211_channel; 626 * DOC: Scanning and BSS list handling
627 *
628 * The scanning process itself is fairly simple, but cfg80211 offers quite
629 * a bit of helper functionality. To start a scan, the scan operation will
630 * be invoked with a scan definition. This scan definition contains the
631 * channels to scan, and the SSIDs to send probe requests for (including the
632 * wildcard, if desired). A passive scan is indicated by having no SSIDs to
633 * probe. Additionally, a scan request may contain extra information elements
634 * that should be added to the probe request. The IEs are guaranteed to be
635 * well-formed, and will not exceed the maximum length the driver advertised
636 * in the wiphy structure.
637 *
638 * When scanning finds a BSS, cfg80211 needs to be notified of that, because
639 * it is responsible for maintaining the BSS list; the driver should not
640 * maintain a list itself. For this notification, various functions exist.
641 *
642 * Since drivers do not maintain a BSS list, there are also a number of
643 * functions to search for a BSS and obtain information about it from the
644 * BSS structure cfg80211 maintains. The BSS list is also made available
645 * to userspace.
646 */
575 647
576/** 648/**
577 * struct cfg80211_ssid - SSID description 649 * struct cfg80211_ssid - SSID description
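
The notification side of the scanning text above, sketched for a driver that just received a probe response; channel, frame, frame_len and signal_mbm come from the hypothetical receive path (signal in mBm, assuming the wiphy advertises CFG80211_SIGNAL_TYPE_MBM), and the cfg80211 calls are real:

	static void my_report_bss(struct wiphy *wiphy,
				  struct ieee80211_channel *channel,
				  u8 *frame, size_t frame_len, s32 signal_mbm)
	{
		struct cfg80211_bss *bss;

		bss = cfg80211_inform_bss_frame(wiphy, channel,
						(struct ieee80211_mgmt *)frame,
						frame_len, signal_mbm,
						GFP_ATOMIC);
		if (bss)
			cfg80211_put_bss(bss);	/* drop our reference */
	}
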
@@ -691,6 +763,10 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
691 * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is 763 * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
692 * required to assume that the port is unauthorized until authorized by 764 * required to assume that the port is unauthorized until authorized by
693 * user space. Otherwise, port is marked authorized by default. 765 * user space. Otherwise, port is marked authorized by default.
766 * @control_port_ethertype: the control port protocol that should be
767 * allowed through even on unauthorized ports
768 * @control_port_no_encrypt: TRUE to prevent encryption of control port
769 * protocol frames.
694 */ 770 */
695struct cfg80211_crypto_settings { 771struct cfg80211_crypto_settings {
696 u32 wpa_versions; 772 u32 wpa_versions;
@@ -700,6 +776,8 @@ struct cfg80211_crypto_settings {
700 int n_akm_suites; 776 int n_akm_suites;
701 u32 akm_suites[NL80211_MAX_NR_AKM_SUITES]; 777 u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
702 bool control_port; 778 bool control_port;
779 __be16 control_port_ethertype;
780 bool control_port_no_encrypt;
703}; 781};
704 782
705/** 783/**
@@ -1020,7 +1098,7 @@ struct cfg80211_pmksa {
1020 * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. 1098 * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation.
1021 * This allows the operation to be terminated prior to timeout based on 1099 * This allows the operation to be terminated prior to timeout based on
1022 * the duration value. 1100 * the duration value.
1023 * @action: Transmit an action frame 1101 * @mgmt_tx: Transmit a management frame
1024 * 1102 *
1025 * @testmode_cmd: run a test mode command 1103 * @testmode_cmd: run a test mode command
1026 * 1104 *
@@ -1172,7 +1250,7 @@ struct cfg80211_ops {
1172 struct net_device *dev, 1250 struct net_device *dev,
1173 u64 cookie); 1251 u64 cookie);
1174 1252
1175 int (*action)(struct wiphy *wiphy, struct net_device *dev, 1253 int (*mgmt_tx)(struct wiphy *wiphy, struct net_device *dev,
1176 struct ieee80211_channel *chan, 1254 struct ieee80211_channel *chan,
1177 enum nl80211_channel_type channel_type, 1255 enum nl80211_channel_type channel_type,
1178 bool channel_type_valid, 1256 bool channel_type_valid,
@@ -1221,21 +1299,29 @@ struct cfg80211_ops {
1221 * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station 1299 * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station
1222 * on a VLAN interface) 1300 * on a VLAN interface)
1223 * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station 1301 * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station
1302 * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the
1303 * control port protocol ethertype. The device also honours the
1304 * control_port_no_encrypt flag.
1224 */ 1305 */
1225enum wiphy_flags { 1306enum wiphy_flags {
1226 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), 1307 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
1227 WIPHY_FLAG_STRICT_REGULATORY = BIT(1), 1308 WIPHY_FLAG_STRICT_REGULATORY = BIT(1),
1228 WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2), 1309 WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2),
1229 WIPHY_FLAG_NETNS_OK = BIT(3), 1310 WIPHY_FLAG_NETNS_OK = BIT(3),
1230 WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), 1311 WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
1231 WIPHY_FLAG_4ADDR_AP = BIT(5), 1312 WIPHY_FLAG_4ADDR_AP = BIT(5),
1232 WIPHY_FLAG_4ADDR_STATION = BIT(6), 1313 WIPHY_FLAG_4ADDR_STATION = BIT(6),
1314 WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
1233}; 1315};
1234 1316
1235struct mac_address { 1317struct mac_address {
1236 u8 addr[ETH_ALEN]; 1318 u8 addr[ETH_ALEN];
1237}; 1319};
1238 1320
1321struct ieee80211_txrx_stypes {
1322 u16 tx, rx;
1323};
1324
1239/** 1325/**
1240 * struct wiphy - wireless hardware description 1326 * struct wiphy - wireless hardware description
1241 * @reg_notifier: the driver's regulatory notification callback 1327 * @reg_notifier: the driver's regulatory notification callback
@@ -1286,6 +1372,10 @@ struct mac_address {
1286 * @privid: a pointer that drivers can use to identify if an arbitrary 1372 * @privid: a pointer that drivers can use to identify if an arbitrary
1287 * wiphy is theirs, e.g. in global notifiers 1373 * wiphy is theirs, e.g. in global notifiers
1288 * @bands: information about bands/channels supported by this device 1374 * @bands: information about bands/channels supported by this device
1375 *
1376 * @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or
1377 * transmitted through nl80211, points to an array indexed by interface
1378 * type
1289 */ 1379 */
1290struct wiphy { 1380struct wiphy {
1291 /* assign these fields before you register the wiphy */ 1381 /* assign these fields before you register the wiphy */
@@ -1294,9 +1384,12 @@ struct wiphy {
1294 u8 perm_addr[ETH_ALEN]; 1384 u8 perm_addr[ETH_ALEN];
1295 u8 addr_mask[ETH_ALEN]; 1385 u8 addr_mask[ETH_ALEN];
1296 1386
1297 u16 n_addresses;
1298 struct mac_address *addresses; 1387 struct mac_address *addresses;
1299 1388
1389 const struct ieee80211_txrx_stypes *mgmt_stypes;
1390
1391 u16 n_addresses;
1392
1300 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ 1393 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
1301 u16 interface_modes; 1394 u16 interface_modes;
1302 1395
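
A sketch of how a driver might fill the new mgmt_stypes pointer; the bitmaps are indexed by interface type and, by nl80211's convention, carry one bit per management frame subtype (stype >> 4). The array contents here are illustrative assumptions:

	static const struct ieee80211_txrx_stypes
	my_mgmt_stypes[NUM_NL80211_IFTYPES] = {
		[NL80211_IFTYPE_STATION] = {
			.tx = BIT(IEEE80211_STYPE_ACTION >> 4),
			.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
			      BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
		},
	};

	/* before wiphy_register(): */
	wiphy->mgmt_stypes = my_mgmt_stypes;
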
@@ -1492,8 +1585,8 @@ struct cfg80211_cached_keys;
1492 * set by driver (if supported) on add_interface BEFORE registering the 1585 * set by driver (if supported) on add_interface BEFORE registering the
 1493 * netdev and may otherwise be used by driver read-only, will be updated 1586 * netdev and may otherwise be used by driver read-only, will be updated
1494 * by cfg80211 on change_interface 1587 * by cfg80211 on change_interface
1495 * @action_registrations: list of registrations for action frames 1588 * @mgmt_registrations: list of registrations for management frames
1496 * @action_registrations_lock: lock for the list 1589 * @mgmt_registrations_lock: lock for the list
1497 * @mtx: mutex used to lock data in this struct 1590 * @mtx: mutex used to lock data in this struct
1498 * @cleanup_work: work struct used for cleanup that can't be done directly 1591 * @cleanup_work: work struct used for cleanup that can't be done directly
1499 */ 1592 */
@@ -1505,8 +1598,8 @@ struct wireless_dev {
1505 struct list_head list; 1598 struct list_head list;
1506 struct net_device *netdev; 1599 struct net_device *netdev;
1507 1600
1508 struct list_head action_registrations; 1601 struct list_head mgmt_registrations;
1509 spinlock_t action_registrations_lock; 1602 spinlock_t mgmt_registrations_lock;
1510 1603
1511 struct mutex mtx; 1604 struct mutex mtx;
1512 1605
@@ -1563,8 +1656,10 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
1563 return wiphy_priv(wdev->wiphy); 1656 return wiphy_priv(wdev->wiphy);
1564} 1657}
1565 1658
1566/* 1659/**
1567 * Utility functions 1660 * DOC: Utility functions
1661 *
1662 * cfg80211 offers a number of utility functions that can be useful.
1568 */ 1663 */
1569 1664
1570/** 1665/**
@@ -1715,7 +1810,15 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
1715 * ieee80211_hdrlen - get header length in bytes from frame control 1810 * ieee80211_hdrlen - get header length in bytes from frame control
1716 * @fc: frame control field in little-endian format 1811 * @fc: frame control field in little-endian format
1717 */ 1812 */
1718unsigned int ieee80211_hdrlen(__le16 fc); 1813unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
1814
1815/**
1816 * DOC: Data path helpers
1817 *
1818 * In addition to generic utilities, cfg80211 also offers
1819 * functions that help implement the data path for devices
1820 * that do not do the 802.11/802.3 conversion on the device.
1821 */
1719 1822
1720/** 1823/**
1721 * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 1824 * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -1777,8 +1880,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb);
1777 */ 1880 */
1778const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); 1881const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
1779 1882
1780/* 1883/**
1781 * Regulatory helper functions for wiphys 1884 * DOC: Regulatory enforcement infrastructure
1885 *
1886 * TODO
1782 */ 1887 */
1783 1888
1784/** 1889/**
@@ -2181,6 +2286,20 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
2181void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp); 2286void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp);
2182 2287
2183/** 2288/**
2289 * DOC: RFkill integration
2290 *
2291 * RFkill integration in cfg80211 is almost invisible to drivers,
2292 * as cfg80211 automatically registers an rfkill instance for each
2293 * wireless device it knows about. Soft kill is also translated
 2294 * into disconnecting and turning all interfaces off; drivers are
2295 * expected to turn off the device when all interfaces are down.
2296 *
2297 * However, devices may have a hard RFkill line, in which case they
 2298 * also need to interact with the rfkill subsystem via cfg80211.
2299 * They can do this with a few helper functions documented here.
2300 */
2301
2302/**
2184 * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state 2303 * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state
2185 * @wiphy: the wiphy 2304 * @wiphy: the wiphy
2186 * @blocked: block status 2305 * @blocked: block status
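
A sketch of the hard-RFkill interaction mentioned above, for instance from a GPIO interrupt handler; only wiphy_rfkill_set_hw_state() is the real API here, the priv layout and line-read helper are hypothetical:

	static irqreturn_t my_rfkill_irq(int irq, void *data)
	{
		struct my_priv *priv = data;			/* hypothetical */
		bool blocked = my_rfkill_line_asserted(priv);	/* hypothetical */

		wiphy_rfkill_set_hw_state(priv->wiphy, blocked);
		return IRQ_HANDLED;
	}
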
@@ -2201,6 +2320,17 @@ void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
2201 2320
2202#ifdef CONFIG_NL80211_TESTMODE 2321#ifdef CONFIG_NL80211_TESTMODE
2203/** 2322/**
2323 * DOC: Test mode
2324 *
2325 * Test mode is a set of utility functions to allow drivers to
2326 * interact with driver-specific tools to aid, for instance,
2327 * factory programming.
2328 *
 2329 * This chapter describes how drivers interact with it; for more
2330 * information see the nl80211 book's chapter on it.
2331 */
2332
2333/**
2204 * cfg80211_testmode_alloc_reply_skb - allocate testmode reply 2334 * cfg80211_testmode_alloc_reply_skb - allocate testmode reply
2205 * @wiphy: the wiphy 2335 * @wiphy: the wiphy
2206 * @approxlen: an upper bound of the length of the data that will 2336 * @approxlen: an upper bound of the length of the data that will
@@ -2373,38 +2503,39 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
2373 struct station_info *sinfo, gfp_t gfp); 2503 struct station_info *sinfo, gfp_t gfp);
2374 2504
2375/** 2505/**
2376 * cfg80211_rx_action - notification of received, unprocessed Action frame 2506 * cfg80211_rx_mgmt - notification of received, unprocessed management frame
2377 * @dev: network device 2507 * @dev: network device
2378 * @freq: Frequency on which the frame was received in MHz 2508 * @freq: Frequency on which the frame was received in MHz
2379 * @buf: Action frame (header + body) 2509 * @buf: Management frame (header + body)
2380 * @len: length of the frame data 2510 * @len: length of the frame data
2381 * @gfp: context flags 2511 * @gfp: context flags
2382 * Returns %true if a user space application is responsible for rejecting the 2512 *
2383 * unrecognized Action frame; %false if no such application is registered 2513 * Returns %true if a user space application has registered for this frame.
2384 * (i.e., the driver is responsible for rejecting the unrecognized Action 2514 * For action frames, that makes it responsible for rejecting unrecognized
2385 * frame) 2515 * action frames; %false otherwise, in which case for action frames the
2516 * driver is responsible for rejecting the frame.
2386 * 2517 *
2387 * This function is called whenever an Action frame is received for a station 2518 * This function is called whenever an Action frame is received for a station
2388 * mode interface, but is not processed in kernel. 2519 * mode interface, but is not processed in kernel.
2389 */ 2520 */
2390bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, 2521bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
2391 size_t len, gfp_t gfp); 2522 size_t len, gfp_t gfp);
2392 2523
2393/** 2524/**
2394 * cfg80211_action_tx_status - notification of TX status for Action frame 2525 * cfg80211_mgmt_tx_status - notification of TX status for management frame
2395 * @dev: network device 2526 * @dev: network device
2396 * @cookie: Cookie returned by cfg80211_ops::action() 2527 * @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
2397 * @buf: Action frame (header + body) 2528 * @buf: Management frame (header + body)
2398 * @len: length of the frame data 2529 * @len: length of the frame data
2399 * @ack: Whether frame was acknowledged 2530 * @ack: Whether frame was acknowledged
2400 * @gfp: context flags 2531 * @gfp: context flags
2401 * 2532 *
2402 * This function is called whenever an Action frame was requested to be 2533 * This function is called whenever a management frame was requested to be
2403 * transmitted with cfg80211_ops::action() to report the TX status of the 2534 * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
2404 * transmission attempt. 2535 * transmission attempt.
2405 */ 2536 */
2406void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, 2537void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
2407 const u8 *buf, size_t len, bool ack, gfp_t gfp); 2538 const u8 *buf, size_t len, bool ack, gfp_t gfp);
2408 2539
2409 2540
2410/** 2541/**
diff --git a/include/net/gre.h b/include/net/gre.h
new file mode 100644
index 000000000000..82665474bcb7
--- /dev/null
+++ b/include/net/gre.h
@@ -0,0 +1,18 @@
1#ifndef __LINUX_GRE_H
2#define __LINUX_GRE_H
3
4#include <linux/skbuff.h>
5
6#define GREPROTO_CISCO 0
7#define GREPROTO_PPTP 1
8#define GREPROTO_MAX 2
9
10struct gre_protocol {
11 int (*handler)(struct sk_buff *skb);
12 void (*err_handler)(struct sk_buff *skb, u32 info);
13};
14
15int gre_add_protocol(const struct gre_protocol *proto, u8 version);
16int gre_del_protocol(const struct gre_protocol *proto, u8 version);
17
18#endif
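
A sketch of a module using the new GRE demultiplexer; the receive and error handlers below are hypothetical, while the registration calls are the two functions this header exports:

	static int my_pptp_rcv(struct sk_buff *skb)
	{
		kfree_skb(skb);		/* hypothetical: just consume it */
		return 0;
	}

	static void my_pptp_err(struct sk_buff *skb, u32 info)
	{
		/* hypothetical ICMP error processing */
	}

	static const struct gre_protocol my_pptp_proto = {
		.handler	= my_pptp_rcv,
		.err_handler	= my_pptp_err,
	};

	static int __init my_init(void)
	{
		return gre_add_protocol(&my_pptp_proto, GREPROTO_PPTP);
	}

	static void __exit my_exit(void)
	{
		gre_del_protocol(&my_pptp_proto, GREPROTO_PPTP);
	}
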
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index b6d3b55da19b..e4f494b42e06 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -125,6 +125,7 @@ struct inet_connection_sock {
125 int probe_size; 125 int probe_size;
126 } icsk_mtup; 126 } icsk_mtup;
127 u32 icsk_ca_priv[16]; 127 u32 icsk_ca_priv[16];
128 u32 icsk_user_timeout;
128#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) 129#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32))
129}; 130};
130 131
diff --git a/include/net/ip.h b/include/net/ip.h
index 890f9725d681..7691aca133db 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -53,7 +53,7 @@ struct ipcm_cookie {
53 __be32 addr; 53 __be32 addr;
54 int oif; 54 int oif;
55 struct ip_options *opt; 55 struct ip_options *opt;
56 union skb_shared_tx shtx; 56 __u8 tx_flags;
57}; 57};
58 58
59#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) 59#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h
index 73cacb3ac16c..0af8b8dfbc22 100644
--- a/include/net/irda/irlan_common.h
+++ b/include/net/irda/irlan_common.h
@@ -171,7 +171,6 @@ struct irlan_cb {
171 int magic; 171 int magic;
172 struct list_head dev_list; 172 struct list_head dev_list;
173 struct net_device *dev; /* Ethernet device structure*/ 173 struct net_device *dev; /* Ethernet device structure*/
174 struct net_device_stats stats;
175 174
176 __u32 saddr; /* Source device address */ 175 __u32 saddr; /* Source device address */
177 __u32 daddr; /* Destination device address */ 176 __u32 daddr; /* Destination device address */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index b0787a1dea90..f91fc331369b 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -149,6 +149,7 @@ struct ieee80211_low_level_stats {
149 * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed. 149 * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed.
150 * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note 150 * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note
151 * that it is only ever disabled for station mode. 151 * that it is only ever disabled for station mode.
152 * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
152 */ 153 */
153enum ieee80211_bss_change { 154enum ieee80211_bss_change {
154 BSS_CHANGED_ASSOC = 1<<0, 155 BSS_CHANGED_ASSOC = 1<<0,
@@ -165,6 +166,7 @@ enum ieee80211_bss_change {
165 BSS_CHANGED_IBSS = 1<<11, 166 BSS_CHANGED_IBSS = 1<<11,
166 BSS_CHANGED_ARP_FILTER = 1<<12, 167 BSS_CHANGED_ARP_FILTER = 1<<12,
167 BSS_CHANGED_QOS = 1<<13, 168 BSS_CHANGED_QOS = 1<<13,
169 BSS_CHANGED_IDLE = 1<<14,
168 170
169 /* when adding here, make sure to change ieee80211_reconfig */ 171 /* when adding here, make sure to change ieee80211_reconfig */
170}; 172};
@@ -223,6 +225,9 @@ enum ieee80211_bss_change {
223 * hardware must not perform any ARP filtering. Note, that the filter will 225 * hardware must not perform any ARP filtering. Note, that the filter will
224 * be enabled also in promiscuous mode. 226 * be enabled also in promiscuous mode.
225 * @qos: This is a QoS-enabled BSS. 227 * @qos: This is a QoS-enabled BSS.
228 * @idle: This interface is idle. There's also a global idle flag in the
229 * hardware config which may be more appropriate depending on what
230 * your driver/device needs to do.
226 */ 231 */
227struct ieee80211_bss_conf { 232struct ieee80211_bss_conf {
228 const u8 *bssid; 233 const u8 *bssid;
@@ -247,6 +252,7 @@ struct ieee80211_bss_conf {
247 u8 arp_addr_cnt; 252 u8 arp_addr_cnt;
248 bool arp_filter_enabled; 253 bool arp_filter_enabled;
249 bool qos; 254 bool qos;
255 bool idle;
250}; 256};
251 257
252/** 258/**
@@ -783,20 +789,6 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
783} 789}
784 790
785/** 791/**
786 * enum ieee80211_key_alg - key algorithm
787 * @ALG_WEP: WEP40 or WEP104
788 * @ALG_TKIP: TKIP
789 * @ALG_CCMP: CCMP (AES)
790 * @ALG_AES_CMAC: AES-128-CMAC
791 */
792enum ieee80211_key_alg {
793 ALG_WEP,
794 ALG_TKIP,
795 ALG_CCMP,
796 ALG_AES_CMAC,
797};
798
799/**
800 * enum ieee80211_key_flags - key flags 792 * enum ieee80211_key_flags - key flags
801 * 793 *
802 * These flags are used for communication about keys between the driver 794 * These flags are used for communication about keys between the driver
@@ -833,7 +825,7 @@ enum ieee80211_key_flags {
833 * @hw_key_idx: To be set by the driver, this is the key index the driver 825 * @hw_key_idx: To be set by the driver, this is the key index the driver
834 * wants to be given when a frame is transmitted and needs to be 826 * wants to be given when a frame is transmitted and needs to be
835 * encrypted in hardware. 827 * encrypted in hardware.
836 * @alg: The key algorithm. 828 * @cipher: The key's cipher suite selector.
837 * @flags: key flags, see &enum ieee80211_key_flags. 829 * @flags: key flags, see &enum ieee80211_key_flags.
838 * @keyidx: the key index (0-3) 830 * @keyidx: the key index (0-3)
839 * @keylen: key material length 831 * @keylen: key material length
@@ -846,7 +838,7 @@ enum ieee80211_key_flags {
846 * @iv_len: The IV length for this key type 838 * @iv_len: The IV length for this key type
847 */ 839 */
848struct ieee80211_key_conf { 840struct ieee80211_key_conf {
849 enum ieee80211_key_alg alg; 841 u32 cipher;
850 u8 icv_len; 842 u8 icv_len;
851 u8 iv_len; 843 u8 iv_len;
852 u8 hw_key_idx; 844 u8 hw_key_idx;
@@ -1102,6 +1094,10 @@ enum ieee80211_hw_flags {
1102 * 1094 *
1103 * @max_rates: maximum number of alternate rate retry stages 1095 * @max_rates: maximum number of alternate rate retry stages
1104 * @max_rate_tries: maximum number of tries for each stage 1096 * @max_rate_tries: maximum number of tries for each stage
1097 *
1098 * @napi_weight: weight used for NAPI polling. You must specify an
1099 * appropriate value here if a napi_poll operation is provided
1100 * by your driver.
1105 */ 1101 */
1106struct ieee80211_hw { 1102struct ieee80211_hw {
1107 struct ieee80211_conf conf; 1103 struct ieee80211_conf conf;
@@ -1113,6 +1109,7 @@ struct ieee80211_hw {
1113 int channel_change_time; 1109 int channel_change_time;
1114 int vif_data_size; 1110 int vif_data_size;
1115 int sta_data_size; 1111 int sta_data_size;
1112 int napi_weight;
1116 u16 queues; 1113 u16 queues;
1117 u16 max_listen_interval; 1114 u16 max_listen_interval;
1118 s8 max_signal; 1115 s8 max_signal;
@@ -1245,8 +1242,8 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1245 * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in 1242 * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in
1246 * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused 1243 * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused
1247 * with hardware wakeup and sleep states. Driver is responsible for waking 1244 * with hardware wakeup and sleep states. Driver is responsible for waking
1248 * up the hardware before issueing commands to the hardware and putting it 1245 * up the hardware before issuing commands to the hardware and putting it
1249 * back to sleep at approriate times. 1246 * back to sleep at appropriate times.
1250 * 1247 *
1251 * When PS is enabled, hardware needs to wakeup for beacons and receive the 1248 * When PS is enabled, hardware needs to wakeup for beacons and receive the
1252 * buffered multicast/broadcast frames after the beacon. Also it must be 1249 * buffered multicast/broadcast frames after the beacon. Also it must be
@@ -1267,7 +1264,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1267 * there's data traffic and still saving significantly power in idle 1264 * there's data traffic and still saving significantly power in idle
1268 * periods. 1265 * periods.
1269 * 1266 *
1270 * Dynamic powersave is supported by simply mac80211 enabling and disabling 1267 * Dynamic powersave is simply supported by mac80211 enabling and disabling
1271 * PS based on traffic. Driver needs to only set %IEEE80211_HW_SUPPORTS_PS 1268 * PS based on traffic. Driver needs to only set %IEEE80211_HW_SUPPORTS_PS
1272 * flag and mac80211 will handle everything automatically. Additionally, 1269 * flag and mac80211 will handle everything automatically. Additionally,
1273 * hardware having support for the dynamic PS feature may set the 1270 * hardware having support for the dynamic PS feature may set the
@@ -1540,6 +1537,12 @@ enum ieee80211_ampdu_mlme_action {
1540 * negative error code (which will be seen in userspace.) 1537 * negative error code (which will be seen in userspace.)
1541 * Must be implemented and can sleep. 1538 * Must be implemented and can sleep.
1542 * 1539 *
1540 * @change_interface: Called when a netdevice changes type. This callback
1541 * is optional, but only if it is supported can interface types be
1542 * switched while the interface is UP. The callback may sleep.
1543 * Note that while an interface is being switched, it will not be
1544 * found by the interface iteration callbacks.
1545 *
1543 * @remove_interface: Notifies a driver that an interface is going down. 1546 * @remove_interface: Notifies a driver that an interface is going down.
1544 * The @stop callback is called after this if it is the last interface 1547 * The @stop callback is called after this if it is the last interface
1545 * and no monitor interfaces are present. 1548 * and no monitor interfaces are present.
@@ -1687,6 +1690,8 @@ enum ieee80211_ampdu_mlme_action {
1687 * switch operation for CSAs received from the AP may implement this 1690 * switch operation for CSAs received from the AP may implement this
1688 * callback. They must then call ieee80211_chswitch_done() to indicate 1691 * callback. They must then call ieee80211_chswitch_done() to indicate
1689 * completion of the channel switch. 1692 * completion of the channel switch.
1693 *
1694 * @napi_poll: Poll Rx queue for incoming data frames.
1690 */ 1695 */
1691struct ieee80211_ops { 1696struct ieee80211_ops {
1692 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 1697 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -1694,6 +1699,9 @@ struct ieee80211_ops {
1694 void (*stop)(struct ieee80211_hw *hw); 1699 void (*stop)(struct ieee80211_hw *hw);
1695 int (*add_interface)(struct ieee80211_hw *hw, 1700 int (*add_interface)(struct ieee80211_hw *hw,
1696 struct ieee80211_vif *vif); 1701 struct ieee80211_vif *vif);
1702 int (*change_interface)(struct ieee80211_hw *hw,
1703 struct ieee80211_vif *vif,
1704 enum nl80211_iftype new_type);
1697 void (*remove_interface)(struct ieee80211_hw *hw, 1705 void (*remove_interface)(struct ieee80211_hw *hw,
1698 struct ieee80211_vif *vif); 1706 struct ieee80211_vif *vif);
1699 int (*config)(struct ieee80211_hw *hw, u32 changed); 1707 int (*config)(struct ieee80211_hw *hw, u32 changed);
@@ -1752,6 +1760,7 @@ struct ieee80211_ops {
1752 void (*flush)(struct ieee80211_hw *hw, bool drop); 1760 void (*flush)(struct ieee80211_hw *hw, bool drop);
1753 void (*channel_switch)(struct ieee80211_hw *hw, 1761 void (*channel_switch)(struct ieee80211_hw *hw,
1754 struct ieee80211_channel_switch *ch_switch); 1762 struct ieee80211_channel_switch *ch_switch);
1763 int (*napi_poll)(struct ieee80211_hw *hw, int budget);
1755}; 1764};
1756 1765
1757/** 1766/**
@@ -1897,6 +1906,22 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
1897 */ 1906 */
1898void ieee80211_restart_hw(struct ieee80211_hw *hw); 1907void ieee80211_restart_hw(struct ieee80211_hw *hw);
1899 1908
1909/** ieee80211_napi_schedule - schedule NAPI poll
1910 *
1911 * Use this function to schedule NAPI polling on a device.
1912 *
1913 * @hw: the hardware to start polling
1914 */
1915void ieee80211_napi_schedule(struct ieee80211_hw *hw);
1916
1917/** ieee80211_napi_complete - complete NAPI polling
1918 *
1919 * Use this function to finish NAPI polling on a device.
1920 *
1921 * @hw: the hardware to stop polling
1922 */
1923void ieee80211_napi_complete(struct ieee80211_hw *hw);
1924
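
Taken together with the new napi_poll op, these helpers let a driver defer RX processing from its interrupt handler to a polled loop. A sketch of the intended flow, with hypothetical foo_* helpers standing in for real driver code:

static irqreturn_t foo_isr(int irq, void *data)
{
	struct ieee80211_hw *hw = data;

	foo_mask_rx_irq(hw->priv);	/* quiet the interrupt source */
	ieee80211_napi_schedule(hw);	/* foo_napi_poll() runs later */
	return IRQ_HANDLED;
}

static int foo_napi_poll(struct ieee80211_hw *hw, int budget)
{
	/* Hand up to 'budget' frames to mac80211 via ieee80211_rx(). */
	int done = foo_rx_frames(hw->priv, budget);

	if (done < budget) {		/* RX queue drained */
		ieee80211_napi_complete(hw);
		foo_unmask_rx_irq(hw->priv);
	}
	return done;
}
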
1900/** 1925/**
1901 * ieee80211_rx - receive frame 1926 * ieee80211_rx - receive frame
1902 * 1927 *
@@ -2252,7 +2277,8 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw);
2252 * 2277 *
2253 * When hardware scan offload is used (i.e. the hw_scan() callback is 2278 * When hardware scan offload is used (i.e. the hw_scan() callback is
2254 * assigned) this function needs to be called by the driver to notify 2279 * assigned) this function needs to be called by the driver to notify
2255 * mac80211 that the scan finished. 2280 * mac80211 that the scan finished. This function can be called from
2281 * any context, including hardirq context.
2256 * 2282 *
2257 * @hw: the hardware that finished the scan 2283 * @hw: the hardware that finished the scan
2258 * @aborted: set to true if scan was aborted 2284 * @aborted: set to true if scan was aborted
@@ -2442,7 +2468,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
2442 * 2468 *
2443 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 2469 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2444 * 2470 *
2445 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING and 2471 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER and
2446 * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the 2472 * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the
2447 * hardware is not receiving beacons with this function. 2473 * hardware is not receiving beacons with this function.
2448 */ 2474 */
@@ -2453,7 +2479,7 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
2453 * 2479 *
2454 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 2480 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2455 * 2481 *
2456 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING, and 2482 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER, and
2457 * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver 2483 * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver
2458 * needs to inform if the connection to the AP has been lost. 2484 * needs to inform if the connection to the AP has been lost.
2459 * 2485 *
@@ -2518,6 +2544,18 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
2518 */ 2544 */
2519void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); 2545void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success);
2520 2546
2547/**
2548 * ieee80211_request_smps - request SM PS transition
2549 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2550 * @smps_mode: new SM PS mode
2551 *
2552 * This allows the driver to request an SM PS transition in managed
2553 * mode. This is useful when the driver has more information than
2554 * the stack about possible interference, for example from Bluetooth.
2555 */
2556void ieee80211_request_smps(struct ieee80211_vif *vif,
2557 enum ieee80211_smps_mode smps_mode);
2558
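
For instance, a driver noticing Bluetooth activity might ask the stack to fall back to static SM PS, then restore automatic selection once the air clears. A sketch assuming a hypothetical foo_bt_active() coexistence check:

static void foo_update_smps(struct ieee80211_vif *vif, struct foo_priv *priv)
{
	if (foo_bt_active(priv))
		ieee80211_request_smps(vif, IEEE80211_SMPS_STATIC);
	else
		ieee80211_request_smps(vif, IEEE80211_SMPS_AUTOMATIC);
}
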
2521/* Rate control API */ 2559/* Rate control API */
2522 2560
2523/** 2561/**
diff --git a/include/net/raw.h b/include/net/raw.h
index 43c57502659b..42ce6fe7a2d5 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -45,7 +45,10 @@ struct raw_iter_state {
45 struct raw_hashinfo *h; 45 struct raw_hashinfo *h;
46}; 46};
47 47
48#define raw_seq_private(seq) ((struct raw_iter_state *)(seq)->private) 48static inline struct raw_iter_state *raw_seq_private(struct seq_file *seq)
49{
50 return seq->private;
51}
49void *raw_seq_start(struct seq_file *seq, loff_t *pos); 52void *raw_seq_start(struct seq_file *seq, loff_t *pos);
50void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos); 53void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos);
51void raw_seq_stop(struct seq_file *seq, void *v); 54void raw_seq_stop(struct seq_file *seq, void *v);
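
The inline conversion costs no generated code but buys type safety: the old macro cast whatever pointer it was handed, while the function form rejects anything that is not a struct seq_file *. A sketch of typical use alongside the iterators declared here:

static void *example_start(struct seq_file *seq, loff_t *pos)
{
	struct raw_iter_state *state = raw_seq_private(seq);

	/* With the macro, passing the wrong pointer compiled silently. */
	return state->h ? raw_seq_start(seq, pos) : NULL;
}
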
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 65946bc43d00..2cb3980b1616 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -275,24 +275,35 @@ struct sctp_mib {
275/* Print debugging messages. */ 275/* Print debugging messages. */
276#if SCTP_DEBUG 276#if SCTP_DEBUG
277extern int sctp_debug_flag; 277extern int sctp_debug_flag;
278#define SCTP_DEBUG_PRINTK(whatever...) \ 278#define SCTP_DEBUG_PRINTK(fmt, args...) \
279 ((void) (sctp_debug_flag && printk(KERN_DEBUG whatever))) 279do { \
280#define SCTP_DEBUG_PRINTK_IPADDR(lead, trail, leadparm, saddr, otherparms...) \ 280 if (sctp_debug_flag) \
281 if (sctp_debug_flag) { \ 281 printk(KERN_DEBUG pr_fmt(fmt), ##args); \
282 if (saddr->sa.sa_family == AF_INET6) { \ 282} while (0)
283 printk(KERN_DEBUG \ 283#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) \
284 lead "%pI6" trail, \ 284do { \
285 leadparm, \ 285 if (sctp_debug_flag) \
286 &saddr->v6.sin6_addr, \ 286 pr_cont(fmt, ##args); \
287 otherparms); \ 287} while (0)
288 } else { \ 288#define SCTP_DEBUG_PRINTK_IPADDR(fmt_lead, fmt_trail, \
289 printk(KERN_DEBUG \ 289 args_lead, saddr, args_trail...) \
290 lead "%pI4" trail, \ 290do { \
291 leadparm, \ 291 if (sctp_debug_flag) { \
292 &saddr->v4.sin_addr.s_addr, \ 292 if (saddr->sa.sa_family == AF_INET6) { \
293 otherparms); \ 293 printk(KERN_DEBUG \
294 } \ 294 pr_fmt(fmt_lead "%pI6" fmt_trail), \
295 } 295 args_lead, \
296 &saddr->v6.sin6_addr, \
297 args_trail); \
298 } else { \
299 printk(KERN_DEBUG \
300 pr_fmt(fmt_lead "%pI4" fmt_trail), \
301 args_lead, \
302 &saddr->v4.sin_addr.s_addr, \
303 args_trail); \
304 } \
305 } \
306} while (0)
296#define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; } 307#define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; }
297#define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; } 308#define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; }
298 309
@@ -306,6 +317,7 @@ extern int sctp_debug_flag;
306#else /* SCTP_DEBUG */ 317#else /* SCTP_DEBUG */
307 318
308#define SCTP_DEBUG_PRINTK(whatever...) 319#define SCTP_DEBUG_PRINTK(whatever...)
320#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
309#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) 321#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
310#define SCTP_ENABLE_DEBUG 322#define SCTP_ENABLE_DEBUG
311#define SCTP_DISABLE_DEBUG 323#define SCTP_DISABLE_DEBUG
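
The do { } while (0) bodies make the macros safe inside unbraced if/else, and the new _CONT variant continues a message that an earlier SCTP_DEBUG_PRINTK started without a trailing newline. A usage sketch with illustrative variables:

/* Safe even without braces, thanks to do { } while (0): */
if (!asoc)
	SCTP_DEBUG_PRINTK("no association for chunk %p\n", chunk);

/* Building one line across several conditional pieces: */
SCTP_DEBUG_PRINTK("assoc %p", asoc);		/* no newline yet */
if (transport)
	SCTP_DEBUG_PRINTK_CONT(" transport %p", transport);
SCTP_DEBUG_PRINTK_CONT(" rto %u\n", rto);	/* finish the line */
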
diff --git a/include/net/sock.h b/include/net/sock.h
index adab9dc58183..8ae97c4970df 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1670,17 +1670,13 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
1670 1670
1671/** 1671/**
1672 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped 1672 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
1673 * @msg: outgoing packet
1674 * @sk: socket sending this packet 1673 * @sk: socket sending this packet
1675 * @shtx: filled with instructions for time stamping 1674 * @tx_flags: filled with instructions for time stamping
1676 * 1675 *
1677 * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if 1676 * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
1678 * parameters are invalid. 1677 * parameters are invalid.
1679 */ 1678 */
1680extern int sock_tx_timestamp(struct msghdr *msg, 1679extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
1681 struct sock *sk,
1682 union skb_shared_tx *shtx);
1683
1684 1680
1685/** 1681/**
1686 * sk_eat_skb - Release a skb if it is no longer needed 1682 * sk_eat_skb - Release a skb if it is no longer needed
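
Callers of the reworked helper pass the socket plus a byte to be filled with transmit-timestamp flags, then copy those flags into the outgoing skb. A sketch of the new calling convention; the skb_shinfo() destination reflects this series' u8 tx_flags conversion and is an assumption here:

__u8 tx_flags = 0;
int err = sock_tx_timestamp(sk, &tx_flags);

if (err)
	goto out_free;			/* invalid timestamp parameters */
skb_shinfo(skb)->tx_flags = tx_flags;	/* assumed destination field */
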
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
new file mode 100644
index 000000000000..9e8710be7a04
--- /dev/null
+++ b/include/net/tc_act/tc_csum.h
@@ -0,0 +1,15 @@
1#ifndef __NET_TC_CSUM_H
2#define __NET_TC_CSUM_H
3
4#include <linux/types.h>
5#include <net/act_api.h>
6
7struct tcf_csum {
8 struct tcf_common common;
9
10 u32 update_flags;
11};
12#define to_tcf_csum(pc) \
 13 container_of(pc, struct tcf_csum, common)
14
15#endif /* __NET_TC_CSUM_H */
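
The header follows the usual tc action layout: struct tcf_common sits first in the specialized struct, so to_tcf_csum() can recover the full action from the generic handle via container_of(). A sketch of the accessor in use:

static u32 example_csum_flags(struct tcf_common *pc)
{
	struct tcf_csum *p = to_tcf_csum(pc);	/* common -> specialized */

	return p->update_flags;
}
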
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eaa9582779d0..bfc1da43295c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -789,6 +789,15 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
789/* Use define here intentionally to get WARN_ON location shown at the caller */ 789/* Use define here intentionally to get WARN_ON location shown at the caller */
790#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) 790#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
791 791
792/*
793 * Convert RFC 3390 larger initial window into an equivalent number of packets.
794 * This is based on the numbers specified in RFC 5681, 3.1.
795 */
796static inline u32 rfc3390_bytes_to_packets(const u32 smss)
797{
798 return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
799}
800
792extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); 801extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
793extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); 802extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
794 803
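
The three bands track RFC 5681 section 3.1, which caps the initial window at 4 segments for SMSS <= 1095 bytes, 3 segments for 1095 < SMSS <= 2190, and 2 segments above that. Worked values for the helper:

/* rfc3390_bytes_to_packets(536)  == 4	(small MSS)
 * rfc3390_bytes_to_packets(1460) == 3	(typical Ethernet MSS)
 * rfc3390_bytes_to_packets(9000) == 2	(jumbo frames)
 */
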
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 01ddb0472f86..889f4ac4459a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -35,12 +35,12 @@ drop:
35} 35}
36EXPORT_SYMBOL(__vlan_hwaccel_rx); 36EXPORT_SYMBOL(__vlan_hwaccel_rx);
37 37
38int vlan_hwaccel_do_receive(struct sk_buff *skb) 38void vlan_hwaccel_do_receive(struct sk_buff *skb)
39{ 39{
40 struct net_device *dev = skb->dev; 40 struct net_device *dev = skb->dev;
41 struct vlan_rx_stats *rx_stats; 41 struct vlan_rx_stats *rx_stats;
42 42
43 skb->dev = vlan_dev_info(dev)->real_dev; 43 skb->dev = vlan_dev_real_dev(dev);
44 netif_nit_deliver(skb); 44 netif_nit_deliver(skb);
45 45
46 skb->dev = dev; 46 skb->dev = dev;
@@ -69,7 +69,6 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
69 break; 69 break;
70 } 70 }
71 u64_stats_update_end(&rx_stats->syncp); 71 u64_stats_update_end(&rx_stats->syncp);
72 return 0;
73} 72}
74 73
75struct net_device *vlan_dev_real_dev(const struct net_device *dev) 74struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -106,9 +105,12 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
106 goto drop; 105 goto drop;
107 106
108 for (p = napi->gro_list; p; p = p->next) { 107 for (p = napi->gro_list; p; p = p->next) {
109 NAPI_GRO_CB(p)->same_flow = 108 unsigned long diffs;
110 p->dev == skb->dev && !compare_ether_header( 109
111 skb_mac_header(p), skb_gro_mac_header(skb)); 110 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
111 diffs |= compare_ether_header(skb_mac_header(p),
112 skb_gro_mac_header(skb));
113 NAPI_GRO_CB(p)->same_flow = !diffs;
112 NAPI_GRO_CB(p)->flush = 0; 114 NAPI_GRO_CB(p)->flush = 0;
113 } 115 }
114 116
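
The rewritten match folds both tests into one accumulated value: XOR of the device pointers is zero only when the packets arrived on the same device, compare_ether_header() is zero only when the MAC headers match, and ORing them lets same_flow become a single !diffs with no extra branch. The same idiom in isolation, as a standalone sketch:

static bool gro_same_flow(const struct sk_buff *p, const struct sk_buff *skb)
{
	unsigned long diffs;

	diffs  = (unsigned long)p->dev ^ (unsigned long)skb->dev;
	diffs |= compare_ether_header(skb_mac_header(p),
				      skb_gro_mac_header(skb));
	return diffs == 0;
}
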
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index c85109d809ca..078eb162d9bf 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -222,7 +222,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
222 } 222 }
223} 223}
224 224
225static unsigned int 225static int
226p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt) 226p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
227{ 227{
228 int ret, n; 228 int ret, n;
diff --git a/net/atm/common.c b/net/atm/common.c
index 940404a73b3d..1b9c52a02cd3 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -792,7 +792,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
792 default: 792 default:
793 if (level == SOL_SOCKET) 793 if (level == SOL_SOCKET)
794 return -EINVAL; 794 return -EINVAL;
795 break; 795 break;
796 } 796 }
797 if (!vcc->dev || !vcc->dev->ops->getsockopt) 797 if (!vcc->dev || !vcc->dev->ops->getsockopt)
798 return -EINVAL; 798 return -EINVAL;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index d98bde1a0ac8..181d70c73d70 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -220,7 +220,6 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
220static int lec_open(struct net_device *dev) 220static int lec_open(struct net_device *dev)
221{ 221{
222 netif_start_queue(dev); 222 netif_start_queue(dev);
223 memset(&dev->stats, 0, sizeof(struct net_device_stats));
224 223
225 return 0; 224 return 0;
226} 225}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index cfdfd7e2a172..26eaebf4aaa9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1103,7 +1103,7 @@ done:
1103out: 1103out:
1104 release_sock(sk); 1104 release_sock(sk);
1105 1105
1106 return 0; 1106 return err;
1107} 1107}
1108 1108
1109/* 1109/*
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 7805945a5fd6..a1690845dc6e 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -412,7 +412,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
412{ 412{
413 ax25_uid_assoc *user; 413 ax25_uid_assoc *user;
414 ax25_route *ax25_rt; 414 ax25_route *ax25_rt;
415 int err; 415 int err = 0;
416 416
417 if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) 417 if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
418 return -EHOSTUNREACH; 418 return -EHOSTUNREACH;
@@ -453,7 +453,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
453put: 453put:
454 ax25_put_route(ax25_rt); 454 ax25_put_route(ax25_rt);
455 455
456 return 0; 456 return err;
457} 457}
458 458
459struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, 459struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 421c45bd1b95..ed0f22f57668 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -297,13 +297,12 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
297 mask |= POLLERR; 297 mask |= POLLERR;
298 298
299 if (sk->sk_shutdown & RCV_SHUTDOWN) 299 if (sk->sk_shutdown & RCV_SHUTDOWN)
300 mask |= POLLRDHUP; 300 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
301 301
302 if (sk->sk_shutdown == SHUTDOWN_MASK) 302 if (sk->sk_shutdown == SHUTDOWN_MASK)
303 mask |= POLLHUP; 303 mask |= POLLHUP;
304 304
305 if (!skb_queue_empty(&sk->sk_receive_queue) || 305 if (!skb_queue_empty(&sk->sk_receive_queue))
306 (sk->sk_shutdown & RCV_SHUTDOWN))
307 mask |= POLLIN | POLLRDNORM; 306 mask |= POLLIN | POLLRDNORM;
308 307
309 if (sk->sk_state == BT_CLOSED) 308 if (sk->sk_state == BT_CLOSED)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c03d2c3ff03e..89ad25a76202 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -61,30 +61,27 @@ static int port_cost(struct net_device *dev)
61} 61}
62 62
63 63
 64/* 64/* Check for port carrier transitions. */
 65 * Check for port carrier transitions.
66 * Called from work queue to allow for calling functions that
67 * might sleep (such as speed check), and to debounce.
68 */
69void br_port_carrier_check(struct net_bridge_port *p) 65void br_port_carrier_check(struct net_bridge_port *p)
70{ 66{
71 struct net_device *dev = p->dev; 67 struct net_device *dev = p->dev;
72 struct net_bridge *br = p->br; 68 struct net_bridge *br = p->br;
73 69
74 if (netif_carrier_ok(dev)) 70 if (netif_running(dev) && netif_carrier_ok(dev))
75 p->path_cost = port_cost(dev); 71 p->path_cost = port_cost(dev);
76 72
77 if (netif_running(br->dev)) { 73 if (!netif_running(br->dev))
78 spin_lock_bh(&br->lock); 74 return;
79 if (netif_carrier_ok(dev)) { 75
80 if (p->state == BR_STATE_DISABLED) 76 spin_lock_bh(&br->lock);
81 br_stp_enable_port(p); 77 if (netif_running(dev) && netif_carrier_ok(dev)) {
82 } else { 78 if (p->state == BR_STATE_DISABLED)
83 if (p->state != BR_STATE_DISABLED) 79 br_stp_enable_port(p);
84 br_stp_disable_port(p); 80 } else {
85 } 81 if (p->state != BR_STATE_DISABLED)
86 spin_unlock_bh(&br->lock); 82 br_stp_disable_port(p);
87 } 83 }
84 spin_unlock_bh(&br->lock);
88} 85}
89 86
90static void release_nbp(struct kobject *kobj) 87static void release_nbp(struct kobject *kobj)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 826cd5221536..6d04cfdf4541 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -141,7 +141,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
141 const unsigned char *dest = eth_hdr(skb)->h_dest; 141 const unsigned char *dest = eth_hdr(skb)->h_dest;
142 int (*rhook)(struct sk_buff *skb); 142 int (*rhook)(struct sk_buff *skb);
143 143
144 if (skb->pkt_type == PACKET_LOOPBACK) 144 if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
145 return skb; 145 return skb;
146 146
147 if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) 147 if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 0b586e9d1378..0fd01dd17c48 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -9,6 +9,8 @@
9 * and Sakari Ailus <sakari.ailus@nokia.com> 9 * and Sakari Ailus <sakari.ailus@nokia.com>
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
13
12#include <linux/version.h> 14#include <linux/version.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -214,7 +216,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
214 216
215 switch (what) { 217 switch (what) {
216 case NETDEV_REGISTER: 218 case NETDEV_REGISTER:
217 pr_info("CAIF: %s():register %s\n", __func__, dev->name); 219 netdev_info(dev, "register\n");
218 caifd = caif_device_alloc(dev); 220 caifd = caif_device_alloc(dev);
219 if (caifd == NULL) 221 if (caifd == NULL)
220 break; 222 break;
@@ -225,14 +227,13 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
225 break; 227 break;
226 228
227 case NETDEV_UP: 229 case NETDEV_UP:
228 pr_info("CAIF: %s(): up %s\n", __func__, dev->name); 230 netdev_info(dev, "up\n");
229 caifd = caif_get(dev); 231 caifd = caif_get(dev);
230 if (caifd == NULL) 232 if (caifd == NULL)
231 break; 233 break;
232 caifdev = netdev_priv(dev); 234 caifdev = netdev_priv(dev);
233 if (atomic_read(&caifd->state) == NETDEV_UP) { 235 if (atomic_read(&caifd->state) == NETDEV_UP) {
234 pr_info("CAIF: %s():%s already up\n", 236 netdev_info(dev, "already up\n");
235 __func__, dev->name);
236 break; 237 break;
237 } 238 }
238 atomic_set(&caifd->state, what); 239 atomic_set(&caifd->state, what);
@@ -273,7 +274,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
273 caifd = caif_get(dev); 274 caifd = caif_get(dev);
274 if (caifd == NULL) 275 if (caifd == NULL)
275 break; 276 break;
276 pr_info("CAIF: %s():going down %s\n", __func__, dev->name); 277 netdev_info(dev, "going down\n");
277 278
278 if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN || 279 if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
279 atomic_read(&caifd->state) == NETDEV_DOWN) 280 atomic_read(&caifd->state) == NETDEV_DOWN)
@@ -295,11 +296,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
295 caifd = caif_get(dev); 296 caifd = caif_get(dev);
296 if (caifd == NULL) 297 if (caifd == NULL)
297 break; 298 break;
298 pr_info("CAIF: %s(): down %s\n", __func__, dev->name); 299 netdev_info(dev, "down\n");
299 if (atomic_read(&caifd->in_use)) 300 if (atomic_read(&caifd->in_use))
300 pr_warning("CAIF: %s(): " 301 netdev_warn(dev,
301 "Unregistering an active CAIF device: %s\n", 302 "Unregistering an active CAIF device\n");
302 __func__, dev->name);
303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); 303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
304 dev_put(dev); 304 dev_put(dev);
305 atomic_set(&caifd->state, what); 305 atomic_set(&caifd->state, what);
@@ -307,7 +307,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
307 307
308 case NETDEV_UNREGISTER: 308 case NETDEV_UNREGISTER:
309 caifd = caif_get(dev); 309 caifd = caif_get(dev);
310 pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name); 310 netdev_info(dev, "unregister\n");
311 atomic_set(&caifd->state, what); 311 atomic_set(&caifd->state, what);
312 caif_device_destroy(dev); 312 caif_device_destroy(dev);
313 break; 313 break;
@@ -391,7 +391,7 @@ static int __init caif_device_init(void)
391 int result; 391 int result;
392 cfg = cfcnfg_create(); 392 cfg = cfcnfg_create();
393 if (!cfg) { 393 if (!cfg) {
394 pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__); 394 pr_warn("can't create cfcnfg\n");
395 goto err_cfcnfg_create_failed; 395 goto err_cfcnfg_create_failed;
396 } 396 }
397 result = register_pernet_device(&caif_net_ops); 397 result = register_pernet_device(&caif_net_ops);
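
Every pr_warn()/pr_info() in the file now picks its prefix up from pr_fmt() at compile time, which is what lets the call sites drop the hand-written "CAIF: %s():" strings. Roughly how the expansion works (module name shown as "caif" for illustration; note the define smuggles __func__ in as the first printk argument):

/* In some function bar(), after the pr_fmt define above: */
pr_warn("can't create cfcnfg\n");
/* expands to approximately:
 *   printk(KERN_WARNING "caif" ":%s(): " "can't create cfcnfg\n", __func__);
 * and prints:
 *   caif:bar(): can't create cfcnfg
 */
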
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8ce904786116..fd1f5df0827c 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/fs.h> 9#include <linux/fs.h>
8#include <linux/init.h> 10#include <linux/init.h>
9#include <linux/module.h> 11#include <linux/module.h>
@@ -157,8 +159,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
157 159
158 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 160 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
159 (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { 161 (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
160 trace_printk("CAIF: %s():" 162 trace_printk("CAIF: %s(): "
161 " sending flow OFF (queue len = %d %d)\n", 163 "sending flow OFF (queue len = %d %d)\n",
162 __func__, 164 __func__,
163 atomic_read(&cf_sk->sk.sk_rmem_alloc), 165 atomic_read(&cf_sk->sk.sk_rmem_alloc),
164 sk_rcvbuf_lowwater(cf_sk)); 166 sk_rcvbuf_lowwater(cf_sk));
@@ -172,8 +174,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
172 return err; 174 return err;
173 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { 175 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
174 set_rx_flow_off(cf_sk); 176 set_rx_flow_off(cf_sk);
175 trace_printk("CAIF: %s():" 177 trace_printk("CAIF: %s(): "
176 " sending flow OFF due to rmem_schedule\n", 178 "sending flow OFF due to rmem_schedule\n",
177 __func__); 179 __func__);
178 dbfs_atomic_inc(&cnt.num_rx_flow_off); 180 dbfs_atomic_inc(&cnt.num_rx_flow_off);
179 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 181 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
@@ -275,8 +277,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
275 break; 277 break;
276 278
277 default: 279 default:
278 pr_debug("CAIF: %s(): Unexpected flow command %d\n", 280 pr_debug("Unexpected flow command %d\n", flow);
279 __func__, flow);
280 } 281 }
281} 282}
282 283
@@ -536,8 +537,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
536 537
537 /* Slight paranoia, probably not needed. */ 538 /* Slight paranoia, probably not needed. */
538 if (unlikely(loopcnt++ > 1000)) { 539 if (unlikely(loopcnt++ > 1000)) {
539 pr_warning("CAIF: %s(): transmit retries failed," 540 pr_warn("transmit retries failed, error = %d\n", ret);
540 " error = %d\n", __func__, ret);
541 break; 541 break;
542 } 542 }
543 543
@@ -902,8 +902,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
902 cf_sk->maxframe = dev->mtu - (headroom + tailroom); 902 cf_sk->maxframe = dev->mtu - (headroom + tailroom);
903 dev_put(dev); 903 dev_put(dev);
904 if (cf_sk->maxframe < 1) { 904 if (cf_sk->maxframe < 1) {
905 pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n", 905 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
906 __func__, dev->mtu);
907 err = -ENODEV; 906 err = -ENODEV;
908 goto out; 907 goto out;
909 } 908 }
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 1c29189b344d..ef93a131310b 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -3,6 +3,9 @@
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
6#include <linux/kernel.h> 9#include <linux/kernel.h>
7#include <linux/stddef.h> 10#include <linux/stddef.h>
8#include <linux/slab.h> 11#include <linux/slab.h>
@@ -78,7 +81,7 @@ struct cfcnfg *cfcnfg_create(void)
78 /* Initiate this layer */ 81 /* Initiate this layer */
79 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); 82 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
80 if (!this) { 83 if (!this) {
81 pr_warning("CAIF: %s(): Out of memory\n", __func__); 84 pr_warn("Out of memory\n");
82 return NULL; 85 return NULL;
83 } 86 }
84 this->mux = cfmuxl_create(); 87 this->mux = cfmuxl_create();
@@ -106,7 +109,7 @@ struct cfcnfg *cfcnfg_create(void)
106 layer_set_up(this->ctrl, this); 109 layer_set_up(this->ctrl, this);
107 return this; 110 return this;
108out_of_mem: 111out_of_mem:
109 pr_warning("CAIF: %s(): Out of memory\n", __func__); 112 pr_warn("Out of memory\n");
110 kfree(this->mux); 113 kfree(this->mux);
111 kfree(this->ctrl); 114 kfree(this->ctrl);
112 kfree(this); 115 kfree(this);
@@ -194,7 +197,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
194 caif_assert(adap_layer != NULL); 197 caif_assert(adap_layer != NULL);
195 channel_id = adap_layer->id; 198 channel_id = adap_layer->id;
196 if (adap_layer->dn == NULL || channel_id == 0) { 199 if (adap_layer->dn == NULL || channel_id == 0) {
197 pr_err("CAIF: %s():adap_layer->id is 0\n", __func__); 200 pr_err("adap_layer->id is 0\n");
198 ret = -ENOTCONN; 201 ret = -ENOTCONN;
199 goto end; 202 goto end;
200 } 203 }
@@ -204,9 +207,8 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
204 layer_set_up(servl, NULL); 207 layer_set_up(servl, NULL);
205 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); 208 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
206 if (servl == NULL) { 209 if (servl == NULL) {
207 pr_err("CAIF: %s(): PROTOCOL ERROR " 210 pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
208 "- Error removing service_layer Channel_Id(%d)", 211 channel_id);
209 __func__, channel_id);
210 ret = -EINVAL; 212 ret = -EINVAL;
211 goto end; 213 goto end;
212 } 214 }
@@ -216,18 +218,14 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
216 218
217 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid); 219 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
218 if (phyinfo == NULL) { 220 if (phyinfo == NULL) {
219 pr_warning("CAIF: %s(): " 221 pr_warn("No interface to send disconnect to\n");
220 "No interface to send disconnect to\n",
221 __func__);
222 ret = -ENODEV; 222 ret = -ENODEV;
223 goto end; 223 goto end;
224 } 224 }
225 if (phyinfo->id != phyid || 225 if (phyinfo->id != phyid ||
226 phyinfo->phy_layer->id != phyid || 226 phyinfo->phy_layer->id != phyid ||
227 phyinfo->frm_layer->id != phyid) { 227 phyinfo->frm_layer->id != phyid) {
228 pr_err("CAIF: %s(): " 228 pr_err("Inconsistency in phy registration\n");
229 "Inconsistency in phy registration\n",
230 __func__);
231 ret = -EINVAL; 229 ret = -EINVAL;
232 goto end; 230 goto end;
233 } 231 }
@@ -276,21 +274,20 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
276{ 274{
277 struct cflayer *frml; 275 struct cflayer *frml;
278 if (adap_layer == NULL) { 276 if (adap_layer == NULL) {
279 pr_err("CAIF: %s(): adap_layer is zero", __func__); 277 pr_err("adap_layer is zero\n");
280 return -EINVAL; 278 return -EINVAL;
281 } 279 }
282 if (adap_layer->receive == NULL) { 280 if (adap_layer->receive == NULL) {
283 pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__); 281 pr_err("adap_layer->receive is NULL\n");
284 return -EINVAL; 282 return -EINVAL;
285 } 283 }
286 if (adap_layer->ctrlcmd == NULL) { 284 if (adap_layer->ctrlcmd == NULL) {
287 pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__); 285 pr_err("adap_layer->ctrlcmd == NULL\n");
288 return -EINVAL; 286 return -EINVAL;
289 } 287 }
290 frml = cnfg->phy_layers[param->phyid].frm_layer; 288 frml = cnfg->phy_layers[param->phyid].frm_layer;
291 if (frml == NULL) { 289 if (frml == NULL) {
292 pr_err("CAIF: %s(): Specified PHY type does not exist!", 290 pr_err("Specified PHY type does not exist!\n");
293 __func__);
294 return -ENODEV; 291 return -ENODEV;
295 } 292 }
296 caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); 293 caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
@@ -330,9 +327,7 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
330 struct net_device *netdev; 327 struct net_device *netdev;
331 328
332 if (adapt_layer == NULL) { 329 if (adapt_layer == NULL) {
333 pr_debug("CAIF: %s(): link setup response " 330 pr_debug("link setup response but no client exist, send linkdown back\n");
334 "but no client exist, send linkdown back\n",
335 __func__);
336 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL); 331 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
337 return; 332 return;
338 } 333 }
@@ -374,13 +369,11 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
374 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info); 369 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
375 break; 370 break;
376 default: 371 default:
377 pr_err("CAIF: %s(): Protocol error. " 372 pr_err("Protocol error. Link setup response - unknown channel type\n");
378 "Link setup response - unknown channel type\n",
379 __func__);
380 return; 373 return;
381 } 374 }
382 if (!servicel) { 375 if (!servicel) {
383 pr_warning("CAIF: %s(): Out of memory\n", __func__); 376 pr_warn("Out of memory\n");
384 return; 377 return;
385 } 378 }
386 layer_set_dn(servicel, cnfg->mux); 379 layer_set_dn(servicel, cnfg->mux);
@@ -418,7 +411,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
418 } 411 }
419 } 412 }
420 if (*phyid == 0) { 413 if (*phyid == 0) {
421 pr_err("CAIF: %s(): No Available PHY ID\n", __func__); 414 pr_err("No Available PHY ID\n");
422 return; 415 return;
423 } 416 }
424 417
@@ -427,7 +420,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
427 phy_driver = 420 phy_driver =
428 cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); 421 cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
429 if (!phy_driver) { 422 if (!phy_driver) {
430 pr_warning("CAIF: %s(): Out of memory\n", __func__); 423 pr_warn("Out of memory\n");
431 return; 424 return;
432 } 425 }
433 426
@@ -436,7 +429,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
436 phy_driver = NULL; 429 phy_driver = NULL;
437 break; 430 break;
438 default: 431 default:
439 pr_err("CAIF: %s(): %d", __func__, phy_type); 432 pr_err("%d\n", phy_type);
440 return; 433 return;
441 break; 434 break;
442 } 435 }
@@ -455,7 +448,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
455 phy_layer->type = phy_type; 448 phy_layer->type = phy_type;
456 frml = cffrml_create(*phyid, fcs); 449 frml = cffrml_create(*phyid, fcs);
457 if (!frml) { 450 if (!frml) {
458 pr_warning("CAIF: %s(): Out of memory\n", __func__); 451 pr_warn("Out of memory\n");
459 return; 452 return;
460 } 453 }
461 cnfg->phy_layers[*phyid].frm_layer = frml; 454 cnfg->phy_layers[*phyid].frm_layer = frml;
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 563145fdc4c3..08f267a109aa 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -36,7 +38,7 @@ struct cflayer *cfctrl_create(void)
36 struct cfctrl *this = 38 struct cfctrl *this =
37 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); 39 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
38 if (!this) { 40 if (!this) {
39 pr_warning("CAIF: %s(): Out of memory\n", __func__); 41 pr_warn("Out of memory\n");
40 return NULL; 42 return NULL;
41 } 43 }
42 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 44 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
@@ -132,9 +134,7 @@ struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
132 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 134 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
133 if (cfctrl_req_eq(req, p)) { 135 if (cfctrl_req_eq(req, p)) {
134 if (p != first) 136 if (p != first)
135 pr_warning("CAIF: %s(): Requests are not " 137 pr_warn("Requests are not received in order\n");
136 "received in order\n",
137 __func__);
138 138
139 atomic_set(&ctrl->rsp_seq_no, 139 atomic_set(&ctrl->rsp_seq_no,
140 p->sequence_no); 140 p->sequence_no);
@@ -177,7 +177,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
177 int ret; 177 int ret;
178 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 178 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
179 if (!pkt) { 179 if (!pkt) {
180 pr_warning("CAIF: %s(): Out of memory\n", __func__); 180 pr_warn("Out of memory\n");
181 return; 181 return;
182 } 182 }
183 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 183 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
@@ -189,8 +189,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
189 ret = 189 ret =
190 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 190 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
191 if (ret < 0) { 191 if (ret < 0) {
192 pr_err("CAIF: %s(): Could not transmit enum message\n", 192 pr_err("Could not transmit enum message\n");
193 __func__);
194 cfpkt_destroy(pkt); 193 cfpkt_destroy(pkt);
195 } 194 }
196} 195}
@@ -208,7 +207,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
208 char utility_name[16]; 207 char utility_name[16];
209 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 208 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
210 if (!pkt) { 209 if (!pkt) {
211 pr_warning("CAIF: %s(): Out of memory\n", __func__); 210 pr_warn("Out of memory\n");
212 return -ENOMEM; 211 return -ENOMEM;
213 } 212 }
214 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); 213 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
@@ -253,13 +252,13 @@ int cfctrl_linkup_request(struct cflayer *layer,
253 param->u.utility.paramlen); 252 param->u.utility.paramlen);
254 break; 253 break;
255 default: 254 default:
256 pr_warning("CAIF: %s():Request setup of bad link type = %d\n", 255 pr_warn("Request setup of bad link type = %d\n",
257 __func__, param->linktype); 256 param->linktype);
258 return -EINVAL; 257 return -EINVAL;
259 } 258 }
260 req = kzalloc(sizeof(*req), GFP_KERNEL); 259 req = kzalloc(sizeof(*req), GFP_KERNEL);
261 if (!req) { 260 if (!req) {
262 pr_warning("CAIF: %s(): Out of memory\n", __func__); 261 pr_warn("Out of memory\n");
263 return -ENOMEM; 262 return -ENOMEM;
264 } 263 }
265 req->client_layer = user_layer; 264 req->client_layer = user_layer;
@@ -276,8 +275,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
276 ret = 275 ret =
277 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 276 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
278 if (ret < 0) { 277 if (ret < 0) {
279 pr_err("CAIF: %s(): Could not transmit linksetup request\n", 278 pr_err("Could not transmit linksetup request\n");
280 __func__);
281 cfpkt_destroy(pkt); 279 cfpkt_destroy(pkt);
282 return -ENODEV; 280 return -ENODEV;
283 } 281 }
@@ -291,7 +289,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
291 struct cfctrl *cfctrl = container_obj(layer); 289 struct cfctrl *cfctrl = container_obj(layer);
292 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 290 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
293 if (!pkt) { 291 if (!pkt) {
294 pr_warning("CAIF: %s(): Out of memory\n", __func__); 292 pr_warn("Out of memory\n");
295 return -ENOMEM; 293 return -ENOMEM;
296 } 294 }
297 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); 295 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
@@ -300,8 +298,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
300 ret = 298 ret =
301 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 299 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
302 if (ret < 0) { 300 if (ret < 0) {
303 pr_err("CAIF: %s(): Could not transmit link-down request\n", 301 pr_err("Could not transmit link-down request\n");
304 __func__);
305 cfpkt_destroy(pkt); 302 cfpkt_destroy(pkt);
306 } 303 }
307 return ret; 304 return ret;
@@ -313,7 +310,7 @@ void cfctrl_sleep_req(struct cflayer *layer)
313 struct cfctrl *cfctrl = container_obj(layer); 310 struct cfctrl *cfctrl = container_obj(layer);
314 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 311 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
315 if (!pkt) { 312 if (!pkt) {
316 pr_warning("CAIF: %s(): Out of memory\n", __func__); 313 pr_warn("Out of memory\n");
317 return; 314 return;
318 } 315 }
319 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP); 316 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
@@ -330,7 +327,7 @@ void cfctrl_wake_req(struct cflayer *layer)
330 struct cfctrl *cfctrl = container_obj(layer); 327 struct cfctrl *cfctrl = container_obj(layer);
331 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 328 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
332 if (!pkt) { 329 if (!pkt) {
333 pr_warning("CAIF: %s(): Out of memory\n", __func__); 330 pr_warn("Out of memory\n");
334 return; 331 return;
335 } 332 }
336 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE); 333 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
@@ -347,7 +344,7 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
347 struct cfctrl *cfctrl = container_obj(layer); 344 struct cfctrl *cfctrl = container_obj(layer);
348 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 345 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
349 if (!pkt) { 346 if (!pkt) {
350 pr_warning("CAIF: %s(): Out of memory\n", __func__); 347 pr_warn("Out of memory\n");
351 return; 348 return;
352 } 349 }
353 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON); 350 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
@@ -364,12 +361,11 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
364 struct cfctrl_request_info *p, *tmp; 361 struct cfctrl_request_info *p, *tmp;
365 struct cfctrl *ctrl = container_obj(layr); 362 struct cfctrl *ctrl = container_obj(layr);
366 spin_lock(&ctrl->info_list_lock); 363 spin_lock(&ctrl->info_list_lock);
367 pr_warning("CAIF: %s(): enter\n", __func__); 364 pr_warn("enter\n");
368 365
369 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 366 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
370 if (p->client_layer == adap_layer) { 367 if (p->client_layer == adap_layer) {
371 pr_warning("CAIF: %s(): cancel req :%d\n", __func__, 368 pr_warn("cancel req :%d\n", p->sequence_no);
372 p->sequence_no);
373 list_del(&p->list); 369 list_del(&p->list);
374 kfree(p); 370 kfree(p);
375 } 371 }
@@ -520,9 +516,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
520 cfpkt_extr_head(pkt, &param, len); 516 cfpkt_extr_head(pkt, &param, len);
521 break; 517 break;
522 default: 518 default:
523 pr_warning("CAIF: %s(): Request setup " 519 pr_warn("Request setup - invalid link type (%d)\n",
524 "- invalid link type (%d)", 520 serv);
525 __func__, serv);
526 goto error; 521 goto error;
527 } 522 }
528 523
@@ -532,9 +527,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
532 527
533 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || 528 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
534 cfpkt_erroneous(pkt)) { 529 cfpkt_erroneous(pkt)) {
535 pr_err("CAIF: %s(): Invalid O/E bit or parse " 530 pr_err("Invalid O/E bit or parse error on CAIF control channel\n");
536 "error on CAIF control channel",
537 __func__);
538 cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 531 cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
539 0, 532 0,
540 req ? req->client_layer 533 req ? req->client_layer
@@ -556,8 +549,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
556 cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid); 549 cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
557 break; 550 break;
558 case CFCTRL_CMD_LINK_ERR: 551 case CFCTRL_CMD_LINK_ERR:
559 pr_err("CAIF: %s(): Frame Error Indication received\n", 552 pr_err("Frame Error Indication received\n");
560 __func__);
561 cfctrl->res.linkerror_ind(); 553 cfctrl->res.linkerror_ind();
562 break; 554 break;
563 case CFCTRL_CMD_ENUM: 555 case CFCTRL_CMD_ENUM:
@@ -576,7 +568,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
576 cfctrl->res.radioset_rsp(); 568 cfctrl->res.radioset_rsp();
577 break; 569 break;
578 default: 570 default:
579 pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__); 571 pr_err("Unrecognized Control Frame\n");
580 goto error; 572 goto error;
581 break; 573 break;
582 } 574 }
@@ -595,8 +587,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
595 case CAIF_CTRLCMD_FLOW_OFF_IND: 587 case CAIF_CTRLCMD_FLOW_OFF_IND:
596 spin_lock(&this->info_list_lock); 588 spin_lock(&this->info_list_lock);
597 if (!list_empty(&this->list)) { 589 if (!list_empty(&this->list)) {
598 pr_debug("CAIF: %s(): Received flow off in " 590 pr_debug("Received flow off in control layer\n");
599 "control layer", __func__);
600 } 591 }
601 spin_unlock(&this->info_list_lock); 592 spin_unlock(&this->info_list_lock);
602 break; 593 break;
@@ -620,7 +611,7 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
620 if (!ctrl->loop_linkused[linkid]) 611 if (!ctrl->loop_linkused[linkid])
621 goto found; 612 goto found;
622 spin_unlock(&ctrl->loop_linkid_lock); 613 spin_unlock(&ctrl->loop_linkid_lock);
623 pr_err("CAIF: %s(): Out of link-ids\n", __func__); 614 pr_err("Out of link-ids\n");
624 return -EINVAL; 615 return -EINVAL;
625found: 616found:
626 if (!ctrl->loop_linkused[linkid]) 617 if (!ctrl->loop_linkused[linkid])
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 676648cac8dd..496fda9ac66f 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/slab.h> 10#include <linux/slab.h>
9#include <net/caif/caif_layer.h> 11#include <net/caif/caif_layer.h>
@@ -17,7 +19,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
17{ 19{
18 struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 20 struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
19 if (!dbg) { 21 if (!dbg) {
20 pr_warning("CAIF: %s(): Out of memory\n", __func__); 22 pr_warn("Out of memory\n");
21 return NULL; 23 return NULL;
22 } 24 }
23 caif_assert(offsetof(struct cfsrvl, layer) == 0); 25 caif_assert(offsetof(struct cfsrvl, layer) == 0);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index ed9d53aff280..d3ed264ad6c4 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -26,7 +28,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
26{ 28{
27 struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 29 struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
28 if (!dgm) { 30 if (!dgm) {
29 pr_warning("CAIF: %s(): Out of memory\n", __func__); 31 pr_warn("Out of memory\n");
30 return NULL; 32 return NULL;
31 } 33 }
32 caif_assert(offsetof(struct cfsrvl, layer) == 0); 34 caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -49,14 +51,14 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
49 caif_assert(layr->ctrlcmd != NULL); 51 caif_assert(layr->ctrlcmd != NULL);
50 52
51 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { 53 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
52 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 54 pr_err("Packet is erroneous!\n");
53 cfpkt_destroy(pkt); 55 cfpkt_destroy(pkt);
54 return -EPROTO; 56 return -EPROTO;
55 } 57 }
56 58
57 if ((cmd & DGM_CMD_BIT) == 0) { 59 if ((cmd & DGM_CMD_BIT) == 0) {
58 if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) { 60 if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
59 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 61 pr_err("Packet is erroneous!\n");
60 cfpkt_destroy(pkt); 62 cfpkt_destroy(pkt);
61 return -EPROTO; 63 return -EPROTO;
62 } 64 }
@@ -75,8 +77,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
75 return 0; 77 return 0;
76 default: 78 default:
77 cfpkt_destroy(pkt); 79 cfpkt_destroy(pkt);
78 pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n", 80 pr_info("Unknown datagram control %d (0x%x)\n", cmd, cmd);
79 __func__, cmd, cmd);
80 return -EPROTO; 81 return -EPROTO;
81 } 82 }
82} 83}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index e86a4ca3b217..a445043931ae 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -6,6 +6,8 @@
6 * License terms: GNU General Public License (GPL) version 2 6 * License terms: GNU General Public License (GPL) version 2
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
10
9#include <linux/stddef.h> 11#include <linux/stddef.h>
10#include <linux/spinlock.h> 12#include <linux/spinlock.h>
11#include <linux/slab.h> 13#include <linux/slab.h>
@@ -32,7 +34,7 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
32{ 34{
33 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC); 35 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
34 if (!this) { 36 if (!this) {
35 pr_warning("CAIF: %s(): Out of memory\n", __func__); 37 pr_warn("Out of memory\n");
36 return NULL; 38 return NULL;
37 } 39 }
38 caif_assert(offsetof(struct cffrml, layer) == 0); 40 caif_assert(offsetof(struct cffrml, layer) == 0);
@@ -83,7 +85,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
83 85
84 if (cfpkt_setlen(pkt, len) < 0) { 86 if (cfpkt_setlen(pkt, len) < 0) {
85 ++cffrml_rcv_error; 87 ++cffrml_rcv_error;
86 pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len); 88 pr_err("Framing length error (%d)\n", len);
87 cfpkt_destroy(pkt); 89 cfpkt_destroy(pkt);
88 return -EPROTO; 90 return -EPROTO;
89 } 91 }
@@ -99,14 +101,14 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
99 cfpkt_add_trail(pkt, &tmp, 2); 101 cfpkt_add_trail(pkt, &tmp, 2);
100 ++cffrml_rcv_error; 102 ++cffrml_rcv_error;
101 ++cffrml_rcv_checsum_error; 103 ++cffrml_rcv_checsum_error;
102 pr_info("CAIF: %s(): Frame checksum error " 104 pr_info("Frame checksum error (0x%x != 0x%x)\n",
103 "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks); 105 hdrchks, pktchks);
104 return -EILSEQ; 106 return -EILSEQ;
105 } 107 }
106 } 108 }
107 if (cfpkt_erroneous(pkt)) { 109 if (cfpkt_erroneous(pkt)) {
108 ++cffrml_rcv_error; 110 ++cffrml_rcv_error;
109 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 111 pr_err("Packet is erroneous!\n");
110 cfpkt_destroy(pkt); 112 cfpkt_destroy(pkt);
111 return -EPROTO; 113 return -EPROTO;
112 } 114 }
@@ -132,7 +134,7 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
132 cfpkt_add_head(pkt, &tmp, 2); 134 cfpkt_add_head(pkt, &tmp, 2);
133 cfpkt_info(pkt)->hdr_len += 2; 135 cfpkt_info(pkt)->hdr_len += 2;
134 if (cfpkt_erroneous(pkt)) { 136 if (cfpkt_erroneous(pkt)) {
135 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 137 pr_err("Packet is erroneous!\n");
136 return -EPROTO; 138 return -EPROTO;
137 } 139 }
138 ret = layr->dn->transmit(layr->dn, pkt); 140 ret = layr->dn->transmit(layr->dn, pkt);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 80c8d332b258..46f34b2e0478 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -3,6 +3,9 @@
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
6#include <linux/stddef.h> 9#include <linux/stddef.h>
7#include <linux/spinlock.h> 10#include <linux/spinlock.h>
8#include <linux/slab.h> 11#include <linux/slab.h>
@@ -190,7 +193,7 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
190 u8 id; 193 u8 id;
191 struct cflayer *up; 194 struct cflayer *up;
192 if (cfpkt_extr_head(pkt, &id, 1) < 0) { 195 if (cfpkt_extr_head(pkt, &id, 1) < 0) {
193 pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__); 196 pr_err("erroneous Caif Packet\n");
194 cfpkt_destroy(pkt); 197 cfpkt_destroy(pkt);
195 return -EPROTO; 198 return -EPROTO;
196 } 199 }
@@ -199,8 +202,8 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
199 up = get_up(muxl, id); 202 up = get_up(muxl, id);
200 spin_unlock(&muxl->receive_lock); 203 spin_unlock(&muxl->receive_lock);
201 if (up == NULL) { 204 if (up == NULL) {
202 pr_info("CAIF: %s():Received data on unknown link ID = %d " 205 pr_info("Received data on unknown link ID = %d (0x%x) up == NULL",
203 "(0x%x) up == NULL", __func__, id, id); 206 id, id);
204 cfpkt_destroy(pkt); 207 cfpkt_destroy(pkt);
205 /* 208 /*
206 * Don't return ERROR, since modem misbehaves and sends out 209 * Don't return ERROR, since modem misbehaves and sends out
@@ -223,9 +226,8 @@ static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
223 struct caif_payload_info *info = cfpkt_info(pkt); 226 struct caif_payload_info *info = cfpkt_info(pkt);
224 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); 227 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
225 if (dn == NULL) { 228 if (dn == NULL) {
226 pr_warning("CAIF: %s(): Send data on unknown phy " 229 pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
227 "ID = %d (0x%x)\n", 230 info->dev_info->id, info->dev_info->id);
228 __func__, info->dev_info->id, info->dev_info->id);
229 return -ENOTCONN; 231 return -ENOTCONN;
230 } 232 }
231 info->hdr_len += 1; 233 info->hdr_len += 1;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index c49a6695793a..d7e865e2ff65 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/string.h> 9#include <linux/string.h>
8#include <linux/skbuff.h> 10#include <linux/skbuff.h>
9#include <linux/hardirq.h> 11#include <linux/hardirq.h>
@@ -12,11 +14,12 @@
12#define PKT_PREFIX 48 14#define PKT_PREFIX 48
13#define PKT_POSTFIX 2 15#define PKT_POSTFIX 2
14#define PKT_LEN_WHEN_EXTENDING 128 16#define PKT_LEN_WHEN_EXTENDING 128
15#define PKT_ERROR(pkt, errmsg) do { \ 17#define PKT_ERROR(pkt, errmsg) \
16 cfpkt_priv(pkt)->erronous = true; \ 18do { \
17 skb_reset_tail_pointer(&pkt->skb); \ 19 cfpkt_priv(pkt)->erronous = true; \
18 pr_warning("CAIF: " errmsg);\ 20 skb_reset_tail_pointer(&pkt->skb); \
19 } while (0) 21 pr_warn(errmsg); \
22} while (0)
20 23
21struct cfpktq { 24struct cfpktq {
22 struct sk_buff_head head; 25 struct sk_buff_head head;
@@ -130,13 +133,13 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
130 return -EPROTO; 133 return -EPROTO;
131 134
132 if (unlikely(len > skb->len)) { 135 if (unlikely(len > skb->len)) {
133 PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n"); 136 PKT_ERROR(pkt, "read beyond end of packet\n");
134 return -EPROTO; 137 return -EPROTO;
135 } 138 }
136 139
137 if (unlikely(len > skb_headlen(skb))) { 140 if (unlikely(len > skb_headlen(skb))) {
138 if (unlikely(skb_linearize(skb) != 0)) { 141 if (unlikely(skb_linearize(skb) != 0)) {
139 PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n"); 142 PKT_ERROR(pkt, "linearize failed\n");
140 return -EPROTO; 143 return -EPROTO;
141 } 144 }
142 } 145 }
@@ -156,11 +159,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
156 return -EPROTO; 159 return -EPROTO;
157 160
158 if (unlikely(skb_linearize(skb) != 0)) { 161 if (unlikely(skb_linearize(skb) != 0)) {
159 PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n"); 162 PKT_ERROR(pkt, "linearize failed\n");
160 return -EPROTO; 163 return -EPROTO;
161 } 164 }
162 if (unlikely(skb->data + len > skb_tail_pointer(skb))) { 165 if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
163 PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n"); 166 PKT_ERROR(pkt, "read beyond end of packet\n");
164 return -EPROTO; 167 return -EPROTO;
165 } 168 }
166 from = skb_tail_pointer(skb) - len; 169 from = skb_tail_pointer(skb) - len;
@@ -202,7 +205,7 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
202 205
203 /* Make sure data is writable */ 206 /* Make sure data is writable */
204 if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { 207 if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
205 PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n"); 208 PKT_ERROR(pkt, "cow failed\n");
206 return -EPROTO; 209 return -EPROTO;
207 } 210 }
208 /* 211 /*
@@ -211,8 +214,7 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
211 * lengths of the top SKB. 214 * lengths of the top SKB.
212 */ 215 */
213 if (lastskb != skb) { 216 if (lastskb != skb) {
214 pr_warning("CAIF: %s(): Packet is non-linear\n", 217 pr_warn("Packet is non-linear\n");
215 __func__);
216 skb->len += len; 218 skb->len += len;
217 skb->data_len += len; 219 skb->data_len += len;
218 } 220 }
@@ -242,14 +244,14 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
242 if (unlikely(is_erronous(pkt))) 244 if (unlikely(is_erronous(pkt)))
243 return -EPROTO; 245 return -EPROTO;
244 if (unlikely(skb_headroom(skb) < len)) { 246 if (unlikely(skb_headroom(skb) < len)) {
245 PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n"); 247 PKT_ERROR(pkt, "no headroom\n");
246 return -EPROTO; 248 return -EPROTO;
247 } 249 }
248 250
249 /* Make sure data is writable */ 251 /* Make sure data is writable */
250 ret = skb_cow_data(skb, 0, &lastskb); 252 ret = skb_cow_data(skb, 0, &lastskb);
251 if (unlikely(ret < 0)) { 253 if (unlikely(ret < 0)) {
252 PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); 254 PKT_ERROR(pkt, "cow failed\n");
253 return ret; 255 return ret;
254 } 256 }
255 257
@@ -283,7 +285,7 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
283 if (unlikely(is_erronous(pkt))) 285 if (unlikely(is_erronous(pkt)))
284 return -EPROTO; 286 return -EPROTO;
285 if (unlikely(skb_linearize(&pkt->skb) != 0)) { 287 if (unlikely(skb_linearize(&pkt->skb) != 0)) {
286 PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n"); 288 PKT_ERROR(pkt, "linearize failed\n");
287 return -EPROTO; 289 return -EPROTO;
288 } 290 }
289 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); 291 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
@@ -309,7 +311,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
309 311
310 /* Need to expand SKB */ 312 /* Need to expand SKB */
311 if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) 313 if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len)))
312 PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n"); 314 PKT_ERROR(pkt, "skb_pad_trail failed\n");
313 315
314 return cfpkt_getlen(pkt); 316 return cfpkt_getlen(pkt);
315} 317}
@@ -380,8 +382,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
380 return NULL; 382 return NULL;
381 383
382 if (skb->data + pos > skb_tail_pointer(skb)) { 384 if (skb->data + pos > skb_tail_pointer(skb)) {
383 PKT_ERROR(pkt, 385 PKT_ERROR(pkt, "trying to split beyond end of packet\n");
384 "cfpkt_split: trying to split beyond end of packet");
385 return NULL; 386 return NULL;
386 } 387 }
387 388
@@ -455,17 +456,17 @@ int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
455 return -EPROTO; 456 return -EPROTO;
456 /* Make sure SKB is writable */ 457 /* Make sure SKB is writable */
457 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { 458 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
458 PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n"); 459 PKT_ERROR(pkt, "skb_cow_data failed\n");
459 return -EPROTO; 460 return -EPROTO;
460 } 461 }
461 462
462 if (unlikely(skb_linearize(skb) != 0)) { 463 if (unlikely(skb_linearize(skb) != 0)) {
463 PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n"); 464 PKT_ERROR(pkt, "linearize failed\n");
464 return -EPROTO; 465 return -EPROTO;
465 } 466 }
466 467
467 if (unlikely(skb_tailroom(skb) < buflen)) { 468 if (unlikely(skb_tailroom(skb) < buflen)) {
468 PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n"); 469 PKT_ERROR(pkt, "buffer too short - failed\n");
469 return -EPROTO; 470 return -EPROTO;
470 } 471 }
471 472
@@ -483,14 +484,13 @@ int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
483 return -EPROTO; 484 return -EPROTO;
484 485
485 if (unlikely(buflen > skb->len)) { 486 if (unlikely(buflen > skb->len)) {
486 PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large " 487 PKT_ERROR(pkt, "buflen too large - failed\n");
487 "- failed\n");
488 return -EPROTO; 488 return -EPROTO;
489 } 489 }
490 490
491 if (unlikely(buflen > skb_headlen(skb))) { 491 if (unlikely(buflen > skb_headlen(skb))) {
492 if (unlikely(skb_linearize(skb) != 0)) { 492 if (unlikely(skb_linearize(skb) != 0)) {
493 PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n"); 493 PKT_ERROR(pkt, "linearize failed\n");
494 return -EPROTO; 494 return -EPROTO;
495 } 495 }
496 } 496 }
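
Note: every cfpkt helper in the hunks above repeats the same guard sequence before touching skb data: reject an already-erroneous packet, make the buffer writable, linearize it, then bounds-check. A minimal sketch of that pattern, assuming the pkt_to_skb()/is_erronous() helpers defined earlier in cfpkt_skbuff.c (cfpkt_access_example itself is hypothetical):

static int cfpkt_access_example(struct cfpkt *pkt, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;

	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	/* Copy-on-write: do not modify data shared with clones. */
	if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
		PKT_ERROR(pkt, "cow failed\n");
		return -EPROTO;
	}

	/* Pull paged fragments into the linear area so plain pointer
	 * arithmetic on skb->data is valid. */
	if (unlikely(skb_linearize(skb) != 0)) {
		PKT_ERROR(pkt, "linearize failed\n");
		return -EPROTO;
	}

	/* Never read or write past the tail pointer. */
	if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
		PKT_ERROR(pkt, "read beyond end of packet\n");
		return -EPROTO;
	}

	return 0;
}
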
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 9a699242d104..bde8481e8d25 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -48,7 +50,7 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
48 kzalloc(sizeof(struct cfrfml), GFP_ATOMIC); 50 kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
49 51
50 if (!this) { 52 if (!this) {
51 pr_warning("CAIF: %s(): Out of memory\n", __func__); 53 pr_warn("Out of memory\n");
52 return NULL; 54 return NULL;
53 } 55 }
54 56
@@ -178,9 +180,7 @@ out:
178 cfpkt_destroy(rfml->incomplete_frm); 180 cfpkt_destroy(rfml->incomplete_frm);
179 rfml->incomplete_frm = NULL; 181 rfml->incomplete_frm = NULL;
180 182
181 pr_info("CAIF: %s(): " 183 pr_info("Connection error %d triggered on RFM link\n", err);
182 "Connection error %d triggered on RFM link\n",
183 __func__, err);
184 184
185 /* Trigger connection error upon failure.*/ 185 /* Trigger connection error upon failure.*/
186 layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 186 layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
@@ -280,9 +280,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
280out: 280out:
281 281
282 if (err != 0) { 282 if (err != 0) {
283 pr_info("CAIF: %s(): " 283 pr_info("Connection error %d triggered on RFM link\n", err);
284 "Connection error %d triggered on RFM link\n",
285 __func__, err);
286 /* Trigger connection error upon failure.*/ 284 /* Trigger connection error upon failure.*/
287 285
288 layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 286 layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
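
Note: the #define pr_fmt(...) added at the top of each CAIF file is what lets the per-call "CAIF: %s():" prefixes and explicit __func__ arguments be dropped: pr_warn() and friends paste pr_fmt() around the format string at the call site. A sketch of the expansion (simplified from the kernel's printk helpers):

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#define pr_warn(fmt, ...) \
	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

/*
 * So	pr_warn("Out of memory\n");
 * becomes roughly
 *	printk(KERN_WARNING "caif:%s(): Out of memory\n", __func__);
 * which prints "caif:cfrfml_create(): Out of memory", carrying the same
 * information as the removed hand-written prefixes.
 */
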
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index a11fbd68a13d..9297f7dea9d8 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -34,7 +36,7 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
34{ 36{
35 struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC); 37 struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
36 if (!this) { 38 if (!this) {
37 pr_warning("CAIF: %s(): Out of memory\n", __func__); 39 pr_warn("Out of memory\n");
38 return NULL; 40 return NULL;
39 } 41 }
40 caif_assert(offsetof(struct cfserl, layer) == 0); 42 caif_assert(offsetof(struct cfserl, layer) == 0);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index f40939a91211..ab5e542526bf 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/types.h> 10#include <linux/types.h>
9#include <linux/errno.h> 11#include <linux/errno.h>
@@ -79,8 +81,7 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
79 layr->up->ctrlcmd(layr->up, ctrl, phyid); 81 layr->up->ctrlcmd(layr->up, ctrl, phyid);
80 break; 82 break;
81 default: 83 default:
82 pr_warning("CAIF: %s(): " 84 pr_warn("Unexpected ctrl in cfsrvl (%d)\n", ctrl);
83 "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
84 /* We have both modem and phy flow on, send flow on */ 85 /* We have both modem and phy flow on, send flow on */
85 layr->up->ctrlcmd(layr->up, ctrl, phyid); 86 layr->up->ctrlcmd(layr->up, ctrl, phyid);
86 service->phy_flow_on = true; 87 service->phy_flow_on = true;
@@ -107,14 +108,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
107 u8 flow_on = SRVL_FLOW_ON; 108 u8 flow_on = SRVL_FLOW_ON;
108 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 109 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
109 if (!pkt) { 110 if (!pkt) {
110 pr_warning("CAIF: %s(): Out of memory\n", 111 pr_warn("Out of memory\n");
111 __func__);
112 return -ENOMEM; 112 return -ENOMEM;
113 } 113 }
114 114
115 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) { 115 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
116 pr_err("CAIF: %s(): Packet is erroneous!\n", 116 pr_err("Packet is erroneous!\n");
117 __func__);
118 cfpkt_destroy(pkt); 117 cfpkt_destroy(pkt);
119 return -EPROTO; 118 return -EPROTO;
120 } 119 }
@@ -131,14 +130,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
131 u8 flow_off = SRVL_FLOW_OFF; 130 u8 flow_off = SRVL_FLOW_OFF;
132 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 131 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
133 if (!pkt) { 132 if (!pkt) {
134 pr_warning("CAIF: %s(): Out of memory\n", 133 pr_warn("Out of memory\n");
135 __func__);
136 return -ENOMEM; 134 return -ENOMEM;
137 } 135 }
138 136
139 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { 137 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
140 pr_err("CAIF: %s(): Packet is erroneous!\n", 138 pr_err("Packet is erroneous!\n");
141 __func__);
142 cfpkt_destroy(pkt); 139 cfpkt_destroy(pkt);
143 return -EPROTO; 140 return -EPROTO;
144 } 141 }
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 02795aff57a4..efad410e4c82 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/types.h> 10#include <linux/types.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -26,7 +28,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
26{ 28{
27 struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 29 struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
28 if (!util) { 30 if (!util) {
29 pr_warning("CAIF: %s(): Out of memory\n", __func__); 31 pr_warn("Out of memory\n");
30 return NULL; 32 return NULL;
31 } 33 }
32 caif_assert(offsetof(struct cfsrvl, layer) == 0); 34 caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -47,7 +49,7 @@ static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
47 caif_assert(layr->up->receive != NULL); 49 caif_assert(layr->up->receive != NULL);
48 caif_assert(layr->up->ctrlcmd != NULL); 50 caif_assert(layr->up->ctrlcmd != NULL);
49 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { 51 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
50 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 52 pr_err("Packet is erroneous!\n");
51 cfpkt_destroy(pkt); 53 cfpkt_destroy(pkt);
52 return -EPROTO; 54 return -EPROTO;
53 } 55 }
@@ -64,16 +66,14 @@ static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
64 cfpkt_destroy(pkt); 66 cfpkt_destroy(pkt);
65 return 0; 67 return 0;
66 case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */ 68 case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */
67 pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n", 69 pr_err("REMOTE SHUTDOWN REQUEST RECEIVED\n");
68 __func__);
69 layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0); 70 layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
70 service->open = false; 71 service->open = false;
71 cfpkt_destroy(pkt); 72 cfpkt_destroy(pkt);
72 return 0; 73 return 0;
73 default: 74 default:
74 cfpkt_destroy(pkt); 75 cfpkt_destroy(pkt);
75 pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n", 76 pr_warn("Unknown service control %d (0x%x)\n", cmd, cmd);
76 __func__, cmd, cmd);
77 return -EPROTO; 77 return -EPROTO;
78 } 78 }
79} 79}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 77cc09faac9a..3b425b189a99 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/slab.h> 10#include <linux/slab.h>
9#include <net/caif/caif_layer.h> 11#include <net/caif/caif_layer.h>
@@ -25,7 +27,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
25{ 27{
26 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 28 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
27 if (!vei) { 29 if (!vei) {
28 pr_warning("CAIF: %s(): Out of memory\n", __func__); 30 pr_warn("Out of memory\n");
29 return NULL; 31 return NULL;
30 } 32 }
31 caif_assert(offsetof(struct cfsrvl, layer) == 0); 33 caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -47,7 +49,7 @@ static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
47 49
48 50
49 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { 51 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
50 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 52 pr_err("Packet is erroneous!\n");
51 cfpkt_destroy(pkt); 53 cfpkt_destroy(pkt);
52 return -EPROTO; 54 return -EPROTO;
53 } 55 }
@@ -67,8 +69,7 @@ static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
67 cfpkt_destroy(pkt); 69 cfpkt_destroy(pkt);
68 return 0; 70 return 0;
69 default: /* SET RS232 PIN */ 71 default: /* SET RS232 PIN */
70 pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n", 72 pr_warn("Unknown VEI control packet %d (0x%x)!\n", cmd, cmd);
71 __func__, cmd, cmd);
72 cfpkt_destroy(pkt); 73 cfpkt_destroy(pkt);
73 return -EPROTO; 74 return -EPROTO;
74 } 75 }
@@ -86,7 +87,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
86 caif_assert(layr->dn->transmit != NULL); 87 caif_assert(layr->dn->transmit != NULL);
87 88
88 if (cfpkt_add_head(pkt, &tmp, 1) < 0) { 89 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
89 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 90 pr_err("Packet is erroneous!\n");
90 return -EPROTO; 91 return -EPROTO;
91 } 92 }
92 93
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index ada6ee2d48f5..bf6fef2a0eff 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/types.h> 10#include <linux/types.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -21,7 +23,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
21{ 23{
22 struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 24 struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
23 if (!vid) { 25 if (!vid) {
24 pr_warning("CAIF: %s(): Out of memory\n", __func__); 26 pr_warn("Out of memory\n");
25 return NULL; 27 return NULL;
26 } 28 }
27 caif_assert(offsetof(struct cfsrvl, layer) == 0); 29 caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -38,7 +40,7 @@ static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
38{ 40{
39 u32 videoheader; 41 u32 videoheader;
40 if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) { 42 if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
41 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 43 pr_err("Packet is erroneous!\n");
42 cfpkt_destroy(pkt); 44 cfpkt_destroy(pkt);
43 return -EPROTO; 45 return -EPROTO;
44 } 46 }
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4293e190ec53..86aac24b0225 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -5,6 +5,8 @@
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
9
8#include <linux/version.h> 10#include <linux/version.h>
9#include <linux/fs.h> 11#include <linux/fs.h>
10#include <linux/init.h> 12#include <linux/init.h>
@@ -29,7 +31,7 @@
29#define CAIF_NET_DEFAULT_QUEUE_LEN 500 31#define CAIF_NET_DEFAULT_QUEUE_LEN 500
30 32
31#undef pr_debug 33#undef pr_debug
32#define pr_debug pr_warning 34#define pr_debug pr_warn
33 35
34/*This list is protected by the rtnl lock. */ 36/*This list is protected by the rtnl lock. */
35static LIST_HEAD(chnl_net_list); 37static LIST_HEAD(chnl_net_list);
@@ -142,8 +144,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
142 int phyid) 144 int phyid)
143{ 145{
144 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 146 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
145 pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n", 147 pr_debug("NET flowctrl func called flow: %s\n",
146 __func__,
147 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : 148 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
148 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" : 149 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
149 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : 150 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
@@ -196,12 +197,12 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
196 priv = netdev_priv(dev); 197 priv = netdev_priv(dev);
197 198
198 if (skb->len > priv->netdev->mtu) { 199 if (skb->len > priv->netdev->mtu) {
199 pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__); 200 pr_warn("Size of skb exceeded MTU\n");
200 return -ENOSPC; 201 return -ENOSPC;
201 } 202 }
202 203
203 if (!priv->flowenabled) { 204 if (!priv->flowenabled) {
204 pr_debug("CAIF: %s(): dropping packets flow off\n", __func__); 205 pr_debug("dropping packets flow off\n");
205 return NETDEV_TX_BUSY; 206 return NETDEV_TX_BUSY;
206 } 207 }
207 208
@@ -237,7 +238,7 @@ static int chnl_net_open(struct net_device *dev)
237 ASSERT_RTNL(); 238 ASSERT_RTNL();
238 priv = netdev_priv(dev); 239 priv = netdev_priv(dev);
239 if (!priv) { 240 if (!priv) {
240 pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__); 241 pr_debug("chnl_net_open: no priv\n");
241 return -ENODEV; 242 return -ENODEV;
242 } 243 }
243 244
@@ -246,18 +247,17 @@ static int chnl_net_open(struct net_device *dev)
246 result = caif_connect_client(&priv->conn_req, &priv->chnl, 247 result = caif_connect_client(&priv->conn_req, &priv->chnl,
247 &llifindex, &headroom, &tailroom); 248 &llifindex, &headroom, &tailroom);
248 if (result != 0) { 249 if (result != 0) {
249 pr_debug("CAIF: %s(): err: " 250 pr_debug("err: "
250 "Unable to register and open device," 251 "Unable to register and open device,"
251 " Err:%d\n", 252 " Err:%d\n",
252 __func__, 253 result);
253 result);
254 goto error; 254 goto error;
255 } 255 }
256 256
257 lldev = dev_get_by_index(dev_net(dev), llifindex); 257 lldev = dev_get_by_index(dev_net(dev), llifindex);
258 258
259 if (lldev == NULL) { 259 if (lldev == NULL) {
260 pr_debug("CAIF: %s(): no interface?\n", __func__); 260 pr_debug("no interface?\n");
261 result = -ENODEV; 261 result = -ENODEV;
262 goto error; 262 goto error;
263 } 263 }
@@ -279,9 +279,7 @@ static int chnl_net_open(struct net_device *dev)
279 dev_put(lldev); 279 dev_put(lldev);
280 280
281 if (mtu < 100) { 281 if (mtu < 100) {
282 pr_warning("CAIF: %s(): " 282 pr_warn("CAIF Interface MTU too small (%d)\n", mtu);
283 "CAIF Interface MTU too small (%d)\n",
284 __func__, mtu);
285 result = -ENODEV; 283 result = -ENODEV;
286 goto error; 284 goto error;
287 } 285 }
@@ -296,33 +294,32 @@ static int chnl_net_open(struct net_device *dev)
296 rtnl_lock(); 294 rtnl_lock();
297 295
298 if (result == -ERESTARTSYS) { 296 if (result == -ERESTARTSYS) {
299 pr_debug("CAIF: %s(): wait_event_interruptible" 297 pr_debug("wait_event_interruptible woken by a signal\n");
300 " woken by a signal\n", __func__);
301 result = -ERESTARTSYS; 298 result = -ERESTARTSYS;
302 goto error; 299 goto error;
303 } 300 }
304 301
305 if (result == 0) { 302 if (result == 0) {
306 pr_debug("CAIF: %s(): connect timeout\n", __func__); 303 pr_debug("connect timeout\n");
307 caif_disconnect_client(&priv->chnl); 304 caif_disconnect_client(&priv->chnl);
308 priv->state = CAIF_DISCONNECTED; 305 priv->state = CAIF_DISCONNECTED;
309 pr_debug("CAIF: %s(): state disconnected\n", __func__); 306 pr_debug("state disconnected\n");
310 result = -ETIMEDOUT; 307 result = -ETIMEDOUT;
311 goto error; 308 goto error;
312 } 309 }
313 310
314 if (priv->state != CAIF_CONNECTED) { 311 if (priv->state != CAIF_CONNECTED) {
315 pr_debug("CAIF: %s(): connect failed\n", __func__); 312 pr_debug("connect failed\n");
316 result = -ECONNREFUSED; 313 result = -ECONNREFUSED;
317 goto error; 314 goto error;
318 } 315 }
319 pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__); 316 pr_debug("CAIF Netdevice connected\n");
320 return 0; 317 return 0;
321 318
322error: 319error:
323 caif_disconnect_client(&priv->chnl); 320 caif_disconnect_client(&priv->chnl);
324 priv->state = CAIF_DISCONNECTED; 321 priv->state = CAIF_DISCONNECTED;
325 pr_debug("CAIF: %s(): state disconnected\n", __func__); 322 pr_debug("state disconnected\n");
326 return result; 323 return result;
327 324
328} 325}
@@ -413,7 +410,7 @@ static void caif_netlink_parms(struct nlattr *data[],
413 struct caif_connect_request *conn_req) 410 struct caif_connect_request *conn_req)
414{ 411{
415 if (!data) { 412 if (!data) {
416 pr_warning("CAIF: %s: no params data found\n", __func__); 413 pr_warn("no params data found\n");
417 return; 414 return;
418 } 415 }
419 if (data[IFLA_CAIF_IPV4_CONNID]) 416 if (data[IFLA_CAIF_IPV4_CONNID])
@@ -442,8 +439,7 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
442 439
443 ret = register_netdevice(dev); 440 ret = register_netdevice(dev);
444 if (ret) 441 if (ret)
445 pr_warning("CAIF: %s(): device rtml registration failed\n", 442 pr_warn("device rtml registration failed\n");
446 __func__);
447 return ret; 443 return ret;
448} 444}
449 445
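
Note: chnl_net_open() drops the rtnl lock, sleeps for the connect result, then re-takes it; the three result checks in the hunks above follow wait_event_interruptible_timeout() semantics (negative means a signal, 0 means timeout, positive means the condition became true). A sketch of that call site, with the wait-queue field and timeout names assumed for illustration:

/* Sketch only: priv->netmgmt_wq and CONNECT_TIMEOUT are assumed names. */
rtnl_unlock();		/* do not hold rtnl while sleeping */
result = wait_event_interruptible_timeout(priv->netmgmt_wq,
					   priv->state != CAIF_CONNECTING,
					   CONNECT_TIMEOUT);
rtnl_lock();

if (result == -ERESTARTSYS)	/* woken by a signal */
	goto error;
if (result == 0) {		/* timed out while still CONNECTING */
	result = -ETIMEDOUT;
	goto error;
}
/* result > 0: the state changed; still verify it is CAIF_CONNECTED */
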
diff --git a/net/can/raw.c b/net/can/raw.c
index a10e3338f084..7d77e67e57af 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -647,12 +647,12 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
647 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 647 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
648 if (err < 0) 648 if (err < 0)
649 goto free_skb; 649 goto free_skb;
650 err = sock_tx_timestamp(msg, sk, skb_tx(skb)); 650 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
651 if (err < 0) 651 if (err < 0)
652 goto free_skb; 652 goto free_skb;
653 653
654 /* to be able to check the received tx sock reference in raw_rcv() */ 654 /* to be able to check the received tx sock reference in raw_rcv() */
655 skb_tx(skb)->prevent_sk_orphan = 1; 655 skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
656 656
657 skb->dev = dev; 657 skb->dev = dev;
658 skb->sk = sk; 658 skb->sk = sk;
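
Note: the two changed lines replace the old skb_shared_tx block, which has been folded into skb_shared_info as a plain tx_flags field. The semantics carry over: any set bit in tx_flags stops dev_hard_start_xmit() from orphaning the skb early (see the skb_orphan_try() hunk in net/core/dev.c further down), so raw_rcv() can still match the sending socket. Old versus new form:

/* Before (removed API):
 *	skb_tx(skb)->prevent_sk_orphan = 1;
 * After (this series): the flag lives in skb_shared_info, and any
 * non-zero tx_flags keeps skb->sk alive across transmission.
 */
skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
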
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 251997a95483..4df1b7a6c1bf 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -746,13 +746,12 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
746 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 746 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
747 mask |= POLLERR; 747 mask |= POLLERR;
748 if (sk->sk_shutdown & RCV_SHUTDOWN) 748 if (sk->sk_shutdown & RCV_SHUTDOWN)
749 mask |= POLLRDHUP; 749 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
750 if (sk->sk_shutdown == SHUTDOWN_MASK) 750 if (sk->sk_shutdown == SHUTDOWN_MASK)
751 mask |= POLLHUP; 751 mask |= POLLHUP;
752 752
753 /* readable? */ 753 /* readable? */
754 if (!skb_queue_empty(&sk->sk_receive_queue) || 754 if (!skb_queue_empty(&sk->sk_receive_queue))
755 (sk->sk_shutdown & RCV_SHUTDOWN))
756 mask |= POLLIN | POLLRDNORM; 755 mask |= POLLIN | POLLRDNORM;
757 756
758 /* Connection-based need to check for termination and startup */ 757 /* Connection-based need to check for termination and startup */
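
Note: the readable test no longer needs the RCV_SHUTDOWN clause because POLLIN | POLLRDNORM are now raised together with POLLRDHUP in the shutdown branch; the user-visible behaviour is unchanged. What callers depend on, as a userspace sketch (sock_fd is any connected socket descriptor):

#define _GNU_SOURCE	/* for POLLRDHUP on glibc */
#include <poll.h>

static int wait_readable(int sock_fd)
{
	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN | POLLRDHUP };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	if (pfd.revents & POLLRDHUP)
		return 0;	/* peer closed: read() drains data, then EOF */
	return 1;		/* POLLIN: data waiting */
}
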
diff --git a/net/core/dev.c b/net/core/dev.c
index b9b22a3c4c8f..fc2dc933bee5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -371,6 +371,14 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
371 * --ANK (980803) 371 * --ANK (980803)
372 */ 372 */
373 373
374static inline struct list_head *ptype_head(const struct packet_type *pt)
375{
376 if (pt->type == htons(ETH_P_ALL))
377 return &ptype_all;
378 else
379 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
380}
381
374/** 382/**
375 * dev_add_pack - add packet handler 383 * dev_add_pack - add packet handler
376 * @pt: packet type declaration 384 * @pt: packet type declaration
@@ -386,16 +394,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
386 394
387void dev_add_pack(struct packet_type *pt) 395void dev_add_pack(struct packet_type *pt)
388{ 396{
389 int hash; 397 struct list_head *head = ptype_head(pt);
390 398
391 spin_lock_bh(&ptype_lock); 399 spin_lock(&ptype_lock);
392 if (pt->type == htons(ETH_P_ALL)) 400 list_add_rcu(&pt->list, head);
393 list_add_rcu(&pt->list, &ptype_all); 401 spin_unlock(&ptype_lock);
394 else {
395 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
396 list_add_rcu(&pt->list, &ptype_base[hash]);
397 }
398 spin_unlock_bh(&ptype_lock);
399} 402}
400EXPORT_SYMBOL(dev_add_pack); 403EXPORT_SYMBOL(dev_add_pack);
401 404
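
Note: ptype_head() factors the ETH_P_ALL versus hash-bucket choice out of both dev_add_pack() and __dev_remove_pack(). The lock also changes from spin_lock_bh() to spin_lock(); that reads as intentional, since the lists are traversed under rcu_read_lock() on the receive path and the writers run in process context, though the hunk itself does not say so. For reference, a registration sketch (my_proto_rcv and my_ptype are hypothetical):

static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	kfree_skb(skb);		/* a real handler would process it */
	return NET_RX_SUCCESS;
}

static struct packet_type my_ptype __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),	/* ptype_head() -> &ptype_all */
	.func = my_proto_rcv,
};

static int __init my_tap_init(void)
{
	dev_add_pack(&my_ptype);	/* list_add_rcu() under ptype_lock */
	return 0;
}
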
@@ -414,15 +417,10 @@ EXPORT_SYMBOL(dev_add_pack);
414 */ 417 */
415void __dev_remove_pack(struct packet_type *pt) 418void __dev_remove_pack(struct packet_type *pt)
416{ 419{
417 struct list_head *head; 420 struct list_head *head = ptype_head(pt);
418 struct packet_type *pt1; 421 struct packet_type *pt1;
419 422
420 spin_lock_bh(&ptype_lock); 423 spin_lock(&ptype_lock);
421
422 if (pt->type == htons(ETH_P_ALL))
423 head = &ptype_all;
424 else
425 head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
426 424
427 list_for_each_entry(pt1, head, list) { 425 list_for_each_entry(pt1, head, list) {
428 if (pt == pt1) { 426 if (pt == pt1) {
@@ -433,7 +431,7 @@ void __dev_remove_pack(struct packet_type *pt)
433 431
434 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); 432 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
435out: 433out:
436 spin_unlock_bh(&ptype_lock); 434 spin_unlock(&ptype_lock);
437} 435}
438EXPORT_SYMBOL(__dev_remove_pack); 436EXPORT_SYMBOL(__dev_remove_pack);
439 437
@@ -1902,14 +1900,14 @@ static int dev_gso_segment(struct sk_buff *skb)
1902 1900
1903/* 1901/*
1904 * Try to orphan skb early, right before transmission by the device. 1902 * Try to orphan skb early, right before transmission by the device.
1905 * We cannot orphan skb if tx timestamp is requested, since 1903 * We cannot orphan skb if tx timestamp is requested or the sk-reference
1906 * drivers need to call skb_tstamp_tx() to send the timestamp. 1904 * is needed on driver level for other reasons, e.g. see net/can/raw.c
1907 */ 1905 */
1908static inline void skb_orphan_try(struct sk_buff *skb) 1906static inline void skb_orphan_try(struct sk_buff *skb)
1909{ 1907{
1910 struct sock *sk = skb->sk; 1908 struct sock *sk = skb->sk;
1911 1909
1912 if (sk && !skb_tx(skb)->flags) { 1910 if (sk && !skb_shinfo(skb)->tx_flags) {
1913 /* skb_tx_hash() wont be able to get sk. 1911 /* skb_tx_hash() wont be able to get sk.
1914 * We copy sk_hash into skb->rxhash 1912 * We copy sk_hash into skb->rxhash
1915 */ 1913 */
@@ -1930,7 +1928,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
1930 struct net_device *dev) 1928 struct net_device *dev)
1931{ 1929{
1932 return skb_is_nonlinear(skb) && 1930 return skb_is_nonlinear(skb) &&
1933 ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) || 1931 ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
1934 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || 1932 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1935 illegal_highdma(dev, skb)))); 1933 illegal_highdma(dev, skb))));
1936} 1934}
@@ -2259,69 +2257,44 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2259 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2257 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2260} 2258}
2261 2259
2262#ifdef CONFIG_RPS
2263
2264/* One global table that all flow-based protocols share. */
2265struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2266EXPORT_SYMBOL(rps_sock_flow_table);
2267
2268/* 2260/*
2269 * get_rps_cpu is called from netif_receive_skb and returns the target 2261 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2270 * CPU from the RPS map of the receiving queue for a given skb. 2262 * and src/dst port numbers. Returns a non-zero hash number on success
2271 * rcu_read_lock must be held on entry. 2263 * and 0 on failure.
2272 */ 2264 */
2273static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2265__u32 __skb_get_rxhash(struct sk_buff *skb)
2274 struct rps_dev_flow **rflowp)
2275{ 2266{
2267 int nhoff, hash = 0, poff;
2276 struct ipv6hdr *ip6; 2268 struct ipv6hdr *ip6;
2277 struct iphdr *ip; 2269 struct iphdr *ip;
2278 struct netdev_rx_queue *rxqueue;
2279 struct rps_map *map;
2280 struct rps_dev_flow_table *flow_table;
2281 struct rps_sock_flow_table *sock_flow_table;
2282 int cpu = -1;
2283 u8 ip_proto; 2270 u8 ip_proto;
2284 u16 tcpu;
2285 u32 addr1, addr2, ihl; 2271 u32 addr1, addr2, ihl;
2286 union { 2272 union {
2287 u32 v32; 2273 u32 v32;
2288 u16 v16[2]; 2274 u16 v16[2];
2289 } ports; 2275 } ports;
2290 2276
2291 if (skb_rx_queue_recorded(skb)) { 2277 nhoff = skb_network_offset(skb);
2292 u16 index = skb_get_rx_queue(skb);
2293 if (unlikely(index >= dev->num_rx_queues)) {
2294 WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
2295 "on queue %u, but number of RX queues is %u\n",
2296 dev->name, index, dev->num_rx_queues);
2297 goto done;
2298 }
2299 rxqueue = dev->_rx + index;
2300 } else
2301 rxqueue = dev->_rx;
2302
2303 if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
2304 goto done;
2305
2306 if (skb->rxhash)
2307 goto got_hash; /* Skip hash computation on packet header */
2308 2278
2309 switch (skb->protocol) { 2279 switch (skb->protocol) {
2310 case __constant_htons(ETH_P_IP): 2280 case __constant_htons(ETH_P_IP):
2311 if (!pskb_may_pull(skb, sizeof(*ip))) 2281 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2312 goto done; 2282 goto done;
2313 2283
2314 ip = (struct iphdr *) skb->data; 2284 ip = (struct iphdr *) (skb->data + nhoff);
2315 ip_proto = ip->protocol; 2285 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2286 ip_proto = 0;
2287 else
2288 ip_proto = ip->protocol;
2316 addr1 = (__force u32) ip->saddr; 2289 addr1 = (__force u32) ip->saddr;
2317 addr2 = (__force u32) ip->daddr; 2290 addr2 = (__force u32) ip->daddr;
2318 ihl = ip->ihl; 2291 ihl = ip->ihl;
2319 break; 2292 break;
2320 case __constant_htons(ETH_P_IPV6): 2293 case __constant_htons(ETH_P_IPV6):
2321 if (!pskb_may_pull(skb, sizeof(*ip6))) 2294 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2322 goto done; 2295 goto done;
2323 2296
2324 ip6 = (struct ipv6hdr *) skb->data; 2297 ip6 = (struct ipv6hdr *) (skb->data + nhoff);
2325 ip_proto = ip6->nexthdr; 2298 ip_proto = ip6->nexthdr;
2326 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2299 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2327 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2300 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -2330,33 +2303,80 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2330 default: 2303 default:
2331 goto done; 2304 goto done;
2332 } 2305 }
2333 switch (ip_proto) { 2306
2334 case IPPROTO_TCP: 2307 ports.v32 = 0;
2335 case IPPROTO_UDP: 2308 poff = proto_ports_offset(ip_proto);
2336 case IPPROTO_DCCP: 2309 if (poff >= 0) {
2337 case IPPROTO_ESP: 2310 nhoff += ihl * 4 + poff;
2338 case IPPROTO_AH: 2311 if (pskb_may_pull(skb, nhoff + 4)) {
2339 case IPPROTO_SCTP: 2312 ports.v32 = * (__force u32 *) (skb->data + nhoff);
2340 case IPPROTO_UDPLITE:
2341 if (pskb_may_pull(skb, (ihl * 4) + 4)) {
2342 ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
2343 if (ports.v16[1] < ports.v16[0]) 2313 if (ports.v16[1] < ports.v16[0])
2344 swap(ports.v16[0], ports.v16[1]); 2314 swap(ports.v16[0], ports.v16[1]);
2345 break;
2346 } 2315 }
2347 default:
2348 ports.v32 = 0;
2349 break;
2350 } 2316 }
2351 2317
2352 /* get a consistent hash (same value on both flow directions) */ 2318 /* get a consistent hash (same value on both flow directions) */
2353 if (addr2 < addr1) 2319 if (addr2 < addr1)
2354 swap(addr1, addr2); 2320 swap(addr1, addr2);
2355 skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2356 if (!skb->rxhash)
2357 skb->rxhash = 1;
2358 2321
2359got_hash: 2322 hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2323 if (!hash)
2324 hash = 1;
2325
2326done:
2327 return hash;
2328}
2329EXPORT_SYMBOL(__skb_get_rxhash);
2330
2331#ifdef CONFIG_RPS
2332
2333/* One global table that all flow-based protocols share. */
2334struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2335EXPORT_SYMBOL(rps_sock_flow_table);
2336
2337/*
2338 * get_rps_cpu is called from netif_receive_skb and returns the target
2339 * CPU from the RPS map of the receiving queue for a given skb.
2340 * rcu_read_lock must be held on entry.
2341 */
2342static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2343 struct rps_dev_flow **rflowp)
2344{
2345 struct netdev_rx_queue *rxqueue;
2346 struct rps_map *map = NULL;
2347 struct rps_dev_flow_table *flow_table;
2348 struct rps_sock_flow_table *sock_flow_table;
2349 int cpu = -1;
2350 u16 tcpu;
2351
2352 if (skb_rx_queue_recorded(skb)) {
2353 u16 index = skb_get_rx_queue(skb);
2354 if (unlikely(index >= dev->num_rx_queues)) {
2355 WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
2356 "on queue %u, but number of RX queues is %u\n",
2357 dev->name, index, dev->num_rx_queues);
2358 goto done;
2359 }
2360 rxqueue = dev->_rx + index;
2361 } else
2362 rxqueue = dev->_rx;
2363
2364 if (rxqueue->rps_map) {
2365 map = rcu_dereference(rxqueue->rps_map);
2366 if (map && map->len == 1) {
2367 tcpu = map->cpus[0];
2368 if (cpu_online(tcpu))
2369 cpu = tcpu;
2370 goto done;
2371 }
2372 } else if (!rxqueue->rps_flow_table) {
2373 goto done;
2374 }
2375
2376 skb_reset_network_header(skb);
2377 if (!skb_get_rxhash(skb))
2378 goto done;
2379
2360 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2380 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2361 sock_flow_table = rcu_dereference(rps_sock_flow_table); 2381 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2362 if (flow_table && sock_flow_table) { 2382 if (flow_table && sock_flow_table) {
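
Note: the hash computation moves out of get_rps_cpu() into the exported __skb_get_rxhash() so it can be shared beyond RPS. Two properties are worth spelling out: IPv4 fragments hash on addresses only (ip_proto is forced to 0 when MF or a fragment offset is set, so all fragments of one datagram land on the same CPU), and the address and port pairs are sorted so both directions of a flow produce the same value. The core of the direction-independence, condensed from the hunk above:

if (addr2 < addr1)
	swap(addr1, addr2);			/* canonical address order */
if (ports.v16[1] < ports.v16[0])
	swap(ports.v16[0], ports.v16[1]);	/* canonical port order */

hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
if (!hash)
	hash = 1;	/* 0 is reserved for "no hash computed yet" */
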
@@ -2396,7 +2416,6 @@ got_hash:
2396 } 2416 }
2397 } 2417 }
2398 2418
2399 map = rcu_dereference(rxqueue->rps_map);
2400 if (map) { 2419 if (map) {
2401 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 2420 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2402 2421
@@ -2828,8 +2847,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
2828 if (!netdev_tstamp_prequeue) 2847 if (!netdev_tstamp_prequeue)
2829 net_timestamp_check(skb); 2848 net_timestamp_check(skb);
2830 2849
2831 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb)) 2850 if (vlan_tx_tag_present(skb))
2832 return NET_RX_SUCCESS; 2851 vlan_hwaccel_do_receive(skb);
2833 2852
2834 /* if we've gotten here through NAPI, check netpoll */ 2853 /* if we've gotten here through NAPI, check netpoll */
2835 if (netpoll_receive_skb(skb)) 2854 if (netpoll_receive_skb(skb))
@@ -3050,7 +3069,7 @@ out:
3050 return netif_receive_skb(skb); 3069 return netif_receive_skb(skb);
3051} 3070}
3052 3071
3053static void napi_gro_flush(struct napi_struct *napi) 3072inline void napi_gro_flush(struct napi_struct *napi)
3054{ 3073{
3055 struct sk_buff *skb, *next; 3074 struct sk_buff *skb, *next;
3056 3075
@@ -3063,6 +3082,7 @@ static void napi_gro_flush(struct napi_struct *napi)
3063 napi->gro_count = 0; 3082 napi->gro_count = 0;
3064 napi->gro_list = NULL; 3083 napi->gro_list = NULL;
3065} 3084}
3085EXPORT_SYMBOL(napi_gro_flush);
3066 3086
3067enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3087enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3068{ 3088{
@@ -3077,7 +3097,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3077 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3097 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3078 goto normal; 3098 goto normal;
3079 3099
3080 if (skb_is_gso(skb) || skb_has_frags(skb)) 3100 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3081 goto normal; 3101 goto normal;
3082 3102
3083 rcu_read_lock(); 3103 rcu_read_lock();
@@ -3156,16 +3176,18 @@ normal:
3156} 3176}
3157EXPORT_SYMBOL(dev_gro_receive); 3177EXPORT_SYMBOL(dev_gro_receive);
3158 3178
3159static gro_result_t 3179static inline gro_result_t
3160__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3180__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3161{ 3181{
3162 struct sk_buff *p; 3182 struct sk_buff *p;
3163 3183
3164 for (p = napi->gro_list; p; p = p->next) { 3184 for (p = napi->gro_list; p; p = p->next) {
3165 NAPI_GRO_CB(p)->same_flow = 3185 unsigned long diffs;
3166 (p->dev == skb->dev) && 3186
3167 !compare_ether_header(skb_mac_header(p), 3187 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3188 diffs |= compare_ether_header(skb_mac_header(p),
3168 skb_gro_mac_header(skb)); 3189 skb_gro_mac_header(skb));
3190 NAPI_GRO_CB(p)->same_flow = !diffs;
3169 NAPI_GRO_CB(p)->flush = 0; 3191 NAPI_GRO_CB(p)->flush = 0;
3170 } 3192 }
3171 3193
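
Note: the rewritten same_flow test accumulates all differences into one word instead of chaining boolean tests. compare_ether_header() returns 0 exactly when the two 14-byte Ethernet headers match, so it can be OR-merged with the device-pointer XOR and reduced with a single !diffs:

diffs  = (unsigned long)p->dev ^ (unsigned long)skb->dev; /* 0 iff same dev */
diffs |= compare_ether_header(skb_mac_header(p),
			      skb_gro_mac_header(skb));   /* 0 iff same hdr */
NAPI_GRO_CB(p)->same_flow = !diffs;	/* one reduction, fewer branches */
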
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 7a85367b3c2f..970eb9817bbc 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -205,18 +205,24 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
205 struct ethtool_drvinfo info; 205 struct ethtool_drvinfo info;
206 const struct ethtool_ops *ops = dev->ethtool_ops; 206 const struct ethtool_ops *ops = dev->ethtool_ops;
207 207
208 if (!ops->get_drvinfo)
209 return -EOPNOTSUPP;
210
211 memset(&info, 0, sizeof(info)); 208 memset(&info, 0, sizeof(info));
212 info.cmd = ETHTOOL_GDRVINFO; 209 info.cmd = ETHTOOL_GDRVINFO;
213 ops->get_drvinfo(dev, &info); 210 if (ops && ops->get_drvinfo) {
211 ops->get_drvinfo(dev, &info);
212 } else if (dev->dev.parent && dev->dev.parent->driver) {
213 strlcpy(info.bus_info, dev_name(dev->dev.parent),
214 sizeof(info.bus_info));
215 strlcpy(info.driver, dev->dev.parent->driver->name,
216 sizeof(info.driver));
217 } else {
218 return -EOPNOTSUPP;
219 }
214 220
215 /* 221 /*
216 * this method of obtaining string set info is deprecated; 222 * this method of obtaining string set info is deprecated;
217 * Use ETHTOOL_GSSET_INFO instead. 223 * Use ETHTOOL_GSSET_INFO instead.
218 */ 224 */
219 if (ops->get_sset_count) { 225 if (ops && ops->get_sset_count) {
220 int rc; 226 int rc;
221 227
222 rc = ops->get_sset_count(dev, ETH_SS_TEST); 228 rc = ops->get_sset_count(dev, ETH_SS_TEST);
@@ -229,9 +235,9 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
229 if (rc >= 0) 235 if (rc >= 0)
230 info.n_priv_flags = rc; 236 info.n_priv_flags = rc;
231 } 237 }
232 if (ops->get_regs_len) 238 if (ops && ops->get_regs_len)
233 info.regdump_len = ops->get_regs_len(dev); 239 info.regdump_len = ops->get_regs_len(dev);
234 if (ops->get_eeprom_len) 240 if (ops && ops->get_eeprom_len)
235 info.eedump_len = ops->get_eeprom_len(dev); 241 info.eedump_len = ops->get_eeprom_len(dev);
236 242
237 if (copy_to_user(useraddr, &info, sizeof(info))) 243 if (copy_to_user(useraddr, &info, sizeof(info)))
@@ -1402,14 +1408,22 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1402 if (!dev || !netif_device_present(dev)) 1408 if (!dev || !netif_device_present(dev))
1403 return -ENODEV; 1409 return -ENODEV;
1404 1410
1405 if (!dev->ethtool_ops)
1406 return -EOPNOTSUPP;
1407
1408 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) 1411 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1409 return -EFAULT; 1412 return -EFAULT;
1410 1413
1414 if (!dev->ethtool_ops) {
1415 /* ETHTOOL_GDRVINFO does not require any driver support.
1416 * It is also unprivileged and does not change anything,
1417 * so we can take a shortcut to it. */
1418 if (ethcmd == ETHTOOL_GDRVINFO)
1419 return ethtool_get_drvinfo(dev, useraddr);
1420 else
1421 return -EOPNOTSUPP;
1422 }
1423
1411 /* Allow some commands to be done by anyone */ 1424 /* Allow some commands to be done by anyone */
1412 switch (ethcmd) { 1425 switch (ethcmd) {
1426 case ETHTOOL_GSET:
1413 case ETHTOOL_GDRVINFO: 1427 case ETHTOOL_GDRVINFO:
1414 case ETHTOOL_GMSGLVL: 1428 case ETHTOOL_GMSGLVL:
1415 case ETHTOOL_GCOALESCE: 1429 case ETHTOOL_GCOALESCE:
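
Note: with the shortcut above, ETHTOOL_GDRVINFO succeeds even for devices whose driver registers no ethtool_ops, reporting the parent driver and bus names instead. From userspace the query is unchanged; a sketch (sock_fd is any open socket, "eth0" illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static void show_drvinfo(int sock_fd, const char *ifname)
{
	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;

	if (ioctl(sock_fd, SIOCETHTOOL, &ifr) == 0)
		printf("driver=%s bus=%s\n", info.driver, info.bus_info);
}
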
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 1cd98df412df..f4657c2127b4 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -41,7 +41,9 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
41 41
42 if (m->msg_namelen) { 42 if (m->msg_namelen) {
43 if (mode == VERIFY_READ) { 43 if (mode == VERIFY_READ) {
44 err = move_addr_to_kernel(m->msg_name, m->msg_namelen, 44 void __user *namep;
45 namep = (void __user __force *) m->msg_name;
46 err = move_addr_to_kernel(namep, m->msg_namelen,
45 address); 47 address);
46 if (err < 0) 48 if (err < 0)
47 return err; 49 return err;
@@ -52,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
52 } 54 }
53 55
54 size = m->msg_iovlen * sizeof(struct iovec); 56 size = m->msg_iovlen * sizeof(struct iovec);
55 if (copy_from_user(iov, m->msg_iov, size)) 57 if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
56 return -EFAULT; 58 return -EFAULT;
57 59
58 m->msg_iov = iov; 60 m->msg_iov = iov;
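
Note: struct msghdr declares msg_name and msg_iov as kernel pointers, but on this path they still hold user-space addresses; the (void __user __force *) casts tell sparse that crossing address spaces here is deliberate rather than a bug. The idiom in isolation (kbuf and len stand in for a real destination):

/* Sketch: an explicit, sparse-visible address-space conversion. */
void __user *up = (void __user __force *)m->msg_name;

if (copy_from_user(kbuf, up, len))	/* returns bytes NOT copied */
	return -EFAULT;
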
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index af4dfbadf2a0..76485a3f910b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -515,7 +515,7 @@ static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
515 return attribute->store(queue, attribute, buf, count); 515 return attribute->store(queue, attribute, buf, count);
516} 516}
517 517
518static struct sysfs_ops rx_queue_sysfs_ops = { 518static const struct sysfs_ops rx_queue_sysfs_ops = {
519 .show = rx_queue_attr_show, 519 .show = rx_queue_attr_show,
520 .store = rx_queue_attr_store, 520 .store = rx_queue_attr_store,
521}; 521};
@@ -789,12 +789,13 @@ static const void *net_netlink_ns(struct sock *sk)
789 return sock_net(sk); 789 return sock_net(sk);
790} 790}
791 791
792static struct kobj_ns_type_operations net_ns_type_operations = { 792struct kobj_ns_type_operations net_ns_type_operations = {
793 .type = KOBJ_NS_TYPE_NET, 793 .type = KOBJ_NS_TYPE_NET,
794 .current_ns = net_current_ns, 794 .current_ns = net_current_ns,
795 .netlink_ns = net_netlink_ns, 795 .netlink_ns = net_netlink_ns,
796 .initial_ns = net_initial_ns, 796 .initial_ns = net_initial_ns,
797}; 797};
798EXPORT_SYMBOL_GPL(net_ns_type_operations);
798 799
799static void net_kobj_ns_exit(struct net *net) 800static void net_kobj_ns_exit(struct net *net)
800{ 801{
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 10a1ea72010d..386c2283f14e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3907,8 +3907,6 @@ static void __exit pg_cleanup(void)
3907{ 3907{
3908 struct pktgen_thread *t; 3908 struct pktgen_thread *t;
3909 struct list_head *q, *n; 3909 struct list_head *q, *n;
3910 wait_queue_head_t queue;
3911 init_waitqueue_head(&queue);
3912 3910
3913 /* Stop all interfaces & threads */ 3911 /* Stop all interfaces & threads */
3914 3912
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f78d821bd935..b2a718dfd720 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -612,36 +612,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
612 612
613static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) 613static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
614{ 614{
615 struct rtnl_link_stats64 a; 615 memcpy(v, b, sizeof(*b));
616
617 a.rx_packets = b->rx_packets;
618 a.tx_packets = b->tx_packets;
619 a.rx_bytes = b->rx_bytes;
620 a.tx_bytes = b->tx_bytes;
621 a.rx_errors = b->rx_errors;
622 a.tx_errors = b->tx_errors;
623 a.rx_dropped = b->rx_dropped;
624 a.tx_dropped = b->tx_dropped;
625
626 a.multicast = b->multicast;
627 a.collisions = b->collisions;
628
629 a.rx_length_errors = b->rx_length_errors;
630 a.rx_over_errors = b->rx_over_errors;
631 a.rx_crc_errors = b->rx_crc_errors;
632 a.rx_frame_errors = b->rx_frame_errors;
633 a.rx_fifo_errors = b->rx_fifo_errors;
634 a.rx_missed_errors = b->rx_missed_errors;
635
636 a.tx_aborted_errors = b->tx_aborted_errors;
637 a.tx_carrier_errors = b->tx_carrier_errors;
638 a.tx_fifo_errors = b->tx_fifo_errors;
639 a.tx_heartbeat_errors = b->tx_heartbeat_errors;
640 a.tx_window_errors = b->tx_window_errors;
641
642 a.rx_compressed = b->rx_compressed;
643 a.tx_compressed = b->tx_compressed;
644 memcpy(v, &a, sizeof(a));
645} 616}
646 617
647/* All VF info */ 618/* All VF info */
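
Note: the removed field-by-field copy was byte-for-byte identical to a memcpy: rtnl_link_stats64 is 23 consecutive __u64 counters (count the assignments above) with no padding, and the destination attribute is allocated at the same size. If one wanted a belt-and-braces guard, a compile-time check would do; this is hypothetical, not part of the patch:

static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
{
	/* Hypothetical guard: break the build if the struct ever grows
	 * without the netlink attribute size being revisited. */
	BUILD_BUG_ON(sizeof(struct rtnl_link_stats64) != 23 * sizeof(__u64));
	memcpy(v, b, sizeof(*b));
}
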
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c83b421341c0..752c1972b3a7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -202,8 +202,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
202 skb->data = data; 202 skb->data = data;
203 skb_reset_tail_pointer(skb); 203 skb_reset_tail_pointer(skb);
204 skb->end = skb->tail + size; 204 skb->end = skb->tail + size;
205 kmemcheck_annotate_bitfield(skb, flags1);
206 kmemcheck_annotate_bitfield(skb, flags2);
207#ifdef NET_SKBUFF_DATA_USES_OFFSET 205#ifdef NET_SKBUFF_DATA_USES_OFFSET
208 skb->mac_header = ~0U; 206 skb->mac_header = ~0U;
209#endif 207#endif
@@ -340,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb)
340 put_page(skb_shinfo(skb)->frags[i].page); 338 put_page(skb_shinfo(skb)->frags[i].page);
341 } 339 }
342 340
343 if (skb_has_frags(skb)) 341 if (skb_has_frag_list(skb))
344 skb_drop_fraglist(skb); 342 skb_drop_fraglist(skb);
345 343
346 kfree(skb->head); 344 kfree(skb->head);
@@ -685,16 +683,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
685 683
686struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 684struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
687{ 685{
688 int headerlen = skb->data - skb->head; 686 int headerlen = skb_headroom(skb);
689 /* 687 unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
690 * Allocate the copy buffer 688 struct sk_buff *n = alloc_skb(size, gfp_mask);
691 */ 689
692 struct sk_buff *n;
693#ifdef NET_SKBUFF_DATA_USES_OFFSET
694 n = alloc_skb(skb->end + skb->data_len, gfp_mask);
695#else
696 n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
697#endif
698 if (!n) 690 if (!n)
699 return NULL; 691 return NULL;
700 692
@@ -726,20 +718,14 @@ EXPORT_SYMBOL(skb_copy);
726 718
727struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) 719struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
728{ 720{
729 /* 721 unsigned int size = skb_end_pointer(skb) - skb->head;
730 * Allocate the copy buffer 722 struct sk_buff *n = alloc_skb(size, gfp_mask);
731 */ 723
732 struct sk_buff *n;
733#ifdef NET_SKBUFF_DATA_USES_OFFSET
734 n = alloc_skb(skb->end, gfp_mask);
735#else
736 n = alloc_skb(skb->end - skb->head, gfp_mask);
737#endif
738 if (!n) 724 if (!n)
739 goto out; 725 goto out;
740 726
741 /* Set the data pointer */ 727 /* Set the data pointer */
742 skb_reserve(n, skb->data - skb->head); 728 skb_reserve(n, skb_headroom(skb));
743 /* Set the tail pointer and length */ 729 /* Set the tail pointer and length */
744 skb_put(n, skb_headlen(skb)); 730 skb_put(n, skb_headlen(skb));
745 /* Copy the bytes */ 731 /* Copy the bytes */
@@ -759,7 +745,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
759 skb_shinfo(n)->nr_frags = i; 745 skb_shinfo(n)->nr_frags = i;
760 } 746 }
761 747
762 if (skb_has_frags(skb)) { 748 if (skb_has_frag_list(skb)) {
763 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 749 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
764 skb_clone_fraglist(n); 750 skb_clone_fraglist(n);
765 } 751 }
@@ -791,12 +777,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
791{ 777{
792 int i; 778 int i;
793 u8 *data; 779 u8 *data;
794#ifdef NET_SKBUFF_DATA_USES_OFFSET 780 int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
795 int size = nhead + skb->end + ntail;
796#else
797 int size = nhead + (skb->end - skb->head) + ntail;
798#endif
799 long off; 781 long off;
782 bool fastpath;
800 783
801 BUG_ON(nhead < 0); 784 BUG_ON(nhead < 0);
802 785
@@ -810,23 +793,36 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
810 goto nodata; 793 goto nodata;
811 794
812 /* Copy only real data... and, alas, header. This should be 795 /* Copy only real data... and, alas, header. This should be
813 * optimized for the cases when header is void. */ 796 * optimized for the cases when header is void.
814#ifdef NET_SKBUFF_DATA_USES_OFFSET 797 */
815 memcpy(data + nhead, skb->head, skb->tail); 798 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
816#else 799
817 memcpy(data + nhead, skb->head, skb->tail - skb->head); 800 memcpy((struct skb_shared_info *)(data + size),
818#endif 801 skb_shinfo(skb),
819 memcpy(data + size, skb_end_pointer(skb),
820 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 802 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
821 803
822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 804 /* Check if we can avoid taking references on fragments if we own
823 get_page(skb_shinfo(skb)->frags[i].page); 805 * the last reference on skb->head. (see skb_release_data())
806 */
807 if (!skb->cloned)
808 fastpath = true;
809 else {
810 int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
824 811
825 if (skb_has_frags(skb)) 812 fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
826 skb_clone_fraglist(skb); 813 }
827 814
828 skb_release_data(skb); 815 if (fastpath) {
816 kfree(skb->head);
817 } else {
818 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
819 get_page(skb_shinfo(skb)->frags[i].page);
829 820
821 if (skb_has_frag_list(skb))
822 skb_clone_fraglist(skb);
823
824 skb_release_data(skb);
825 }
830 off = (data + nhead) - skb->head; 826 off = (data + nhead) - skb->head;
831 827
832 skb->head = data; 828 skb->head = data;
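
Note: the new fastpath skips taking page references and calling skb_release_data() when this skb is the sole owner of its head. Ownership is encoded in skb_shinfo(skb)->dataref: the low SKB_DATAREF_SHIFT bits count references to the whole data area and the high bits count payload-only references, so a lone owner reads 1, or (1 << SKB_DATAREF_SHIFT) + 1 once skb_header_release() has marked the header as given away (skb->nohdr). Condensed from the hunk above:

/* Sole-ownership test behind the fastpath (condensed): */
int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
bool fastpath = !skb->cloned ||
		atomic_read(&skb_shinfo(skb)->dataref) == delta;

if (fastpath)
	kfree(skb->head);	/* no other reference can see the old head */
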
@@ -1099,7 +1095,7 @@ drop_pages:
1099 for (; i < nfrags; i++) 1095 for (; i < nfrags; i++)
1100 put_page(skb_shinfo(skb)->frags[i].page); 1096 put_page(skb_shinfo(skb)->frags[i].page);
1101 1097
1102 if (skb_has_frags(skb)) 1098 if (skb_has_frag_list(skb))
1103 skb_drop_fraglist(skb); 1099 skb_drop_fraglist(skb);
1104 goto done; 1100 goto done;
1105 } 1101 }
@@ -1194,7 +1190,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1194 /* Optimization: no fragments, no reasons to preestimate 1190 /* Optimization: no fragments, no reasons to preestimate
1195 * size of pulled pages. Superb. 1191 * size of pulled pages. Superb.
1196 */ 1192 */
1197 if (!skb_has_frags(skb)) 1193 if (!skb_has_frag_list(skb))
1198 goto pull_pages; 1194 goto pull_pages;
1199 1195
1200 /* Estimate size of pulled pages. */ 1196 /* Estimate size of pulled pages. */
@@ -2323,7 +2319,7 @@ next_skb:
2323 st->frag_data = NULL; 2319 st->frag_data = NULL;
2324 } 2320 }
2325 2321
2326 if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) { 2322 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2327 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2323 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2328 st->frag_idx = 0; 2324 st->frag_idx = 0;
2329 goto next_skb; 2325 goto next_skb;
@@ -2893,7 +2889,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2893 return -ENOMEM; 2889 return -ENOMEM;
2894 2890
2895 /* Easy case. Most of packets will go this way. */ 2891 /* Easy case. Most of packets will go this way. */
2896 if (!skb_has_frags(skb)) { 2892 if (!skb_has_frag_list(skb)) {
2897 /* A little of trouble, not enough of space for trailer. 2893 /* A little of trouble, not enough of space for trailer.
2898 * This should not happen, when stack is tuned to generate 2894 * This should not happen, when stack is tuned to generate
2899 * good frames. OK, on miss we reallocate and reserve even more 2895 * good frames. OK, on miss we reallocate and reserve even more
@@ -2928,7 +2924,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2928 2924
2929 if (skb1->next == NULL && tailbits) { 2925 if (skb1->next == NULL && tailbits) {
2930 if (skb_shinfo(skb1)->nr_frags || 2926 if (skb_shinfo(skb1)->nr_frags ||
2931 skb_has_frags(skb1) || 2927 skb_has_frag_list(skb1) ||
2932 skb_tailroom(skb1) < tailbits) 2928 skb_tailroom(skb1) < tailbits)
2933 ntail = tailbits + 128; 2929 ntail = tailbits + 128;
2934 } 2930 }
@@ -2937,7 +2933,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2937 skb_cloned(skb1) || 2933 skb_cloned(skb1) ||
2938 ntail || 2934 ntail ||
2939 skb_shinfo(skb1)->nr_frags || 2935 skb_shinfo(skb1)->nr_frags ||
2940 skb_has_frags(skb1)) { 2936 skb_has_frag_list(skb1)) {
2941 struct sk_buff *skb2; 2937 struct sk_buff *skb2;
2942 2938
2943 /* Fuck, we are miserable poor guys... */ 2939 /* Fuck, we are miserable poor guys... */
@@ -3020,7 +3016,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
3020 } else { 3016 } else {
3021 /* 3017 /*
3022 * no hardware time stamps available, 3018 * no hardware time stamps available,
3023 * so keep the skb_shared_tx and only 3019 * so keep the shared tx_flags and only
3024 * store software time stamp 3020 * store software time stamp
3025 */ 3021 */
3026 skb->tstamp = ktime_get_real(); 3022 skb->tstamp = ktime_get_real();
diff --git a/net/core/sock.c b/net/core/sock.c
index b05b9b6ddb87..f3a06c40d5e0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1557,6 +1557,8 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1557EXPORT_SYMBOL(sock_alloc_send_skb); 1557EXPORT_SYMBOL(sock_alloc_send_skb);
1558 1558
1559static void __lock_sock(struct sock *sk) 1559static void __lock_sock(struct sock *sk)
1560 __releases(&sk->sk_lock.slock)
1561 __acquires(&sk->sk_lock.slock)
1560{ 1562{
1561 DEFINE_WAIT(wait); 1563 DEFINE_WAIT(wait);
1562 1564
@@ -1573,6 +1575,8 @@ static void __lock_sock(struct sock *sk)
1573} 1575}
1574 1576
1575static void __release_sock(struct sock *sk) 1577static void __release_sock(struct sock *sk)
1578 __releases(&sk->sk_lock.slock)
1579 __acquires(&sk->sk_lock.slock)
1576{ 1580{
1577 struct sk_buff *skb = sk->sk_backlog.head; 1581 struct sk_buff *skb = sk->sk_backlog.head;
1578 1582
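
Note: __lock_sock() and __release_sock() both drop sk_lock.slock internally and re-take it before returning; the paired __releases()/__acquires() annotations keep sparse's lock-context tracking balanced and document that contract for readers. Applied to a hypothetical helper of the same shape:

static void wait_for_something(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	spin_unlock_bh(&sk->sk_lock.slock);	/* sleep without the lock */
	schedule();
	spin_lock_bh(&sk->sk_lock.slock);	/* re-taken before return */
}
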
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 8408398cd44e..0581143cb800 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -47,37 +47,6 @@ config IP_DCCP_CCID3_DEBUG
47 47
48 If in doubt, say N. 48 If in doubt, say N.
49 49
50config IP_DCCP_CCID3_RTO
51 int "Use higher bound for nofeedback timer"
52 default 100
53 depends on IP_DCCP_CCID3 && EXPERIMENTAL
54 ---help---
55 Use higher lower bound for nofeedback timer expiration.
56
57 The TFRC nofeedback timer normally expires after the maximum of 4
58 RTTs and twice the current send interval (RFC 3448, 4.3). On LANs
59 with a small RTT this can mean a high processing load and reduced
60 performance, since then the nofeedback timer is triggered very
61 frequently.
62
63 This option enables to set a higher lower bound for the nofeedback
64 value. Values in units of milliseconds can be set here.
65
66 A value of 0 disables this feature by enforcing the value specified
67 in RFC 3448. The following values have been suggested as bounds for
68 experimental use:
69 * 16-20ms to match the typical multimedia inter-frame interval
70 * 100ms as a reasonable compromise [default]
71 * 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
72
73 The default of 100ms is a compromise between a large value for
74 efficient DCCP implementations, and a small value to avoid disrupting
75 the network in times of congestion.
76
77 The purpose of the nofeedback timer is to slow DCCP down when there
78 is serious network congestion: experimenting with larger values should
79 therefore not be performed on WANs.
80
81config IP_DCCP_TFRC_LIB 50config IP_DCCP_TFRC_LIB
82 def_bool y if IP_DCCP_CCID3 51 def_bool y if IP_DCCP_CCID3
83 52
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 9b3ae9922be1..dc18172b1e59 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -25,59 +25,14 @@
25 */ 25 */
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include "../feat.h" 27#include "../feat.h"
28#include "../ccid.h"
29#include "../dccp.h"
30#include "ccid2.h" 28#include "ccid2.h"
31 29
32 30
33#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 31#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
34static int ccid2_debug; 32static int ccid2_debug;
35#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) 33#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a)
36
37static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc)
38{
39 int len = 0;
40 int pipe = 0;
41 struct ccid2_seq *seqp = hc->tx_seqh;
42
43 /* there is data in the chain */
44 if (seqp != hc->tx_seqt) {
45 seqp = seqp->ccid2s_prev;
46 len++;
47 if (!seqp->ccid2s_acked)
48 pipe++;
49
50 while (seqp != hc->tx_seqt) {
51 struct ccid2_seq *prev = seqp->ccid2s_prev;
52
53 len++;
54 if (!prev->ccid2s_acked)
55 pipe++;
56
57 /* packets are sent sequentially */
58 BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq,
59 prev->ccid2s_seq ) >= 0);
60 BUG_ON(time_before(seqp->ccid2s_sent,
61 prev->ccid2s_sent));
62
63 seqp = prev;
64 }
65 }
66
67 BUG_ON(pipe != hc->tx_pipe);
68 ccid2_pr_debug("len of chain=%d\n", len);
69
70 do {
71 seqp = seqp->ccid2s_prev;
72 len++;
73 } while (seqp != hc->tx_seqh);
74
75 ccid2_pr_debug("total len=%d\n", len);
76 BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN);
77}
78#else 34#else
79#define ccid2_pr_debug(format, a...) 35#define ccid2_pr_debug(format, a...)
80#define ccid2_hc_tx_check_sanity(hc)
81#endif 36#endif
82 37
83static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc) 38static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
@@ -156,19 +111,10 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
156 dp->dccps_l_ack_ratio = val; 111 dp->dccps_l_ack_ratio = val;
157} 112}
158 113
159static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val)
160{
161 ccid2_pr_debug("change SRTT to %ld\n", val);
162 hc->tx_srtt = val;
163}
164
165static void ccid2_start_rto_timer(struct sock *sk);
166
167static void ccid2_hc_tx_rto_expire(unsigned long data) 114static void ccid2_hc_tx_rto_expire(unsigned long data)
168{ 115{
169 struct sock *sk = (struct sock *)data; 116 struct sock *sk = (struct sock *)data;
170 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 117 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
171 long s;
172 118
173 bh_lock_sock(sk); 119 bh_lock_sock(sk);
174 if (sock_owned_by_user(sk)) { 120 if (sock_owned_by_user(sk)) {
@@ -178,23 +124,19 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
178 124
179 ccid2_pr_debug("RTO_EXPIRE\n"); 125 ccid2_pr_debug("RTO_EXPIRE\n");
180 126
181 ccid2_hc_tx_check_sanity(hc);
182
183 /* back-off timer */ 127 /* back-off timer */
184 hc->tx_rto <<= 1; 128 hc->tx_rto <<= 1;
129 if (hc->tx_rto > DCCP_RTO_MAX)
130 hc->tx_rto = DCCP_RTO_MAX;
185 131
186 s = hc->tx_rto / HZ; 132 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
187 if (s > 60)
188 hc->tx_rto = 60 * HZ;
189
190 ccid2_start_rto_timer(sk);
191 133
192 /* adjust pipe, cwnd etc */ 134 /* adjust pipe, cwnd etc */
193 hc->tx_ssthresh = hc->tx_cwnd / 2; 135 hc->tx_ssthresh = hc->tx_cwnd / 2;
194 if (hc->tx_ssthresh < 2) 136 if (hc->tx_ssthresh < 2)
195 hc->tx_ssthresh = 2; 137 hc->tx_ssthresh = 2;
196 hc->tx_cwnd = 1; 138 hc->tx_cwnd = 1;
197 hc->tx_pipe = 0; 139 hc->tx_pipe = 0;
198 140
199 /* clear state about stuff we sent */ 141 /* clear state about stuff we sent */
200 hc->tx_seqt = hc->tx_seqh; 142 hc->tx_seqt = hc->tx_seqh;
@@ -204,22 +146,11 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
204 hc->tx_rpseq = 0; 146 hc->tx_rpseq = 0;
205 hc->tx_rpdupack = -1; 147 hc->tx_rpdupack = -1;
206 ccid2_change_l_ack_ratio(sk, 1); 148 ccid2_change_l_ack_ratio(sk, 1);
207 ccid2_hc_tx_check_sanity(hc);
208out: 149out:
209 bh_unlock_sock(sk); 150 bh_unlock_sock(sk);
210 sock_put(sk); 151 sock_put(sk);
211} 152}
212 153
213static void ccid2_start_rto_timer(struct sock *sk)
214{
215 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
216
217 ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);
218
219 BUG_ON(timer_pending(&hc->tx_rtotimer));
220 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
221}
222
223static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) 154static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
224{ 155{
225 struct dccp_sock *dp = dccp_sk(sk); 156 struct dccp_sock *dp = dccp_sk(sk);
@@ -230,7 +161,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
230 161
231 hc->tx_seqh->ccid2s_seq = dp->dccps_gss; 162 hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
232 hc->tx_seqh->ccid2s_acked = 0; 163 hc->tx_seqh->ccid2s_acked = 0;
233 hc->tx_seqh->ccid2s_sent = jiffies; 164 hc->tx_seqh->ccid2s_sent = ccid2_time_stamp;
234 165
235 next = hc->tx_seqh->ccid2s_next; 166 next = hc->tx_seqh->ccid2s_next;
236 /* check if we need to alloc more space */ 167 /* check if we need to alloc more space */
@@ -296,23 +227,20 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
296 } 227 }
297#endif 228#endif
298 229
299 /* setup RTO timer */ 230 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
300 if (!timer_pending(&hc->tx_rtotimer))
301 ccid2_start_rto_timer(sk);
302 231
303#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 232#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
304 do { 233 do {
305 struct ccid2_seq *seqp = hc->tx_seqt; 234 struct ccid2_seq *seqp = hc->tx_seqt;
306 235
307 while (seqp != hc->tx_seqh) { 236 while (seqp != hc->tx_seqh) {
308 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", 237 ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
309 (unsigned long long)seqp->ccid2s_seq, 238 (unsigned long long)seqp->ccid2s_seq,
310 seqp->ccid2s_acked, seqp->ccid2s_sent); 239 seqp->ccid2s_acked, seqp->ccid2s_sent);
311 seqp = seqp->ccid2s_next; 240 seqp = seqp->ccid2s_next;
312 } 241 }
313 } while (0); 242 } while (0);
314 ccid2_pr_debug("=========\n"); 243 ccid2_pr_debug("=========\n");
315 ccid2_hc_tx_check_sanity(hc);
316#endif 244#endif
317} 245}
318 246
@@ -378,17 +306,87 @@ out_invalid_option:
378 return -1; 306 return -1;
379} 307}
380 308
381static void ccid2_hc_tx_kill_rto_timer(struct sock *sk) 309/**
310 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
311 * This code is almost identical to TCP's tcp_rtt_estimator(), since
312 * - it has a higher sampling frequency (recommended by RFC 1323),
313 * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
314 * - it is simple (cf. more complex proposals such as Eifel timer or research
315 * which suggests that the gain should be set according to window size),
316 * - in tests it was found to work well with CCID2 [gerrit].
317 */
318static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
382{ 319{
383 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 320 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
321 long m = mrtt ? : 1;
384 322
385 sk_stop_timer(sk, &hc->tx_rtotimer); 323 if (hc->tx_srtt == 0) {
386 ccid2_pr_debug("deleted RTO timer\n"); 324 /* First measurement m */
325 hc->tx_srtt = m << 3;
326 hc->tx_mdev = m << 1;
327
328 hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
329 hc->tx_rttvar = hc->tx_mdev_max;
330
331 hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
332 } else {
333 /* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
334 m -= (hc->tx_srtt >> 3);
335 hc->tx_srtt += m;
336
337 /* Similarly, update scaled mdev with regard to |m| */
338 if (m < 0) {
339 m = -m;
340 m -= (hc->tx_mdev >> 2);
341 /*
342 * This neutralises RTO increase when RTT < SRTT - mdev
343 * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
344 * in Linux TCP", USENIX 2002, pp. 49-62).
345 */
346 if (m > 0)
347 m >>= 3;
348 } else {
349 m -= (hc->tx_mdev >> 2);
350 }
351 hc->tx_mdev += m;
352
353 if (hc->tx_mdev > hc->tx_mdev_max) {
354 hc->tx_mdev_max = hc->tx_mdev;
355 if (hc->tx_mdev_max > hc->tx_rttvar)
356 hc->tx_rttvar = hc->tx_mdev_max;
357 }
358
359 /*
360 * Decay RTTVAR at most once per flight, exploiting that
361 * 1) pipe <= cwnd <= Sequence_Window = W (RFC 4340, 7.5.2)
362 * 2) AWL = GSS-W+1 <= GAR <= GSS (RFC 4340, 7.5.1)
363 * GAR is a useful bound for FlightSize = pipe.
364 * AWL is probably too low here, as it over-estimates pipe.
365 */
366 if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
367 if (hc->tx_mdev_max < hc->tx_rttvar)
368 hc->tx_rttvar -= (hc->tx_rttvar -
369 hc->tx_mdev_max) >> 2;
370 hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
371 hc->tx_mdev_max = tcp_rto_min(sk);
372 }
373 }
374
375 /*
376 * Set RTO from SRTT and RTTVAR
377 * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
378 * This agrees with RFC 4341, 5:
379 * "Because DCCP does not retransmit data, DCCP does not require
380 * TCP's recommended minimum timeout of one second".
381 */
382 hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;
383
384 if (hc->tx_rto > DCCP_RTO_MAX)
385 hc->tx_rto = DCCP_RTO_MAX;
387} 386}
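The fixed-point bookkeeping above can be hard to read at first. The following
stand-alone sketch restates the core RFC 2988 update in plain C, minus the
once-per-flight RTTVAR decay and the negative-error damping; types and names
differ from the kernel code and are invented for the example.

#include <stdint.h>

struct rtt_state {
	uint32_t srtt;	/* smoothed RTT,   scaled by 8: srtt = 8 * SRTT */
	uint32_t mdev;	/* mean deviation, scaled by 4: mdev = 4 * MDEV */
	uint32_t rto;
};

/* Feed one RTT sample m (same clock units as srtt); sketch only. */
static void rtt_sample(struct rtt_state *s, long m)
{
	if (m <= 0)
		m = 1;
	if (s->srtt == 0) {			/* first measurement */
		s->srtt = m << 3;
		s->mdev = m << 1;
	} else {
		long err = m - (s->srtt >> 3);

		s->srtt += err;			 /* SRTT += (m - SRTT) / 8   */
		if (err < 0)
			err = -err;
		s->mdev += err - (s->mdev >> 2); /* MDEV += (|err| - MDEV)/4 */
	}
	s->rto = (s->srtt >> 3) + s->mdev;	 /* RTO = SRTT + 4 * MDEV    */
}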
388 387
389static inline void ccid2_new_ack(struct sock *sk, 388static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
390 struct ccid2_seq *seqp, 389 unsigned int *maxincr)
391 unsigned int *maxincr)
392{ 390{
393 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 391 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
394 392
@@ -402,93 +400,27 @@ static inline void ccid2_new_ack(struct sock *sk,
402 hc->tx_cwnd += 1; 400 hc->tx_cwnd += 1;
403 hc->tx_packets_acked = 0; 401 hc->tx_packets_acked = 0;
404 } 402 }
405 403 /*
406 /* update RTO */ 404 * FIXME: RTT is sampled several times per acknowledgment (for each
407 if (hc->tx_srtt == -1 || 405 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
408 time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) { 406 * This causes the RTT to be over-estimated, since the older entries
409 unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent; 407 * in the Ack Vector have earlier sending times.
410 int s; 408 * The cleanest solution is to not use the ccid2s_sent field at all
411 409 * and instead use DCCP timestamps: requires changes in other places.
412 /* first measurement */ 410 */
413 if (hc->tx_srtt == -1) { 411 ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
414 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
415 r, jiffies,
416 (unsigned long long)seqp->ccid2s_seq);
417 ccid2_change_srtt(hc, r);
418 hc->tx_rttvar = r >> 1;
419 } else {
420 /* RTTVAR */
421 long tmp = hc->tx_srtt - r;
422 long srtt;
423
424 if (tmp < 0)
425 tmp *= -1;
426
427 tmp >>= 2;
428 hc->tx_rttvar *= 3;
429 hc->tx_rttvar >>= 2;
430 hc->tx_rttvar += tmp;
431
432 /* SRTT */
433 srtt = hc->tx_srtt;
434 srtt *= 7;
435 srtt >>= 3;
436 tmp = r >> 3;
437 srtt += tmp;
438 ccid2_change_srtt(hc, srtt);
439 }
440 s = hc->tx_rttvar << 2;
441 /* clock granularity is 1 when based on jiffies */
442 if (!s)
443 s = 1;
444 hc->tx_rto = hc->tx_srtt + s;
445
446 /* must be at least a second */
447 s = hc->tx_rto / HZ;
448 /* DCCP doesn't require this [but I like it cuz my code sux] */
449#if 1
450 if (s < 1)
451 hc->tx_rto = HZ;
452#endif
453 /* max 60 seconds */
454 if (s > 60)
455 hc->tx_rto = HZ * 60;
456
457 hc->tx_lastrtt = jiffies;
458
459 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
460 hc->tx_srtt, hc->tx_rttvar,
461 hc->tx_rto, HZ, r);
462 }
463
464 /* we got a new ack, so re-start RTO timer */
465 ccid2_hc_tx_kill_rto_timer(sk);
466 ccid2_start_rto_timer(sk);
467}
468
469static void ccid2_hc_tx_dec_pipe(struct sock *sk)
470{
471 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
472
473 if (hc->tx_pipe == 0)
474 DCCP_BUG("pipe == 0");
475 else
476 hc->tx_pipe--;
477
478 if (hc->tx_pipe == 0)
479 ccid2_hc_tx_kill_rto_timer(sk);
480} 412}
481 413
482static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) 414static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
483{ 415{
484 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 416 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
485 417
486 if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) { 418 if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
487 ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); 419 ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
488 return; 420 return;
489 } 421 }
490 422
491 hc->tx_last_cong = jiffies; 423 hc->tx_last_cong = ccid2_time_stamp;
492 424
493 hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U; 425 hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
494 hc->tx_ssthresh = max(hc->tx_cwnd, 2U); 426 hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
@@ -510,7 +442,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
510 int done = 0; 442 int done = 0;
511 unsigned int maxincr = 0; 443 unsigned int maxincr = 0;
512 444
513 ccid2_hc_tx_check_sanity(hc);
514 /* check reverse path congestion */ 445 /* check reverse path congestion */
515 seqno = DCCP_SKB_CB(skb)->dccpd_seq; 446 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
516 447
@@ -620,7 +551,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
620 seqp->ccid2s_acked = 1; 551 seqp->ccid2s_acked = 1;
621 ccid2_pr_debug("Got ack for %llu\n", 552 ccid2_pr_debug("Got ack for %llu\n",
622 (unsigned long long)seqp->ccid2s_seq); 553 (unsigned long long)seqp->ccid2s_seq);
623 ccid2_hc_tx_dec_pipe(sk); 554 hc->tx_pipe--;
624 } 555 }
625 if (seqp == hc->tx_seqt) { 556 if (seqp == hc->tx_seqt) {
626 done = 1; 557 done = 1;
@@ -677,7 +608,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
677 * one ack vector. 608 * one ack vector.
678 */ 609 */
679 ccid2_congestion_event(sk, seqp); 610 ccid2_congestion_event(sk, seqp);
680 ccid2_hc_tx_dec_pipe(sk); 611 hc->tx_pipe--;
681 } 612 }
682 if (seqp == hc->tx_seqt) 613 if (seqp == hc->tx_seqt)
683 break; 614 break;
@@ -695,7 +626,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
695 hc->tx_seqt = hc->tx_seqt->ccid2s_next; 626 hc->tx_seqt = hc->tx_seqt->ccid2s_next;
696 } 627 }
697 628
698 ccid2_hc_tx_check_sanity(hc); 629 /* restart RTO timer if not all outstanding data has been acked */
630 if (hc->tx_pipe == 0)
631 sk_stop_timer(sk, &hc->tx_rtotimer);
632 else
633 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
699} 634}
700 635
701static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) 636static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -707,12 +642,8 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
707 /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ 642 /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
708 hc->tx_ssthresh = ~0U; 643 hc->tx_ssthresh = ~0U;
709 644
710 /* 645 /* Use larger initial windows (RFC 4341, section 5). */
711 * RFC 4341, 5: "The cwnd parameter is initialized to at most four 646 hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
712 * packets for new connections, following the rules from [RFC3390]".
713 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
714 */
715 hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
716 647
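rfc3390_bytes_to_packets() converts RFC 3390's byte-based initial window,
min(4*MSS, max(2*MSS, 4380 bytes)), into the packet count required by
RFC 4341. The helper lives in the DCCP headers; its shape is roughly the
following sketch, with the thresholds following from 4380/4 = 1095 and
4380/2 = 2190 (consult dccp.h for the authoritative version).

static inline u32 rfc3390_bytes_to_packets(const u32 smss)
{
	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
}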
717 /* Make sure that Ack Ratio is enabled and within bounds. */ 648 /* Make sure that Ack Ratio is enabled and within bounds. */
718 max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2); 649 max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
@@ -723,15 +654,11 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
723 if (ccid2_hc_tx_alloc_seq(hc)) 654 if (ccid2_hc_tx_alloc_seq(hc))
724 return -ENOMEM; 655 return -ENOMEM;
725 656
726 hc->tx_rto = 3 * HZ; 657 hc->tx_rto = DCCP_TIMEOUT_INIT;
727 ccid2_change_srtt(hc, -1);
728 hc->tx_rttvar = -1;
729 hc->tx_rpdupack = -1; 658 hc->tx_rpdupack = -1;
730 hc->tx_last_cong = jiffies; 659 hc->tx_last_cong = ccid2_time_stamp;
731 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 660 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
732 (unsigned long)sk); 661 (unsigned long)sk);
733
734 ccid2_hc_tx_check_sanity(hc);
735 return 0; 662 return 0;
736} 663}
737 664
@@ -740,7 +667,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
740 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 667 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
741 int i; 668 int i;
742 669
743 ccid2_hc_tx_kill_rto_timer(sk); 670 sk_stop_timer(sk, &hc->tx_rtotimer);
744 671
745 for (i = 0; i < hc->tx_seqbufc; i++) 672 for (i = 0; i < hc->tx_seqbufc; i++)
746 kfree(hc->tx_seqbuf[i]); 673 kfree(hc->tx_seqbuf[i]);
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 1ec6a30103bb..9731c2dc1487 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -18,18 +18,23 @@
18#ifndef _DCCP_CCID2_H_ 18#ifndef _DCCP_CCID2_H_
19#define _DCCP_CCID2_H_ 19#define _DCCP_CCID2_H_
20 20
21#include <linux/dccp.h>
22#include <linux/timer.h> 21#include <linux/timer.h>
23#include <linux/types.h> 22#include <linux/types.h>
24#include "../ccid.h" 23#include "../ccid.h"
24#include "../dccp.h"
25
26/*
27 * CCID-2 timestamping faces the same issues as TCP timestamping.
28 * Hence we reuse/share as much of the code as possible.
29 */
30#define ccid2_time_stamp tcp_time_stamp
31
25/* NUMDUPACK parameter from RFC 4341, p. 6 */ 32/* NUMDUPACK parameter from RFC 4341, p. 6 */
26#define NUMDUPACK 3 33#define NUMDUPACK 3
27 34
28struct sock;
29
30struct ccid2_seq { 35struct ccid2_seq {
31 u64 ccid2s_seq; 36 u64 ccid2s_seq;
32 unsigned long ccid2s_sent; 37 u32 ccid2s_sent;
33 int ccid2s_acked; 38 int ccid2s_acked;
34 struct ccid2_seq *ccid2s_prev; 39 struct ccid2_seq *ccid2s_prev;
35 struct ccid2_seq *ccid2s_next; 40 struct ccid2_seq *ccid2s_next;
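With ccid2s_sent now a free-running u32 tick counter (shared with
tcp_time_stamp), ordering tests must be wraparound-safe. The idiom used by
ccid2_congestion_event() earlier in this patch, isolated as a sketch:

/*
 * Nonzero iff a is earlier than b, even across a u32 wraparound; valid
 * while the two stamps are less than 2^31 ticks apart. Sketch only.
 */
static inline int stamp_before(u32 a, u32 b)
{
	return (s32)(a - b) < 0;
}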
@@ -42,7 +47,12 @@ struct ccid2_seq {
42 * struct ccid2_hc_tx_sock - CCID2 TX half connection 47 * struct ccid2_hc_tx_sock - CCID2 TX half connection
43 * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 48 * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
44 * @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465) 49 * @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465)
45 * @tx_lastrtt: time RTT was last measured 50 * @tx_srtt: smoothed RTT estimate, scaled by 2^3
51 * @tx_mdev: smoothed RTT variation, scaled by 2^2
52 * @tx_mdev_max: maximum of @mdev during one flight
53 * @tx_rttvar: moving average/maximum of @mdev_max
54 * @tx_rto: RTO value deriving from SRTT and RTTVAR (RFC 2988)
55 * @tx_rtt_seq: to decay RTTVAR at most once per flight
46 * @tx_rpseq: last consecutive seqno 56 * @tx_rpseq: last consecutive seqno
47 * @tx_rpdupack: dupacks since rpseq 57 * @tx_rpdupack: dupacks since rpseq
48 */ 58 */
@@ -55,14 +65,19 @@ struct ccid2_hc_tx_sock {
55 int tx_seqbufc; 65 int tx_seqbufc;
56 struct ccid2_seq *tx_seqh; 66 struct ccid2_seq *tx_seqh;
57 struct ccid2_seq *tx_seqt; 67 struct ccid2_seq *tx_seqt;
58 long tx_rto; 68
59 long tx_srtt; 69 /* RTT measurement: variables/principles are the same as in TCP */
60 long tx_rttvar; 70 u32 tx_srtt,
61 unsigned long tx_lastrtt; 71 tx_mdev,
72 tx_mdev_max,
73 tx_rttvar,
74 tx_rto;
75 u64 tx_rtt_seq:48;
62 struct timer_list tx_rtotimer; 76 struct timer_list tx_rtotimer;
77
63 u64 tx_rpseq; 78 u64 tx_rpseq;
64 int tx_rpdupack; 79 int tx_rpdupack;
65 unsigned long tx_last_cong; 80 u32 tx_last_cong;
66 u64 tx_high_ack; 81 u64 tx_high_ack;
67}; 82};
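tx_rtt_seq is a 48-bit DCCP sequence number, compared with after48() in the
RTTVAR-decay logic above. Circular 48-bit comparison works by shifting both
values into the top bits of an s64, in the spirit of the sketch below; the
authoritative helpers live in net/dccp/dccp.h.

/* nonzero iff seq1 comes after seq2, mod 2^48; illustrative sketch */
static inline int seq48_after(u64 seq1, u64 seq2)
{
	return (s64)((seq1 << 16) - (seq2 << 16)) > 0;
}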
68 83
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 95f752986497..278e17069322 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -218,9 +218,9 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
218 218
219 /* 219 /*
220 * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4 220 * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
221 * RTO is 0 if and only if no feedback has been received yet.
221 */ 222 */
222 if (hc->tx_t_rto == 0 || /* no feedback received yet */ 223 if (hc->tx_t_rto == 0 || hc->tx_p == 0) {
223 hc->tx_p == 0) {
224 224
225 /* halve send rate directly */ 225 /* halve send rate directly */
226 hc->tx_x = max(hc->tx_x / 2, 226 hc->tx_x = max(hc->tx_x / 2,
@@ -256,7 +256,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
256 * Set new timeout for the nofeedback timer. 256 * Set new timeout for the nofeedback timer.
257 * See comments in packet_recv() regarding the value of t_RTO. 257 * See comments in packet_recv() regarding the value of t_RTO.
258 */ 258 */
259 if (unlikely(hc->tx_t_rto == 0)) /* no feedback yet */ 259 if (unlikely(hc->tx_t_rto == 0)) /* no feedback received yet */
260 t_nfb = TFRC_INITIAL_TIMEOUT; 260 t_nfb = TFRC_INITIAL_TIMEOUT;
261 else 261 else
262 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi); 262 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
@@ -372,7 +372,7 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
372static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) 372static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
373{ 373{
374 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); 374 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
375 struct ccid3_options_received *opt_recv; 375 struct ccid3_options_received *opt_recv = &hc->tx_options_received;
376 ktime_t now; 376 ktime_t now;
377 unsigned long t_nfb; 377 unsigned long t_nfb;
378 u32 pinv, r_sample; 378 u32 pinv, r_sample;
@@ -386,7 +386,6 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
386 hc->tx_state != TFRC_SSTATE_NO_FBACK) 386 hc->tx_state != TFRC_SSTATE_NO_FBACK)
387 return; 387 return;
388 388
389 opt_recv = &hc->tx_options_received;
390 now = ktime_get_real(); 389 now = ktime_get_real();
391 390
392 /* Estimate RTT from history if ACK number is valid */ 391 /* Estimate RTT from history if ACK number is valid */
@@ -461,13 +460,12 @@ done_computing_x:
461 sk->sk_write_space(sk); 460 sk->sk_write_space(sk);
462 461
463 /* 462 /*
464 * Update timeout interval for the nofeedback timer. 463 * Update timeout interval for the nofeedback timer. In order to control
465 * We use a configuration option to increase the lower bound. 464 * rate halving on networks with very low RTTs (<= 1 ms), use per-route
466 * This can help avoid triggering the nofeedback timer too 465 * tunable RTAX_RTO_MIN value as the lower bound.
467 * often ('spinning') on LANs with small RTTs.
468 */ 466 */
469 hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt, (CONFIG_IP_DCCP_CCID3_RTO * 467 hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt,
470 (USEC_PER_SEC / 1000))); 468 USEC_PER_SEC/HZ * tcp_rto_min(sk));
471 /* 469 /*
472 * Schedule no feedback timer to expire in 470 * Schedule no feedback timer to expire in
473 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) 471 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
@@ -489,11 +487,9 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
489 int rc = 0; 487 int rc = 0;
490 const struct dccp_sock *dp = dccp_sk(sk); 488 const struct dccp_sock *dp = dccp_sk(sk);
491 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); 489 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
492 struct ccid3_options_received *opt_recv; 490 struct ccid3_options_received *opt_recv = &hc->tx_options_received;
493 __be32 opt_val; 491 __be32 opt_val;
494 492
495 opt_recv = &hc->tx_options_received;
496
497 if (opt_recv->ccid3or_seqno != dp->dccps_gsr) { 493 if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
498 opt_recv->ccid3or_seqno = dp->dccps_gsr; 494 opt_recv->ccid3or_seqno = dp->dccps_gsr;
499 opt_recv->ccid3or_loss_event_rate = ~0; 495 opt_recv->ccid3or_loss_event_rate = ~0;
@@ -567,34 +563,30 @@ static void ccid3_hc_tx_exit(struct sock *sk)
567 563
568static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) 564static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
569{ 565{
570 struct ccid3_hc_tx_sock *hc; 566 info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto;
571 567 info->tcpi_rtt = ccid3_hc_tx_sk(sk)->tx_rtt;
572 /* Listen socks doesn't have a private CCID block */
573 if (sk->sk_state == DCCP_LISTEN)
574 return;
575
576 hc = ccid3_hc_tx_sk(sk);
577 info->tcpi_rto = hc->tx_t_rto;
578 info->tcpi_rtt = hc->tx_rtt;
579} 568}
580 569
581static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, 570static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
582 u32 __user *optval, int __user *optlen) 571 u32 __user *optval, int __user *optlen)
583{ 572{
584 const struct ccid3_hc_tx_sock *hc; 573 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
574 struct tfrc_tx_info tfrc;
585 const void *val; 575 const void *val;
586 576
587 /* Listen socks doesn't have a private CCID block */
588 if (sk->sk_state == DCCP_LISTEN)
589 return -EINVAL;
590
591 hc = ccid3_hc_tx_sk(sk);
592 switch (optname) { 577 switch (optname) {
593 case DCCP_SOCKOPT_CCID_TX_INFO: 578 case DCCP_SOCKOPT_CCID_TX_INFO:
594 if (len < sizeof(hc->tx_tfrc)) 579 if (len < sizeof(tfrc))
595 return -EINVAL; 580 return -EINVAL;
596 len = sizeof(hc->tx_tfrc); 581 tfrc.tfrctx_x = hc->tx_x;
597 val = &hc->tx_tfrc; 582 tfrc.tfrctx_x_recv = hc->tx_x_recv;
583 tfrc.tfrctx_x_calc = hc->tx_x_calc;
584 tfrc.tfrctx_rtt = hc->tx_rtt;
585 tfrc.tfrctx_p = hc->tx_p;
586 tfrc.tfrctx_rto = hc->tx_t_rto;
587 tfrc.tfrctx_ipi = hc->tx_t_ipi;
588 len = sizeof(tfrc);
589 val = &tfrc;
598 break; 590 break;
599 default: 591 default:
600 return -ENOPROTOOPT; 592 return -ENOPROTOOPT;
@@ -701,14 +693,12 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
701 693
702static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) 694static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
703{ 695{
704 const struct ccid3_hc_rx_sock *hc; 696 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
705 __be32 x_recv, pinv; 697 __be32 x_recv, pinv;
706 698
707 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) 699 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
708 return 0; 700 return 0;
709 701
710 hc = ccid3_hc_rx_sk(sk);
711
712 if (dccp_packet_without_ack(skb)) 702 if (dccp_packet_without_ack(skb))
713 return 0; 703 return 0;
714 704
@@ -749,10 +739,11 @@ static u32 ccid3_first_li(struct sock *sk)
749 x_recv = scaled_div32(hc->rx_bytes_recv, delta); 739 x_recv = scaled_div32(hc->rx_bytes_recv, delta);
750 if (x_recv == 0) { /* would also trigger divide-by-zero */ 740 if (x_recv == 0) { /* would also trigger divide-by-zero */
751 DCCP_WARN("X_recv==0\n"); 741 DCCP_WARN("X_recv==0\n");
752 if ((x_recv = hc->rx_x_recv) == 0) { 742 if (hc->rx_x_recv == 0) {
753 DCCP_BUG("stored value of X_recv is zero"); 743 DCCP_BUG("stored value of X_recv is zero");
754 return ~0U; 744 return ~0U;
755 } 745 }
746 x_recv = hc->rx_x_recv;
756 } 747 }
757 748
758 fval = scaled_div(hc->rx_s, hc->rx_rtt); 749 fval = scaled_div(hc->rx_s, hc->rx_rtt);
@@ -870,30 +861,18 @@ static void ccid3_hc_rx_exit(struct sock *sk)
870 861
871static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) 862static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
872{ 863{
873 const struct ccid3_hc_rx_sock *hc; 864 info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state;
874
875 /* Listen socks doesn't have a private CCID block */
876 if (sk->sk_state == DCCP_LISTEN)
877 return;
878
879 hc = ccid3_hc_rx_sk(sk);
880 info->tcpi_ca_state = hc->rx_state;
881 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 865 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
882 info->tcpi_rcv_rtt = hc->rx_rtt; 866 info->tcpi_rcv_rtt = ccid3_hc_rx_sk(sk)->rx_rtt;
883} 867}
884 868
885static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, 869static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
886 u32 __user *optval, int __user *optlen) 870 u32 __user *optval, int __user *optlen)
887{ 871{
888 const struct ccid3_hc_rx_sock *hc; 872 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
889 struct tfrc_rx_info rx_info; 873 struct tfrc_rx_info rx_info;
890 const void *val; 874 const void *val;
891 875
892 /* Listen socks doesn't have a private CCID block */
893 if (sk->sk_state == DCCP_LISTEN)
894 return -EINVAL;
895
896 hc = ccid3_hc_rx_sk(sk);
897 switch (optname) { 876 switch (optname) {
898 case DCCP_SOCKOPT_CCID_RX_INFO: 877 case DCCP_SOCKOPT_CCID_RX_INFO:
899 if (len < sizeof(rx_info)) 878 if (len < sizeof(rx_info))
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 032635776653..b7e569c22f36 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -42,7 +42,7 @@
42#include "lib/tfrc.h" 42#include "lib/tfrc.h"
43#include "../ccid.h" 43#include "../ccid.h"
44 44
45/* Two seconds as per RFC 3448 4.2 */ 45/* Two seconds as per RFC 5348, 4.2 */
46#define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC) 46#define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC)
47 47
48/* In usecs - half the scheduling granularity as per RFC3448 4.6 */ 48/* In usecs - half the scheduling granularity as per RFC3448 4.6 */
@@ -95,14 +95,13 @@ enum ccid3_hc_tx_states {
95 * @tx_options_received: Parsed set of retrieved options 95 * @tx_options_received: Parsed set of retrieved options
96 */ 96 */
97struct ccid3_hc_tx_sock { 97struct ccid3_hc_tx_sock {
98 struct tfrc_tx_info tx_tfrc; 98 u64 tx_x;
99#define tx_x tx_tfrc.tfrctx_x 99 u64 tx_x_recv;
100#define tx_x_recv tx_tfrc.tfrctx_x_recv 100 u32 tx_x_calc;
101#define tx_x_calc tx_tfrc.tfrctx_x_calc 101 u32 tx_rtt;
102#define tx_rtt tx_tfrc.tfrctx_rtt 102 u32 tx_p;
103#define tx_p tx_tfrc.tfrctx_p 103 u32 tx_t_rto;
104#define tx_t_rto tx_tfrc.tfrctx_rto 104 u32 tx_t_ipi;
105#define tx_t_ipi tx_tfrc.tfrctx_ipi
106 u16 tx_s; 105 u16 tx_s;
107 enum ccid3_hc_tx_states tx_state:8; 106 enum ccid3_hc_tx_states tx_state:8;
108 u8 tx_last_win_count; 107 u8 tx_last_win_count;
@@ -131,16 +130,12 @@ enum ccid3_hc_rx_states {
131 130
132/** 131/**
133 * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket 132 * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
134 * @rx_x_recv: Receiver estimate of send rate (RFC 3448 4.3)
135 * @rx_rtt: Receiver estimate of rtt (non-standard)
136 * @rx_p: Current loss event rate (RFC 3448 5.4)
137 * @rx_last_counter: Tracks window counter (RFC 4342, 8.1) 133 * @rx_last_counter: Tracks window counter (RFC 4342, 8.1)
138 * @rx_state: Receiver state, one of %ccid3_hc_rx_states 134 * @rx_state: Receiver state, one of %ccid3_hc_rx_states
139 * @rx_bytes_recv: Total sum of DCCP payload bytes 135 * @rx_bytes_recv: Total sum of DCCP payload bytes
140 * @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3) 136 * @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3)
141 * @rx_rtt: Receiver estimate of RTT 137 * @rx_rtt: Receiver estimate of RTT
142 * @rx_tstamp_last_feedback: Time at which last feedback was sent 138 * @rx_tstamp_last_feedback: Time at which last feedback was sent
143 * @rx_tstamp_last_ack: Time at which last feedback was sent
144 * @rx_hist: Packet history (loss detection + RTT sampling) 139 * @rx_hist: Packet history (loss detection + RTT sampling)
145 * @rx_li_hist: Loss Interval database 140 * @rx_li_hist: Loss Interval database
146 * @rx_s: Received packet size in bytes 141 * @rx_s: Received packet size in bytes
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index baeb1eaf011b..2ef115277bea 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -693,22 +693,22 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
693 aux = scp->accessdata.acc_userl; 693 aux = scp->accessdata.acc_userl;
694 *skb_put(skb, 1) = aux; 694 *skb_put(skb, 1) = aux;
695 if (aux > 0) 695 if (aux > 0)
696 memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux); 696 memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);
697 697
698 aux = scp->accessdata.acc_passl; 698 aux = scp->accessdata.acc_passl;
699 *skb_put(skb, 1) = aux; 699 *skb_put(skb, 1) = aux;
700 if (aux > 0) 700 if (aux > 0)
701 memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux); 701 memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);
702 702
703 aux = scp->accessdata.acc_accl; 703 aux = scp->accessdata.acc_accl;
704 *skb_put(skb, 1) = aux; 704 *skb_put(skb, 1) = aux;
705 if (aux > 0) 705 if (aux > 0)
706 memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux); 706 memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);
707 707
708 aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl); 708 aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
709 *skb_put(skb, 1) = aux; 709 *skb_put(skb, 1) = aux;
710 if (aux > 0) 710 if (aux > 0)
711 memcpy(skb_put(skb,aux), scp->conndata_out.opt_data, aux); 711 memcpy(skb_put(skb, aux), scp->conndata_out.opt_data, aux);
712 712
713 scp->persist = dn_nsp_persist(sk); 713 scp->persist = dn_nsp_persist(sk);
714 scp->persist_fxn = dn_nsp_retrans_conninit; 714 scp->persist_fxn = dn_nsp_retrans_conninit;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index dc54bd0d083b..baa98fb83552 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -1009,7 +1009,6 @@ static int __init aun_udp_initialise(void)
1009 struct sockaddr_in sin; 1009 struct sockaddr_in sin;
1010 1010
1011 skb_queue_head_init(&aun_queue); 1011 skb_queue_head_init(&aun_queue);
1012 spin_lock_init(&aun_queue_lock);
1013 setup_timer(&ab_cleanup_timer, ab_cleanup, 0); 1012 setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
1014 ab_cleanup_timer.expires = jiffies + (HZ*2); 1013 ab_cleanup_timer.expires = jiffies + (HZ*2);
1015 add_timer(&ab_cleanup_timer); 1014 add_timer(&ab_cleanup_timer);
@@ -1167,7 +1166,6 @@ static int __init econet_proto_init(void)
1167 goto out; 1166 goto out;
1168 sock_register(&econet_family_ops); 1167 sock_register(&econet_family_ops);
1169#ifdef CONFIG_ECONET_AUNUDP 1168#ifdef CONFIG_ECONET_AUNUDP
1170 spin_lock_init(&aun_queue_lock);
1171 aun_udp_initialise(); 1169 aun_udp_initialise();
1172#endif 1170#endif
1173#ifdef CONFIG_ECONET_NATIVE 1171#ifdef CONFIG_ECONET_NATIVE
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 215c83986a9d..85e7b4551326 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -367,7 +367,7 @@ struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
367EXPORT_SYMBOL(alloc_etherdev_mq); 367EXPORT_SYMBOL(alloc_etherdev_mq);
368 368
369static size_t _format_mac_addr(char *buf, int buflen, 369static size_t _format_mac_addr(char *buf, int buflen,
370 const unsigned char *addr, int len) 370 const unsigned char *addr, int len)
371{ 371{
372 int i; 372 int i;
373 char *cp = buf; 373 char *cp = buf;
@@ -376,7 +376,7 @@ static size_t _format_mac_addr(char *buf, int buflen,
376 cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]); 376 cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
377 if (i == len - 1) 377 if (i == len - 1)
378 break; 378 break;
379 cp += strlcpy(cp, ":", buflen - (cp - buf)); 379 cp += scnprintf(cp, buflen - (cp - buf), ":");
380 } 380 }
381 return cp - buf; 381 return cp - buf;
382} 382}
@@ -386,7 +386,7 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
386 size_t l; 386 size_t l;
387 387
388 l = _format_mac_addr(buf, PAGE_SIZE, addr, len); 388 l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
389 l += strlcpy(buf + l, "\n", PAGE_SIZE - l); 389 l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
390 return ((ssize_t) l); 390 return ((ssize_t) l);
391} 391}
392EXPORT_SYMBOL(sysfs_format_mac); 392EXPORT_SYMBOL(sysfs_format_mac);
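The strlcpy() -> scnprintf() change is not just stylistic: strlcpy() returns
the length of the source string even when the copy was truncated, while
scnprintf() returns the number of characters actually written, which is the
only safe amount by which to advance a buffer cursor. A small user-space
illustration of the pitfall (names invented):

#include <stdio.h>
#include <string.h>

/* strlcpy-like semantics: returns strlen(src), not bytes stored */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;		/* may exceed what actually fits in dst */
}

int main(void)
{
	char buf[4];
	size_t adv = my_strlcpy(buf, "aa:bb:cc\n", sizeof(buf));

	/* prints 9 although only "aa:" was stored: advancing a write
	 * cursor by this value would run past the end of the buffer */
	printf("returned %zu, buffer holds \"%s\"\n", adv, buf);
	return 0;
}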
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 571f8950ed06..5462e2d147a6 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -215,8 +215,15 @@ config NET_IPIP
215 be inserted in and removed from the running kernel whenever you 215 be inserted in and removed from the running kernel whenever you
216 want). Most people won't need this and can say N. 216 want). Most people won't need this and can say N.
217 217
218config NET_IPGRE_DEMUX
219 tristate "IP: GRE demultiplexer"
220 help
221	 This is a helper module that demultiplexes GRE packets based on the
222	 GRE version field. It is required by the ip_gre and pptp modules.
223
218config NET_IPGRE 224config NET_IPGRE
219 tristate "IP: GRE tunnels over IP" 225 tristate "IP: GRE tunnels over IP"
226 depends on NET_IPGRE_DEMUX
220 help 227 help
221 Tunneling means encapsulating data of one protocol type within 228 Tunneling means encapsulating data of one protocol type within
222 another protocol and sending it over a channel that understands the 229 another protocol and sending it over a channel that understands the
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 80ff87ce43aa..4978d22f9a75 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
20obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o 20obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
21obj-$(CONFIG_IP_MROUTE) += ipmr.o 21obj-$(CONFIG_IP_MROUTE) += ipmr.o
22obj-$(CONFIG_NET_IPIP) += ipip.o 22obj-$(CONFIG_NET_IPIP) += ipip.o
23obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
23obj-$(CONFIG_NET_IPGRE) += ip_gre.o 24obj-$(CONFIG_NET_IPGRE) += ip_gre.o
24obj-$(CONFIG_SYN_COOKIES) += syncookies.o 25obj-$(CONFIG_SYN_COOKIES) += syncookies.o
25obj-$(CONFIG_INET_AH) += ah4.o 26obj-$(CONFIG_INET_AH) += ah4.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6a1100c25a9f..f581f77d1097 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -227,18 +227,16 @@ EXPORT_SYMBOL(inet_ehash_secret);
227 227
228/* 228/*
229 * inet_ehash_secret must be set exactly once 229 * inet_ehash_secret must be set exactly once
230 * Instead of using a dedicated spinlock, we (ab)use inetsw_lock
231 */ 230 */
232void build_ehash_secret(void) 231void build_ehash_secret(void)
233{ 232{
234 u32 rnd; 233 u32 rnd;
234
235 do { 235 do {
236 get_random_bytes(&rnd, sizeof(rnd)); 236 get_random_bytes(&rnd, sizeof(rnd));
237 } while (rnd == 0); 237 } while (rnd == 0);
238 spin_lock_bh(&inetsw_lock); 238
239 if (!inet_ehash_secret) 239 cmpxchg(&inet_ehash_secret, 0, rnd);
240 inet_ehash_secret = rnd;
241 spin_unlock_bh(&inetsw_lock);
242} 240}
243EXPORT_SYMBOL(build_ehash_secret); 241EXPORT_SYMBOL(build_ehash_secret);
244 242
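The spinlock is replaced by an atomic compare-and-swap:
cmpxchg(&inet_ehash_secret, 0, rnd) installs rnd only if the secret is still
zero, so a concurrent caller can never overwrite an already-chosen secret.
The same one-time-initialisation idea in user-space C, using a GCC builtin
and invented names:

#include <stdint.h>

static uint32_t ehash_secret;	/* 0 means "not yet initialised" */

/*
 * Install @rnd as the secret unless another thread got there first.
 * __sync_val_compare_and_swap stores @rnd only if the current value is 0
 * and returns the value observed before the operation.
 */
static uint32_t set_secret_once(uint32_t rnd)
{
	uint32_t old = __sync_val_compare_and_swap(&ehash_secret, 0, rnd);

	return old ? old : rnd;	/* the winner's value, whoever won */
}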
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 96c1955b3e2f..dcfe7e961c10 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -55,7 +55,7 @@
55 * Stuart Cheshire : Metricom and grat arp fixes 55 * Stuart Cheshire : Metricom and grat arp fixes
56 * *** FOR 2.1 clean this up *** 56 * *** FOR 2.1 clean this up ***
57 * Lawrence V. Stefani: (08/12/96) Added FDDI support. 57 * Lawrence V. Stefani: (08/12/96) Added FDDI support.
58 * Alan Cox : Took the AP1000 nasty FDDI hack and 58 * Alan Cox : Took the AP1000 nasty FDDI hack and
59 * folded into the mainstream FDDI code. 59 * folded into the mainstream FDDI code.
60 * Ack spit, Linus how did you allow that 60 * Ack spit, Linus how did you allow that
61 * one in... 61 * one in...
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(clip_tbl_hook);
120#endif 120#endif
121 121
122#include <asm/system.h> 122#include <asm/system.h>
123#include <asm/uaccess.h> 123#include <linux/uaccess.h>
124 124
125#include <linux/netfilter_arp.h> 125#include <linux/netfilter_arp.h>
126 126
@@ -173,32 +173,32 @@ const struct neigh_ops arp_broken_ops = {
173EXPORT_SYMBOL(arp_broken_ops); 173EXPORT_SYMBOL(arp_broken_ops);
174 174
175struct neigh_table arp_tbl = { 175struct neigh_table arp_tbl = {
176 .family = AF_INET, 176 .family = AF_INET,
177 .entry_size = sizeof(struct neighbour) + 4, 177 .entry_size = sizeof(struct neighbour) + 4,
178 .key_len = 4, 178 .key_len = 4,
179 .hash = arp_hash, 179 .hash = arp_hash,
180 .constructor = arp_constructor, 180 .constructor = arp_constructor,
181 .proxy_redo = parp_redo, 181 .proxy_redo = parp_redo,
182 .id = "arp_cache", 182 .id = "arp_cache",
183 .parms = { 183 .parms = {
184 .tbl = &arp_tbl, 184 .tbl = &arp_tbl,
185 .base_reachable_time = 30 * HZ, 185 .base_reachable_time = 30 * HZ,
186 .retrans_time = 1 * HZ, 186 .retrans_time = 1 * HZ,
187 .gc_staletime = 60 * HZ, 187 .gc_staletime = 60 * HZ,
188 .reachable_time = 30 * HZ, 188 .reachable_time = 30 * HZ,
189 .delay_probe_time = 5 * HZ, 189 .delay_probe_time = 5 * HZ,
190 .queue_len = 3, 190 .queue_len = 3,
191 .ucast_probes = 3, 191 .ucast_probes = 3,
192 .mcast_probes = 3, 192 .mcast_probes = 3,
193 .anycast_delay = 1 * HZ, 193 .anycast_delay = 1 * HZ,
194 .proxy_delay = (8 * HZ) / 10, 194 .proxy_delay = (8 * HZ) / 10,
195 .proxy_qlen = 64, 195 .proxy_qlen = 64,
196 .locktime = 1 * HZ, 196 .locktime = 1 * HZ,
197 }, 197 },
198 .gc_interval = 30 * HZ, 198 .gc_interval = 30 * HZ,
199 .gc_thresh1 = 128, 199 .gc_thresh1 = 128,
200 .gc_thresh2 = 512, 200 .gc_thresh2 = 512,
201 .gc_thresh3 = 1024, 201 .gc_thresh3 = 1024,
202}; 202};
203EXPORT_SYMBOL(arp_tbl); 203EXPORT_SYMBOL(arp_tbl);
204 204
@@ -233,7 +233,7 @@ static u32 arp_hash(const void *pkey, const struct net_device *dev)
233 233
234static int arp_constructor(struct neighbour *neigh) 234static int arp_constructor(struct neighbour *neigh)
235{ 235{
236 __be32 addr = *(__be32*)neigh->primary_key; 236 __be32 addr = *(__be32 *)neigh->primary_key;
237 struct net_device *dev = neigh->dev; 237 struct net_device *dev = neigh->dev;
238 struct in_device *in_dev; 238 struct in_device *in_dev;
239 struct neigh_parms *parms; 239 struct neigh_parms *parms;
@@ -296,16 +296,19 @@ static int arp_constructor(struct neighbour *neigh)
296 neigh->ops = &arp_broken_ops; 296 neigh->ops = &arp_broken_ops;
297 neigh->output = neigh->ops->output; 297 neigh->output = neigh->ops->output;
298 return 0; 298 return 0;
299#else
300 break;
299#endif 301#endif
300 ;} 302 }
301#endif 303#endif
302 if (neigh->type == RTN_MULTICAST) { 304 if (neigh->type == RTN_MULTICAST) {
303 neigh->nud_state = NUD_NOARP; 305 neigh->nud_state = NUD_NOARP;
304 arp_mc_map(addr, neigh->ha, dev, 1); 306 arp_mc_map(addr, neigh->ha, dev, 1);
305 } else if (dev->flags&(IFF_NOARP|IFF_LOOPBACK)) { 307 } else if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) {
306 neigh->nud_state = NUD_NOARP; 308 neigh->nud_state = NUD_NOARP;
307 memcpy(neigh->ha, dev->dev_addr, dev->addr_len); 309 memcpy(neigh->ha, dev->dev_addr, dev->addr_len);
308 } else if (neigh->type == RTN_BROADCAST || dev->flags&IFF_POINTOPOINT) { 310 } else if (neigh->type == RTN_BROADCAST ||
311 (dev->flags & IFF_POINTOPOINT)) {
309 neigh->nud_state = NUD_NOARP; 312 neigh->nud_state = NUD_NOARP;
310 memcpy(neigh->ha, dev->broadcast, dev->addr_len); 313 memcpy(neigh->ha, dev->broadcast, dev->addr_len);
311 } 314 }
@@ -315,7 +318,7 @@ static int arp_constructor(struct neighbour *neigh)
315 else 318 else
316 neigh->ops = &arp_generic_ops; 319 neigh->ops = &arp_generic_ops;
317 320
318 if (neigh->nud_state&NUD_VALID) 321 if (neigh->nud_state & NUD_VALID)
319 neigh->output = neigh->ops->connected_output; 322 neigh->output = neigh->ops->connected_output;
320 else 323 else
321 neigh->output = neigh->ops->output; 324 neigh->output = neigh->ops->output;
@@ -334,7 +337,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
334 __be32 saddr = 0; 337 __be32 saddr = 0;
335 u8 *dst_ha = NULL; 338 u8 *dst_ha = NULL;
336 struct net_device *dev = neigh->dev; 339 struct net_device *dev = neigh->dev;
337 __be32 target = *(__be32*)neigh->primary_key; 340 __be32 target = *(__be32 *)neigh->primary_key;
338 int probes = atomic_read(&neigh->probes); 341 int probes = atomic_read(&neigh->probes);
339 struct in_device *in_dev; 342 struct in_device *in_dev;
340 343
@@ -347,7 +350,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
347 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) { 350 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
348 default: 351 default:
349 case 0: /* By default announce any local IP */ 352 case 0: /* By default announce any local IP */
350 if (skb && inet_addr_type(dev_net(dev), ip_hdr(skb)->saddr) == RTN_LOCAL) 353 if (skb && inet_addr_type(dev_net(dev),
354 ip_hdr(skb)->saddr) == RTN_LOCAL)
351 saddr = ip_hdr(skb)->saddr; 355 saddr = ip_hdr(skb)->saddr;
352 break; 356 break;
353 case 1: /* Restrict announcements of saddr in same subnet */ 357 case 1: /* Restrict announcements of saddr in same subnet */
@@ -369,16 +373,21 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
369 if (!saddr) 373 if (!saddr)
370 saddr = inet_select_addr(dev, target, RT_SCOPE_LINK); 374 saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
371 375
372 if ((probes -= neigh->parms->ucast_probes) < 0) { 376 probes -= neigh->parms->ucast_probes;
373 if (!(neigh->nud_state&NUD_VALID)) 377 if (probes < 0) {
374 printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n"); 378 if (!(neigh->nud_state & NUD_VALID))
379 printk(KERN_DEBUG
380 "trying to ucast probe in NUD_INVALID\n");
375 dst_ha = neigh->ha; 381 dst_ha = neigh->ha;
376 read_lock_bh(&neigh->lock); 382 read_lock_bh(&neigh->lock);
377 } else if ((probes -= neigh->parms->app_probes) < 0) { 383 } else {
384 probes -= neigh->parms->app_probes;
385 if (probes < 0) {
378#ifdef CONFIG_ARPD 386#ifdef CONFIG_ARPD
379 neigh_app_ns(neigh); 387 neigh_app_ns(neigh);
380#endif 388#endif
381 return; 389 return;
390 }
382 } 391 }
383 392
384 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, 393 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
@@ -451,7 +460,8 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
451 * is allowed to use this function, it is scheduled to be removed. --ANK 460 * is allowed to use this function, it is scheduled to be removed. --ANK
452 */ 461 */
453 462
454static int arp_set_predefined(int addr_hint, unsigned char * haddr, __be32 paddr, struct net_device * dev) 463static int arp_set_predefined(int addr_hint, unsigned char *haddr,
464 __be32 paddr, struct net_device *dev)
455{ 465{
456 switch (addr_hint) { 466 switch (addr_hint) {
457 case RTN_LOCAL: 467 case RTN_LOCAL:
@@ -483,7 +493,8 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
483 493
484 paddr = skb_rtable(skb)->rt_gateway; 494 paddr = skb_rtable(skb)->rt_gateway;
485 495
486 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev)) 496 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
497 paddr, dev))
487 return 0; 498 return 0;
488 499
489 n = __neigh_lookup(&arp_tbl, &paddr, dev, 1); 500 n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
@@ -515,13 +526,14 @@ int arp_bind_neighbour(struct dst_entry *dst)
515 return -EINVAL; 526 return -EINVAL;
516 if (n == NULL) { 527 if (n == NULL) {
517 __be32 nexthop = ((struct rtable *)dst)->rt_gateway; 528 __be32 nexthop = ((struct rtable *)dst)->rt_gateway;
518 if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT)) 529 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
519 nexthop = 0; 530 nexthop = 0;
520 n = __neigh_lookup_errno( 531 n = __neigh_lookup_errno(
521#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) 532#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
522 dev->type == ARPHRD_ATM ? clip_tbl_hook : 533 dev->type == ARPHRD_ATM ?
534 clip_tbl_hook :
523#endif 535#endif
524 &arp_tbl, &nexthop, dev); 536 &arp_tbl, &nexthop, dev);
525 if (IS_ERR(n)) 537 if (IS_ERR(n))
526 return PTR_ERR(n); 538 return PTR_ERR(n);
527 dst->neighbour = n; 539 dst->neighbour = n;
@@ -543,8 +555,8 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
543 555
544 if (!IN_DEV_PROXY_ARP(in_dev)) 556 if (!IN_DEV_PROXY_ARP(in_dev))
545 return 0; 557 return 0;
546 558 imi = IN_DEV_MEDIUM_ID(in_dev);
547 if ((imi = IN_DEV_MEDIUM_ID(in_dev)) == 0) 559 if (imi == 0)
548 return 1; 560 return 1;
549 if (imi == -1) 561 if (imi == -1)
550 return 0; 562 return 0;
@@ -685,7 +697,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
685 arp->ar_pln = 4; 697 arp->ar_pln = 4;
686 arp->ar_op = htons(type); 698 arp->ar_op = htons(type);
687 699
688 arp_ptr=(unsigned char *)(arp+1); 700 arp_ptr = (unsigned char *)(arp + 1);
689 701
690 memcpy(arp_ptr, src_hw, dev->addr_len); 702 memcpy(arp_ptr, src_hw, dev->addr_len);
691 arp_ptr += dev->addr_len; 703 arp_ptr += dev->addr_len;
@@ -735,9 +747,8 @@ void arp_send(int type, int ptype, __be32 dest_ip,
735 747
736 skb = arp_create(type, ptype, dest_ip, dev, src_ip, 748 skb = arp_create(type, ptype, dest_ip, dev, src_ip,
737 dest_hw, src_hw, target_hw); 749 dest_hw, src_hw, target_hw);
738 if (skb == NULL) { 750 if (skb == NULL)
739 return; 751 return;
740 }
741 752
742 arp_xmit(skb); 753 arp_xmit(skb);
743} 754}
@@ -815,7 +826,7 @@ static int arp_process(struct sk_buff *skb)
815/* 826/*
816 * Extract fields 827 * Extract fields
817 */ 828 */
818 arp_ptr= (unsigned char *)(arp+1); 829 arp_ptr = (unsigned char *)(arp + 1);
819 sha = arp_ptr; 830 sha = arp_ptr;
820 arp_ptr += dev->addr_len; 831 arp_ptr += dev->addr_len;
821 memcpy(&sip, arp_ptr, 4); 832 memcpy(&sip, arp_ptr, 4);
@@ -869,16 +880,17 @@ static int arp_process(struct sk_buff *skb)
869 addr_type = rt->rt_type; 880 addr_type = rt->rt_type;
870 881
871 if (addr_type == RTN_LOCAL) { 882 if (addr_type == RTN_LOCAL) {
872 int dont_send = 0; 883 int dont_send;
873 884
874 if (!dont_send) 885 dont_send = arp_ignore(in_dev, sip, tip);
875 dont_send |= arp_ignore(in_dev,sip,tip);
876 if (!dont_send && IN_DEV_ARPFILTER(in_dev)) 886 if (!dont_send && IN_DEV_ARPFILTER(in_dev))
877 dont_send |= arp_filter(sip,tip,dev); 887 dont_send |= arp_filter(sip, tip, dev);
878 if (!dont_send) { 888 if (!dont_send) {
879 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 889 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
880 if (n) { 890 if (n) {
881 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); 891 arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
892 dev, tip, sha, dev->dev_addr,
893 sha);
882 neigh_release(n); 894 neigh_release(n);
883 } 895 }
884 } 896 }
@@ -887,8 +899,7 @@ static int arp_process(struct sk_buff *skb)
887 if (addr_type == RTN_UNICAST && 899 if (addr_type == RTN_UNICAST &&
888 (arp_fwd_proxy(in_dev, dev, rt) || 900 (arp_fwd_proxy(in_dev, dev, rt) ||
889 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) || 901 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
890 pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) 902 pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
891 {
892 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 903 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
893 if (n) 904 if (n)
894 neigh_release(n); 905 neigh_release(n);
@@ -896,9 +907,12 @@ static int arp_process(struct sk_buff *skb)
896 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || 907 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
897 skb->pkt_type == PACKET_HOST || 908 skb->pkt_type == PACKET_HOST ||
898 in_dev->arp_parms->proxy_delay == 0) { 909 in_dev->arp_parms->proxy_delay == 0) {
899 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); 910 arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
911 dev, tip, sha, dev->dev_addr,
912 sha);
900 } else { 913 } else {
901 pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); 914 pneigh_enqueue(&arp_tbl,
915 in_dev->arp_parms, skb);
902 return 0; 916 return 0;
903 } 917 }
904 goto out; 918 goto out;
@@ -939,7 +953,8 @@ static int arp_process(struct sk_buff *skb)
939 if (arp->ar_op != htons(ARPOP_REPLY) || 953 if (arp->ar_op != htons(ARPOP_REPLY) ||
940 skb->pkt_type != PACKET_HOST) 954 skb->pkt_type != PACKET_HOST)
941 state = NUD_STALE; 955 state = NUD_STALE;
942 neigh_update(n, sha, state, override ? NEIGH_UPDATE_F_OVERRIDE : 0); 956 neigh_update(n, sha, state,
957 override ? NEIGH_UPDATE_F_OVERRIDE : 0);
943 neigh_release(n); 958 neigh_release(n);
944 } 959 }
945 960
@@ -975,7 +990,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
975 arp->ar_pln != 4) 990 arp->ar_pln != 4)
976 goto freeskb; 991 goto freeskb;
977 992
978 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 993 skb = skb_share_check(skb, GFP_ATOMIC);
994 if (skb == NULL)
979 goto out_of_mem; 995 goto out_of_mem;
980 996
981 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); 997 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
@@ -1019,7 +1035,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
1019 return -EINVAL; 1035 return -EINVAL;
1020 if (!dev && (r->arp_flags & ATF_COM)) { 1036 if (!dev && (r->arp_flags & ATF_COM)) {
1021 dev = dev_getbyhwaddr(net, r->arp_ha.sa_family, 1037 dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
1022 r->arp_ha.sa_data); 1038 r->arp_ha.sa_data);
1023 if (!dev) 1039 if (!dev)
1024 return -ENODEV; 1040 return -ENODEV;
1025 } 1041 }
@@ -1033,7 +1049,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
1033} 1049}
1034 1050
1035static int arp_req_set(struct net *net, struct arpreq *r, 1051static int arp_req_set(struct net *net, struct arpreq *r,
1036 struct net_device * dev) 1052 struct net_device *dev)
1037{ 1053{
1038 __be32 ip; 1054 __be32 ip;
1039 struct neighbour *neigh; 1055 struct neighbour *neigh;
@@ -1046,10 +1062,11 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1046 if (r->arp_flags & ATF_PERM) 1062 if (r->arp_flags & ATF_PERM)
1047 r->arp_flags |= ATF_COM; 1063 r->arp_flags |= ATF_COM;
1048 if (dev == NULL) { 1064 if (dev == NULL) {
1049 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, 1065 struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
1050 .tos = RTO_ONLINK } } }; 1066 .tos = RTO_ONLINK } };
1051 struct rtable * rt; 1067 struct rtable *rt;
1052 if ((err = ip_route_output_key(net, &rt, &fl)) != 0) 1068 err = ip_route_output_key(net, &rt, &fl);
1069 if (err != 0)
1053 return err; 1070 return err;
1054 dev = rt->dst.dev; 1071 dev = rt->dst.dev;
1055 ip_rt_put(rt); 1072 ip_rt_put(rt);
@@ -1083,9 +1100,9 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1083 unsigned state = NUD_STALE; 1100 unsigned state = NUD_STALE;
1084 if (r->arp_flags & ATF_PERM) 1101 if (r->arp_flags & ATF_PERM)
1085 state = NUD_PERMANENT; 1102 state = NUD_PERMANENT;
1086 err = neigh_update(neigh, (r->arp_flags&ATF_COM) ? 1103 err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
1087 r->arp_ha.sa_data : NULL, state, 1104 r->arp_ha.sa_data : NULL, state,
1088 NEIGH_UPDATE_F_OVERRIDE| 1105 NEIGH_UPDATE_F_OVERRIDE |
1089 NEIGH_UPDATE_F_ADMIN); 1106 NEIGH_UPDATE_F_ADMIN);
1090 neigh_release(neigh); 1107 neigh_release(neigh);
1091 } 1108 }
@@ -1094,12 +1111,12 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1094 1111
1095static unsigned arp_state_to_flags(struct neighbour *neigh) 1112static unsigned arp_state_to_flags(struct neighbour *neigh)
1096{ 1113{
1097 unsigned flags = 0;
1098 if (neigh->nud_state&NUD_PERMANENT) 1114 if (neigh->nud_state&NUD_PERMANENT)
1099 flags = ATF_PERM|ATF_COM; 1115 return ATF_PERM | ATF_COM;
1100 else if (neigh->nud_state&NUD_VALID) 1116 else if (neigh->nud_state&NUD_VALID)
1101 flags = ATF_COM; 1117 return ATF_COM;
1102 return flags; 1118 else
1119 return 0;
1103} 1120}
1104 1121
1105/* 1122/*
@@ -1142,7 +1159,7 @@ static int arp_req_delete_public(struct net *net, struct arpreq *r,
1142} 1159}
1143 1160
1144static int arp_req_delete(struct net *net, struct arpreq *r, 1161static int arp_req_delete(struct net *net, struct arpreq *r,
1145 struct net_device * dev) 1162 struct net_device *dev)
1146{ 1163{
1147 int err; 1164 int err;
1148 __be32 ip; 1165 __be32 ip;
@@ -1153,10 +1170,11 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1153 1170
1154 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; 1171 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
1155 if (dev == NULL) { 1172 if (dev == NULL) {
1156 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, 1173 struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
1157 .tos = RTO_ONLINK } } }; 1174 .tos = RTO_ONLINK } };
1158 struct rtable * rt; 1175 struct rtable *rt;
1159 if ((err = ip_route_output_key(net, &rt, &fl)) != 0) 1176 err = ip_route_output_key(net, &rt, &fl);
1177 if (err != 0)
1160 return err; 1178 return err;
1161 dev = rt->dst.dev; 1179 dev = rt->dst.dev;
1162 ip_rt_put(rt); 1180 ip_rt_put(rt);
@@ -1166,7 +1184,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1166 err = -ENXIO; 1184 err = -ENXIO;
1167 neigh = neigh_lookup(&arp_tbl, &ip, dev); 1185 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1168 if (neigh) { 1186 if (neigh) {
1169 if (neigh->nud_state&~NUD_NOARP) 1187 if (neigh->nud_state & ~NUD_NOARP)
1170 err = neigh_update(neigh, NULL, NUD_FAILED, 1188 err = neigh_update(neigh, NULL, NUD_FAILED,
1171 NEIGH_UPDATE_F_OVERRIDE| 1189 NEIGH_UPDATE_F_OVERRIDE|
1172 NEIGH_UPDATE_F_ADMIN); 1190 NEIGH_UPDATE_F_ADMIN);
@@ -1186,24 +1204,24 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1186 struct net_device *dev = NULL; 1204 struct net_device *dev = NULL;
1187 1205
1188 switch (cmd) { 1206 switch (cmd) {
1189 case SIOCDARP: 1207 case SIOCDARP:
1190 case SIOCSARP: 1208 case SIOCSARP:
1191 if (!capable(CAP_NET_ADMIN)) 1209 if (!capable(CAP_NET_ADMIN))
1192 return -EPERM; 1210 return -EPERM;
1193 case SIOCGARP: 1211 case SIOCGARP:
1194 err = copy_from_user(&r, arg, sizeof(struct arpreq)); 1212 err = copy_from_user(&r, arg, sizeof(struct arpreq));
1195 if (err) 1213 if (err)
1196 return -EFAULT; 1214 return -EFAULT;
1197 break; 1215 break;
1198 default: 1216 default:
1199 return -EINVAL; 1217 return -EINVAL;
1200 } 1218 }
1201 1219
1202 if (r.arp_pa.sa_family != AF_INET) 1220 if (r.arp_pa.sa_family != AF_INET)
1203 return -EPFNOSUPPORT; 1221 return -EPFNOSUPPORT;
1204 1222
1205 if (!(r.arp_flags & ATF_PUBL) && 1223 if (!(r.arp_flags & ATF_PUBL) &&
1206 (r.arp_flags & (ATF_NETMASK|ATF_DONTPUB))) 1224 (r.arp_flags & (ATF_NETMASK | ATF_DONTPUB)))
1207 return -EINVAL; 1225 return -EINVAL;
1208 if (!(r.arp_flags & ATF_NETMASK)) 1226 if (!(r.arp_flags & ATF_NETMASK))
1209 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr = 1227 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
@@ -1211,7 +1229,8 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1211 rtnl_lock(); 1229 rtnl_lock();
1212 if (r.arp_dev[0]) { 1230 if (r.arp_dev[0]) {
1213 err = -ENODEV; 1231 err = -ENODEV;
1214 if ((dev = __dev_get_by_name(net, r.arp_dev)) == NULL) 1232 dev = __dev_get_by_name(net, r.arp_dev);
1233 if (dev == NULL)
1215 goto out; 1234 goto out;
1216 1235
1217 /* Mmmm... It is wrong... ARPHRD_NETROM==0 */ 1236 /* Mmmm... It is wrong... ARPHRD_NETROM==0 */
@@ -1243,7 +1262,8 @@ out:
1243 return err; 1262 return err;
1244} 1263}
1245 1264
1246static int arp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1265static int arp_netdev_event(struct notifier_block *this, unsigned long event,
1266 void *ptr)
1247{ 1267{
1248 struct net_device *dev = ptr; 1268 struct net_device *dev = ptr;
1249 1269
@@ -1311,12 +1331,13 @@ static char *ax2asc2(ax25_address *a, char *buf)
1311 for (n = 0, s = buf; n < 6; n++) { 1331 for (n = 0, s = buf; n < 6; n++) {
1312 c = (a->ax25_call[n] >> 1) & 0x7F; 1332 c = (a->ax25_call[n] >> 1) & 0x7F;
1313 1333
1314 if (c != ' ') *s++ = c; 1334 if (c != ' ')
1335 *s++ = c;
1315 } 1336 }
1316 1337
1317 *s++ = '-'; 1338 *s++ = '-';
1318 1339 n = (a->ax25_call[6] >> 1) & 0x0F;
1319 if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) { 1340 if (n > 9) {
1320 *s++ = '1'; 1341 *s++ = '1';
1321 n -= 10; 1342 n -= 10;
1322 } 1343 }
@@ -1325,10 +1346,9 @@ static char *ax2asc2(ax25_address *a, char *buf)
1325 *s++ = '\0'; 1346 *s++ = '\0';
1326 1347
1327 if (*buf == '\0' || *buf == '-') 1348 if (*buf == '\0' || *buf == '-')
1328 return "*"; 1349 return "*";
1329 1350
1330 return buf; 1351 return buf;
1331
1332} 1352}
1333#endif /* CONFIG_AX25 */ 1353#endif /* CONFIG_AX25 */
1334 1354
@@ -1408,10 +1428,10 @@ static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
1408/* ------------------------------------------------------------------------ */ 1428/* ------------------------------------------------------------------------ */
1409 1429
1410static const struct seq_operations arp_seq_ops = { 1430static const struct seq_operations arp_seq_ops = {
1411 .start = arp_seq_start, 1431 .start = arp_seq_start,
1412 .next = neigh_seq_next, 1432 .next = neigh_seq_next,
1413 .stop = neigh_seq_stop, 1433 .stop = neigh_seq_stop,
1414 .show = arp_seq_show, 1434 .show = arp_seq_show,
1415}; 1435};
1416 1436
1417static int arp_seq_open(struct inode *inode, struct file *file) 1437static int arp_seq_open(struct inode *inode, struct file *file)
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
new file mode 100644
index 000000000000..b546736da2e1
--- /dev/null
+++ b/net/ipv4/gre.c
@@ -0,0 +1,151 @@
1/*
2 * GRE over IPv4 demultiplexer driver
3 *
4 * Authors: Dmitry Kozlov (xeb@mail.ru)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/kmod.h>
16#include <linux/skbuff.h>
17#include <linux/in.h>
18#include <linux/netdevice.h>
19#include <linux/version.h>
20#include <linux/spinlock.h>
21#include <net/protocol.h>
22#include <net/gre.h>
23
24
25const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
26static DEFINE_SPINLOCK(gre_proto_lock);
27
28int gre_add_protocol(const struct gre_protocol *proto, u8 version)
29{
30 if (version >= GREPROTO_MAX)
31 goto err_out;
32
33 spin_lock(&gre_proto_lock);
34 if (gre_proto[version])
35 goto err_out_unlock;
36
37 rcu_assign_pointer(gre_proto[version], proto);
38 spin_unlock(&gre_proto_lock);
39 return 0;
40
41err_out_unlock:
42 spin_unlock(&gre_proto_lock);
43err_out:
44 return -1;
45}
46EXPORT_SYMBOL_GPL(gre_add_protocol);
47
48int gre_del_protocol(const struct gre_protocol *proto, u8 version)
49{
50 if (version >= GREPROTO_MAX)
51 goto err_out;
52
53 spin_lock(&gre_proto_lock);
54 if (gre_proto[version] != proto)
55 goto err_out_unlock;
56 rcu_assign_pointer(gre_proto[version], NULL);
57 spin_unlock(&gre_proto_lock);
58 synchronize_rcu();
59 return 0;
60
61err_out_unlock:
62 spin_unlock(&gre_proto_lock);
63err_out:
64 return -1;
65}
66EXPORT_SYMBOL_GPL(gre_del_protocol);
67
68static int gre_rcv(struct sk_buff *skb)
69{
70 const struct gre_protocol *proto;
71 u8 ver;
72 int ret;
73
74 if (!pskb_may_pull(skb, 12))
75 goto drop;
76
77 ver = skb->data[1]&0x7f;
78 if (ver >= GREPROTO_MAX)
79 goto drop;
80
81 rcu_read_lock();
82 proto = rcu_dereference(gre_proto[ver]);
83 if (!proto || !proto->handler)
84 goto drop_unlock;
85 ret = proto->handler(skb);
86 rcu_read_unlock();
87 return ret;
88
89drop_unlock:
90 rcu_read_unlock();
91drop:
92 kfree_skb(skb);
93 return NET_RX_DROP;
94}
95
96static void gre_err(struct sk_buff *skb, u32 info)
97{
98 const struct gre_protocol *proto;
99 u8 ver;
100
101 if (!pskb_may_pull(skb, 12))
102 goto drop;
103
104 ver = skb->data[1]&0x7f;
105 if (ver >= GREPROTO_MAX)
106 goto drop;
107
108 rcu_read_lock();
109 proto = rcu_dereference(gre_proto[ver]);
110 if (!proto || !proto->err_handler)
111 goto drop_unlock;
112 proto->err_handler(skb, info);
113 rcu_read_unlock();
114 return;
115
116drop_unlock:
117 rcu_read_unlock();
118drop:
119 kfree_skb(skb);
120}
121
122static const struct net_protocol net_gre_protocol = {
123 .handler = gre_rcv,
124 .err_handler = gre_err,
125 .netns_ok = 1,
126};
127
128static int __init gre_init(void)
129{
130 pr_info("GRE over IPv4 demultiplexor driver");
131
132 if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
133 pr_err("gre: can't add protocol\n");
134 return -EAGAIN;
135 }
136
137 return 0;
138}
139
140static void __exit gre_exit(void)
141{
142 inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
143}
144
145module_init(gre_init);
146module_exit(gre_exit);
147
148MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
149MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
150MODULE_LICENSE("GPL");
151
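The demultiplexer above dispatches purely on the GRE version field (skb->data[1] & 0x7f), so at most GREPROTO_MAX sub-protocols can register: GREPROTO_CISCO (version 0, classic GRE, claimed by ip_gre below) and GREPROTO_PPTP (version 1). A minimal sketch of how a version-1 consumer would hook in, using the API added here; the my_gre_v1_* names are hypothetical:

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <net/gre.h>

    /* hypothetical version-1 (PPTP-style) receive handler */
    static int my_gre_v1_rcv(struct sk_buff *skb)
    {
            /* a real handler would demux by call id; just drop here */
            kfree_skb(skb);
            return NET_RX_DROP;
    }

    static void my_gre_v1_err(struct sk_buff *skb, u32 info)
    {
            /* ICMP errors for our tunnels would be handled here */
    }

    static const struct gre_protocol my_gre_v1 = {
            .handler     = my_gre_v1_rcv,
            .err_handler = my_gre_v1_err,
    };

    static int __init my_gre_init(void)
    {
            /* returns -1 if another version-1 handler already won the slot */
            if (gre_add_protocol(&my_gre_v1, GREPROTO_PPTP) < 0)
                    return -EAGAIN;
            return 0;
    }

    static void __exit my_gre_exit(void)
    {
            gre_del_protocol(&my_gre_v1, GREPROTO_PPTP);
    }

    module_init(my_gre_init);
    module_exit(my_gre_exit);
    MODULE_LICENSE("GPL");

Note that gre_del_protocol() ends with synchronize_rcu(), so once it returns no CPU can still be executing the handler and the module is safe to unload.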
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a0d847c7cba5..96bc7f9475a3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -379,7 +379,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
379 inet->tos = ip_hdr(skb)->tos; 379 inet->tos = ip_hdr(skb)->tos;
380 daddr = ipc.addr = rt->rt_src; 380 daddr = ipc.addr = rt->rt_src;
381 ipc.opt = NULL; 381 ipc.opt = NULL;
382 ipc.shtx.flags = 0; 382 ipc.tx_flags = 0;
383 if (icmp_param->replyopts.optlen) { 383 if (icmp_param->replyopts.optlen) {
384 ipc.opt = &icmp_param->replyopts; 384 ipc.opt = &icmp_param->replyopts;
385 if (ipc.opt->srr) 385 if (ipc.opt->srr)
@@ -538,7 +538,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
538 inet_sk(sk)->tos = tos; 538 inet_sk(sk)->tos = tos;
539 ipc.addr = iph->saddr; 539 ipc.addr = iph->saddr;
540 ipc.opt = &icmp_param.replyopts; 540 ipc.opt = &icmp_param.replyopts;
541 ipc.shtx.flags = 0; 541 ipc.tx_flags = 0;
542 542
543 { 543 {
544 struct flowi fl = { 544 struct flowi fl = {
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b7c41654dde5..f4dc879e258e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -542,7 +542,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
542 /* If the first fragment is fragmented itself, we split 542 /* If the first fragment is fragmented itself, we split
543 * it to two chunks: the first with data and paged part 543 * it to two chunks: the first with data and paged part
544 * and the second, holding only fragments. */ 544 * and the second, holding only fragments. */
545 if (skb_has_frags(head)) { 545 if (skb_has_frag_list(head)) {
546 struct sk_buff *clone; 546 struct sk_buff *clone;
547 int i, plen = 0; 547 int i, plen = 0;
548 548
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 945b20a5ad50..85176895495a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -44,6 +44,7 @@
44#include <net/net_namespace.h> 44#include <net/net_namespace.h>
45#include <net/netns/generic.h> 45#include <net/netns/generic.h>
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47#include <net/gre.h>
47 48
48#ifdef CONFIG_IPV6 49#ifdef CONFIG_IPV6
49#include <net/ipv6.h> 50#include <net/ipv6.h>
@@ -1278,10 +1279,9 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
1278} 1279}
1279 1280
1280 1281
1281static const struct net_protocol ipgre_protocol = { 1282static const struct gre_protocol ipgre_protocol = {
1282 .handler = ipgre_rcv, 1283 .handler = ipgre_rcv,
1283 .err_handler = ipgre_err, 1284 .err_handler = ipgre_err,
1284 .netns_ok = 1,
1285}; 1285};
1286 1286
1287static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head) 1287static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
@@ -1663,7 +1663,7 @@ static int __init ipgre_init(void)
1663 if (err < 0) 1663 if (err < 0)
1664 return err; 1664 return err;
1665 1665
1666 err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE); 1666 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1667 if (err < 0) { 1667 if (err < 0) {
1668 printk(KERN_INFO "ipgre init: can't add protocol\n"); 1668 printk(KERN_INFO "ipgre init: can't add protocol\n");
1669 goto add_proto_failed; 1669 goto add_proto_failed;
@@ -1683,7 +1683,7 @@ out:
1683tap_ops_failed: 1683tap_ops_failed:
1684 rtnl_link_unregister(&ipgre_link_ops); 1684 rtnl_link_unregister(&ipgre_link_ops);
1685rtnl_link_failed: 1685rtnl_link_failed:
1686 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE); 1686 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1687add_proto_failed: 1687add_proto_failed:
1688 unregister_pernet_device(&ipgre_net_ops); 1688 unregister_pernet_device(&ipgre_net_ops);
1689 goto out; 1689 goto out;
@@ -1693,7 +1693,7 @@ static void __exit ipgre_fini(void)
1693{ 1693{
1694 rtnl_link_unregister(&ipgre_tap_ops); 1694 rtnl_link_unregister(&ipgre_tap_ops);
1695 rtnl_link_unregister(&ipgre_link_ops); 1695 rtnl_link_unregister(&ipgre_link_ops);
1696 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) 1696 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1697 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1697 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1698 unregister_pernet_device(&ipgre_net_ops); 1698 unregister_pernet_device(&ipgre_net_ops);
1699} 1699}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04b69896df5f..e42762023c27 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -487,7 +487,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
487 * LATER: this step can be merged to real generation of fragments, 487 * LATER: this step can be merged to real generation of fragments,
488 * we can switch to copy when see the first bad fragment. 488 * we can switch to copy when see the first bad fragment.
489 */ 489 */
490 if (skb_has_frags(skb)) { 490 if (skb_has_frag_list(skb)) {
491 struct sk_buff *frag; 491 struct sk_buff *frag;
492 int first_len = skb_pagelen(skb); 492 int first_len = skb_pagelen(skb);
493 int truesizes = 0; 493 int truesizes = 0;
@@ -837,10 +837,9 @@ int ip_append_data(struct sock *sk,
837 inet->cork.length = 0; 837 inet->cork.length = 0;
838 sk->sk_sndmsg_page = NULL; 838 sk->sk_sndmsg_page = NULL;
839 sk->sk_sndmsg_off = 0; 839 sk->sk_sndmsg_off = 0;
840 if ((exthdrlen = rt->dst.header_len) != 0) { 840 exthdrlen = rt->dst.header_len;
841 length += exthdrlen; 841 length += exthdrlen;
842 transhdrlen += exthdrlen; 842 transhdrlen += exthdrlen;
843 }
844 } else { 843 } else {
845 rt = (struct rtable *)inet->cork.dst; 844 rt = (struct rtable *)inet->cork.dst;
846 if (inet->cork.flags & IPCORK_OPT) 845 if (inet->cork.flags & IPCORK_OPT)
@@ -953,7 +952,7 @@ alloc_new_skb:
953 else 952 else
954 /* only the initial fragment is 953 /* only the initial fragment is
955 time stamped */ 954 time stamped */
956 ipc->shtx.flags = 0; 955 ipc->tx_flags = 0;
957 } 956 }
958 if (skb == NULL) 957 if (skb == NULL)
959 goto error; 958 goto error;
@@ -964,7 +963,7 @@ alloc_new_skb:
964 skb->ip_summed = csummode; 963 skb->ip_summed = csummode;
965 skb->csum = 0; 964 skb->csum = 0;
966 skb_reserve(skb, hh_len); 965 skb_reserve(skb, hh_len);
967 *skb_tx(skb) = ipc->shtx; 966 skb_shinfo(skb)->tx_flags = ipc->tx_flags;
968 967
969 /* 968 /*
970 * Find where to start putting bytes. 969 * Find where to start putting bytes.
@@ -1384,7 +1383,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1384 1383
1385 daddr = ipc.addr = rt->rt_src; 1384 daddr = ipc.addr = rt->rt_src;
1386 ipc.opt = NULL; 1385 ipc.opt = NULL;
1387 ipc.shtx.flags = 0; 1386 ipc.tx_flags = 0;
1388 1387
1389 if (replyopts.opt.optlen) { 1388 if (replyopts.opt.optlen) {
1390 ipc.opt = &replyopts.opt; 1389 ipc.opt = &replyopts.opt;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec036731a70b..3c6f8f3968a6 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -744,7 +744,7 @@ static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
744 ipn->tunnels_wc[0] = tunnel; 744 ipn->tunnels_wc[0] = tunnel;
745} 745}
746 746
747static struct xfrm_tunnel ipip_handler = { 747static struct xfrm_tunnel ipip_handler __read_mostly = {
748 .handler = ipip_rcv, 748 .handler = ipip_rcv,
749 .err_handler = ipip_err, 749 .err_handler = ipip_err,
750 .priority = 1, 750 .priority = 1,
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 3a43cf36db87..1e26a4897655 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -29,6 +29,7 @@
29#include <net/netfilter/nf_conntrack.h> 29#include <net/netfilter/nf_conntrack.h>
30#include <net/net_namespace.h> 30#include <net/net_namespace.h>
31#include <net/checksum.h> 31#include <net/checksum.h>
32#include <net/ip.h>
32 33
33#define CLUSTERIP_VERSION "0.8" 34#define CLUSTERIP_VERSION "0.8"
34 35
@@ -231,24 +232,22 @@ clusterip_hashfn(const struct sk_buff *skb,
231{ 232{
232 const struct iphdr *iph = ip_hdr(skb); 233 const struct iphdr *iph = ip_hdr(skb);
233 unsigned long hashval; 234 unsigned long hashval;
234 u_int16_t sport, dport; 235 u_int16_t sport = 0, dport = 0;
235 const u_int16_t *ports; 236 int poff;
236 237
237 switch (iph->protocol) { 238 poff = proto_ports_offset(iph->protocol);
238 case IPPROTO_TCP: 239 if (poff >= 0) {
239 case IPPROTO_UDP: 240 const u_int16_t *ports;
240 case IPPROTO_UDPLITE: 241 u16 _ports[2];
241 case IPPROTO_SCTP: 242
242 case IPPROTO_DCCP: 243 ports = skb_header_pointer(skb, iph->ihl * 4 + poff, 4, _ports);
243 case IPPROTO_ICMP: 244 if (ports) {
244 ports = (const void *)iph+iph->ihl*4; 245 sport = ports[0];
245 sport = ports[0]; 246 dport = ports[1];
246 dport = ports[1]; 247 }
247 break; 248 } else {
248 default:
249 if (net_ratelimit()) 249 if (net_ratelimit())
250 pr_info("unknown protocol %u\n", iph->protocol); 250 pr_info("unknown protocol %u\n", iph->protocol);
251 sport = dport = 0;
252 } 251 }
253 252
254 switch (config->hash_mode) { 253 switch (config->hash_mode) {
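proto_ports_offset() is the helper this hunk leans on: it maps a transport protocol to the byte offset of its 16-bit port (or SPI) pair within the transport header, or a negative value for protocols that carry none. As a sketch of its shape (the real helper lives in include/net/ip.h; treat the exact case list as an approximation):

    static inline int proto_ports_offset(int proto)
    {
            switch (proto) {
            case IPPROTO_TCP:
            case IPPROTO_UDP:
            case IPPROTO_DCCP:
            case IPPROTO_ESP:       /* SPI */
            case IPPROTO_SCTP:
            case IPPROTO_UDPLITE:
                    return 0;
            case IPPROTO_AH:        /* SPI */
                    return 4;
            default:
                    return -EINVAL;
            }
    }

The switch to skb_header_pointer() is the substantive fix: the old code read the ports directly at iph + ihl*4, which silently assumed the transport header sat in the linear skb area; the new code copies through _ports[] when it does not.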
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index f2d297351405..65699c24411c 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,8 +28,7 @@
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <net/protocol.h> 29#include <net/protocol.h>
30 30
31const struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp; 31const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
32static DEFINE_SPINLOCK(inet_proto_lock);
33 32
34/* 33/*
35 * Add a protocol handler to the hash tables 34 * Add a protocol handler to the hash tables
@@ -37,20 +36,9 @@ static DEFINE_SPINLOCK(inet_proto_lock);
37 36
38int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) 37int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
39{ 38{
40 int hash, ret; 39 int hash = protocol & (MAX_INET_PROTOS - 1);
41 40
42 hash = protocol & (MAX_INET_PROTOS - 1); 41 return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
43
44 spin_lock_bh(&inet_proto_lock);
45 if (inet_protos[hash]) {
46 ret = -1;
47 } else {
48 inet_protos[hash] = prot;
49 ret = 0;
50 }
51 spin_unlock_bh(&inet_proto_lock);
52
53 return ret;
54} 42}
55EXPORT_SYMBOL(inet_add_protocol); 43EXPORT_SYMBOL(inet_add_protocol);
56 44
@@ -60,18 +48,9 @@ EXPORT_SYMBOL(inet_add_protocol);
60 48
61int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) 49int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
62{ 50{
63 int hash, ret; 51 int ret, hash = protocol & (MAX_INET_PROTOS - 1);
64
65 hash = protocol & (MAX_INET_PROTOS - 1);
66 52
67 spin_lock_bh(&inet_proto_lock); 53 ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
68 if (inet_protos[hash] == prot) {
69 inet_protos[hash] = NULL;
70 ret = 0;
71 } else {
72 ret = -1;
73 }
74 spin_unlock_bh(&inet_proto_lock);
75 54
76 synchronize_net(); 55 synchronize_net();
77 56
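The spinlock can go because registration is a single pointer transition on one array slot: NULL -> prot on add, prot -> NULL on delete. cmpxchg() performs the transition atomically and returns the previous value, so the slot itself serializes concurrent registrations. A small userspace analogue of the semantics (GCC's __sync builtin standing in for the kernel's cmpxchg):

    #include <stdio.h>

    static const void *slot;        /* one inet_protos[] entry */

    /* 0 on success, -1 if the slot was already claimed */
    static int add_protocol(const void *prot)
    {
            return __sync_val_compare_and_swap(&slot, NULL, prot) == NULL ? 0 : -1;
    }

    static int del_protocol(const void *prot)
    {
            return __sync_val_compare_and_swap(&slot, prot, NULL) == prot ? 0 : -1;
    }

    int main(void)
    {
            int a, b;

            printf("add a: %d\n", add_protocol(&a));   /*  0 */
            printf("add b: %d\n", add_protocol(&b));   /* -1: slot taken */
            printf("del b: %d\n", del_protocol(&b));   /* -1: not the owner */
            printf("del a: %d\n", del_protocol(&a));   /*  0 */
            return 0;
    }

Readers never took the lock anyway; the synchronize_net() kept on the delete path is what lets in-flight receive paths drain before a handler module disappears.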
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 009a7b2aa1ef..1f85ef289895 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -505,7 +505,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
505 505
506 ipc.addr = inet->inet_saddr; 506 ipc.addr = inet->inet_saddr;
507 ipc.opt = NULL; 507 ipc.opt = NULL;
508 ipc.shtx.flags = 0; 508 ipc.tx_flags = 0;
509 ipc.oif = sk->sk_bound_dev_if; 509 ipc.oif = sk->sk_bound_dev_if;
510 510
511 if (msg->msg_controllen) { 511 if (msg->msg_controllen) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6298f75d5e93..e24d48dd99d3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1268,18 +1268,11 @@ skip_hashing:
1268 1268
1269void rt_bind_peer(struct rtable *rt, int create) 1269void rt_bind_peer(struct rtable *rt, int create)
1270{ 1270{
1271 static DEFINE_SPINLOCK(rt_peer_lock);
1272 struct inet_peer *peer; 1271 struct inet_peer *peer;
1273 1272
1274 peer = inet_getpeer(rt->rt_dst, create); 1273 peer = inet_getpeer(rt->rt_dst, create);
1275 1274
1276 spin_lock_bh(&rt_peer_lock); 1275 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1277 if (rt->peer == NULL) {
1278 rt->peer = peer;
1279 peer = NULL;
1280 }
1281 spin_unlock_bh(&rt_peer_lock);
1282 if (peer)
1283 inet_putpeer(peer); 1276 inet_putpeer(peer);
1284} 1277}
1285 1278
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3fb1428e526e..3e8a4dbc721b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2389,7 +2389,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2389 err = tp->af_specific->md5_parse(sk, optval, optlen); 2389 err = tp->af_specific->md5_parse(sk, optval, optlen);
2390 break; 2390 break;
2391#endif 2391#endif
2392 2392 case TCP_USER_TIMEOUT:
2393 /* Cap the max timeout in ms TCP will retry/retrans
2394 * before giving up and aborting (ETIMEDOUT) a connection.
2395 */
2396 icsk->icsk_user_timeout = msecs_to_jiffies(val);
2397 break;
2393 default: 2398 default:
2394 err = -ENOPROTOOPT; 2399 err = -ENOPROTOOPT;
2395 break; 2400 break;
@@ -2608,6 +2613,10 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2608 case TCP_THIN_DUPACK: 2613 case TCP_THIN_DUPACK:
2609 val = tp->thin_dupack; 2614 val = tp->thin_dupack;
2610 break; 2615 break;
2616
2617 case TCP_USER_TIMEOUT:
2618 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2619 break;
2611 default: 2620 default:
2612 return -ENOPROTOOPT; 2621 return -ENOPROTOOPT;
2613 } 2622 }
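TCP_USER_TIMEOUT is a per-socket cap, in milliseconds, on how long written data may stay unacknowledged (and, per the tcp_timer.c hunks below, how long keepalive probes may go unanswered) before the kernel aborts the connection with ETIMEDOUT. From userspace it is an ordinary setsockopt; a minimal example, with the option value guarded for older headers:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    #ifndef TCP_USER_TIMEOUT
    #define TCP_USER_TIMEOUT 18     /* from linux/tcp.h */
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            unsigned int timeout_ms = 30 * 1000;    /* abort after ~30s unacked */

            if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                           &timeout_ms, sizeof(timeout_ms)) < 0)
                    perror("setsockopt(TCP_USER_TIMEOUT)");
            return 0;
    }

Setting it to 0 (the default) restores the retry-count driven behaviour based on tcp_retries2.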
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e663b78a2ef6..1bc87a05c734 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -805,25 +805,12 @@ void tcp_update_metrics(struct sock *sk)
805 } 805 }
806} 806}
807 807
808/* Numbers are taken from RFC3390.
809 *
810 * John Heffner states:
811 *
812 * The RFC specifies a window of no more than 4380 bytes
813 * unless 2*MSS > 4380. Reading the pseudocode in the RFC
814 * is a bit misleading because they use a clamp at 4380 bytes
815 * rather than use a multiplier in the relevant range.
816 */
817__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst) 808__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
818{ 809{
819 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); 810 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
820 811
821 if (!cwnd) { 812 if (!cwnd)
822 if (tp->mss_cache > 1460) 813 cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
823 cwnd = 2;
824 else
825 cwnd = (tp->mss_cache > 1095) ? 3 : 4;
826 }
827 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); 814 return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
828} 815}
829 816
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 020766292bb0..a0232f3a358b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2571,7 +2571,6 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2571 2571
2572 return tcp_gro_receive(head, skb); 2572 return tcp_gro_receive(head, skb);
2573} 2573}
2574EXPORT_SYMBOL(tcp4_gro_receive);
2575 2574
2576int tcp4_gro_complete(struct sk_buff *skb) 2575int tcp4_gro_complete(struct sk_buff *skb)
2577{ 2576{
@@ -2584,7 +2583,6 @@ int tcp4_gro_complete(struct sk_buff *skb)
2584 2583
2585 return tcp_gro_complete(skb); 2584 return tcp_gro_complete(skb);
2586} 2585}
2587EXPORT_SYMBOL(tcp4_gro_complete);
2588 2586
2589struct proto tcp_prot = { 2587struct proto tcp_prot = {
2590 .name = "TCP", 2588 .name = "TCP",
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index de3bd8458588..ea09d2fd50c7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -224,16 +224,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
224 } 224 }
225 } 225 }
226 226
227 /* Set initial window to value enough for senders, 227 /* Set initial window to value enough for senders, following RFC5681. */
228 * following RFC2414. Senders, not following this RFC,
229 * will be satisfied with 2.
230 */
231 if (mss > (1 << *rcv_wscale)) { 228 if (mss > (1 << *rcv_wscale)) {
232 int init_cwnd = 4; 229 int init_cwnd = rfc3390_bytes_to_packets(mss);
233 if (mss > 1460 * 3) 230
234 init_cwnd = 2;
235 else if (mss > 1460)
236 init_cwnd = 3;
237 /* when initializing use the value from init_rcv_wnd 231 /* when initializing use the value from init_rcv_wnd
238 * rather than the default from above 232 * rather than the default from above
239 */ 233 */
@@ -2429,6 +2423,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2429 __u8 rcv_wscale; 2423 __u8 rcv_wscale;
2430 /* Set this up on the first call only */ 2424 /* Set this up on the first call only */
2431 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2425 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2426
2427 /* limit the window selection if the user enforce a smaller rx buffer */
2428 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2429 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2430 req->window_clamp = tcp_full_space(sk);
2431
2432 /* tcp_full_space because it is guaranteed to be the first packet */ 2432 /* tcp_full_space because it is guaranteed to be the first packet */
2433 tcp_select_initial_window(tcp_full_space(sk), 2433 tcp_select_initial_window(tcp_full_space(sk),
2434 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2434 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
@@ -2555,6 +2555,11 @@ static void tcp_connect_init(struct sock *sk)
2555 2555
2556 tcp_initialize_rcv_mss(sk); 2556 tcp_initialize_rcv_mss(sk);
2557 2557
2558 /* limit the window selection if the user enforce a smaller rx buffer */
2559 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2560 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2561 tp->window_clamp = tcp_full_space(sk);
2562
2558 tcp_select_initial_window(tcp_full_space(sk), 2563 tcp_select_initial_window(tcp_full_space(sk),
2559 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2564 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2560 &tp->rcv_wnd, 2565 &tp->rcv_wnd,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c35b469e851c..baea4a129022 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -138,10 +138,10 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
138 * retransmissions with an initial RTO of TCP_RTO_MIN. 138 * retransmissions with an initial RTO of TCP_RTO_MIN.
139 */ 139 */
140static bool retransmits_timed_out(struct sock *sk, 140static bool retransmits_timed_out(struct sock *sk,
141 unsigned int boundary) 141 unsigned int boundary,
142 unsigned int timeout)
142{ 143{
143 unsigned int timeout, linear_backoff_thresh; 144 unsigned int linear_backoff_thresh, start_ts;
144 unsigned int start_ts;
145 145
146 if (!inet_csk(sk)->icsk_retransmits) 146 if (!inet_csk(sk)->icsk_retransmits)
147 return false; 147 return false;
@@ -151,14 +151,15 @@ static bool retransmits_timed_out(struct sock *sk,
151 else 151 else
152 start_ts = tcp_sk(sk)->retrans_stamp; 152 start_ts = tcp_sk(sk)->retrans_stamp;
153 153
154 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); 154 if (likely(timeout == 0)) {
155 155 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
156 if (boundary <= linear_backoff_thresh)
157 timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
158 else
159 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
160 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
161 156
157 if (boundary <= linear_backoff_thresh)
158 timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
159 else
160 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
161 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
162 }
162 return (tcp_time_stamp - start_ts) >= timeout; 163 return (tcp_time_stamp - start_ts) >= timeout;
163} 164}
164 165
@@ -174,7 +175,7 @@ static int tcp_write_timeout(struct sock *sk)
174 dst_negative_advice(sk); 175 dst_negative_advice(sk);
175 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 176 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
176 } else { 177 } else {
177 if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { 178 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
178 /* Black hole detection */ 179 /* Black hole detection */
179 tcp_mtu_probing(icsk, sk); 180 tcp_mtu_probing(icsk, sk);
180 181
@@ -187,14 +188,16 @@ static int tcp_write_timeout(struct sock *sk)
187 188
188 retry_until = tcp_orphan_retries(sk, alive); 189 retry_until = tcp_orphan_retries(sk, alive);
189 do_reset = alive || 190 do_reset = alive ||
190 !retransmits_timed_out(sk, retry_until); 191 !retransmits_timed_out(sk, retry_until, 0);
191 192
192 if (tcp_out_of_resources(sk, do_reset)) 193 if (tcp_out_of_resources(sk, do_reset))
193 return 1; 194 return 1;
194 } 195 }
195 } 196 }
196 197
197 if (retransmits_timed_out(sk, retry_until)) { 198 if (retransmits_timed_out(sk, retry_until,
199 (1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV) ? 0 :
200 icsk->icsk_user_timeout)) {
198 /* Has it gone just too far? */ 201 /* Has it gone just too far? */
199 tcp_write_err(sk); 202 tcp_write_err(sk);
200 return 1; 203 return 1;
@@ -436,7 +439,7 @@ out_reset_timer:
436 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 439 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
437 } 440 }
438 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 441 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
439 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) 442 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
440 __sk_dst_reset(sk); 443 __sk_dst_reset(sk);
441 444
442out:; 445out:;
@@ -556,7 +559,14 @@ static void tcp_keepalive_timer (unsigned long data)
556 elapsed = keepalive_time_elapsed(tp); 559 elapsed = keepalive_time_elapsed(tp);
557 560
558 if (elapsed >= keepalive_time_when(tp)) { 561 if (elapsed >= keepalive_time_when(tp)) {
559 if (icsk->icsk_probes_out >= keepalive_probes(tp)) { 562 /* If the TCP_USER_TIMEOUT option is enabled, use that
563 * to determine when to timeout instead.
564 */
565 if ((icsk->icsk_user_timeout != 0 &&
566 elapsed >= icsk->icsk_user_timeout &&
567 icsk->icsk_probes_out > 0) ||
568 (icsk->icsk_user_timeout == 0 &&
569 icsk->icsk_probes_out >= keepalive_probes(tp))) {
560 tcp_send_active_reset(sk, GFP_ATOMIC); 570 tcp_send_active_reset(sk, GFP_ATOMIC);
561 tcp_write_err(sk); 571 tcp_write_err(sk);
562 goto out; 572 goto out;
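With timeout == 0, retransmits_timed_out() still derives its deadline from the retransmission backoff: RTOs double from TCP_RTO_MIN (HZ/5, i.e. 200 ms) until they clamp at TCP_RTO_MAX (120 s), after which each extra retry adds a full TCP_RTO_MAX. A standalone replica of that arithmetic in milliseconds, useful for sanity-checking the defaults:

    #include <stdio.h>

    #define RTO_MIN_MS      200             /* TCP_RTO_MIN */
    #define RTO_MAX_MS      120000          /* TCP_RTO_MAX */

    static unsigned int backoff_timeout_ms(unsigned int boundary)
    {
            unsigned int thresh = 0, ratio = RTO_MAX_MS / RTO_MIN_MS;

            while (ratio >>= 1)             /* ilog2(600) == 9 */
                    thresh++;

            if (boundary <= thresh)
                    return ((2u << boundary) - 1) * RTO_MIN_MS;
            return ((2u << thresh) - 1) * RTO_MIN_MS +
                   (boundary - thresh) * RTO_MAX_MS;
    }

    int main(void)
    {
            printf("retries1=3:  %u ms\n", backoff_timeout_ms(3));   /* 3000 */
            printf("retries2=15: %u ms\n", backoff_timeout_ms(15));  /* 924600 */
            return 0;
    }

So the default tcp_retries2 of 15 corresponds to roughly 15.4 minutes, which is the figure a nonzero TCP_USER_TIMEOUT replaces directly.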
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 59186ca7808a..9a17bd2a0a37 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -14,8 +14,8 @@
14#include <net/protocol.h> 14#include <net/protocol.h>
15#include <net/xfrm.h> 15#include <net/xfrm.h>
16 16
17static struct xfrm_tunnel *tunnel4_handlers; 17static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
18static struct xfrm_tunnel *tunnel64_handlers; 18static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
19static DEFINE_MUTEX(tunnel4_mutex); 19static DEFINE_MUTEX(tunnel4_mutex);
20 20
21static inline struct xfrm_tunnel **fam_handlers(unsigned short family) 21static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
@@ -39,7 +39,7 @@ int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
39 } 39 }
40 40
41 handler->next = *pprev; 41 handler->next = *pprev;
42 *pprev = handler; 42 rcu_assign_pointer(*pprev, handler);
43 43
44 ret = 0; 44 ret = 0;
45 45
@@ -73,6 +73,11 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
73} 73}
74EXPORT_SYMBOL(xfrm4_tunnel_deregister); 74EXPORT_SYMBOL(xfrm4_tunnel_deregister);
75 75
76#define for_each_tunnel_rcu(head, handler) \
77 for (handler = rcu_dereference(head); \
78 handler != NULL; \
79 handler = rcu_dereference(handler->next)) \
80
76static int tunnel4_rcv(struct sk_buff *skb) 81static int tunnel4_rcv(struct sk_buff *skb)
77{ 82{
78 struct xfrm_tunnel *handler; 83 struct xfrm_tunnel *handler;
@@ -80,7 +85,7 @@ static int tunnel4_rcv(struct sk_buff *skb)
80 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 85 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
81 goto drop; 86 goto drop;
82 87
83 for (handler = tunnel4_handlers; handler; handler = handler->next) 88 for_each_tunnel_rcu(tunnel4_handlers, handler)
84 if (!handler->handler(skb)) 89 if (!handler->handler(skb))
85 return 0; 90 return 0;
86 91
@@ -99,7 +104,7 @@ static int tunnel64_rcv(struct sk_buff *skb)
99 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 104 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
100 goto drop; 105 goto drop;
101 106
102 for (handler = tunnel64_handlers; handler; handler = handler->next) 107 for_each_tunnel_rcu(tunnel64_handlers, handler)
103 if (!handler->handler(skb)) 108 if (!handler->handler(skb))
104 return 0; 109 return 0;
105 110
@@ -115,7 +120,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
115{ 120{
116 struct xfrm_tunnel *handler; 121 struct xfrm_tunnel *handler;
117 122
118 for (handler = tunnel4_handlers; handler; handler = handler->next) 123 for_each_tunnel_rcu(tunnel4_handlers, handler)
119 if (!handler->err_handler(skb, info)) 124 if (!handler->err_handler(skb, info))
120 break; 125 break;
121} 126}
@@ -125,7 +130,7 @@ static void tunnel64_err(struct sk_buff *skb, u32 info)
125{ 130{
126 struct xfrm_tunnel *handler; 131 struct xfrm_tunnel *handler;
127 132
128 for (handler = tunnel64_handlers; handler; handler = handler->next) 133 for_each_tunnel_rcu(tunnel64_handlers, handler)
129 if (!handler->err_handler(skb, info)) 134 if (!handler->err_handler(skb, info))
130 break; 135 break;
131} 136}
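Only the reader side changes here: the chains were already mutated solely under tunnel4_mutex. Writers now publish with rcu_assign_pointer() (a store plus the needed memory barrier) and for_each_tunnel_rcu() walks the list through rcu_dereference() inside the RCU read-side section the inet receive path already holds, so no lock or atomic is taken per packet. The pattern in miniature, with hypothetical names:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/skbuff.h>

    struct handler {
            int (*fn)(struct sk_buff *skb);
            struct handler *next;
    };

    static struct handler *chain;
    static DEFINE_MUTEX(chain_mutex);

    static void chain_add(struct handler *h)        /* writer */
    {
            mutex_lock(&chain_mutex);
            h->next = chain;                /* chain only mutates under the mutex */
            rcu_assign_pointer(chain, h);   /* publish after h is fully set up */
            mutex_unlock(&chain_mutex);
    }

    static int chain_rcv(struct sk_buff *skb)       /* caller holds rcu_read_lock() */
    {
            struct handler *h;

            for (h = rcu_dereference(chain); h; h = rcu_dereference(h->next))
                    if (!h->fn(skb))
                            return 0;
            return -1;
    }

The same conversion is applied verbatim to tunnel6.c further down.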
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fb23c2e63b52..b3f7e8cf18ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -797,7 +797,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
797 return -EOPNOTSUPP; 797 return -EOPNOTSUPP;
798 798
799 ipc.opt = NULL; 799 ipc.opt = NULL;
800 ipc.shtx.flags = 0; 800 ipc.tx_flags = 0;
801 801
802 if (up->pending) { 802 if (up->pending) {
803 /* 803 /*
@@ -845,7 +845,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
845 ipc.addr = inet->inet_saddr; 845 ipc.addr = inet->inet_saddr;
846 846
847 ipc.oif = sk->sk_bound_dev_if; 847 ipc.oif = sk->sk_bound_dev_if;
848 err = sock_tx_timestamp(msg, sk, &ipc.shtx); 848 err = sock_tx_timestamp(sk, &ipc.tx_flags);
849 if (err) 849 if (err)
850 return err; 850 return err;
851 if (msg->msg_controllen) { 851 if (msg->msg_controllen) {
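All of the ipc.shtx -> ipc.tx_flags conversions in this series track a change of representation: the per-packet timestamp request is now a flags byte copied into skb_shinfo(skb)->tx_flags, and sock_tx_timestamp() derives it from socket state configured via SO_TIMESTAMPING. The userspace side that causes those bits to be set, as a sketch using the existing timestamping API:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/net_tstamp.h>   /* SOF_TIMESTAMPING_* */

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int flags = SOF_TIMESTAMPING_TX_SOFTWARE |  /* stamp on transmit */
                        SOF_TIMESTAMPING_SOFTWARE;      /* report via cmsg */

            if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                           &flags, sizeof(flags)) < 0)
                    perror("setsockopt(SO_TIMESTAMPING)");
            return 0;
    }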
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 41f5982d2087..82806455e859 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -58,14 +58,14 @@ static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
58 return -ENOENT; 58 return -ENOENT;
59} 59}
60 60
61static struct xfrm_tunnel xfrm_tunnel_handler = { 61static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
62 .handler = xfrm_tunnel_rcv, 62 .handler = xfrm_tunnel_rcv,
63 .err_handler = xfrm_tunnel_err, 63 .err_handler = xfrm_tunnel_err,
64 .priority = 2, 64 .priority = 2,
65}; 65};
66 66
67#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 67#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
68static struct xfrm_tunnel xfrm64_tunnel_handler = { 68static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
69 .handler = xfrm_tunnel_rcv, 69 .handler = xfrm_tunnel_rcv,
70 .err_handler = xfrm_tunnel_err, 70 .err_handler = xfrm_tunnel_err,
71 .priority = 2, 71 .priority = 2,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab70a3fbcafa..5bc893e28008 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2964,7 +2964,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2964 start sending router solicitations. 2964 start sending router solicitations.
2965 */ 2965 */
2966 2966
2967 if (ifp->idev->cnf.forwarding == 0 && 2967 if ((ifp->idev->cnf.forwarding == 0 ||
2968 ifp->idev->cnf.forwarding == 2) &&
2968 ifp->idev->cnf.rtr_solicits > 0 && 2969 ifp->idev->cnf.rtr_solicits > 0 &&
2969 (dev->flags&IFF_LOOPBACK) == 0 && 2970 (dev->flags&IFF_LOOPBACK) == 0 &&
2970 (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) { 2971 (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d40b330c0ee6..1838927a2243 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -637,7 +637,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
637 } 637 }
638 mtu -= hlen + sizeof(struct frag_hdr); 638 mtu -= hlen + sizeof(struct frag_hdr);
639 639
640 if (skb_has_frags(skb)) { 640 if (skb_has_frag_list(skb)) {
641 int first_len = skb_pagelen(skb); 641 int first_len = skb_pagelen(skb);
642 int truesizes = 0; 642 int truesizes = 0;
643 643
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0fd027f3f47e..29f99dd75bc6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1372,13 +1372,13 @@ static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1372 ip6n->tnls_wc[0] = t; 1372 ip6n->tnls_wc[0] = t;
1373} 1373}
1374 1374
1375static struct xfrm6_tunnel ip4ip6_handler = { 1375static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
1376 .handler = ip4ip6_rcv, 1376 .handler = ip4ip6_rcv,
1377 .err_handler = ip4ip6_err, 1377 .err_handler = ip4ip6_err,
1378 .priority = 1, 1378 .priority = 1,
1379}; 1379};
1380 1380
1381static struct xfrm6_tunnel ip6ip6_handler = { 1381static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
1382 .handler = ip6ip6_rcv, 1382 .handler = ip6ip6_rcv,
1383 .err_handler = ip6ip6_err, 1383 .err_handler = ip6ip6_err,
1384 .priority = 1, 1384 .priority = 1,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 58841c4ae947..69a0051cea67 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1105,6 +1105,18 @@ errout:
1105 rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err); 1105 rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err);
1106} 1106}
1107 1107
1108static inline int accept_ra(struct inet6_dev *in6_dev)
1109{
1110 /*
1111 * If forwarding is enabled, RA are not accepted unless the special
1112 * hybrid mode (accept_ra=2) is enabled.
1113 */
1114 if (in6_dev->cnf.forwarding && in6_dev->cnf.accept_ra < 2)
1115 return 0;
1116
1117 return in6_dev->cnf.accept_ra;
1118}
1119
1108static void ndisc_router_discovery(struct sk_buff *skb) 1120static void ndisc_router_discovery(struct sk_buff *skb)
1109{ 1121{
1110 struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb); 1122 struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
@@ -1158,8 +1170,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1158 return; 1170 return;
1159 } 1171 }
1160 1172
1161 /* skip route and link configuration on routers */ 1173 if (!accept_ra(in6_dev))
1162 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
1163 goto skip_linkparms; 1174 goto skip_linkparms;
1164 1175
1165#ifdef CONFIG_IPV6_NDISC_NODETYPE 1176#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1309,8 +1320,7 @@ skip_linkparms:
1309 NEIGH_UPDATE_F_ISROUTER); 1320 NEIGH_UPDATE_F_ISROUTER);
1310 } 1321 }
1311 1322
1312 /* skip route and link configuration on routers */ 1323 if (!accept_ra(in6_dev))
1313 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
1314 goto out; 1324 goto out;
1315 1325
1316#ifdef CONFIG_IPV6_ROUTE_INFO 1326#ifdef CONFIG_IPV6_ROUTE_INFO
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 578f3c1a16db..138a8b362706 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -363,7 +363,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
363 /* If the first fragment is fragmented itself, we split 363 /* If the first fragment is fragmented itself, we split
364 * it to two chunks: the first with data and paged part 364 * it to two chunks: the first with data and paged part
365 * and the second, holding only fragments. */ 365 * and the second, holding only fragments. */
366 if (skb_has_frags(head)) { 366 if (skb_has_frag_list(head)) {
367 struct sk_buff *clone; 367 struct sk_buff *clone;
368 int i, plen = 0; 368 int i, plen = 0;
369 369
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 1fa3468f0f32..9bb936ae2452 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,28 +25,14 @@
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <net/protocol.h> 26#include <net/protocol.h>
27 27
28const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS]; 28const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
29static DEFINE_SPINLOCK(inet6_proto_lock);
30
31 29
32int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) 30int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
33{ 31{
34 int ret, hash = protocol & (MAX_INET_PROTOS - 1); 32 int hash = protocol & (MAX_INET_PROTOS - 1);
35
36 spin_lock_bh(&inet6_proto_lock);
37
38 if (inet6_protos[hash]) {
39 ret = -1;
40 } else {
41 inet6_protos[hash] = prot;
42 ret = 0;
43 }
44
45 spin_unlock_bh(&inet6_proto_lock);
46 33
47 return ret; 34 return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
48} 35}
49
50EXPORT_SYMBOL(inet6_add_protocol); 36EXPORT_SYMBOL(inet6_add_protocol);
51 37
52/* 38/*
@@ -57,20 +43,10 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
57{ 43{
58 int ret, hash = protocol & (MAX_INET_PROTOS - 1); 44 int ret, hash = protocol & (MAX_INET_PROTOS - 1);
59 45
60 spin_lock_bh(&inet6_proto_lock); 46 ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
61
62 if (inet6_protos[hash] != prot) {
63 ret = -1;
64 } else {
65 inet6_protos[hash] = NULL;
66 ret = 0;
67 }
68
69 spin_unlock_bh(&inet6_proto_lock);
70 47
71 synchronize_net(); 48 synchronize_net();
72 49
73 return ret; 50 return ret;
74} 51}
75
76EXPORT_SYMBOL(inet6_del_protocol); 52EXPORT_SYMBOL(inet6_del_protocol);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 64cfef1b0a4c..c7ba3149633f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -458,7 +458,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
458 /* If the first fragment is fragmented itself, we split 458 /* If the first fragment is fragmented itself, we split
459 * it to two chunks: the first with data and paged part 459 * it to two chunks: the first with data and paged part
460 * and the second, holding only fragments. */ 460 * and the second, holding only fragments. */
461 if (skb_has_frags(head)) { 461 if (skb_has_frag_list(head)) {
462 struct sk_buff *clone; 462 struct sk_buff *clone;
463 int i, plen = 0; 463 int i, plen = 0;
464 464
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 4699cd3c3118..86618eb30335 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1132,7 +1132,7 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1132 sitn->tunnels_wc[0] = tunnel; 1132 sitn->tunnels_wc[0] = tunnel;
1133} 1133}
1134 1134
1135static struct xfrm_tunnel sit_handler = { 1135static struct xfrm_tunnel sit_handler __read_mostly = {
1136 .handler = ipip6_rcv, 1136 .handler = ipip6_rcv,
1137 .err_handler = ipip6_err, 1137 .err_handler = ipip6_err,
1138 .priority = 1, 1138 .priority = 1,
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index fc3c86a47452..d9864725d0c6 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -30,8 +30,8 @@
30#include <net/protocol.h> 30#include <net/protocol.h>
31#include <net/xfrm.h> 31#include <net/xfrm.h>
32 32
33static struct xfrm6_tunnel *tunnel6_handlers; 33static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
34static struct xfrm6_tunnel *tunnel46_handlers; 34static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
35static DEFINE_MUTEX(tunnel6_mutex); 35static DEFINE_MUTEX(tunnel6_mutex);
36 36
37int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family) 37int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
@@ -51,7 +51,7 @@ int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
51 } 51 }
52 52
53 handler->next = *pprev; 53 handler->next = *pprev;
54 *pprev = handler; 54 rcu_assign_pointer(*pprev, handler);
55 55
56 ret = 0; 56 ret = 0;
57 57
@@ -88,6 +88,11 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
88 88
89EXPORT_SYMBOL(xfrm6_tunnel_deregister); 89EXPORT_SYMBOL(xfrm6_tunnel_deregister);
90 90
91#define for_each_tunnel_rcu(head, handler) \
92 for (handler = rcu_dereference(head); \
93 handler != NULL; \
94 handler = rcu_dereference(handler->next)) \
95
91static int tunnel6_rcv(struct sk_buff *skb) 96static int tunnel6_rcv(struct sk_buff *skb)
92{ 97{
93 struct xfrm6_tunnel *handler; 98 struct xfrm6_tunnel *handler;
@@ -95,7 +100,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
95 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 100 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
96 goto drop; 101 goto drop;
97 102
98 for (handler = tunnel6_handlers; handler; handler = handler->next) 103 for_each_tunnel_rcu(tunnel6_handlers, handler)
99 if (!handler->handler(skb)) 104 if (!handler->handler(skb))
100 return 0; 105 return 0;
101 106
@@ -113,7 +118,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
113 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 118 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
114 goto drop; 119 goto drop;
115 120
116 for (handler = tunnel46_handlers; handler; handler = handler->next) 121 for_each_tunnel_rcu(tunnel46_handlers, handler)
117 if (!handler->handler(skb)) 122 if (!handler->handler(skb))
118 return 0; 123 return 0;
119 124
@@ -129,7 +134,7 @@ static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
129{ 134{
130 struct xfrm6_tunnel *handler; 135 struct xfrm6_tunnel *handler;
131 136
132 for (handler = tunnel6_handlers; handler; handler = handler->next) 137 for_each_tunnel_rcu(tunnel6_handlers, handler)
133 if (!handler->err_handler(skb, opt, type, code, offset, info)) 138 if (!handler->err_handler(skb, opt, type, code, offset, info))
134 break; 139 break;
135} 140}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2ce3a8278f26..ac7584b946a5 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -317,13 +317,13 @@ static const struct xfrm_type xfrm6_tunnel_type = {
317 .output = xfrm6_tunnel_output, 317 .output = xfrm6_tunnel_output,
318}; 318};
319 319
320static struct xfrm6_tunnel xfrm6_tunnel_handler = { 320static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
321 .handler = xfrm6_tunnel_rcv, 321 .handler = xfrm6_tunnel_rcv,
322 .err_handler = xfrm6_tunnel_err, 322 .err_handler = xfrm6_tunnel_err,
323 .priority = 2, 323 .priority = 2,
324}; 324};
325 325
326static struct xfrm6_tunnel xfrm46_tunnel_handler = { 326static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
327 .handler = xfrm6_tunnel_rcv, 327 .handler = xfrm6_tunnel_rcv,
328 .err_handler = xfrm6_tunnel_err, 328 .err_handler = xfrm6_tunnel_err,
329 .priority = 2, 329 .priority = 2,
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 5bb8353105cc..8ee1ff6c742f 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -45,13 +45,11 @@ static int irlan_eth_close(struct net_device *dev);
45static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, 45static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
46 struct net_device *dev); 46 struct net_device *dev);
47static void irlan_eth_set_multicast_list( struct net_device *dev); 47static void irlan_eth_set_multicast_list( struct net_device *dev);
48static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);
49 48
50static const struct net_device_ops irlan_eth_netdev_ops = { 49static const struct net_device_ops irlan_eth_netdev_ops = {
51 .ndo_open = irlan_eth_open, 50 .ndo_open = irlan_eth_open,
52 .ndo_stop = irlan_eth_close, 51 .ndo_stop = irlan_eth_close,
53 .ndo_start_xmit = irlan_eth_xmit, 52 .ndo_start_xmit = irlan_eth_xmit,
54 .ndo_get_stats = irlan_eth_get_stats,
55 .ndo_set_multicast_list = irlan_eth_set_multicast_list, 53 .ndo_set_multicast_list = irlan_eth_set_multicast_list,
56 .ndo_change_mtu = eth_change_mtu, 54 .ndo_change_mtu = eth_change_mtu,
57 .ndo_validate_addr = eth_validate_addr, 55 .ndo_validate_addr = eth_validate_addr,
@@ -208,10 +206,10 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
208 * tried :-) DB 206 * tried :-) DB
209 */ 207 */
210 /* irttp_data_request already free the packet */ 208 /* irttp_data_request already free the packet */
211 self->stats.tx_dropped++; 209 dev->stats.tx_dropped++;
212 } else { 210 } else {
213 self->stats.tx_packets++; 211 dev->stats.tx_packets++;
214 self->stats.tx_bytes += len; 212 dev->stats.tx_bytes += len;
215 } 213 }
216 214
217 return NETDEV_TX_OK; 215 return NETDEV_TX_OK;
@@ -226,15 +224,16 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
226int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb) 224int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
227{ 225{
228 struct irlan_cb *self = instance; 226 struct irlan_cb *self = instance;
227 struct net_device *dev = self->dev;
229 228
230 if (skb == NULL) { 229 if (skb == NULL) {
231 ++self->stats.rx_dropped; 230 dev->stats.rx_dropped++;
232 return 0; 231 return 0;
233 } 232 }
234 if (skb->len < ETH_HLEN) { 233 if (skb->len < ETH_HLEN) {
235 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", 234 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
236 __func__, skb->len); 235 __func__, skb->len);
237 ++self->stats.rx_dropped; 236 dev->stats.rx_dropped++;
238 dev_kfree_skb(skb); 237 dev_kfree_skb(skb);
239 return 0; 238 return 0;
240 } 239 }
@@ -244,10 +243,10 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
244 * might have been previously set by the low level IrDA network 243 * might have been previously set by the low level IrDA network
245 * device driver 244 * device driver
246 */ 245 */
247 skb->protocol = eth_type_trans(skb, self->dev); /* Remove eth header */ 246 skb->protocol = eth_type_trans(skb, dev); /* Remove eth header */
248 247
249 self->stats.rx_packets++; 248 dev->stats.rx_packets++;
250 self->stats.rx_bytes += skb->len; 249 dev->stats.rx_bytes += skb->len;
251 250
252 netif_rx(skb); /* Eat it! */ 251 netif_rx(skb); /* Eat it! */
253 252
@@ -348,16 +347,3 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
348 else 347 else
349 irlan_set_broadcast_filter(self, FALSE); 348 irlan_set_broadcast_filter(self, FALSE);
350} 349}
351
352/*
353 * Function irlan_get_stats (dev)
354 *
355 * Get the current statistics for this device
356 *
357 */
358static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev)
359{
360 struct irlan_cb *self = netdev_priv(dev);
361
362 return &self->stats;
363}
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 1ae697681bc7..8d9ce0accc98 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -144,7 +144,6 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
144 nf_reset(skb); 144 nf_reset(skb);
145 145
146 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { 146 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
147 dev->last_rx = jiffies;
148 dev->stats.rx_packets++; 147 dev->stats.rx_packets++;
149 dev->stats.rx_bytes += data_len; 148 dev->stats.rx_bytes += data_len;
150 } else 149 } else
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index a87cb3ba2df6..d2b03e0851ef 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -138,10 +138,8 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
138 struct crypto_cipher *tfm; 138 struct crypto_cipher *tfm;
139 139
140 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 140 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
141 if (IS_ERR(tfm)) 141 if (!IS_ERR(tfm))
142 return NULL; 142 crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN);
143
144 crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN);
145 143
146 return tfm; 144 return tfm;
147} 145}
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 3d097b3d7b62..b4d66cca76d6 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -119,10 +119,8 @@ struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
119 struct crypto_cipher *tfm; 119 struct crypto_cipher *tfm;
120 120
121 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 121 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
122 if (IS_ERR(tfm)) 122 if (!IS_ERR(tfm))
123 return NULL; 123 crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
124
125 crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
126 124
127 return tfm; 125 return tfm;
128} 126}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 965b272499fd..58eab9e8e4ee 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -86,6 +86,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
86 tid, 0, reason); 86 tid, 0, reason);
87 87
88 del_timer_sync(&tid_rx->session_timer); 88 del_timer_sync(&tid_rx->session_timer);
89 del_timer_sync(&tid_rx->reorder_timer);
89 90
90 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); 91 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
91} 92}
@@ -120,6 +121,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
120 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); 121 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
121} 122}
122 123
124static void sta_rx_agg_reorder_timer_expired(unsigned long data)
125{
126 u8 *ptid = (u8 *)data;
127 u8 *timer_to_id = ptid - *ptid;
128 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
129 timer_to_tid[0]);
130
131 rcu_read_lock();
132 spin_lock(&sta->lock);
133 ieee80211_release_reorder_timeout(sta, *ptid);
134 spin_unlock(&sta->lock);
135 rcu_read_unlock();
136}
137
123static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, 138static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
124 u8 dialog_token, u16 status, u16 policy, 139 u8 dialog_token, u16 status, u16 policy,
125 u16 buf_size, u16 timeout) 140 u16 buf_size, u16 timeout)
@@ -251,11 +266,18 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
251 goto end; 266 goto end;
252 } 267 }
253 268
269 spin_lock_init(&tid_agg_rx->reorder_lock);
270
254 /* rx timer */ 271 /* rx timer */
255 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; 272 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
256 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; 273 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
257 init_timer(&tid_agg_rx->session_timer); 274 init_timer(&tid_agg_rx->session_timer);
258 275
276 /* rx reorder timer */
277 tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
278 tid_agg_rx->reorder_timer.data = (unsigned long)&sta->timer_to_tid[tid];
279 init_timer(&tid_agg_rx->reorder_timer);
280
259 /* prepare reordering buffer */ 281 /* prepare reordering buffer */
260 tid_agg_rx->reorder_buf = 282 tid_agg_rx->reorder_buf =
261 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC); 283 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC);
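Both timers receive &sta->timer_to_tid[tid] as their data, and their expiry handlers recover the sta_info with the ptid - *ptid trick: timer_to_tid[] is initialized so that timer_to_tid[i] == i, so subtracting the stored value from its own address yields &timer_to_tid[0], which container_of() maps back to the enclosing sta_info. A userspace demonstration of the arithmetic (the struct layout here is hypothetical):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sta_info {
            int id;
            unsigned char timer_to_tid[16];  /* invariant: timer_to_tid[i] == i */
    };

    int main(void)
    {
            struct sta_info sta = { .id = 42 };
            unsigned char *ptid, *base;
            int i;

            for (i = 0; i < 16; i++)
                    sta.timer_to_tid[i] = i;

            ptid = &sta.timer_to_tid[7];    /* what one timer's data points at */
            base = ptid - *ptid;            /* == &sta.timer_to_tid[0] */
            printf("id = %d\n",
                   container_of(base, struct sta_info, timer_to_tid[0])->id);
            return 0;
    }

It saves a per-tid back-pointer at the cost of one byte of state per timer.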
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 29ac8e1a509e..5de1ca3f17b9 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -19,33 +19,6 @@
19#include "rate.h" 19#include "rate.h"
20#include "mesh.h" 20#include "mesh.h"
21 21
22static bool nl80211_type_check(enum nl80211_iftype type)
23{
24 switch (type) {
25 case NL80211_IFTYPE_ADHOC:
26 case NL80211_IFTYPE_STATION:
27 case NL80211_IFTYPE_MONITOR:
28#ifdef CONFIG_MAC80211_MESH
29 case NL80211_IFTYPE_MESH_POINT:
30#endif
31 case NL80211_IFTYPE_AP:
32 case NL80211_IFTYPE_AP_VLAN:
33 case NL80211_IFTYPE_WDS:
34 return true;
35 default:
36 return false;
37 }
38}
39
40static bool nl80211_params_check(enum nl80211_iftype type,
41 struct vif_params *params)
42{
43 if (!nl80211_type_check(type))
44 return false;
-
-	return true;
-}
-
 static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
 			       enum nl80211_iftype type, u32 *flags,
 			       struct vif_params *params)
@@ -55,9 +28,6 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
 	struct ieee80211_sub_if_data *sdata;
 	int err;
 
-	if (!nl80211_params_check(type, params))
-		return -EINVAL;
-
 	err = ieee80211_if_add(local, name, &dev, type, params);
 	if (err || type != NL80211_IFTYPE_MONITOR || !flags)
 		return err;
@@ -82,12 +52,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	int ret;
 
-	if (ieee80211_sdata_running(sdata))
-		return -EBUSY;
-
-	if (!nl80211_params_check(type, params))
-		return -EINVAL;
-
 	ret = ieee80211_if_change_type(sdata, type);
 	if (ret)
 		return ret;
@@ -114,44 +78,30 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 			     u8 key_idx, const u8 *mac_addr,
 			     struct key_params *params)
 {
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct sta_info *sta = NULL;
-	enum ieee80211_key_alg alg;
 	struct ieee80211_key *key;
 	int err;
 
-	if (!netif_running(dev))
+	if (!ieee80211_sdata_running(sdata))
 		return -ENETDOWN;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
+	/* reject WEP and TKIP keys if WEP failed to initialize */
 	switch (params->cipher) {
 	case WLAN_CIPHER_SUITE_WEP40:
-	case WLAN_CIPHER_SUITE_WEP104:
-		alg = ALG_WEP;
-		break;
 	case WLAN_CIPHER_SUITE_TKIP:
-		alg = ALG_TKIP;
-		break;
-	case WLAN_CIPHER_SUITE_CCMP:
-		alg = ALG_CCMP;
-		break;
-	case WLAN_CIPHER_SUITE_AES_CMAC:
-		alg = ALG_AES_CMAC;
+	case WLAN_CIPHER_SUITE_WEP104:
+		if (IS_ERR(sdata->local->wep_tx_tfm))
+			return -EINVAL;
 		break;
 	default:
-		return -EINVAL;
+		break;
 	}
 
-	/* reject WEP and TKIP keys if WEP failed to initialize */
-	if ((alg == ALG_WEP || alg == ALG_TKIP) &&
-	    IS_ERR(sdata->local->wep_tx_tfm))
-		return -EINVAL;
-
-	key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key,
-				  params->seq_len, params->seq);
-	if (!key)
-		return -ENOMEM;
+	key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len,
+				  params->key, params->seq_len, params->seq);
+	if (IS_ERR(key))
+		return PTR_ERR(key);
 
 	mutex_lock(&sdata->local->sta_mtx);
 
@@ -164,9 +114,10 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 		}
 	}
 
-	ieee80211_key_link(key, sdata, sta);
+	err = ieee80211_key_link(key, sdata, sta);
+	if (err)
+		ieee80211_key_free(sdata->local, key);
 
-	err = 0;
  out_unlock:
 	mutex_unlock(&sdata->local->sta_mtx);
 
@@ -247,10 +198,10 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
 
 	memset(&params, 0, sizeof(params));
 
-	switch (key->conf.alg) {
-	case ALG_TKIP:
-		params.cipher = WLAN_CIPHER_SUITE_TKIP;
+	params.cipher = key->conf.cipher;
 
+	switch (key->conf.cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
 		iv32 = key->u.tkip.tx.iv32;
 		iv16 = key->u.tkip.tx.iv16;
 
@@ -268,8 +219,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
 		params.seq = seq;
 		params.seq_len = 6;
 		break;
-	case ALG_CCMP:
-		params.cipher = WLAN_CIPHER_SUITE_CCMP;
+	case WLAN_CIPHER_SUITE_CCMP:
 		seq[0] = key->u.ccmp.tx_pn[5];
 		seq[1] = key->u.ccmp.tx_pn[4];
 		seq[2] = key->u.ccmp.tx_pn[3];
@@ -279,14 +229,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
 		params.seq = seq;
 		params.seq_len = 6;
 		break;
-	case ALG_WEP:
-		if (key->conf.keylen == 5)
-			params.cipher = WLAN_CIPHER_SUITE_WEP40;
-		else
-			params.cipher = WLAN_CIPHER_SUITE_WEP104;
-		break;
-	case ALG_AES_CMAC:
-		params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
 		seq[0] = key->u.aes_cmac.tx_pn[5];
 		seq[1] = key->u.aes_cmac.tx_pn[4];
 		seq[2] = key->u.aes_cmac.tx_pn[3];
@@ -1143,9 +1086,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
 	p.uapsd = false;
 
 	if (drv_conf_tx(local, params->queue, &p)) {
-		printk(KERN_DEBUG "%s: failed to set TX queue "
-		       "parameters for queue %d\n",
-		       wiphy_name(local->hw.wiphy), params->queue);
+		wiphy_debug(local->hw.wiphy,
+			    "failed to set TX queue parameters for queue %d\n",
+			    params->queue);
 		return -EINVAL;
 	}
 
@@ -1541,11 +1484,11 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
 	return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
 }
 
-static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
+static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 			    struct ieee80211_channel *chan,
 			    enum nl80211_channel_type channel_type,
 			    bool channel_type_valid,
 			    const u8 *buf, size_t len, u64 *cookie)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
@@ -1575,8 +1518,6 @@ static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
 		return -ENOLINK;
 		break;
 	case NL80211_IFTYPE_STATION:
-		if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
-			flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -1647,6 +1588,6 @@ struct cfg80211_ops mac80211_config_ops = {
 	.set_bitrate_mask = ieee80211_set_bitrate_mask,
 	.remain_on_channel = ieee80211_remain_on_channel,
 	.cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
-	.action = ieee80211_action,
+	.mgmt_tx = ieee80211_mgmt_tx,
 	.set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
 };
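The add_key path above moves from the NULL-on-failure convention to the kernel's ERR_PTR convention: ieee80211_key_alloc() now encodes an errno value in the returned pointer and the caller recovers it with IS_ERR()/PTR_ERR(). Below is a minimal userspace sketch of that pattern; the lowercase err_ptr/is_err/ptr_err helpers are stand-ins I wrote for the kernel's <linux/err.h> macros, not the real implementation.

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <stdint.h>

    /* Stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() macros:
     * small negative errno values map into the top page of the address
     * space, so a pointer in that range can carry an error code. */
    #define MAX_ERRNO 4095

    static inline void *err_ptr(long error) { return (void *)error; }
    static inline int is_err(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }
    static inline long ptr_err(const void *ptr) { return (long)ptr; }

    struct key { int cipher; };

    /* allocation that reports *why* it failed, not just that it failed */
    static struct key *key_alloc(int cipher)
    {
        struct key *k;

        if (cipher < 0)
            return err_ptr(-EINVAL);  /* bad argument */
        k = malloc(sizeof(*k));
        if (!k)
            return err_ptr(-ENOMEM); /* allocation failure */
        k->cipher = cipher;
        return k;
    }

    int main(void)
    {
        struct key *k = key_alloc(-1);

        if (is_err(k)) {
            printf("key_alloc failed: %ld\n", ptr_err(k));
            return 1;
        }
        free(k);
        return 0;
    }

This is why the caller above can do "return PTR_ERR(key);" and forward the precise error (-ENOMEM vs. -EINVAL) to userspace instead of a blanket -ENOMEM.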
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index a694c593ff6a..e81ef4e8cb32 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -85,13 +85,15 @@ static ssize_t tsf_write(struct file *file,
 	if (strncmp(buf, "reset", 5) == 0) {
 		if (local->ops->reset_tsf) {
 			drv_reset_tsf(local);
-			printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy));
+			wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
 		}
 	} else {
 		tsf = simple_strtoul(buf, NULL, 0);
 		if (local->ops->set_tsf) {
 			drv_set_tsf(local, tsf);
-			printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf);
+			wiphy_info(local->hw.wiphy,
+				   "debugfs set TSF to %#018llx\n", tsf);
+
 		}
 	}
 
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index fa5e76e658ef..1647f8dc5cda 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -64,26 +64,13 @@ static ssize_t key_algorithm_read(struct file *file,
 				  char __user *userbuf,
 				  size_t count, loff_t *ppos)
 {
-	char *alg;
+	char buf[15];
 	struct ieee80211_key *key = file->private_data;
+	u32 c = key->conf.cipher;
 
-	switch (key->conf.alg) {
-	case ALG_WEP:
-		alg = "WEP\n";
-		break;
-	case ALG_TKIP:
-		alg = "TKIP\n";
-		break;
-	case ALG_CCMP:
-		alg = "CCMP\n";
-		break;
-	case ALG_AES_CMAC:
-		alg = "AES-128-CMAC\n";
-		break;
-	default:
-		return 0;
-	}
-	return simple_read_from_buffer(userbuf, count, ppos, alg, strlen(alg));
+	sprintf(buf, "%.2x-%.2x-%.2x:%d\n",
+		c >> 24, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff);
+	return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
 }
 KEY_OPS(algorithm);
 
@@ -95,21 +82,22 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
 	int len;
 	struct ieee80211_key *key = file->private_data;
 
-	switch (key->conf.alg) {
-	case ALG_WEP:
+	switch (key->conf.cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
 		len = scnprintf(buf, sizeof(buf), "\n");
 		break;
-	case ALG_TKIP:
+	case WLAN_CIPHER_SUITE_TKIP:
 		len = scnprintf(buf, sizeof(buf), "%08x %04x\n",
 				key->u.tkip.tx.iv32,
 				key->u.tkip.tx.iv16);
 		break;
-	case ALG_CCMP:
+	case WLAN_CIPHER_SUITE_CCMP:
 		tpn = key->u.ccmp.tx_pn;
 		len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
 				tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]);
 		break;
-	case ALG_AES_CMAC:
+	case WLAN_CIPHER_SUITE_AES_CMAC:
 		tpn = key->u.aes_cmac.tx_pn;
 		len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
 				tpn[0], tpn[1], tpn[2], tpn[3], tpn[4],
@@ -130,11 +118,12 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
 	int i, len;
 	const u8 *rpn;
 
-	switch (key->conf.alg) {
-	case ALG_WEP:
+	switch (key->conf.cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
 		len = scnprintf(buf, sizeof(buf), "\n");
 		break;
-	case ALG_TKIP:
+	case WLAN_CIPHER_SUITE_TKIP:
 		for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
 			p += scnprintf(p, sizeof(buf)+buf-p,
 				       "%08x %04x\n",
@@ -142,7 +131,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
 				       key->u.tkip.rx[i].iv16);
 		len = p - buf;
 		break;
-	case ALG_CCMP:
+	case WLAN_CIPHER_SUITE_CCMP:
 		for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) {
 			rpn = key->u.ccmp.rx_pn[i];
 			p += scnprintf(p, sizeof(buf)+buf-p,
@@ -152,7 +141,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
 		}
 		len = p - buf;
 		break;
-	case ALG_AES_CMAC:
+	case WLAN_CIPHER_SUITE_AES_CMAC:
 		rpn = key->u.aes_cmac.rx_pn;
 		p += scnprintf(p, sizeof(buf)+buf-p,
 			       "%02x%02x%02x%02x%02x%02x\n",
@@ -174,11 +163,11 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf,
 	char buf[20];
 	int len;
 
-	switch (key->conf.alg) {
-	case ALG_CCMP:
+	switch (key->conf.cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
 		len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays);
 		break;
-	case ALG_AES_CMAC:
+	case WLAN_CIPHER_SUITE_AES_CMAC:
 		len = scnprintf(buf, sizeof(buf), "%u\n",
 				key->u.aes_cmac.replays);
 		break;
@@ -196,8 +185,8 @@ static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
 	char buf[20];
 	int len;
 
-	switch (key->conf.alg) {
-	case ALG_AES_CMAC:
+	switch (key->conf.cipher) {
+	case WLAN_CIPHER_SUITE_AES_CMAC:
 		len = scnprintf(buf, sizeof(buf), "%u\n",
 				key->u.aes_cmac.icverrors);
 		break;
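The new key_algorithm_read() prints the raw cipher suite selector instead of a hard-coded name. A suite selector is a 32-bit value packing a three-byte OUI and a one-byte suite type, so the "%.2x-%.2x-%.2x:%d" format recovers both parts. A standalone sketch of the same decoding, using CCMP as the example value (00-0F-AC is the OUI the 802.11 standard uses for its cipher suites, and 4 is the CCMP suite type):

    #include <stdio.h>
    #include <stdint.h>

    /* CCMP suite selector: OUI 00-0F-AC, suite type 4 */
    #define WLAN_CIPHER_SUITE_CCMP 0x000FAC04

    int main(void)
    {
        uint32_t c = WLAN_CIPHER_SUITE_CCMP;

        /* same layout the debugfs file uses: OUI bytes, then suite type */
        printf("%.2x-%.2x-%.2x:%d\n",
               (unsigned)(c >> 24), (unsigned)((c >> 16) & 0xff),
               (unsigned)((c >> 8) & 0xff), (int)(c & 0xff));
        return 0;
    }

This prints "00-0f-ac:4", which is what the debugfs "algorithm" file now shows for a CCMP key.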
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 14123dce544b..6064b7b09e01 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -54,6 +54,20 @@ static inline int drv_add_interface(struct ieee80211_local *local,
 	return ret;
 }
 
+static inline int drv_change_interface(struct ieee80211_local *local,
+				       struct ieee80211_sub_if_data *sdata,
+				       enum nl80211_iftype type)
+{
+	int ret;
+
+	might_sleep();
+
+	trace_drv_change_interface(local, sdata, type);
+	ret = local->ops->change_interface(&local->hw, &sdata->vif, type);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
 static inline void drv_remove_interface(struct ieee80211_local *local,
 					struct ieee80211_vif *vif)
 {
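drv_change_interface() follows the same shape as the other driver-ops wrappers in this file: trace the call, invoke the driver callback through the ops table, trace the return value. A hedged, self-contained illustration of that wrapper shape, with plain function pointers and printf standing in for the ops table and the tracepoints:

    #include <stdio.h>

    struct ops {
        int (*change_interface)(int new_type);
    };

    /* toy driver callback */
    static int demo_change_interface(int new_type)
    {
        printf("driver: switching to type %d\n", new_type);
        return 0;
    }

    /* wrapper shape: trace entry, call through the ops table, trace return */
    static int drv_change_interface(const struct ops *ops, int new_type)
    {
        int ret;

        printf("trace: drv_change_interface new type:%d\n", new_type);
        ret = ops->change_interface(new_type);
        printf("trace: drv_return_int %d\n", ret);
        return ret;
    }

    int main(void)
    {
        struct ops ops = { .change_interface = demo_change_interface };

        return drv_change_interface(&ops, 2);
    }

Keeping the tracing in one inline wrapper means every caller gets entry and return events for free, which is the point of the driver-ops.h layer.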
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 5d5d2a974668..f6f3d89e43fa 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -136,6 +136,31 @@ TRACE_EVENT(drv_add_interface,
 	)
 );
 
+TRACE_EVENT(drv_change_interface,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 enum nl80211_iftype type),
+
+	TP_ARGS(local, sdata, type),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		__field(u32, new_type)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		__entry->new_type = type;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT VIF_PR_FMT " new type:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->new_type
+	)
+);
+
 TRACE_EVENT(drv_remove_interface,
 	TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
 
@@ -336,7 +361,7 @@ TRACE_EVENT(drv_set_key,
 		LOCAL_ENTRY
 		VIF_ENTRY
 		STA_ENTRY
-		__field(enum ieee80211_key_alg, alg)
+		__field(u32, cipher)
 		__field(u8, hw_key_idx)
 		__field(u8, flags)
 		__field(s8, keyidx)
@@ -346,7 +371,7 @@ TRACE_EVENT(drv_set_key,
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		STA_ASSIGN;
-		__entry->alg = key->alg;
+		__entry->cipher = key->cipher;
 		__entry->flags = key->flags;
 		__entry->keyidx = key->keyidx;
 		__entry->hw_key_idx = key->hw_key_idx;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 9d101fb33861..11f74f5f7b2f 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -265,3 +265,31 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 
 	return 0;
 }
+
+void ieee80211_request_smps_work(struct work_struct *work)
+{
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data,
+			     u.mgd.request_smps_work);
+
+	mutex_lock(&sdata->u.mgd.mtx);
+	__ieee80211_request_smps(sdata, sdata->u.mgd.driver_smps_mode);
+	mutex_unlock(&sdata->u.mgd.mtx);
+}
+
+void ieee80211_request_smps(struct ieee80211_vif *vif,
+			    enum ieee80211_smps_mode smps_mode)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+		return;
+
+	if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF))
+		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+	ieee80211_queue_work(&sdata->local->hw,
+			     &sdata->u.mgd.request_smps_work);
+}
+/* this might change ... don't want non-open drivers using it */
+EXPORT_SYMBOL_GPL(ieee80211_request_smps);
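ieee80211_request_smps_work() recovers its sdata from the work item with container_of(), the usual way a deferred-work handler finds its enclosing object: the work_struct is embedded in the larger structure, so subtracting the member offset yields the container. A minimal userspace model of that idiom, using an offsetof-based macro that is equivalent in effect to the kernel's:

    #include <stdio.h>
    #include <stddef.h>

    /* simplified container_of(): member pointer -> enclosing struct */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };

    struct iface {
        const char *name;
        struct work smps_work;  /* embedded, like request_smps_work */
    };

    static void work_handler(struct work *w)
    {
        struct iface *sdata = container_of(w, struct iface, smps_work);

        printf("handler runs for interface %s\n", sdata->name);
    }

    int main(void)
    {
        struct iface wlan0 = { .name = "wlan0" };

        /* the workqueue core would pass only the embedded work item */
        work_handler(&wlan0.smps_work);
        return 0;
    }

Because the workqueue core only ever sees the embedded work_struct, this is the standard way to smuggle per-object context into a handler without a separate lookup table.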
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index c691780725a7..1a3aae54f0cf 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -427,8 +427,8 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 		return NULL;
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n",
-	       wiphy_name(local->hw.wiphy), addr, sdata->name);
+	wiphy_debug(local->hw.wiphy, "Adding new IBSS station %pM (dev=%s)\n",
+		    addr, sdata->name);
 #endif
 
 	sta = sta_info_alloc(sdata, addr, gfp);
@@ -920,12 +920,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 	memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
 	sdata->u.ibss.ssid_len = params->ssid_len;
 
+	mutex_unlock(&sdata->u.ibss.mtx);
+
+	mutex_lock(&sdata->local->mtx);
 	ieee80211_recalc_idle(sdata->local);
+	mutex_unlock(&sdata->local->mtx);
 
 	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 
-	mutex_unlock(&sdata->u.ibss.mtx);
-
 	return 0;
 }
 
@@ -980,7 +982,9 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
 	mutex_unlock(&sdata->u.ibss.mtx);
 
+	mutex_lock(&local->mtx);
 	ieee80211_recalc_idle(sdata->local);
+	mutex_unlock(&local->mtx);
 
 	return 0;
 }
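Both ibss_join() and ibss_leave() now drop u.ibss.mtx before taking local->mtx around ieee80211_recalc_idle(), instead of calling it with the first mutex still held. Never holding the two locks at once (or always taking them in one fixed order) is the standard way to rule out ABBA deadlocks between them. A small pthread sketch of the same discipline, assuming nothing about the mac80211 internals beyond that ordering:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t ibss_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t local_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void recalc_idle(void)
    {
        printf("recalc idle state\n");
    }

    static void ibss_join(void)
    {
        pthread_mutex_lock(&ibss_mtx);
        /* ... update the ssid and related state under ibss_mtx ... */
        pthread_mutex_unlock(&ibss_mtx);

        /* take the second lock only after the first is dropped, so no
         * thread ever holds ibss_mtx while waiting on local_mtx */
        pthread_mutex_lock(&local_mtx);
        recalc_idle();
        pthread_mutex_unlock(&local_mtx);
    }

    int main(void)
    {
        ibss_join();
        return 0;
    }

(Compile with -pthread.) The cost is that the two critical sections are no longer atomic as a pair, which is acceptable here because recalc_idle re-derives its state from scratch.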
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 65e0ed6c2975..4e635e2fabdb 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -50,12 +50,6 @@ struct ieee80211_local;
  * increased memory use (about 2 kB of RAM per entry). */
 #define IEEE80211_FRAGMENT_MAX 4
 
-/*
- * Time after which we ignore scan results and no longer report/use
- * them in any way.
- */
-#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
-
 #define TU_TO_EXP_TIME(x)	(jiffies + usecs_to_jiffies((x) * 1024))
 
 #define IEEE80211_DEFAULT_UAPSD_QUEUES \
@@ -170,6 +164,7 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
 #define IEEE80211_RX_RA_MATCH		BIT(1)
 #define IEEE80211_RX_AMSDU		BIT(2)
 #define IEEE80211_RX_FRAGMENTED		BIT(3)
+#define IEEE80211_MALFORMED_ACTION_FRM	BIT(4)
 /* only add flags here that do not change with subframes of an aMPDU */
 
 struct ieee80211_rx_data {
@@ -343,7 +338,10 @@ struct ieee80211_if_managed {
 	unsigned long timers_running; /* used for quiesce/restart */
 	bool powersave; /* powersave requested for this iface */
 	enum ieee80211_smps_mode req_smps, /* requested smps mode */
-			 ap_smps; /* smps mode AP thinks we're in */
+			 ap_smps, /* smps mode AP thinks we're in */
+			 driver_smps_mode; /* smps mode request */
+
+	struct work_struct request_smps_work;
 
 	unsigned int flags;
 
@@ -371,6 +369,13 @@ struct ieee80211_if_managed {
 	int ave_beacon_signal;
 
 	/*
+	 * Number of Beacon frames used in ave_beacon_signal. This can be used
+	 * to avoid generating less reliable cqm events that would be based
+	 * only on couple of received frames.
+	 */
+	unsigned int count_beacon_signal;
+
+	/*
 	 * Last Beacon frame signal strength average (ave_beacon_signal / 16)
 	 * that triggered a cqm event. 0 indicates that no event has been
 	 * generated for the current association.
@@ -474,6 +479,19 @@ enum ieee80211_sub_if_data_flags {
 	IEEE80211_SDATA_DONT_BRIDGE_PACKETS	= BIT(3),
 };
 
+/**
+ * enum ieee80211_sdata_state_bits - virtual interface state bits
+ * @SDATA_STATE_RUNNING: virtual interface is up & running; this
+ *	mirrors netif_running() but is separate for interface type
+ *	change handling while the interface is up
+ * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel
+ *	mode, so queues are stopped
+ */
+enum ieee80211_sdata_state_bits {
+	SDATA_STATE_RUNNING,
+	SDATA_STATE_OFFCHANNEL,
+};
+
 struct ieee80211_sub_if_data {
 	struct list_head list;
 
@@ -487,6 +505,8 @@ struct ieee80211_sub_if_data {
 
 	unsigned int flags;
 
+	unsigned long state;
+
 	int drop_unencrypted;
 
 	char name[IFNAMSIZ];
@@ -497,6 +517,9 @@ struct ieee80211_sub_if_data {
 	 */
 	bool ht_opmode_valid;
 
+	/* to detect idle changes */
+	bool old_idle;
+
 	/* Fragment table for host-based reassembly */
 	struct ieee80211_fragment_entry	fragments[IEEE80211_FRAGMENT_MAX];
 	unsigned int fragment_next;
@@ -508,6 +531,8 @@ struct ieee80211_sub_if_data {
 	struct ieee80211_key *default_mgmt_key;
 
 	u16 sequence_number;
+	__be16 control_port_protocol;
+	bool control_port_no_encrypt;
 
 	struct work_struct work;
 	struct sk_buff_head skb_queue;
@@ -595,11 +620,17 @@ enum queue_stop_reason {
  *	determine if we are on the operating channel or not
  * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
  *	gets only set in conjunction with SCAN_SW_SCANNING
+ * @SCAN_COMPLETED: Set for our scan work function when the driver reported
+ *	that the scan completed.
+ * @SCAN_ABORTED: Set for our scan work function when the driver reported
+ *	a scan complete for an aborted scan.
  */
 enum {
 	SCAN_SW_SCANNING,
 	SCAN_HW_SCANNING,
 	SCAN_OFF_CHANNEL,
+	SCAN_COMPLETED,
+	SCAN_ABORTED,
 };
 
 /**
@@ -634,7 +665,6 @@ struct ieee80211_local {
 	/*
 	 * work stuff, potentially off-channel (in the future)
 	 */
-	struct mutex work_mtx;
 	struct list_head work_list;
 	struct timer_list work_timer;
 	struct work_struct work_work;
@@ -656,6 +686,8 @@ struct ieee80211_local {
 	int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
 	unsigned int filter_flags; /* FIF_* */
 
+	bool wiphy_ciphers_allocated;
+
 	/* protects the aggregated multicast list and filter calls */
 	spinlock_t filter_lock;
 
@@ -746,9 +778,10 @@ struct ieee80211_local {
 	 */
 	struct mutex key_mtx;
 
+	/* mutex for scan and work locking */
+	struct mutex mtx;
 
 	/* Scanning and BSS list */
-	struct mutex scan_mtx;
 	unsigned long scanning;
 	struct cfg80211_ssid scan_ssid;
 	struct cfg80211_scan_request *int_scan_req;
@@ -870,6 +903,11 @@ struct ieee80211_local {
 		struct dentry *keys;
 	} debugfs;
 #endif
+
+	/* dummy netdev for use w/ NAPI */
+	struct net_device napi_dev;
+
+	struct napi_struct napi;
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1071,7 +1109,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
-	return netif_running(sdata->dev);
+	return test_bit(SDATA_STATE_RUNNING, &sdata->state);
 }
 
 /* tx handling */
@@ -1105,6 +1143,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 			       enum ieee80211_smps_mode smps, const u8 *da,
 			       const u8 *bssid);
+void ieee80211_request_smps_work(struct work_struct *work);
 
 void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				     u16 initiator, u16 reason);
@@ -1131,6 +1170,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
 void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
 void ieee80211_ba_session_work(struct work_struct *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
+void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1146,6 +1186,12 @@ int __ieee80211_suspend(struct ieee80211_hw *hw);
 
 static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 {
+	struct ieee80211_local *local = hw_to_local(hw);
+
+	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
+		"%s: resume with hardware scan still in progress\n",
+		wiphy_name(hw->wiphy));
+
 	return ieee80211_reconfig(hw_to_local(hw));
 }
 #else
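ieee80211_sdata_running() now tests a private SDATA_STATE_RUNNING bit in sdata->state rather than netif_running(), so the RUNNING flag can be cleared during an interface type change while the netdev itself stays up. The kernel's set_bit()/clear_bit()/test_bit() operate atomically on an unsigned long bitmap; a sketch of the same idea with C11 atomics follows (the helper bodies here are my stand-ins, not the kernel implementations):

    #include <stdio.h>
    #include <stdatomic.h>

    enum { STATE_RUNNING, STATE_OFFCHANNEL };

    /* userspace stand-ins for the kernel's bitmap helpers */
    static void set_bit(int nr, atomic_ulong *addr)
    {
        atomic_fetch_or(addr, 1UL << nr);
    }
    static void clear_bit(int nr, atomic_ulong *addr)
    {
        atomic_fetch_and(addr, ~(1UL << nr));
    }
    static int test_bit(int nr, atomic_ulong *addr)
    {
        return (atomic_load(addr) >> nr) & 1;
    }

    int main(void)
    {
        atomic_ulong state = 0;

        set_bit(STATE_RUNNING, &state);
        printf("running: %d\n", test_bit(STATE_RUNNING, &state));
        clear_bit(STATE_RUNNING, &state); /* e.g. during a type change */
        printf("running: %d\n", test_bit(STATE_RUNNING, &state));
        return 0;
    }

Decoupling the flag from netif_running() is what lets ieee80211_do_stop()/ieee80211_do_open() cycle the mac80211-level state in iface.c without ever downing the net device.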
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ebbe264e2b0b..c1cc200ac81f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -94,21 +94,14 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
 		 type2 == NL80211_IFTYPE_AP_VLAN));
 }
 
-static int ieee80211_open(struct net_device *dev)
+static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
+					    enum nl80211_iftype iftype)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	struct ieee80211_sub_if_data *nsdata;
 	struct ieee80211_local *local = sdata->local;
-	struct sta_info *sta;
-	u32 changed = 0;
-	int res;
-	u32 hw_reconf_flags = 0;
-	u8 null_addr[ETH_ALEN] = {0};
+	struct ieee80211_sub_if_data *nsdata;
+	struct net_device *dev = sdata->dev;
 
-	/* fail early if user set an invalid address */
-	if (compare_ether_addr(dev->dev_addr, null_addr) &&
-	    !is_valid_ether_addr(dev->dev_addr))
-		return -EADDRNOTAVAIL;
+	ASSERT_RTNL();
 
 	/* we hold the RTNL here so can safely walk the list */
 	list_for_each_entry(nsdata, &local->interfaces, list) {
@@ -125,7 +118,7 @@ static int ieee80211_open(struct net_device *dev)
 		 * belonging to the same hardware. Then, however, we're
 		 * faced with having to adopt two different TSF timers...
 		 */
-		if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+		if (iftype == NL80211_IFTYPE_ADHOC &&
 		    nsdata->vif.type == NL80211_IFTYPE_ADHOC)
 			return -EBUSY;
 
@@ -139,19 +132,36 @@ static int ieee80211_open(struct net_device *dev)
 			/*
 			 * check whether it may have the same address
 			 */
-			if (!identical_mac_addr_allowed(sdata->vif.type,
+			if (!identical_mac_addr_allowed(iftype,
 							nsdata->vif.type))
 				return -ENOTUNIQ;
 
 			/*
 			 * can only add VLANs to enabled APs
 			 */
-			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+			if (iftype == NL80211_IFTYPE_AP_VLAN &&
 			    nsdata->vif.type == NL80211_IFTYPE_AP)
 				sdata->bss = &nsdata->u.ap;
 		}
 	}
 
+	return 0;
+}
+
+/*
+ * NOTE: Be very careful when changing this function, it must NOT return
+ * an error on interface type changes that have been pre-checked, so most
+ * checks should be in ieee80211_check_concurrent_iface.
+ */
+static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+	u32 changed = 0;
+	int res;
+	u32 hw_reconf_flags = 0;
+
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_WDS:
 		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
@@ -177,7 +187,7 @@ static int ieee80211_open(struct net_device *dev)
 		/* no special treatment */
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
-	case __NL80211_IFTYPE_AFTER_LAST:
+	case NUM_NL80211_IFTYPES:
 		/* cannot happen */
 		WARN_ON(1);
 		break;
@@ -187,39 +197,30 @@ static int ieee80211_open(struct net_device *dev)
 		res = drv_start(local);
 		if (res)
 			goto err_del_bss;
+		if (local->ops->napi_poll)
+			napi_enable(&local->napi);
 		/* we're brought up, everything changes */
 		hw_reconf_flags = ~0;
 		ieee80211_led_radio(local, true);
 	}
 
 	/*
-	 * Check all interfaces and copy the hopefully now-present
-	 * MAC address to those that have the special null one.
+	 * Copy the hopefully now-present MAC address to
+	 * this interface, if it has the special null one.
 	 */
-	list_for_each_entry(nsdata, &local->interfaces, list) {
-		struct net_device *ndev = nsdata->dev;
-
-		/*
-		 * No need to check running since we do not allow
-		 * it to start up with this invalid address.
-		 */
-		if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
-			memcpy(ndev->dev_addr,
-			       local->hw.wiphy->perm_addr,
-			       ETH_ALEN);
-			memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN);
+	if (is_zero_ether_addr(dev->dev_addr)) {
+		memcpy(dev->dev_addr,
+		       local->hw.wiphy->perm_addr,
+		       ETH_ALEN);
+		memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+		if (!is_valid_ether_addr(dev->dev_addr)) {
+			if (!local->open_count)
+				drv_stop(local);
+			return -EADDRNOTAVAIL;
 		}
 	}
 
-	/*
-	 * Validate the MAC address for this device.
-	 */
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		if (!local->open_count)
-			drv_stop(local);
-		return -EADDRNOTAVAIL;
-	}
-
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
 		/* no need to tell driver */
@@ -253,9 +254,11 @@ static int ieee80211_open(struct net_device *dev)
 		netif_carrier_on(dev);
 		break;
 	default:
-		res = drv_add_interface(local, &sdata->vif);
-		if (res)
-			goto err_stop;
+		if (coming_up) {
+			res = drv_add_interface(local, &sdata->vif);
+			if (res)
+				goto err_stop;
+		}
 
 		if (ieee80211_vif_is_mesh(&sdata->vif)) {
 			local->fif_other_bss++;
@@ -307,9 +310,13 @@ static int ieee80211_open(struct net_device *dev)
 	if (sdata->flags & IEEE80211_SDATA_PROMISC)
 		atomic_inc(&local->iff_promiscs);
 
+	mutex_lock(&local->mtx);
 	hw_reconf_flags |= __ieee80211_recalc_idle(local);
+	mutex_unlock(&local->mtx);
+
+	if (coming_up)
+		local->open_count++;
 
-	local->open_count++;
 	if (hw_reconf_flags) {
 		ieee80211_hw_config(local, hw_reconf_flags);
 		/*
@@ -324,6 +331,8 @@ static int ieee80211_open(struct net_device *dev)
 
 	netif_tx_start_all_queues(dev);
 
+	set_bit(SDATA_STATE_RUNNING, &sdata->state);
+
 	return 0;
  err_del_interface:
 	drv_remove_interface(local, &sdata->vif);
@@ -337,19 +346,38 @@ static int ieee80211_open(struct net_device *dev)
 	return res;
 }
 
-static int ieee80211_stop(struct net_device *dev)
+static int ieee80211_open(struct net_device *dev)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	int err;
+
+	/* fail early if user set an invalid address */
+	if (!is_zero_ether_addr(dev->dev_addr) &&
+	    !is_valid_ether_addr(dev->dev_addr))
+		return -EADDRNOTAVAIL;
+
+	err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
+	if (err)
+		return err;
+
+	return ieee80211_do_open(dev, true);
+}
+
+static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+			      bool going_down)
+{
 	struct ieee80211_local *local = sdata->local;
 	unsigned long flags;
 	struct sk_buff *skb, *tmp;
 	u32 hw_reconf_flags = 0;
 	int i;
 
+	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+
 	/*
 	 * Stop TX on this interface first.
 	 */
-	netif_tx_stop_all_queues(dev);
+	netif_tx_stop_all_queues(sdata->dev);
 
 	/*
 	 * Purge work for this interface.
@@ -366,12 +394,9 @@ static int ieee80211_stop(struct net_device *dev)
 	 * (because if we remove a STA after ops->remove_interface()
 	 * the driver will have removed the vif info already!)
 	 *
-	 * We could relax this and only unlink the stations from the
-	 * hash table and list but keep them on a per-sdata list that
-	 * will be inserted back again when the interface is brought
-	 * up again, but I don't currently see a use case for that,
-	 * except with WDS which gets a STA entry created when it is
-	 * brought up.
+	 * This is relevant only in AP, WDS and mesh modes, since in
+	 * all other modes we've already removed all stations when
+	 * disconnecting etc.
 	 */
 	sta_info_flush(local, sdata);
 
@@ -390,11 +415,12 @@ static int ieee80211_stop(struct net_device *dev)
 	if (sdata->vif.type == NL80211_IFTYPE_AP)
 		local->fif_pspoll--;
 
-	netif_addr_lock_bh(dev);
+	netif_addr_lock_bh(sdata->dev);
 	spin_lock_bh(&local->filter_lock);
-	__hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len);
+	__hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
+			 sdata->dev->addr_len);
 	spin_unlock_bh(&local->filter_lock);
-	netif_addr_unlock_bh(dev);
+	netif_addr_unlock_bh(sdata->dev);
 
 	ieee80211_configure_filter(local);
 
@@ -406,11 +432,21 @@ static int ieee80211_stop(struct net_device *dev)
 		struct ieee80211_sub_if_data *vlan, *tmpsdata;
 		struct beacon_data *old_beacon = sdata->u.ap.beacon;
 
+		/* sdata_running will return false, so this will disable */
+		ieee80211_bss_info_change_notify(sdata,
+						 BSS_CHANGED_BEACON_ENABLED);
+
 		/* remove beacon */
 		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
 		synchronize_rcu();
 		kfree(old_beacon);
 
+		/* free all potentially still buffered bcast frames */
+		while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
+			local->total_ps_buffered--;
+			dev_kfree_skb(skb);
+		}
+
 		/* down all dependent devices, that is VLANs */
 		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
 					 u.vlan.list)
@@ -418,7 +454,8 @@ static int ieee80211_stop(struct net_device *dev)
 		WARN_ON(!list_empty(&sdata->u.ap.vlans));
 	}
 
-	local->open_count--;
+	if (going_down)
+		local->open_count--;
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
@@ -450,27 +487,6 @@ static int ieee80211_stop(struct net_device *dev)
 
 		ieee80211_configure_filter(local);
 		break;
-	case NL80211_IFTYPE_STATION:
-		del_timer_sync(&sdata->u.mgd.chswitch_timer);
-		del_timer_sync(&sdata->u.mgd.timer);
-		del_timer_sync(&sdata->u.mgd.conn_mon_timer);
-		del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
-		/*
-		 * If any of the timers fired while we waited for it, it will
-		 * have queued its work. Now the work will be running again
-		 * but will not rearm the timer again because it checks
-		 * whether the interface is running, which, at this point,
-		 * it no longer is.
-		 */
-		cancel_work_sync(&sdata->u.mgd.chswitch_work);
-		cancel_work_sync(&sdata->u.mgd.monitor_work);
-		cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
-
-		/* fall through */
-	case NL80211_IFTYPE_ADHOC:
-		if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
-			del_timer_sync(&sdata->u.ibss.timer);
-		/* fall through */
 	case NL80211_IFTYPE_MESH_POINT:
 		if (ieee80211_vif_is_mesh(&sdata->vif)) {
 			/* other_bss and allmulti are always set on mesh
@@ -498,27 +514,34 @@ static int ieee80211_stop(struct net_device *dev)
 		ieee80211_scan_cancel(local);
 
 		/*
-		 * Disable beaconing for AP and mesh, IBSS can't
-		 * still be joined to a network at this point.
+		 * Disable beaconing here for mesh only, AP and IBSS
+		 * are already taken care of.
 		 */
-		if (sdata->vif.type == NL80211_IFTYPE_AP ||
-		    sdata->vif.type == NL80211_IFTYPE_MESH_POINT) {
+		if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
 			ieee80211_bss_info_change_notify(sdata,
 				BSS_CHANGED_BEACON_ENABLED);
-		}
 
-		/* free all remaining keys, there shouldn't be any */
+		/*
+		 * Free all remaining keys, there shouldn't be any,
+		 * except maybe group keys in AP more or WDS?
+		 */
 		ieee80211_free_keys(sdata);
-		drv_remove_interface(local, &sdata->vif);
+
+		if (going_down)
+			drv_remove_interface(local, &sdata->vif);
 	}
 
 	sdata->bss = NULL;
 
+	mutex_lock(&local->mtx);
 	hw_reconf_flags |= __ieee80211_recalc_idle(local);
+	mutex_unlock(&local->mtx);
 
 	ieee80211_recalc_ps(local, -1);
 
 	if (local->open_count == 0) {
+		if (local->ops->napi_poll)
+			napi_disable(&local->napi);
 		ieee80211_clear_tx_pending(local);
 		ieee80211_stop_device(local);
 
@@ -541,6 +564,13 @@ static int ieee80211_stop(struct net_device *dev)
 		}
 	}
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+}
+
+static int ieee80211_stop(struct net_device *dev)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	ieee80211_do_stop(sdata, true);
 
 	return 0;
 }
@@ -585,8 +615,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
-	struct beacon_data *beacon;
-	struct sk_buff *skb;
 	int flushed;
 	int i;
 
@@ -599,37 +627,8 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
 		__skb_queue_purge(&sdata->fragments[i].skb_list);
 	sdata->fragment_next = 0;
 
-	switch (sdata->vif.type) {
-	case NL80211_IFTYPE_AP:
-		beacon = sdata->u.ap.beacon;
-		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
-		synchronize_rcu();
-		kfree(beacon);
-
-		while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
-			local->total_ps_buffered--;
-			dev_kfree_skb(skb);
-		}
-
-		break;
-	case NL80211_IFTYPE_MESH_POINT:
-		if (ieee80211_vif_is_mesh(&sdata->vif))
-			mesh_rmc_free(sdata);
-		break;
-	case NL80211_IFTYPE_ADHOC:
-		if (WARN_ON(sdata->u.ibss.presp))
-			kfree_skb(sdata->u.ibss.presp);
-		break;
-	case NL80211_IFTYPE_STATION:
-	case NL80211_IFTYPE_WDS:
-	case NL80211_IFTYPE_AP_VLAN:
-	case NL80211_IFTYPE_MONITOR:
-		break;
-	case NL80211_IFTYPE_UNSPECIFIED:
-	case __NL80211_IFTYPE_AFTER_LAST:
-		BUG();
-		break;
-	}
+	if (ieee80211_vif_is_mesh(&sdata->vif))
+		mesh_rmc_free(sdata);
 
 	flushed = sta_info_flush(local, sdata);
 	WARN_ON(flushed);
@@ -847,6 +846,9 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 	sdata->dev->netdev_ops = &ieee80211_dataif_ops;
 	sdata->wdev.iftype = type;
 
+	sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
+	sdata->control_port_no_encrypt = false;
+
 	/* only monitor differs */
 	sdata->dev->type = ARPHRD_ETHER;
 
@@ -878,7 +880,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 	case NL80211_IFTYPE_AP_VLAN:
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
-	case __NL80211_IFTYPE_AFTER_LAST:
+	case NUM_NL80211_IFTYPES:
 		BUG();
 		break;
 	}
@@ -886,9 +888,72 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 	ieee80211_debugfs_add_netdev(sdata);
 }
 
+static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
+					   enum nl80211_iftype type)
+{
+	struct ieee80211_local *local = sdata->local;
+	int ret, err;
+
+	ASSERT_RTNL();
+
+	if (!local->ops->change_interface)
+		return -EBUSY;
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
+		/*
+		 * Could maybe also all others here?
+		 * Just not sure how that interacts
+		 * with the RX/config path e.g. for
+		 * mesh.
+		 */
+		break;
+	default:
+		return -EBUSY;
+	}
+
+	switch (type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
+		/*
+		 * Could probably support everything
+		 * but WDS here (WDS do_open can fail
+		 * under memory pressure, which this
+		 * code isn't prepared to handle).
+		 */
+		break;
+	default:
+		return -EBUSY;
+	}
+
+	ret = ieee80211_check_concurrent_iface(sdata, type);
+	if (ret)
+		return ret;
+
+	ieee80211_do_stop(sdata, false);
+
+	ieee80211_teardown_sdata(sdata->dev);
+
+	ret = drv_change_interface(local, sdata, type);
+	if (ret)
+		type = sdata->vif.type;
+
+	ieee80211_setup_sdata(sdata, type);
+
+	err = ieee80211_do_open(sdata->dev, false);
+	WARN(err, "type change: do_open returned %d", err);
+
+	return ret;
+}
+
 int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 			     enum nl80211_iftype type)
 {
+	int ret;
+
 	ASSERT_RTNL();
 
 	if (type == sdata->vif.type)
@@ -899,18 +964,15 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 	    type == NL80211_IFTYPE_ADHOC)
 		return -EOPNOTSUPP;
 
-	/*
-	 * We could, here, on changes between IBSS/STA/MESH modes,
-	 * invoke an MLME function instead that disassociates etc.
-	 * and goes into the requested mode.
-	 */
-
-	if (ieee80211_sdata_running(sdata))
-		return -EBUSY;
-
-	/* Purge and reset type-dependent state. */
-	ieee80211_teardown_sdata(sdata->dev);
-	ieee80211_setup_sdata(sdata, type);
+	if (ieee80211_sdata_running(sdata)) {
+		ret = ieee80211_runtime_change_iftype(sdata, type);
+		if (ret)
+			return ret;
+	} else {
+		/* Purge and reset type-dependent state. */
+		ieee80211_teardown_sdata(sdata->dev);
+		ieee80211_setup_sdata(sdata, type);
+	}
 
 	/* reset some values that shouldn't be kept across type changes */
 	sdata->vif.bss_conf.basic_rates =
@@ -1167,8 +1229,7 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local,
 		return 0;
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	printk(KERN_DEBUG "%s: device no longer idle - %s\n",
-	       wiphy_name(local->hw.wiphy), reason);
+	wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason);
#endif
 
 	local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
@@ -1181,8 +1242,7 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
 		return 0;
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	printk(KERN_DEBUG "%s: device now idle\n",
-	       wiphy_name(local->hw.wiphy));
+	wiphy_debug(local->hw.wiphy, "device now idle\n");
 #endif
 
 	drv_flush(local, false);
@@ -1195,28 +1255,61 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata;
 	int count = 0;
+	bool working = false, scanning = false;
+	struct ieee80211_work *wk;
 
-	if (!list_empty(&local->work_list))
-		return ieee80211_idle_off(local, "working");
-
-	if (local->scanning)
-		return ieee80211_idle_off(local, "scanning");
+#ifdef CONFIG_PROVE_LOCKING
+	WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
+		!lockdep_is_held(&local->iflist_mtx));
+#endif
+	lockdep_assert_held(&local->mtx);
 
 	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (!ieee80211_sdata_running(sdata))
+		if (!ieee80211_sdata_running(sdata)) {
+			sdata->vif.bss_conf.idle = true;
 			continue;
+		}
+
+		sdata->old_idle = sdata->vif.bss_conf.idle;
+
 		/* do not count disabled managed interfaces */
 		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-		    !sdata->u.mgd.associated)
+		    !sdata->u.mgd.associated) {
+			sdata->vif.bss_conf.idle = true;
 			continue;
+		}
 		/* do not count unused IBSS interfaces */
 		if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
-		    !sdata->u.ibss.ssid_len)
+		    !sdata->u.ibss.ssid_len) {
+			sdata->vif.bss_conf.idle = true;
 			continue;
+		}
 		/* count everything else */
 		count++;
 	}
 
+	list_for_each_entry(wk, &local->work_list, list) {
+		working = true;
+		wk->sdata->vif.bss_conf.idle = false;
+	}
+
+	if (local->scan_sdata) {
+		scanning = true;
+		local->scan_sdata->vif.bss_conf.idle = false;
+	}
+
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->old_idle == sdata->vif.bss_conf.idle)
+			continue;
+		if (!ieee80211_sdata_running(sdata))
			continue;
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+	}
+
+	if (working)
+		return ieee80211_idle_off(local, "working");
+	if (scanning)
+		return ieee80211_idle_off(local, "scanning");
 	if (!count)
 		return ieee80211_idle_on(local);
 	else
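The runtime type-change sequence introduced above is: pre-check the target type, do_stop without detaching the driver vif, tear down type-dependent state, ask the driver to morph the vif, set up the new (or, on failure, the old) type, then do_open again. The notable detail is the rollback: if drv_change_interface() refuses, the code re-runs setup for the original type so the interface still comes back up. A compressed sketch of that control flow, with stub functions standing in for the real steps:

    #include <stdio.h>

    static int driver_ok = 0;  /* flip to 1 to simulate driver success */

    static void do_stop(void)   { printf("stop (keep driver vif)\n"); }
    static void teardown(void)  { printf("teardown type state\n"); }
    static void setup(int type) { printf("setup as type %d\n", type); }
    static void do_open(void)   { printf("open again\n"); }

    static int drv_change_interface(int type)
    {
        if (!driver_ok)
            return -1;  /* driver may refuse the change */
        printf("driver vif is now type %d\n", type);
        return 0;
    }

    static int change_type(int *cur_type, int new_type)
    {
        int ret;

        do_stop();
        teardown();

        ret = drv_change_interface(new_type);
        if (ret)
            new_type = *cur_type;  /* rollback: keep the old type */

        setup(new_type);
        do_open();  /* must not fail for pre-checked types */
        *cur_type = new_type;
        return ret;
    }

    int main(void)
    {
        int type = 2;  /* type values here are arbitrary stand-ins */

        if (change_type(&type, 3))
            printf("change refused, still type %d\n", type);
        return 0;
    }

This also explains the NOTE above ieee80211_do_open(): every failure that can be pre-checked must live in ieee80211_check_concurrent_iface(), because by the time do_open runs again there is no further rollback path.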
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 1b9d87ed143a..3570f8c2bb40 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -60,7 +60,7 @@ static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
60 return NULL; 60 return NULL;
61} 61}
62 62
63static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) 63static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
64{ 64{
65 struct ieee80211_sub_if_data *sdata; 65 struct ieee80211_sub_if_data *sdata;
66 struct ieee80211_sta *sta; 66 struct ieee80211_sta *sta;
@@ -68,8 +68,10 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
68 68
69 might_sleep(); 69 might_sleep();
70 70
71 if (!key->local->ops->set_key) 71 if (!key->local->ops->set_key) {
72 return; 72 ret = -EOPNOTSUPP;
73 goto out_unsupported;
74 }
73 75
74 assert_key_lock(key->local); 76 assert_key_lock(key->local);
75 77
@@ -87,10 +89,27 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
87 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 89 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
88 90
89 if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) 91 if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
90 printk(KERN_ERR "mac80211-%s: failed to set key " 92 wiphy_err(key->local->hw.wiphy,
91 "(%d, %pM) to hardware (%d)\n", 93 "failed to set key (%d, %pM) to hardware (%d)\n",
92 wiphy_name(key->local->hw.wiphy), 94 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
93 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 95
96out_unsupported:
97 if (ret) {
98 switch (key->conf.cipher) {
99 case WLAN_CIPHER_SUITE_WEP40:
100 case WLAN_CIPHER_SUITE_WEP104:
101 case WLAN_CIPHER_SUITE_TKIP:
102 case WLAN_CIPHER_SUITE_CCMP:
103 case WLAN_CIPHER_SUITE_AES_CMAC:
104 /* all of these we can do in software */
105 ret = 0;
106 break;
107 default:
108 ret = -EINVAL;
109 }
110 }
111
112 return ret;
94} 113}
95 114
96static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) 115static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
@@ -121,10 +140,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
121 sta, &key->conf); 140 sta, &key->conf);
122 141
123 if (ret) 142 if (ret)
124 printk(KERN_ERR "mac80211-%s: failed to remove key " 143 wiphy_err(key->local->hw.wiphy,
125 "(%d, %pM) from hardware (%d)\n", 144 "failed to remove key (%d, %pM) from hardware (%d)\n",
126 wiphy_name(key->local->hw.wiphy), 145 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
127 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
128 146
129 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 147 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
130} 148}
@@ -227,20 +245,18 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
227 } 245 }
228} 246}
229 247
230struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, 248struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
231 int idx,
232 size_t key_len,
233 const u8 *key_data, 249 const u8 *key_data,
234 size_t seq_len, const u8 *seq) 250 size_t seq_len, const u8 *seq)
235{ 251{
236 struct ieee80211_key *key; 252 struct ieee80211_key *key;
237 int i, j; 253 int i, j, err;
238 254
239 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS); 255 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);
240 256
241 key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); 257 key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
242 if (!key) 258 if (!key)
243 return NULL; 259 return ERR_PTR(-ENOMEM);
244 260
245 /* 261 /*
246 * Default to software encryption; we'll later upload the 262 * Default to software encryption; we'll later upload the
@@ -249,15 +265,16 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
249 key->conf.flags = 0; 265 key->conf.flags = 0;
250 key->flags = 0; 266 key->flags = 0;
251 267
252 key->conf.alg = alg; 268 key->conf.cipher = cipher;
253 key->conf.keyidx = idx; 269 key->conf.keyidx = idx;
254 key->conf.keylen = key_len; 270 key->conf.keylen = key_len;
255 switch (alg) { 271 switch (cipher) {
256 case ALG_WEP: 272 case WLAN_CIPHER_SUITE_WEP40:
273 case WLAN_CIPHER_SUITE_WEP104:
257 key->conf.iv_len = WEP_IV_LEN; 274 key->conf.iv_len = WEP_IV_LEN;
258 key->conf.icv_len = WEP_ICV_LEN; 275 key->conf.icv_len = WEP_ICV_LEN;
259 break; 276 break;
260 case ALG_TKIP: 277 case WLAN_CIPHER_SUITE_TKIP:
261 key->conf.iv_len = TKIP_IV_LEN; 278 key->conf.iv_len = TKIP_IV_LEN;
262 key->conf.icv_len = TKIP_ICV_LEN; 279 key->conf.icv_len = TKIP_ICV_LEN;
263 if (seq) { 280 if (seq) {
@@ -269,7 +286,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
269 } 286 }
270 } 287 }
271 break; 288 break;
272 case ALG_CCMP: 289 case WLAN_CIPHER_SUITE_CCMP:
273 key->conf.iv_len = CCMP_HDR_LEN; 290 key->conf.iv_len = CCMP_HDR_LEN;
274 key->conf.icv_len = CCMP_MIC_LEN; 291 key->conf.icv_len = CCMP_MIC_LEN;
275 if (seq) { 292 if (seq) {
@@ -278,42 +295,38 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
278 key->u.ccmp.rx_pn[i][j] = 295 key->u.ccmp.rx_pn[i][j] =
279 seq[CCMP_PN_LEN - j - 1]; 296 seq[CCMP_PN_LEN - j - 1];
280 } 297 }
281 break;
282 case ALG_AES_CMAC:
283 key->conf.iv_len = 0;
284 key->conf.icv_len = sizeof(struct ieee80211_mmie);
285 if (seq)
286 for (j = 0; j < 6; j++)
287 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
288 break;
289 }
290 memcpy(key->conf.key, key_data, key_len);
291 INIT_LIST_HEAD(&key->list);
292
293 if (alg == ALG_CCMP) {
294 /* 298 /*
295 * Initialize AES key state here as an optimization so that 299 * Initialize AES key state here as an optimization so that
296 * it does not need to be initialized for every packet. 300 * it does not need to be initialized for every packet.
297 */ 301 */
298 key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data); 302 key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data);
299 if (!key->u.ccmp.tfm) { 303 if (IS_ERR(key->u.ccmp.tfm)) {
304 err = PTR_ERR(key->u.ccmp.tfm);
300 kfree(key); 305 kfree(key);
301 return NULL; 306 key = ERR_PTR(err);
302 } 307 }
303 } 308 break;
304 309 case WLAN_CIPHER_SUITE_AES_CMAC:
305 if (alg == ALG_AES_CMAC) { 310 key->conf.iv_len = 0;
311 key->conf.icv_len = sizeof(struct ieee80211_mmie);
312 if (seq)
313 for (j = 0; j < 6; j++)
314 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
306 /* 315 /*
307 * Initialize AES key state here as an optimization so that 316 * Initialize AES key state here as an optimization so that
308 * it does not need to be initialized for every packet. 317 * it does not need to be initialized for every packet.
309 */ 318 */
310 key->u.aes_cmac.tfm = 319 key->u.aes_cmac.tfm =
311 ieee80211_aes_cmac_key_setup(key_data); 320 ieee80211_aes_cmac_key_setup(key_data);
312 if (!key->u.aes_cmac.tfm) { 321 if (IS_ERR(key->u.aes_cmac.tfm)) {
322 err = PTR_ERR(key->u.aes_cmac.tfm);
313 kfree(key); 323 kfree(key);
314 return NULL; 324 key = ERR_PTR(err);
315 } 325 }
326 break;
316 } 327 }
328 memcpy(key->conf.key, key_data, key_len);
329 INIT_LIST_HEAD(&key->list);
317 330
318 return key; 331 return key;
319} 332}
@@ -326,9 +339,9 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
326 if (key->local) 339 if (key->local)
327 ieee80211_key_disable_hw_accel(key); 340 ieee80211_key_disable_hw_accel(key);
328 341
329 if (key->conf.alg == ALG_CCMP) 342 if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP)
330 ieee80211_aes_key_free(key->u.ccmp.tfm); 343 ieee80211_aes_key_free(key->u.ccmp.tfm);
331 if (key->conf.alg == ALG_AES_CMAC) 344 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
332 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); 345 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
333 if (key->local) 346 if (key->local)
334 ieee80211_debugfs_key_remove(key); 347 ieee80211_debugfs_key_remove(key);
@@ -336,12 +349,12 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
336 kfree(key); 349 kfree(key);
337} 350}
338 351
339void ieee80211_key_link(struct ieee80211_key *key, 352int ieee80211_key_link(struct ieee80211_key *key,
340 struct ieee80211_sub_if_data *sdata, 353 struct ieee80211_sub_if_data *sdata,
341 struct sta_info *sta) 354 struct sta_info *sta)
342{ 355{
343 struct ieee80211_key *old_key; 356 struct ieee80211_key *old_key;
344 int idx; 357 int idx, ret;
345 358
346 BUG_ON(!sdata); 359 BUG_ON(!sdata);
347 BUG_ON(!key); 360 BUG_ON(!key);
@@ -396,9 +409,11 @@ void ieee80211_key_link(struct ieee80211_key *key,
396 409
397 ieee80211_debugfs_key_add(key); 410 ieee80211_debugfs_key_add(key);
398 411
399 ieee80211_key_enable_hw_accel(key); 412 ret = ieee80211_key_enable_hw_accel(key);
400 413
401 mutex_unlock(&sdata->local->key_mtx); 414 mutex_unlock(&sdata->local->key_mtx);
415
416 return ret;
402} 417}
403 418
404static void __ieee80211_key_free(struct ieee80211_key *key) 419static void __ieee80211_key_free(struct ieee80211_key *key)
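
The hunks above convert ieee80211_key_alloc() from returning NULL on failure to the kernel's ERR_PTR() convention, so callers can tell -ENOMEM apart from a crypto setup error propagated out of the AES tfm allocation. A minimal caller sketch under that convention (hypothetical, not part of this diff):

/*
 * Sketch only: illustrates the ERR_PTR()/IS_ERR() convention the
 * hunks above adopt; the surrounding error handling is assumed.
 */
struct ieee80211_key *key;

key = ieee80211_key_alloc(WLAN_CIPHER_SUITE_CCMP, 0, key_len,
			  key_data, 0, NULL);
if (IS_ERR(key))
	return PTR_ERR(key);	/* -ENOMEM or a crypto setup error */
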
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index b665bbb7a471..cb9a4a65cc68 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -123,18 +123,16 @@ struct ieee80211_key {
123 struct ieee80211_key_conf conf; 123 struct ieee80211_key_conf conf;
124}; 124};
125 125
126struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, 126struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
127 int idx,
128 size_t key_len,
129 const u8 *key_data, 127 const u8 *key_data,
130 size_t seq_len, const u8 *seq); 128 size_t seq_len, const u8 *seq);
131/* 129/*
132 * Insert a key into data structures (sdata, sta if necessary) 130 * Insert a key into data structures (sdata, sta if necessary)
133 * to make it used, free old key. 131 * to make it used, free old key.
134 */ 132 */
135void ieee80211_key_link(struct ieee80211_key *key, 133int __must_check ieee80211_key_link(struct ieee80211_key *key,
136 struct ieee80211_sub_if_data *sdata, 134 struct ieee80211_sub_if_data *sdata,
137 struct sta_info *sta); 135 struct sta_info *sta);
138void ieee80211_key_free(struct ieee80211_local *local, 136void ieee80211_key_free(struct ieee80211_local *local,
139 struct ieee80211_key *key); 137 struct ieee80211_key *key);
140void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); 138void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx);
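
With ieee80211_key_link() now returning int and carrying __must_check, a caller that ignores a failed hardware-acceleration upload draws a compiler warning. A sketch of a conforming caller (the cleanup shown is a hypothetical example, not the actual call site):

/* Sketch: a caller obeying the new __must_check contract. */
int err = ieee80211_key_link(key, sdata, sta);

if (err) {
	/* hypothetical cleanup; the real caller is not in this diff */
	ieee80211_key_free(sdata->local, key);
	return err;
}
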
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index ded5c3843e06..4935b843bcca 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -99,11 +99,13 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
99 int ret = 0; 99 int ret = 0;
100 int power; 100 int power;
101 enum nl80211_channel_type channel_type; 101 enum nl80211_channel_type channel_type;
102 u32 offchannel_flag;
102 103
103 might_sleep(); 104 might_sleep();
104 105
105 scan_chan = local->scan_channel; 106 scan_chan = local->scan_channel;
106 107
108 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
107 if (scan_chan) { 109 if (scan_chan) {
108 chan = scan_chan; 110 chan = scan_chan;
109 channel_type = NL80211_CHAN_NO_HT; 111 channel_type = NL80211_CHAN_NO_HT;
@@ -117,8 +119,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
117 channel_type = local->_oper_channel_type; 119 channel_type = local->_oper_channel_type;
118 local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; 120 local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
119 } 121 }
122 offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
120 123
121 if (chan != local->hw.conf.channel || 124 if (offchannel_flag || chan != local->hw.conf.channel ||
122 channel_type != local->hw.conf.channel_type) { 125 channel_type != local->hw.conf.channel_type) {
123 local->hw.conf.channel = chan; 126 local->hw.conf.channel = chan;
124 local->hw.conf.channel_type = channel_type; 127 local->hw.conf.channel_type = channel_type;
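
The offchannel_flag handling above is a compact change detector: the IEEE80211_CONF_OFFCHANNEL bit is sampled before the scan-channel logic may modify it, then XORed with the bit afterwards, leaving a nonzero value exactly when the bit flipped, which forces a reconfig even if the channel itself is unchanged. Reduced to its essentials (generic sketch, names assumed):

/* Generic sketch of the XOR change-detection idiom used above. */
u32 before = flags & SOME_BIT;

/* ... code that may set or clear SOME_BIT in flags ... */

before ^= flags & SOME_BIT;	/* nonzero iff SOME_BIT changed */
if (before)
	handle_transition();	/* assumed handler */
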
@@ -302,7 +305,13 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
302 305
303 trace_api_restart_hw(local); 306 trace_api_restart_hw(local);
304 307
305 /* use this reason, __ieee80211_resume will unblock it */ 308 WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
309 "%s called with hardware scan in progress\n", __func__);
310
311 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)))
312 ieee80211_scan_cancel(local);
313
314 /* use this reason, ieee80211_reconfig will unblock it */
306 ieee80211_stop_queues_by_reason(hw, 315 ieee80211_stop_queues_by_reason(hw,
307 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 316 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
308 317
@@ -336,9 +345,6 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
336 struct ieee80211_if_managed *ifmgd; 345 struct ieee80211_if_managed *ifmgd;
337 int c = 0; 346 int c = 0;
338 347
339 if (!netif_running(ndev))
340 return NOTIFY_DONE;
341
342 /* Make sure it's our interface that got changed */ 348 /* Make sure it's our interface that got changed */
343 if (!wdev) 349 if (!wdev)
344 return NOTIFY_DONE; 350 return NOTIFY_DONE;
@@ -349,6 +355,9 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
349 sdata = IEEE80211_DEV_TO_SUB_IF(ndev); 355 sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
350 bss_conf = &sdata->vif.bss_conf; 356 bss_conf = &sdata->vif.bss_conf;
351 357
358 if (!ieee80211_sdata_running(sdata))
359 return NOTIFY_DONE;
360
352 /* ARP filtering is only supported in managed mode */ 361 /* ARP filtering is only supported in managed mode */
353 if (sdata->vif.type != NL80211_IFTYPE_STATION) 362 if (sdata->vif.type != NL80211_IFTYPE_STATION)
354 return NOTIFY_DONE; 363 return NOTIFY_DONE;
@@ -390,6 +399,65 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
390} 399}
391#endif 400#endif
392 401
402static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
403{
404 struct ieee80211_local *local =
405 container_of(napi, struct ieee80211_local, napi);
406
407 return local->ops->napi_poll(&local->hw, budget);
408}
409
410void ieee80211_napi_schedule(struct ieee80211_hw *hw)
411{
412 struct ieee80211_local *local = hw_to_local(hw);
413
414 napi_schedule(&local->napi);
415}
416EXPORT_SYMBOL(ieee80211_napi_schedule);
417
418void ieee80211_napi_complete(struct ieee80211_hw *hw)
419{
420 struct ieee80211_local *local = hw_to_local(hw);
421
422 napi_complete(&local->napi);
423}
424EXPORT_SYMBOL(ieee80211_napi_complete);
425
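
ieee80211_napi_poll() simply forwards to the driver's napi_poll op, while the two exported helpers wrap napi_schedule()/napi_complete() on the dummy netdev that this patch initialises in ieee80211_alloc_hw(); hw->napi_weight is consumed by the netif_napi_add() call added further down. A driver-side sketch of the intended usage, assuming the usual NAPI contract (the drv_* helpers and their behaviour are assumptions, only the ieee80211_napi_* calls and the napi_poll op come from this patch):

/* Hypothetical driver glue for the new mac80211 NAPI hooks. */
static irqreturn_t drv_interrupt(int irq, void *data)
{
	struct ieee80211_hw *hw = data;

	drv_disable_rx_irqs(hw->priv);		/* assumed helper */
	ieee80211_napi_schedule(hw);		/* defer RX to softirq */
	return IRQ_HANDLED;
}

static int drv_napi_poll(struct ieee80211_hw *hw, int budget)
{
	int done = drv_process_rx(hw->priv, budget);	/* assumed */

	if (done < budget) {
		ieee80211_napi_complete(hw);
		drv_enable_rx_irqs(hw->priv);	/* assumed helper */
	}
	return done;
}
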
426/* There isn't a lot of sense in it, but you can transmit anything you like */
427static const struct ieee80211_txrx_stypes
428ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
429 [NL80211_IFTYPE_ADHOC] = {
430 .tx = 0xffff,
431 .rx = BIT(IEEE80211_STYPE_ACTION >> 4),
432 },
433 [NL80211_IFTYPE_STATION] = {
434 .tx = 0xffff,
435 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
436 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
437 },
438 [NL80211_IFTYPE_AP] = {
439 .tx = 0xffff,
440 .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
441 BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
442 BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
443 BIT(IEEE80211_STYPE_DISASSOC >> 4) |
444 BIT(IEEE80211_STYPE_AUTH >> 4) |
445 BIT(IEEE80211_STYPE_DEAUTH >> 4) |
446 BIT(IEEE80211_STYPE_ACTION >> 4),
447 },
448 [NL80211_IFTYPE_AP_VLAN] = {
449 /* copy AP */
450 .tx = 0xffff,
451 .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
452 BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
453 BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
454 BIT(IEEE80211_STYPE_DISASSOC >> 4) |
455 BIT(IEEE80211_STYPE_AUTH >> 4) |
456 BIT(IEEE80211_STYPE_DEAUTH >> 4) |
457 BIT(IEEE80211_STYPE_ACTION >> 4),
458 },
459};
460
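
The .tx/.rx masks above pack management-frame subtypes into a u16 bitmap: the IEEE80211_STYPE_* constants occupy bits 4-7 of the frame-control field, so shifting right by 4 yields a bit index in the range 0-15. A sketch of how such a bitmap would be consumed (assumed, mirroring the cfg80211 side of the registration mechanism):

/* Sketch: test whether a management subtype is registered for RX. */
u16 fc = le16_to_cpu(mgmt->frame_control);
u16 stype_bit = BIT((fc & IEEE80211_FCTL_STYPE) >> 4);

if (stypes->rx & stype_bit)
	/* subtype may be delivered to userspace */;
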
393struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 461struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
394 const struct ieee80211_ops *ops) 462 const struct ieee80211_ops *ops)
395{ 463{
@@ -419,6 +487,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
419 if (!wiphy) 487 if (!wiphy)
420 return NULL; 488 return NULL;
421 489
490 wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;
491
422 wiphy->flags |= WIPHY_FLAG_NETNS_OK | 492 wiphy->flags |= WIPHY_FLAG_NETNS_OK |
423 WIPHY_FLAG_4ADDR_AP | 493 WIPHY_FLAG_4ADDR_AP |
424 WIPHY_FLAG_4ADDR_STATION; 494 WIPHY_FLAG_4ADDR_STATION;
@@ -455,7 +525,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
455 __hw_addr_init(&local->mc_list); 525 __hw_addr_init(&local->mc_list);
456 526
457 mutex_init(&local->iflist_mtx); 527 mutex_init(&local->iflist_mtx);
458 mutex_init(&local->scan_mtx); 528 mutex_init(&local->mtx);
459 529
460 mutex_init(&local->key_mtx); 530 mutex_init(&local->key_mtx);
461 spin_lock_init(&local->filter_lock); 531 spin_lock_init(&local->filter_lock);
@@ -494,6 +564,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
494 skb_queue_head_init(&local->skb_queue); 564 skb_queue_head_init(&local->skb_queue);
495 skb_queue_head_init(&local->skb_queue_unreliable); 565 skb_queue_head_init(&local->skb_queue_unreliable);
496 566
567 /* init dummy netdev for use w/ NAPI */
568 init_dummy_netdev(&local->napi_dev);
569
497 return local_to_hw(local); 570 return local_to_hw(local);
498} 571}
499EXPORT_SYMBOL(ieee80211_alloc_hw); 572EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -506,6 +579,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
506 int channels, max_bitrates; 579 int channels, max_bitrates;
507 bool supp_ht; 580 bool supp_ht;
508 static const u32 cipher_suites[] = { 581 static const u32 cipher_suites[] = {
582 /* keep WEP first, it may be removed below */
509 WLAN_CIPHER_SUITE_WEP40, 583 WLAN_CIPHER_SUITE_WEP40,
510 WLAN_CIPHER_SUITE_WEP104, 584 WLAN_CIPHER_SUITE_WEP104,
511 WLAN_CIPHER_SUITE_TKIP, 585 WLAN_CIPHER_SUITE_TKIP,
@@ -554,6 +628,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
554 /* mac80211 always supports monitor */ 628 /* mac80211 always supports monitor */
555 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); 629 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
556 630
631#ifndef CONFIG_MAC80211_MESH
632 /* mesh depends on Kconfig, but drivers should set it if they want */
633 local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
634#endif
635
636 /* mac80211 supports control port protocol changing */
637 local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
638
557 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 639 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
558 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 640 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
559 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 641 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
@@ -589,10 +671,41 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
589 if (local->hw.wiphy->max_scan_ie_len) 671 if (local->hw.wiphy->max_scan_ie_len)
590 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; 672 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
591 673
592 local->hw.wiphy->cipher_suites = cipher_suites; 674 /* Set up cipher suites unless driver already did */
593 local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 675 if (!local->hw.wiphy->cipher_suites) {
594 if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE)) 676 local->hw.wiphy->cipher_suites = cipher_suites;
595 local->hw.wiphy->n_cipher_suites--; 677 local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
678 if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE))
679 local->hw.wiphy->n_cipher_suites--;
680 }
681 if (IS_ERR(local->wep_tx_tfm) || IS_ERR(local->wep_rx_tfm)) {
682 if (local->hw.wiphy->cipher_suites == cipher_suites) {
683 local->hw.wiphy->cipher_suites += 2;
684 local->hw.wiphy->n_cipher_suites -= 2;
685 } else {
686 u32 *suites;
687 int r, w = 0;
688
689 /* Filter out WEP */
690
691 suites = kmemdup(
692 local->hw.wiphy->cipher_suites,
693 sizeof(u32) * local->hw.wiphy->n_cipher_suites,
694 GFP_KERNEL);
695 if (!suites)
696 return -ENOMEM;
697 for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
698 u32 suite = local->hw.wiphy->cipher_suites[r];
699 if (suite == WLAN_CIPHER_SUITE_WEP40 ||
700 suite == WLAN_CIPHER_SUITE_WEP104)
701 continue;
702 suites[w++] = suite;
703 }
704 local->hw.wiphy->cipher_suites = suites;
705 local->hw.wiphy->n_cipher_suites = w;
706 local->wiphy_ciphers_allocated = true;
707 }
708 }
596 709
597 result = wiphy_register(local->hw.wiphy); 710 result = wiphy_register(local->hw.wiphy);
598 if (result < 0) 711 if (result < 0)
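
The "keep WEP first" comment earlier exists so that when mac80211's own static table is in use, WEP can be dropped by simply bumping the pointer past the first two entries; a driver-supplied table instead has to be duplicated with kmemdup() and compacted with a read/write cursor pair, with wiphy_ciphers_allocated recording that the copy must be freed on the error path and in ieee80211_free_hw(). The compaction step, detached from the wiphy specifics (standalone sketch):

/* Standalone sketch of the copy-and-filter step used above. */
static int filter_two(u32 *dst, const u32 *src, int n, u32 a, u32 b)
{
	int r, w = 0;

	for (r = 0; r < n; r++)
		if (src[r] != a && src[r] != b)
			dst[w++] = src[r];
	return w;	/* number of suites kept */
}
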
@@ -641,16 +754,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
641 754
642 result = ieee80211_wep_init(local); 755 result = ieee80211_wep_init(local);
643 if (result < 0) 756 if (result < 0)
644 printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", 757 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
645 wiphy_name(local->hw.wiphy), result); 758 result);
646 759
647 rtnl_lock(); 760 rtnl_lock();
648 761
649 result = ieee80211_init_rate_ctrl_alg(local, 762 result = ieee80211_init_rate_ctrl_alg(local,
650 hw->rate_control_algorithm); 763 hw->rate_control_algorithm);
651 if (result < 0) { 764 if (result < 0) {
652 printk(KERN_DEBUG "%s: Failed to initialize rate control " 765 wiphy_debug(local->hw.wiphy,
653 "algorithm\n", wiphy_name(local->hw.wiphy)); 766 "Failed to initialize rate control algorithm\n");
654 goto fail_rate; 767 goto fail_rate;
655 } 768 }
656 769
@@ -659,8 +772,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
659 result = ieee80211_if_add(local, "wlan%d", NULL, 772 result = ieee80211_if_add(local, "wlan%d", NULL,
660 NL80211_IFTYPE_STATION, NULL); 773 NL80211_IFTYPE_STATION, NULL);
661 if (result) 774 if (result)
662 printk(KERN_WARNING "%s: Failed to add default virtual iface\n", 775 wiphy_warn(local->hw.wiphy,
663 wiphy_name(local->hw.wiphy)); 776 "Failed to add default virtual iface\n");
664 } 777 }
665 778
666 rtnl_unlock(); 779 rtnl_unlock();
@@ -683,6 +796,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
683 goto fail_ifa; 796 goto fail_ifa;
684#endif 797#endif
685 798
799 netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
800 local->hw.napi_weight);
801
686 return 0; 802 return 0;
687 803
688#ifdef CONFIG_INET 804#ifdef CONFIG_INET
@@ -703,6 +819,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
703 fail_workqueue: 819 fail_workqueue:
704 wiphy_unregister(local->hw.wiphy); 820 wiphy_unregister(local->hw.wiphy);
705 fail_wiphy_register: 821 fail_wiphy_register:
822 if (local->wiphy_ciphers_allocated)
823 kfree(local->hw.wiphy->cipher_suites);
706 kfree(local->int_scan_req); 824 kfree(local->int_scan_req);
707 return result; 825 return result;
708} 826}
@@ -738,6 +856,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
738 */ 856 */
739 del_timer_sync(&local->work_timer); 857 del_timer_sync(&local->work_timer);
740 858
859 cancel_work_sync(&local->restart_work);
741 cancel_work_sync(&local->reconfig_filter); 860 cancel_work_sync(&local->reconfig_filter);
742 861
743 ieee80211_clear_tx_pending(local); 862 ieee80211_clear_tx_pending(local);
@@ -746,8 +865,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
746 865
747 if (skb_queue_len(&local->skb_queue) || 866 if (skb_queue_len(&local->skb_queue) ||
748 skb_queue_len(&local->skb_queue_unreliable)) 867 skb_queue_len(&local->skb_queue_unreliable))
749 printk(KERN_WARNING "%s: skb_queue not empty\n", 868 wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
750 wiphy_name(local->hw.wiphy));
751 skb_queue_purge(&local->skb_queue); 869 skb_queue_purge(&local->skb_queue);
752 skb_queue_purge(&local->skb_queue_unreliable); 870 skb_queue_purge(&local->skb_queue_unreliable);
753 871
@@ -764,7 +882,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
764 struct ieee80211_local *local = hw_to_local(hw); 882 struct ieee80211_local *local = hw_to_local(hw);
765 883
766 mutex_destroy(&local->iflist_mtx); 884 mutex_destroy(&local->iflist_mtx);
767 mutex_destroy(&local->scan_mtx); 885 mutex_destroy(&local->mtx);
886
887 if (local->wiphy_ciphers_allocated)
888 kfree(local->hw.wiphy->cipher_suites);
768 889
769 wiphy_free(local->hw.wiphy); 890 wiphy_free(local->hw.wiphy);
770} 891}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b6c163ac22da..0cb822cc12e9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -54,6 +54,12 @@
54 */ 54 */
55#define IEEE80211_SIGNAL_AVE_WEIGHT 3 55#define IEEE80211_SIGNAL_AVE_WEIGHT 3
56 56
57/*
58 * How many Beacon frames need to have been used in average signal strength
59 * before starting to indicate signal change events.
60 */
61#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
62
57#define TMR_RUNNING_TIMER 0 63#define TMR_RUNNING_TIMER 0
58#define TMR_RUNNING_CHANSW 1 64#define TMR_RUNNING_CHANSW 1
59 65
@@ -778,16 +784,17 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
778 params.uapsd = uapsd; 784 params.uapsd = uapsd;
779 785
780#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 786#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
781 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 787 wiphy_debug(local->hw.wiphy,
782 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n", 788 "WMM queue=%d aci=%d acm=%d aifs=%d "
783 wiphy_name(local->hw.wiphy), queue, aci, acm, 789 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
784 params.aifs, params.cw_min, params.cw_max, params.txop, 790 queue, aci, acm,
785 params.uapsd); 791 params.aifs, params.cw_min, params.cw_max,
792 params.txop, params.uapsd);
786#endif 793#endif
787 if (drv_conf_tx(local, queue, &params)) 794 if (drv_conf_tx(local, queue, &params))
788 printk(KERN_DEBUG "%s: failed to set TX queue " 795 wiphy_debug(local->hw.wiphy,
789 "parameters for queue %d\n", 796 "failed to set TX queue parameters for queue %d\n",
790 wiphy_name(local->hw.wiphy), queue); 797 queue);
791 } 798 }
792 799
793 /* enable WMM or activate new settings */ 800 /* enable WMM or activate new settings */
@@ -990,6 +997,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
990 997
991 if (remove_sta) 998 if (remove_sta)
992 sta_info_destroy_addr(sdata, bssid); 999 sta_info_destroy_addr(sdata, bssid);
1000
1001 del_timer_sync(&sdata->u.mgd.conn_mon_timer);
1002 del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
1003 del_timer_sync(&sdata->u.mgd.timer);
1004 del_timer_sync(&sdata->u.mgd.chswitch_timer);
993} 1005}
994 1006
995void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 1007void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1103,8 +1115,11 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1103 printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid); 1115 printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
1104 1116
1105 ieee80211_set_disassoc(sdata, true); 1117 ieee80211_set_disassoc(sdata, true);
1106 ieee80211_recalc_idle(local);
1107 mutex_unlock(&ifmgd->mtx); 1118 mutex_unlock(&ifmgd->mtx);
1119
1120 mutex_lock(&local->mtx);
1121 ieee80211_recalc_idle(local);
1122 mutex_unlock(&local->mtx);
1108 /* 1123 /*
1109 * must be outside lock due to cfg80211, 1124 * must be outside lock due to cfg80211,
1110 * but that's not a problem. 1125 * but that's not a problem.
@@ -1173,7 +1188,9 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1173 sdata->name, bssid, reason_code); 1188 sdata->name, bssid, reason_code);
1174 1189
1175 ieee80211_set_disassoc(sdata, true); 1190 ieee80211_set_disassoc(sdata, true);
1191 mutex_lock(&sdata->local->mtx);
1176 ieee80211_recalc_idle(sdata->local); 1192 ieee80211_recalc_idle(sdata->local);
1193 mutex_unlock(&sdata->local->mtx);
1177 1194
1178 return RX_MGMT_CFG80211_DEAUTH; 1195 return RX_MGMT_CFG80211_DEAUTH;
1179} 1196}
@@ -1203,7 +1220,9 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1203 sdata->name, mgmt->sa, reason_code); 1220 sdata->name, mgmt->sa, reason_code);
1204 1221
1205 ieee80211_set_disassoc(sdata, true); 1222 ieee80211_set_disassoc(sdata, true);
1223 mutex_lock(&sdata->local->mtx);
1206 ieee80211_recalc_idle(sdata->local); 1224 ieee80211_recalc_idle(sdata->local);
1225 mutex_unlock(&sdata->local->mtx);
1207 return RX_MGMT_CFG80211_DISASSOC; 1226 return RX_MGMT_CFG80211_DISASSOC;
1208} 1227}
1209 1228
@@ -1540,15 +1559,18 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1540 ifmgd->last_beacon_signal = rx_status->signal; 1559 ifmgd->last_beacon_signal = rx_status->signal;
1541 if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) { 1560 if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
1542 ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE; 1561 ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
1543 ifmgd->ave_beacon_signal = rx_status->signal; 1562 ifmgd->ave_beacon_signal = rx_status->signal * 16;
1544 ifmgd->last_cqm_event_signal = 0; 1563 ifmgd->last_cqm_event_signal = 0;
1564 ifmgd->count_beacon_signal = 1;
1545 } else { 1565 } else {
1546 ifmgd->ave_beacon_signal = 1566 ifmgd->ave_beacon_signal =
1547 (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 + 1567 (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
1548 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) * 1568 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
1549 ifmgd->ave_beacon_signal) / 16; 1569 ifmgd->ave_beacon_signal) / 16;
1570 ifmgd->count_beacon_signal++;
1550 } 1571 }
1551 if (bss_conf->cqm_rssi_thold && 1572 if (bss_conf->cqm_rssi_thold &&
1573 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
1552 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) { 1574 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
1553 int sig = ifmgd->ave_beacon_signal / 16; 1575 int sig = ifmgd->ave_beacon_signal / 16;
1554 int last_event = ifmgd->last_cqm_event_signal; 1576 int last_event = ifmgd->last_cqm_event_signal;
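
The beacon-signal average above is an exponentially weighted moving average kept in 1/16 dBm fixed point, which is why the reset branch now stores signal * 16 (previously it stored the raw value while the averaging branch worked in scaled units). With W = IEEE80211_SIGNAL_AVE_WEIGHT = 3, each beacon folds in as ave = (W*new*16 + (16-W)*ave)/16, and the new IEEE80211_SIGNAL_AVE_MIN_COUNT guard suppresses CQM events until four beacons have contributed. A standalone worked sketch, constants copied from the hunk:

/* Standalone sketch of the fixed-point average used above. */
#define AVE_WEIGHT 3	/* IEEE80211_SIGNAL_AVE_WEIGHT */

static int ave_x16;	/* average in units of 1/16 dBm */

static void fold_in(int signal_dbm, bool reset)
{
	if (reset)
		ave_x16 = signal_dbm * 16;
	else
		ave_x16 = (AVE_WEIGHT * signal_dbm * 16 +
			   (16 - AVE_WEIGHT) * ave_x16) / 16;
}

/*
 * Example: reset at -40 dBm, then one beacon at -56 dBm:
 * ave_x16 = (3 * -56 * 16 + 13 * -640) / 16 = -11008 / 16 = -688,
 * i.e. -43 dBm after scaling back down.
 */
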
@@ -1751,7 +1773,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1751 struct ieee80211_local *local = sdata->local; 1773 struct ieee80211_local *local = sdata->local;
1752 struct ieee80211_work *wk; 1774 struct ieee80211_work *wk;
1753 1775
1754 mutex_lock(&local->work_mtx); 1776 mutex_lock(&local->mtx);
1755 list_for_each_entry(wk, &local->work_list, list) { 1777 list_for_each_entry(wk, &local->work_list, list) {
1756 if (wk->sdata != sdata) 1778 if (wk->sdata != sdata)
1757 continue; 1779 continue;
@@ -1783,7 +1805,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1783 free_work(wk); 1805 free_work(wk);
1784 break; 1806 break;
1785 } 1807 }
1786 mutex_unlock(&local->work_mtx); 1808 mutex_unlock(&local->mtx);
1787 1809
1788 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 1810 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
1789 } 1811 }
@@ -1840,8 +1862,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
1840 " after %dms, disconnecting.\n", 1862 " after %dms, disconnecting.\n",
1841 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 1863 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
1842 ieee80211_set_disassoc(sdata, true); 1864 ieee80211_set_disassoc(sdata, true);
1843 ieee80211_recalc_idle(local);
1844 mutex_unlock(&ifmgd->mtx); 1865 mutex_unlock(&ifmgd->mtx);
1866 mutex_lock(&local->mtx);
1867 ieee80211_recalc_idle(local);
1868 mutex_unlock(&local->mtx);
1845 /* 1869 /*
1846 * must be outside lock due to cfg80211, 1870 * must be outside lock due to cfg80211,
1847 * but that's not a problem. 1871 * but that's not a problem.
@@ -1917,6 +1941,8 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
1917 * time -- the code here is properly synchronised. 1941 * time -- the code here is properly synchronised.
1918 */ 1942 */
1919 1943
1944 cancel_work_sync(&ifmgd->request_smps_work);
1945
1920 cancel_work_sync(&ifmgd->beacon_connection_loss_work); 1946 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
1921 if (del_timer_sync(&ifmgd->timer)) 1947 if (del_timer_sync(&ifmgd->timer))
1922 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); 1948 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1952,6 +1978,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1952 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); 1978 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
1953 INIT_WORK(&ifmgd->beacon_connection_loss_work, 1979 INIT_WORK(&ifmgd->beacon_connection_loss_work,
1954 ieee80211_beacon_connection_loss_work); 1980 ieee80211_beacon_connection_loss_work);
1981 INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work);
1955 setup_timer(&ifmgd->timer, ieee80211_sta_timer, 1982 setup_timer(&ifmgd->timer, ieee80211_sta_timer,
1956 (unsigned long) sdata); 1983 (unsigned long) sdata);
1957 setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 1984 setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -2249,6 +2276,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2249 else 2276 else
2250 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; 2277 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
2251 2278
2279 sdata->control_port_protocol = req->crypto.control_port_ethertype;
2280 sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
2281
2252 ieee80211_add_work(wk); 2282 ieee80211_add_work(wk);
2253 return 0; 2283 return 0;
2254} 2284}
@@ -2275,7 +2305,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2275 2305
2276 mutex_unlock(&ifmgd->mtx); 2306 mutex_unlock(&ifmgd->mtx);
2277 2307
2278 mutex_lock(&local->work_mtx); 2308 mutex_lock(&local->mtx);
2279 list_for_each_entry(wk, &local->work_list, list) { 2309 list_for_each_entry(wk, &local->work_list, list) {
2280 if (wk->sdata != sdata) 2310 if (wk->sdata != sdata)
2281 continue; 2311 continue;
@@ -2294,7 +2324,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2294 free_work(wk); 2324 free_work(wk);
2295 break; 2325 break;
2296 } 2326 }
2297 mutex_unlock(&local->work_mtx); 2327 mutex_unlock(&local->mtx);
2298 2328
2299 /* 2329 /*
2300 * If somebody requests authentication and we haven't 2330 * If somebody requests authentication and we haven't
@@ -2319,7 +2349,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2319 if (assoc_bss) 2349 if (assoc_bss)
2320 sta_info_destroy_addr(sdata, bssid); 2350 sta_info_destroy_addr(sdata, bssid);
2321 2351
2352 mutex_lock(&sdata->local->mtx);
2322 ieee80211_recalc_idle(sdata->local); 2353 ieee80211_recalc_idle(sdata->local);
2354 mutex_unlock(&sdata->local->mtx);
2323 2355
2324 return 0; 2356 return 0;
2325} 2357}
@@ -2357,7 +2389,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2357 cookie, !req->local_state_change); 2389 cookie, !req->local_state_change);
2358 sta_info_destroy_addr(sdata, bssid); 2390 sta_info_destroy_addr(sdata, bssid);
2359 2391
2392 mutex_lock(&sdata->local->mtx);
2360 ieee80211_recalc_idle(sdata->local); 2393 ieee80211_recalc_idle(sdata->local);
2394 mutex_unlock(&sdata->local->mtx);
2361 2395
2362 return 0; 2396 return 0;
2363} 2397}
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c36b1911987a..eeacaa59380a 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -112,8 +112,10 @@ void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
112 * used from user space controlled off-channel operations. 112 * used from user space controlled off-channel operations.
113 */ 113 */
114 if (sdata->vif.type != NL80211_IFTYPE_STATION && 114 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
115 sdata->vif.type != NL80211_IFTYPE_MONITOR) 115 sdata->vif.type != NL80211_IFTYPE_MONITOR) {
116 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
116 netif_tx_stop_all_queues(sdata->dev); 117 netif_tx_stop_all_queues(sdata->dev);
118 }
117 } 119 }
118 mutex_unlock(&local->iflist_mtx); 120 mutex_unlock(&local->iflist_mtx);
119} 121}
@@ -131,6 +133,7 @@ void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
131 continue; 133 continue;
132 134
133 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 135 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
136 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
134 netif_tx_stop_all_queues(sdata->dev); 137 netif_tx_stop_all_queues(sdata->dev);
135 if (sdata->u.mgd.associated) 138 if (sdata->u.mgd.associated)
136 ieee80211_offchannel_ps_enable(sdata); 139 ieee80211_offchannel_ps_enable(sdata);
@@ -155,8 +158,20 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
155 ieee80211_offchannel_ps_disable(sdata); 158 ieee80211_offchannel_ps_disable(sdata);
156 } 159 }
157 160
158 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 161 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
162 clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
163 /*
164 * This may wake up queues even though the driver
165 * currently has them stopped. This is not very
166 * likely, since the driver won't have gotten any
167 * (or hardly any) new packets while we weren't
168 * on the right channel, and even if it happens
169 * it will at most lead to queueing up one more
170 * packet per queue in mac80211 rather than on
171 * the interface qdisc.
172 */
159 netif_tx_wake_all_queues(sdata->dev); 173 netif_tx_wake_all_queues(sdata->dev);
174 }
160 175
161 /* re-enable beaconing */ 176 /* re-enable beaconing */
162 if (enable_beaconing && 177 if (enable_beaconing &&
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index d287fde0431d..ce671dfd238c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -12,7 +12,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
13 struct sta_info *sta; 13 struct sta_info *sta;
14 14
15 ieee80211_scan_cancel(local); 15 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)))
16 ieee80211_scan_cancel(local);
16 17
17 ieee80211_stop_queues_by_reason(hw, 18 ieee80211_stop_queues_by_reason(hw,
18 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 19 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index be04d46110fe..4f772de2f213 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -368,8 +368,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
368 368
369 ref = rate_control_alloc(name, local); 369 ref = rate_control_alloc(name, local);
370 if (!ref) { 370 if (!ref) {
371 printk(KERN_WARNING "%s: Failed to select rate control " 371 wiphy_warn(local->hw.wiphy,
372 "algorithm\n", wiphy_name(local->hw.wiphy)); 372 "Failed to select rate control algorithm\n");
373 return -ENOENT; 373 return -ENOENT;
374 } 374 }
375 375
@@ -380,9 +380,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
380 sta_info_flush(local, NULL); 380 sta_info_flush(local, NULL);
381 } 381 }
382 382
383 printk(KERN_DEBUG "%s: Selected rate control " 383 wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n",
384 "algorithm '%s'\n", wiphy_name(local->hw.wiphy), 384 ref->ops->name);
385 ref->ops->name);
386 385
387 return 0; 386 return 0;
388} 387}
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 47438b4a9af5..135f36fd4d5d 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -162,7 +162,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
162 file_info->next_entry = (file_info->next_entry + 1) % 162 file_info->next_entry = (file_info->next_entry + 1) %
163 RC_PID_EVENT_RING_SIZE; 163 RC_PID_EVENT_RING_SIZE;
164 164
165 /* Print information about the event. Note that userpace needs to 165 /* Print information about the event. Note that userspace needs to
166 * provide large enough buffers. */ 166 * provide large enough buffers. */
167 length = length < RC_PID_PRINT_BUF_SIZE ? 167 length = length < RC_PID_PRINT_BUF_SIZE ?
168 length : RC_PID_PRINT_BUF_SIZE; 168 length : RC_PID_PRINT_BUF_SIZE;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fa0f37e4afe4..ac205a33690f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -538,20 +538,12 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
538 int index, 538 int index,
539 struct sk_buff_head *frames) 539 struct sk_buff_head *frames)
540{ 540{
541 struct ieee80211_supported_band *sband;
542 struct ieee80211_rate *rate = NULL;
543 struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; 541 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
544 struct ieee80211_rx_status *status;
545 542
546 if (!skb) 543 if (!skb)
547 goto no_frame; 544 goto no_frame;
548 545
549 status = IEEE80211_SKB_RXCB(skb); 546 /* release the frame from the reorder ring buffer */
550
551 /* release the reordered frames to stack */
552 sband = hw->wiphy->bands[status->band];
553 if (!(status->flag & RX_FLAG_HT))
554 rate = &sband->bitrates[status->rate_idx];
555 tid_agg_rx->stored_mpdu_num--; 547 tid_agg_rx->stored_mpdu_num--;
556 tid_agg_rx->reorder_buf[index] = NULL; 548 tid_agg_rx->reorder_buf[index] = NULL;
557 __skb_queue_tail(frames, skb); 549 __skb_queue_tail(frames, skb);
@@ -580,9 +572,78 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
580 * frames that have not yet been received are assumed to be lost and the skb 572 * frames that have not yet been received are assumed to be lost and the skb
581 * can be released for processing. This may also release other skb's from the 573 * can be released for processing. This may also release other skb's from the
582 * reorder buffer if there are no additional gaps between the frames. 574 * reorder buffer if there are no additional gaps between the frames.
575 *
576 * Callers must hold tid_agg_rx->reorder_lock.
583 */ 577 */
584#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 578#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
585 579
580static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
581 struct tid_ampdu_rx *tid_agg_rx,
582 struct sk_buff_head *frames)
583{
584 int index, j;
585
586 /* release the buffer until next missing frame */
587 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
588 tid_agg_rx->buf_size;
589 if (!tid_agg_rx->reorder_buf[index] &&
590 tid_agg_rx->stored_mpdu_num > 1) {
591 /*
592 * No buffers ready to be released, but check whether any
593 * frames in the reorder buffer have timed out.
594 */
595 int skipped = 1;
596 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
597 j = (j + 1) % tid_agg_rx->buf_size) {
598 if (!tid_agg_rx->reorder_buf[j]) {
599 skipped++;
600 continue;
601 }
602 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
603 HT_RX_REORDER_BUF_TIMEOUT))
604 goto set_release_timer;
605
606#ifdef CONFIG_MAC80211_HT_DEBUG
607 if (net_ratelimit())
608 wiphy_debug(hw->wiphy,
609 "release an RX reorder frame due to timeout on earlier frames\n");
610#endif
611 ieee80211_release_reorder_frame(hw, tid_agg_rx,
612 j, frames);
613
614 /*
615 * Increment the head seq# also for the skipped slots.
616 */
617 tid_agg_rx->head_seq_num =
618 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
619 skipped = 0;
620 }
621 } else while (tid_agg_rx->reorder_buf[index]) {
622 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
623 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
624 tid_agg_rx->buf_size;
625 }
626
627 if (tid_agg_rx->stored_mpdu_num) {
628 j = index = seq_sub(tid_agg_rx->head_seq_num,
629 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
630
631 for (; j != (index - 1) % tid_agg_rx->buf_size;
632 j = (j + 1) % tid_agg_rx->buf_size) {
633 if (tid_agg_rx->reorder_buf[j])
634 break;
635 }
636
637 set_release_timer:
638
639 mod_timer(&tid_agg_rx->reorder_timer,
640 tid_agg_rx->reorder_time[j] +
641 HT_RX_REORDER_BUF_TIMEOUT);
642 } else {
643 del_timer(&tid_agg_rx->reorder_timer);
644 }
645}
646
586/* 647/*
587 * As this function belongs to the RX path it must be under 648 * As this function belongs to the RX path it must be under
588 * rcu_read_lock protection. It returns false if the frame 649 * rcu_read_lock protection. It returns false if the frame
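
The reorder-release logic above indexes the ring buffer with seq_sub() modulo the 12-bit 802.11 sequence space. For reference, the helpers it relies on reduce to roughly the following (a sketch of definitions that live elsewhere in rx.c, reconstructed here rather than taken from this hunk):

/* Sketch of the 12-bit modular sequence arithmetic assumed above. */
#define SEQ_MODULO 0x1000	/* 802.11 sequence numbers: 12 bits */
#define SEQ_MASK   (SEQ_MODULO - 1)

static inline int seq_less(u16 sq1, u16 sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}

static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}
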
@@ -598,14 +659,16 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
598 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 659 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
599 u16 head_seq_num, buf_size; 660 u16 head_seq_num, buf_size;
600 int index; 661 int index;
662 bool ret = true;
601 663
602 buf_size = tid_agg_rx->buf_size; 664 buf_size = tid_agg_rx->buf_size;
603 head_seq_num = tid_agg_rx->head_seq_num; 665 head_seq_num = tid_agg_rx->head_seq_num;
604 666
667 spin_lock(&tid_agg_rx->reorder_lock);
605 /* frame with out of date sequence number */ 668 /* frame with out of date sequence number */
606 if (seq_less(mpdu_seq_num, head_seq_num)) { 669 if (seq_less(mpdu_seq_num, head_seq_num)) {
607 dev_kfree_skb(skb); 670 dev_kfree_skb(skb);
608 return true; 671 goto out;
609 } 672 }
610 673
611 /* 674 /*
@@ -626,7 +689,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
626 /* check if we already stored this frame */ 689 /* check if we already stored this frame */
627 if (tid_agg_rx->reorder_buf[index]) { 690 if (tid_agg_rx->reorder_buf[index]) {
628 dev_kfree_skb(skb); 691 dev_kfree_skb(skb);
629 return true; 692 goto out;
630 } 693 }
631 694
632 /* 695 /*
@@ -636,58 +699,19 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
636 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 699 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
637 tid_agg_rx->stored_mpdu_num == 0) { 700 tid_agg_rx->stored_mpdu_num == 0) {
638 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 701 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
639 return false; 702 ret = false;
703 goto out;
640 } 704 }
641 705
642 /* put the frame in the reordering buffer */ 706 /* put the frame in the reordering buffer */
643 tid_agg_rx->reorder_buf[index] = skb; 707 tid_agg_rx->reorder_buf[index] = skb;
644 tid_agg_rx->reorder_time[index] = jiffies; 708 tid_agg_rx->reorder_time[index] = jiffies;
645 tid_agg_rx->stored_mpdu_num++; 709 tid_agg_rx->stored_mpdu_num++;
646 /* release the buffer until next missing frame */ 710 ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
647 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
648 tid_agg_rx->buf_size;
649 if (!tid_agg_rx->reorder_buf[index] &&
650 tid_agg_rx->stored_mpdu_num > 1) {
651 /*
652 * No buffers ready to be released, but check whether any
653 * frames in the reorder buffer have timed out.
654 */
655 int j;
656 int skipped = 1;
657 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
658 j = (j + 1) % tid_agg_rx->buf_size) {
659 if (!tid_agg_rx->reorder_buf[j]) {
660 skipped++;
661 continue;
662 }
663 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
664 HT_RX_REORDER_BUF_TIMEOUT))
665 break;
666 711
667#ifdef CONFIG_MAC80211_HT_DEBUG 712 out:
668 if (net_ratelimit()) 713 spin_unlock(&tid_agg_rx->reorder_lock);
669 printk(KERN_DEBUG "%s: release an RX reorder " 714 return ret;
670 "frame due to timeout on earlier "
671 "frames\n",
672 wiphy_name(hw->wiphy));
673#endif
674 ieee80211_release_reorder_frame(hw, tid_agg_rx,
675 j, frames);
676
677 /*
678 * Increment the head seq# also for the skipped slots.
679 */
680 tid_agg_rx->head_seq_num =
681 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
682 skipped = 0;
683 }
684 } else while (tid_agg_rx->reorder_buf[index]) {
685 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
686 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
687 tid_agg_rx->buf_size;
688 }
689
690 return true;
691} 715}
692 716
693/* 717/*
@@ -873,6 +897,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
873 897
874 if (!is_multicast_ether_addr(hdr->addr1) && stakey) { 898 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
875 rx->key = stakey; 899 rx->key = stakey;
900 if ((status->flag & RX_FLAG_DECRYPTED) &&
901 (status->flag & RX_FLAG_IV_STRIPPED))
902 return RX_CONTINUE;
876 /* Skip decryption if the frame is not protected. */ 903 /* Skip decryption if the frame is not protected. */
877 if (!ieee80211_has_protected(fc)) 904 if (!ieee80211_has_protected(fc))
878 return RX_CONTINUE; 905 return RX_CONTINUE;
@@ -935,7 +962,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
935 * pairwise or station-to-station keys, but for WEP we allow 962 * pairwise or station-to-station keys, but for WEP we allow
936 * using a key index as well. 963 * using a key index as well.
937 */ 964 */
938 if (rx->key && rx->key->conf.alg != ALG_WEP && 965 if (rx->key && rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
966 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
939 !is_multicast_ether_addr(hdr->addr1)) 967 !is_multicast_ether_addr(hdr->addr1))
940 rx->key = NULL; 968 rx->key = NULL;
941 } 969 }
@@ -951,8 +979,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
951 return RX_DROP_UNUSABLE; 979 return RX_DROP_UNUSABLE;
952 /* the hdr variable is invalid now! */ 980 /* the hdr variable is invalid now! */
953 981
954 switch (rx->key->conf.alg) { 982 switch (rx->key->conf.cipher) {
955 case ALG_WEP: 983 case WLAN_CIPHER_SUITE_WEP40:
984 case WLAN_CIPHER_SUITE_WEP104:
956 /* Check for weak IVs if possible */ 985 /* Check for weak IVs if possible */
957 if (rx->sta && ieee80211_is_data(fc) && 986 if (rx->sta && ieee80211_is_data(fc) &&
958 (!(status->flag & RX_FLAG_IV_STRIPPED) || 987 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
@@ -962,15 +991,21 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
962 991
963 result = ieee80211_crypto_wep_decrypt(rx); 992 result = ieee80211_crypto_wep_decrypt(rx);
964 break; 993 break;
965 case ALG_TKIP: 994 case WLAN_CIPHER_SUITE_TKIP:
966 result = ieee80211_crypto_tkip_decrypt(rx); 995 result = ieee80211_crypto_tkip_decrypt(rx);
967 break; 996 break;
968 case ALG_CCMP: 997 case WLAN_CIPHER_SUITE_CCMP:
969 result = ieee80211_crypto_ccmp_decrypt(rx); 998 result = ieee80211_crypto_ccmp_decrypt(rx);
970 break; 999 break;
971 case ALG_AES_CMAC: 1000 case WLAN_CIPHER_SUITE_AES_CMAC:
972 result = ieee80211_crypto_aes_cmac_decrypt(rx); 1001 result = ieee80211_crypto_aes_cmac_decrypt(rx);
973 break; 1002 break;
1003 default:
1004 /*
1005 * We can reach here only with HW-only algorithms
1006 * but why didn't it decrypt the frame?!
1007 */
1008 return RX_DROP_UNUSABLE;
974 } 1009 }
975 1010
976 /* either the frame has been decrypted or will be dropped */ 1011 /* either the frame has been decrypted or will be dropped */
@@ -1265,7 +1300,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1265 /* This is the first fragment of a new frame. */ 1300 /* This is the first fragment of a new frame. */
1266 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 1301 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1267 rx->queue, &(rx->skb)); 1302 rx->queue, &(rx->skb));
1268 if (rx->key && rx->key->conf.alg == ALG_CCMP && 1303 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1269 ieee80211_has_protected(fc)) { 1304 ieee80211_has_protected(fc)) {
1270 int queue = ieee80211_is_mgmt(fc) ? 1305 int queue = ieee80211_is_mgmt(fc) ?
1271 NUM_RX_DATA_QUEUES : rx->queue; 1306 NUM_RX_DATA_QUEUES : rx->queue;
@@ -1294,7 +1329,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1294 int i; 1329 int i;
1295 u8 pn[CCMP_PN_LEN], *rpn; 1330 u8 pn[CCMP_PN_LEN], *rpn;
1296 int queue; 1331 int queue;
1297 if (!rx->key || rx->key->conf.alg != ALG_CCMP) 1332 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1298 return RX_DROP_UNUSABLE; 1333 return RX_DROP_UNUSABLE;
1299 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 1334 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1300 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 1335 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -1492,7 +1527,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1492 * Allow EAPOL frames to us/the PAE group address regardless 1527 * Allow EAPOL frames to us/the PAE group address regardless
1493 * of whether the frame was encrypted or not. 1528 * of whether the frame was encrypted or not.
1494 */ 1529 */
1495 if (ehdr->h_proto == htons(ETH_P_PAE) && 1530 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1496 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || 1531 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1497 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1532 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1498 return true; 1533 return true;
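
The EAPOL special case above is generalised from a hard-coded htons(ETH_P_PAE) to sdata->control_port_protocol, which the mlme.c hunk earlier in this patch copies from req->crypto.control_port_ethertype at association time. A sketch tying the hunks together (the ETH_P_PAE default at interface setup is an assumption, it is not shown in this diff):

/* Assumed default at interface setup: */
sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);

/* Overridden at association time (mlme.c hunk above): */
sdata->control_port_protocol = req->crypto.control_port_ethertype;

/* So the RX check reduces to a direct ethertype comparison: */
if (ehdr->h_proto == sdata->control_port_protocol)
	/* accept even when the frame was not encrypted */;
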
@@ -1909,13 +1944,36 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1909} 1944}
1910 1945
1911static ieee80211_rx_result debug_noinline 1946static ieee80211_rx_result debug_noinline
1947ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
1948{
1949 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1950
1951 /*
1952 * From here on, look only at management frames.
1953 * Data and control frames are already handled,
1954 * and unknown (reserved) frames are useless.
1955 */
1956 if (rx->skb->len < 24)
1957 return RX_DROP_MONITOR;
1958
1959 if (!ieee80211_is_mgmt(mgmt->frame_control))
1960 return RX_DROP_MONITOR;
1961
1962 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1963 return RX_DROP_MONITOR;
1964
1965 if (ieee80211_drop_unencrypted_mgmt(rx))
1966 return RX_DROP_UNUSABLE;
1967
1968 return RX_CONTINUE;
1969}
1970
1971static ieee80211_rx_result debug_noinline
1912ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 1972ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1913{ 1973{
1914 struct ieee80211_local *local = rx->local; 1974 struct ieee80211_local *local = rx->local;
1915 struct ieee80211_sub_if_data *sdata = rx->sdata; 1975 struct ieee80211_sub_if_data *sdata = rx->sdata;
1916 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1976 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1917 struct sk_buff *nskb;
1918 struct ieee80211_rx_status *status;
1919 int len = rx->skb->len; 1977 int len = rx->skb->len;
1920 1978
1921 if (!ieee80211_is_action(mgmt->frame_control)) 1979 if (!ieee80211_is_action(mgmt->frame_control))
@@ -1931,9 +1989,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1931 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1989 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1932 return RX_DROP_UNUSABLE; 1990 return RX_DROP_UNUSABLE;
1933 1991
1934 if (ieee80211_drop_unencrypted_mgmt(rx))
1935 return RX_DROP_UNUSABLE;
1936
1937 switch (mgmt->u.action.category) { 1992 switch (mgmt->u.action.category) {
1938 case WLAN_CATEGORY_BACK: 1993 case WLAN_CATEGORY_BACK:
1939 /* 1994 /*
@@ -2024,17 +2079,36 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2024 goto queue; 2079 goto queue;
2025 } 2080 }
2026 2081
2082 return RX_CONTINUE;
2083
2027 invalid: 2084 invalid:
2028 /* 2085 rx->flags |= IEEE80211_MALFORMED_ACTION_FRM;
2029 * For AP mode, hostapd is responsible for handling any action 2086 /* will return in the next handlers */
2030 * frames that we didn't handle, including returning unknown 2087 return RX_CONTINUE;
2031 * ones. For all other modes we will return them to the sender, 2088
2032 * setting the 0x80 bit in the action category, as required by 2089 handled:
2033 * 802.11-2007 7.3.1.11. 2090 if (rx->sta)
2034 */ 2091 rx->sta->rx_packets++;
2035 if (sdata->vif.type == NL80211_IFTYPE_AP || 2092 dev_kfree_skb(rx->skb);
2036 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2093 return RX_QUEUED;
2037 return RX_DROP_MONITOR; 2094
2095 queue:
2096 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2097 skb_queue_tail(&sdata->skb_queue, rx->skb);
2098 ieee80211_queue_work(&local->hw, &sdata->work);
2099 if (rx->sta)
2100 rx->sta->rx_packets++;
2101 return RX_QUEUED;
2102}
2103
2104static ieee80211_rx_result debug_noinline
2105ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2106{
2107 struct ieee80211_rx_status *status;
2108
2109 /* skip known-bad action frames and return them in the next handler */
2110 if (rx->flags & IEEE80211_MALFORMED_ACTION_FRM)
2111 return RX_CONTINUE;
2038 2112
2039 /* 2113 /*
2040 * Getting here means the kernel doesn't know how to handle 2114 * Getting here means the kernel doesn't know how to handle
@@ -2044,10 +2118,44 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2044 */ 2118 */
2045 status = IEEE80211_SKB_RXCB(rx->skb); 2119 status = IEEE80211_SKB_RXCB(rx->skb);
2046 2120
2047 if (cfg80211_rx_action(rx->sdata->dev, status->freq, 2121 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2048 rx->skb->data, rx->skb->len, 2122 rx->skb->data, rx->skb->len,
2049 GFP_ATOMIC)) 2123 GFP_ATOMIC)) {
2050 goto handled; 2124 if (rx->sta)
2125 rx->sta->rx_packets++;
2126 dev_kfree_skb(rx->skb);
2127 return RX_QUEUED;
2128 }
2129
2130
2131 return RX_CONTINUE;
2132}
2133
2134static ieee80211_rx_result debug_noinline
2135ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2136{
2137 struct ieee80211_local *local = rx->local;
2138 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2139 struct sk_buff *nskb;
2140 struct ieee80211_sub_if_data *sdata = rx->sdata;
2141
2142 if (!ieee80211_is_action(mgmt->frame_control))
2143 return RX_CONTINUE;
2144
2145 /*
2146 * For AP mode, hostapd is responsible for handling any action
2147 * frames that we didn't handle, including returning unknown
2148 * ones. For all other modes we will return them to the sender,
2149 * setting the 0x80 bit in the action category, as required by
2150 * 802.11-2007 7.3.1.11.
2151 * Newer versions of hostapd shall also use the management frame
2152 * registration mechanisms, but older ones still use cooked
2153 * monitor interfaces so push all frames there.
2154 */
2155 if (!(rx->flags & IEEE80211_MALFORMED_ACTION_FRM) &&
2156 (sdata->vif.type == NL80211_IFTYPE_AP ||
2157 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2158 return RX_DROP_MONITOR;
2051 2159
2052 /* do not return rejected action frames */ 2160 /* do not return rejected action frames */
2053 if (mgmt->u.action.category & 0x80) 2161 if (mgmt->u.action.category & 0x80)
@@ -2066,20 +2174,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2066 2174
2067 ieee80211_tx_skb(rx->sdata, nskb); 2175 ieee80211_tx_skb(rx->sdata, nskb);
2068 } 2176 }
2069
2070 handled:
2071 if (rx->sta)
2072 rx->sta->rx_packets++;
2073 dev_kfree_skb(rx->skb); 2177 dev_kfree_skb(rx->skb);
2074 return RX_QUEUED; 2178 return RX_QUEUED;
2075
2076 queue:
2077 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2078 skb_queue_tail(&sdata->skb_queue, rx->skb);
2079 ieee80211_queue_work(&local->hw, &sdata->work);
2080 if (rx->sta)
2081 rx->sta->rx_packets++;
2082 return RX_QUEUED;
2083} 2179}
2084 2180
2085static ieee80211_rx_result debug_noinline 2181static ieee80211_rx_result debug_noinline
@@ -2090,15 +2186,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2090 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2186 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2091 __le16 stype; 2187 __le16 stype;
2092 2188
2093 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2094 return RX_DROP_MONITOR;
2095
2096 if (rx->skb->len < 24)
2097 return RX_DROP_MONITOR;
2098
2099 if (ieee80211_drop_unencrypted_mgmt(rx))
2100 return RX_DROP_UNUSABLE;
2101
2102 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb); 2189 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2103 if (rxs != RX_CONTINUE) 2190 if (rxs != RX_CONTINUE)
2104 return rxs; 2191 return rxs;
@@ -2267,19 +2354,46 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2267 dev_kfree_skb(skb); 2354 dev_kfree_skb(skb);
2268} 2355}
2269 2356
2357static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2358 ieee80211_rx_result res)
2359{
2360 switch (res) {
2361 case RX_DROP_MONITOR:
2362 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2363 if (rx->sta)
2364 rx->sta->rx_dropped++;
2365 /* fall through */
2366 case RX_CONTINUE: {
2367 struct ieee80211_rate *rate = NULL;
2368 struct ieee80211_supported_band *sband;
2369 struct ieee80211_rx_status *status;
2370
2371 status = IEEE80211_SKB_RXCB((rx->skb));
2372
2373 sband = rx->local->hw.wiphy->bands[status->band];
2374 if (!(status->flag & RX_FLAG_HT))
2375 rate = &sband->bitrates[status->rate_idx];
2376
2377 ieee80211_rx_cooked_monitor(rx, rate);
2378 break;
2379 }
2380 case RX_DROP_UNUSABLE:
2381 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2382 if (rx->sta)
2383 rx->sta->rx_dropped++;
2384 dev_kfree_skb(rx->skb);
2385 break;
2386 case RX_QUEUED:
2387 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2388 break;
2389 }
2390}
2270 2391
2271static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, 2392static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
2272 struct ieee80211_rx_data *rx, 2393 struct sk_buff_head *frames)
2273 struct sk_buff *skb,
2274 struct ieee80211_rate *rate)
2275{ 2394{
2276 struct sk_buff_head reorder_release;
2277 ieee80211_rx_result res = RX_DROP_MONITOR; 2395 ieee80211_rx_result res = RX_DROP_MONITOR;
2278 2396 struct sk_buff *skb;
2279 __skb_queue_head_init(&reorder_release);
2280
2281 rx->skb = skb;
2282 rx->sdata = sdata;
2283 2397
2284#define CALL_RXH(rxh) \ 2398#define CALL_RXH(rxh) \
2285 do { \ 2399 do { \
@@ -2288,17 +2402,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2288 goto rxh_next; \ 2402 goto rxh_next; \
2289 } while (0); 2403 } while (0);
2290 2404
2291 /* 2405 while ((skb = __skb_dequeue(frames))) {
2292 * NB: the rxh_next label works even if we jump
2293 * to it from here because then the list will
2294 * be empty, which is a trivial check
2295 */
2296 CALL_RXH(ieee80211_rx_h_passive_scan)
2297 CALL_RXH(ieee80211_rx_h_check)
2298
2299 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2300
2301 while ((skb = __skb_dequeue(&reorder_release))) {
2302 /* 2406 /*
2303 * all the other fields are valid across frames 2407 * all the other fields are valid across frames
2304 * that belong to an aMPDU since they are on the 2408 * that belong to an aMPDU since they are on the
@@ -2316,42 +2420,95 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2316 CALL_RXH(ieee80211_rx_h_remove_qos_control) 2420 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2317 CALL_RXH(ieee80211_rx_h_amsdu) 2421 CALL_RXH(ieee80211_rx_h_amsdu)
2318#ifdef CONFIG_MAC80211_MESH 2422#ifdef CONFIG_MAC80211_MESH
2319 if (ieee80211_vif_is_mesh(&sdata->vif)) 2423 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2320 CALL_RXH(ieee80211_rx_h_mesh_fwding); 2424 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2321#endif 2425#endif
2322 CALL_RXH(ieee80211_rx_h_data) 2426 CALL_RXH(ieee80211_rx_h_data)
2323 2427
2324 /* special treatment -- needs the queue */ 2428 /* special treatment -- needs the queue */
2325 res = ieee80211_rx_h_ctrl(rx, &reorder_release); 2429 res = ieee80211_rx_h_ctrl(rx, frames);
2326 if (res != RX_CONTINUE) 2430 if (res != RX_CONTINUE)
2327 goto rxh_next; 2431 goto rxh_next;
2328 2432
2433 CALL_RXH(ieee80211_rx_h_mgmt_check)
2329 CALL_RXH(ieee80211_rx_h_action) 2434 CALL_RXH(ieee80211_rx_h_action)
2435 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2436 CALL_RXH(ieee80211_rx_h_action_return)
2330 CALL_RXH(ieee80211_rx_h_mgmt) 2437 CALL_RXH(ieee80211_rx_h_mgmt)
2331 2438
2439 rxh_next:
2440 ieee80211_rx_handlers_result(rx, res);
2441
2332#undef CALL_RXH 2442#undef CALL_RXH
2443 }
2444}
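
The loop above runs the per-frame handler chain over whatever the reorder buffer released, and CALL_RXH bails out to rxh_next as soon as a handler returns anything but RX_CONTINUE. A compact, compilable sketch of the same macro pattern (handlers and results are stand-ins, not mac80211 API):

#include <stdio.h>

enum result { CONTINUE, QUEUED, DROP };

/* Stand-in handlers; h_drop_odd rejects odd frame ids. */
static enum result h_decrypt(int id)  { (void)id; return CONTINUE; }
static enum result h_drop_odd(int id) { return (id & 1) ? DROP : CONTINUE; }
static enum result h_deliver(int id)  { printf("delivered %d\n", id); return QUEUED; }

static void run_handlers(const int *frames, int n)
{
    enum result res = CONTINUE;
    int i;

#define CALL_RXH(rxh)              \
    do {                           \
        res = rxh(frames[i]);      \
        if (res != CONTINUE)       \
            goto rxh_next;         \
    } while (0)

    for (i = 0; i < n; i++) {
        CALL_RXH(h_decrypt);
        CALL_RXH(h_drop_odd);
        CALL_RXH(h_deliver);
rxh_next:
        if (res == DROP)
            printf("dropped %d\n", frames[i]);
    }
#undef CALL_RXH
}

int main(void)
{
    int frames[] = { 2, 3, 4 };

    run_handlers(frames, 3);   /* delivers 2 and 4, drops 3 */
    return 0;
}
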
2445
2446static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2447 struct ieee80211_rx_data *rx,
2448 struct sk_buff *skb)
2449{
2450 struct sk_buff_head reorder_release;
2451 ieee80211_rx_result res = RX_DROP_MONITOR;
2452
2453 __skb_queue_head_init(&reorder_release);
2454
2455 rx->skb = skb;
2456 rx->sdata = sdata;
2457
2458#define CALL_RXH(rxh) \
2459 do { \
2460 res = rxh(rx); \
2461 if (res != RX_CONTINUE) \
2462 goto rxh_next; \
2463 } while (0);
2464
2465 CALL_RXH(ieee80211_rx_h_passive_scan)
2466 CALL_RXH(ieee80211_rx_h_check)
2467
2468 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2469
2470 ieee80211_rx_handlers(rx, &reorder_release);
2471 return;
2333 2472
2334 rxh_next: 2473 rxh_next:
2335 switch (res) { 2474 ieee80211_rx_handlers_result(rx, res);
2336 case RX_DROP_MONITOR: 2475
2337 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 2476#undef CALL_RXH
2338 if (rx->sta) 2477}
2339 rx->sta->rx_dropped++; 2478
2340 /* fall through */ 2479/*
2341 case RX_CONTINUE: 2480 * This function makes calls into the RX path. Therefore the
2342 ieee80211_rx_cooked_monitor(rx, rate); 2481 * caller must hold the sta_info->lock and everything has to
2343 break; 2482 * be under rcu_read_lock protection as well.
2344 case RX_DROP_UNUSABLE: 2483 */
2345 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 2484void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2346 if (rx->sta) 2485{
2347 rx->sta->rx_dropped++; 2486 struct sk_buff_head frames;
2348 dev_kfree_skb(rx->skb); 2487 struct ieee80211_rx_data rx = { };
2349 break; 2488 struct tid_ampdu_rx *tid_agg_rx;
2350 case RX_QUEUED: 2489
2351 I802_DEBUG_INC(sdata->local->rx_handlers_queued); 2490 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2352 break; 2491 if (!tid_agg_rx)
2353 } 2492 return;
2354 } 2493
2494 __skb_queue_head_init(&frames);
2495
2496 /* construct rx struct */
2497 rx.sta = sta;
2498 rx.sdata = sta->sdata;
2499 rx.local = sta->local;
2500 rx.queue = tid;
2501 rx.flags |= IEEE80211_RX_RA_MATCH;
2502
2503 if (unlikely(test_bit(SCAN_HW_SCANNING, &sta->local->scanning) ||
2504 test_bit(SCAN_OFF_CHANNEL, &sta->local->scanning)))
2505 rx.flags |= IEEE80211_RX_IN_SCAN;
2506
2507 spin_lock(&tid_agg_rx->reorder_lock);
2508 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
2509 spin_unlock(&tid_agg_rx->reorder_lock);
2510
2511 ieee80211_rx_handlers(&rx, &frames);
2355} 2512}
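
ieee80211_release_reorder_timeout shows the locking split this rework aims for: frames are moved out of the reorder buffer while reorder_lock is held, and the heavyweight handler chain then runs on the collected list with the spinlock already dropped. A toy two-phase version of that pattern, assuming nothing from the kernel:

#include <pthread.h>
#include <stdio.h>

#define BUF_SZ 8

/* Toy reorder buffer: slots hold frame ids, 0 means empty. */
static pthread_spinlock_t reorder_lock;
static int reorder_buf[BUF_SZ];

/* Phase 1: move expired frames out while holding the lock. */
static int release_expired(int *out)
{
    int i, n = 0;

    pthread_spin_lock(&reorder_lock);
    for (i = 0; i < BUF_SZ; i++) {
        if (reorder_buf[i]) {
            out[n++] = reorder_buf[i];
            reorder_buf[i] = 0;
        }
    }
    pthread_spin_unlock(&reorder_lock);
    return n;
}

int main(void)
{
    int frames[BUF_SZ], n, i;

    pthread_spin_init(&reorder_lock, PTHREAD_PROCESS_PRIVATE);
    reorder_buf[2] = 42;
    reorder_buf[5] = 43;

    n = release_expired(frames);
    /* Phase 2: run the (potentially slow) handlers lock-free. */
    for (i = 0; i < n; i++)
        printf("processing frame %d\n", frames[i]);
    return 0;
}
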
2356 2513
2357/* main receive path */ 2514/* main receive path */
@@ -2433,7 +2590,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2433 break; 2590 break;
2434 case NL80211_IFTYPE_MONITOR: 2591 case NL80211_IFTYPE_MONITOR:
2435 case NL80211_IFTYPE_UNSPECIFIED: 2592 case NL80211_IFTYPE_UNSPECIFIED:
2436 case __NL80211_IFTYPE_AFTER_LAST: 2593 case NUM_NL80211_IFTYPES:
2437 /* should never get here */ 2594 /* should never get here */
2438 WARN_ON(1); 2595 WARN_ON(1);
2439 break; 2596 break;
@@ -2447,8 +2604,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2447 * be called with rcu_read_lock protection. 2604 * be called with rcu_read_lock protection.
2448 */ 2605 */
2449static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 2606static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2450 struct sk_buff *skb, 2607 struct sk_buff *skb)
2451 struct ieee80211_rate *rate)
2452{ 2608{
2453 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2609 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2454 struct ieee80211_local *local = hw_to_local(hw); 2610 struct ieee80211_local *local = hw_to_local(hw);
@@ -2550,13 +2706,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2550 skb_new = skb_copy(skb, GFP_ATOMIC); 2706 skb_new = skb_copy(skb, GFP_ATOMIC);
2551 if (!skb_new) { 2707 if (!skb_new) {
2552 if (net_ratelimit()) 2708 if (net_ratelimit())
2553 printk(KERN_DEBUG "%s: failed to copy " 2709 wiphy_debug(local->hw.wiphy,
2554 "multicast frame for %s\n", 2710 "failed to copy multicast frame for %s\n",
2555 wiphy_name(local->hw.wiphy), 2711 prev->name);
2556 prev->name);
2557 goto next; 2712 goto next;
2558 } 2713 }
2559 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate); 2714 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2560next: 2715next:
2561 prev = sdata; 2716 prev = sdata;
2562 } 2717 }
@@ -2572,7 +2727,7 @@ next:
2572 } 2727 }
2573 } 2728 }
2574 if (prev) 2729 if (prev)
2575 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); 2730 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2576 else 2731 else
2577 dev_kfree_skb(skb); 2732 dev_kfree_skb(skb);
2578} 2733}
@@ -2615,28 +2770,37 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2615 if (WARN_ON(!local->started)) 2770 if (WARN_ON(!local->started))
2616 goto drop; 2771 goto drop;
2617 2772
2618 if (status->flag & RX_FLAG_HT) { 2773 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
2619 /* 2774 /*
2620 * rate_idx is MCS index, which can be [0-76] as documented on: 2775 * Validate the rate, unless a PLCP error means that
2621 * 2776 * we probably can't have a valid rate here anyway.
2622 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2623 *
2624 * Anything else would be some sort of driver or hardware error.
2625 * The driver should catch hardware errors.
2626 */ 2777 */
2627 if (WARN((status->rate_idx < 0 || 2778
2628 status->rate_idx > 76), 2779 if (status->flag & RX_FLAG_HT) {
2629 "Rate marked as an HT rate but passed " 2780 /*
2630 "status->rate_idx is not " 2781 * rate_idx is MCS index, which can be [0-76]
2631 "an MCS index [0-76]: %d (0x%02x)\n", 2782 * as documented on:
2632 status->rate_idx, 2783 *
2633 status->rate_idx)) 2784 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2634 goto drop; 2785 *
2635 } else { 2786 * Anything else would be some sort of driver or
2636 if (WARN_ON(status->rate_idx < 0 || 2787 * hardware error. The driver should catch hardware
2637 status->rate_idx >= sband->n_bitrates)) 2788 * errors.
2638 goto drop; 2789 */
2639 rate = &sband->bitrates[status->rate_idx]; 2790 if (WARN((status->rate_idx < 0 ||
2791 status->rate_idx > 76),
2792 "Rate marked as an HT rate but passed "
2793 "status->rate_idx is not "
2794 "an MCS index [0-76]: %d (0x%02x)\n",
2795 status->rate_idx,
2796 status->rate_idx))
2797 goto drop;
2798 } else {
2799 if (WARN_ON(status->rate_idx < 0 ||
2800 status->rate_idx >= sband->n_bitrates))
2801 goto drop;
2802 rate = &sband->bitrates[status->rate_idx];
2803 }
2640 } 2804 }
2641 2805
2642 /* 2806 /*
@@ -2658,7 +2822,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2658 return; 2822 return;
2659 } 2823 }
2660 2824
2661 __ieee80211_rx_handle_packet(hw, skb, rate); 2825 __ieee80211_rx_handle_packet(hw, skb);
2662 2826
2663 rcu_read_unlock(); 2827 rcu_read_unlock();
2664 2828
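
The reshuffled validation skips the rate checks entirely when the PLCP CRC failed, since rate information is meaningless for such frames; otherwise an HT frame must carry an MCS index in [0, 76] and a legacy frame an index below the band's n_bitrates. A standalone sketch of that decision (the flag values are arbitrary):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_FAILED_PLCP_CRC 0x1
#define FLAG_HT              0x2

/* Hypothetical status check; n_bitrates stands in for the band table size. */
static bool rate_is_valid(unsigned flags, int rate_idx, int n_bitrates)
{
    if (flags & FLAG_FAILED_PLCP_CRC)
        return true;    /* no usable rate info; don't judge it */
    if (flags & FLAG_HT)
        return rate_idx >= 0 && rate_idx <= 76;  /* MCS index */
    return rate_idx >= 0 && rate_idx < n_bitrates;  /* legacy table */
}

int main(void)
{
    printf("%d\n", rate_is_valid(FLAG_HT, 80, 12));              /* 0 */
    printf("%d\n", rate_is_valid(FLAG_FAILED_PLCP_CRC, 80, 12)); /* 1 */
    return 0;
}
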
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 872d7b6ef6b3..d60389ba9b95 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -248,14 +248,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
248 return true; 248 return true;
249} 249}
250 250
251void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 251static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
252{ 252{
253 struct ieee80211_local *local = hw_to_local(hw); 253 struct ieee80211_local *local = hw_to_local(hw);
254 bool was_hw_scan; 254 bool was_hw_scan;
255 255
256 trace_api_scan_completed(local, aborted); 256 mutex_lock(&local->mtx);
257
258 mutex_lock(&local->scan_mtx);
259 257
260 /* 258 /*
261 * It's ok to abort a not-yet-running scan (that 259 * It's ok to abort a not-yet-running scan (that
@@ -267,7 +265,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
267 aborted = true; 265 aborted = true;
268 266
269 if (WARN_ON(!local->scan_req)) { 267 if (WARN_ON(!local->scan_req)) {
270 mutex_unlock(&local->scan_mtx); 268 mutex_unlock(&local->mtx);
271 return; 269 return;
272 } 270 }
273 271
@@ -275,7 +273,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
275 if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) { 273 if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
276 ieee80211_queue_delayed_work(&local->hw, 274 ieee80211_queue_delayed_work(&local->hw,
277 &local->scan_work, 0); 275 &local->scan_work, 0);
278 mutex_unlock(&local->scan_mtx); 276 mutex_unlock(&local->mtx);
279 return; 277 return;
280 } 278 }
281 279
@@ -291,7 +289,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
291 local->scan_channel = NULL; 289 local->scan_channel = NULL;
292 290
293 /* we only have to protect scan_req and hw/sw scan */ 291 /* we only have to protect scan_req and hw/sw scan */
294 mutex_unlock(&local->scan_mtx); 292 mutex_unlock(&local->mtx);
295 293
296 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 294 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
297 if (was_hw_scan) 295 if (was_hw_scan)
@@ -304,12 +302,26 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
304 ieee80211_offchannel_return(local, true); 302 ieee80211_offchannel_return(local, true);
305 303
306 done: 304 done:
305 mutex_lock(&local->mtx);
307 ieee80211_recalc_idle(local); 306 ieee80211_recalc_idle(local);
307 mutex_unlock(&local->mtx);
308 ieee80211_mlme_notify_scan_completed(local); 308 ieee80211_mlme_notify_scan_completed(local);
309 ieee80211_ibss_notify_scan_completed(local); 309 ieee80211_ibss_notify_scan_completed(local);
310 ieee80211_mesh_notify_scan_completed(local); 310 ieee80211_mesh_notify_scan_completed(local);
311 ieee80211_queue_work(&local->hw, &local->work_work); 311 ieee80211_queue_work(&local->hw, &local->work_work);
312} 312}
313
314void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
315{
316 struct ieee80211_local *local = hw_to_local(hw);
317
318 trace_api_scan_completed(local, aborted);
319
320 set_bit(SCAN_COMPLETED, &local->scanning);
321 if (aborted)
322 set_bit(SCAN_ABORTED, &local->scanning);
323 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
324}
313EXPORT_SYMBOL(ieee80211_scan_completed); 325EXPORT_SYMBOL(ieee80211_scan_completed);
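
ieee80211_scan_completed is now safe to call from any driver context: it only records SCAN_COMPLETED (and SCAN_ABORTED) in the scanning bitmap and schedules scan_work, which does the real teardown under local->mtx. A userspace model of the defer-to-worker handshake; the bit names follow the patch, everything else is invented:

#include <stdatomic.h>
#include <stdio.h>

#define SCAN_COMPLETED (1u << 0)
#define SCAN_ABORTED   (1u << 1)

static atomic_uint scanning;

/* Driver-facing entry point: record the event and defer. */
static void scan_completed(int aborted)
{
    atomic_fetch_or(&scanning, SCAN_COMPLETED);
    if (aborted)
        atomic_fetch_or(&scanning, SCAN_ABORTED);
    /* the real code queues delayed work here */
}

/* Worker: consume the flags and finish the scan. */
static void scan_work(void)
{
    unsigned old = atomic_fetch_and(&scanning,
                                    ~(SCAN_COMPLETED | SCAN_ABORTED));

    if (old & SCAN_COMPLETED)
        printf("finishing scan, aborted=%d\n", !!(old & SCAN_ABORTED));
}

int main(void)
{
    scan_completed(1);
    scan_work();    /* prints: finishing scan, aborted=1 */
    return 0;
}
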
314 326
315static int ieee80211_start_sw_scan(struct ieee80211_local *local) 327static int ieee80211_start_sw_scan(struct ieee80211_local *local)
@@ -447,7 +459,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
447 459
448 /* if no more bands/channels left, complete scan and advance to the idle state */ 460 /* if no more bands/channels left, complete scan and advance to the idle state */
449 if (local->scan_channel_idx >= local->scan_req->n_channels) { 461 if (local->scan_channel_idx >= local->scan_req->n_channels) {
450 ieee80211_scan_completed(&local->hw, false); 462 __ieee80211_scan_completed(&local->hw, false);
451 return 1; 463 return 1;
452 } 464 }
453 465
@@ -639,17 +651,25 @@ void ieee80211_scan_work(struct work_struct *work)
639 struct ieee80211_sub_if_data *sdata = local->scan_sdata; 651 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
640 unsigned long next_delay = 0; 652 unsigned long next_delay = 0;
641 653
642 mutex_lock(&local->scan_mtx); 654 if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
655 bool aborted;
656
657 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
658 __ieee80211_scan_completed(&local->hw, aborted);
659 return;
660 }
661
662 mutex_lock(&local->mtx);
643 if (!sdata || !local->scan_req) { 663 if (!sdata || !local->scan_req) {
644 mutex_unlock(&local->scan_mtx); 664 mutex_unlock(&local->mtx);
645 return; 665 return;
646 } 666 }
647 667
648 if (local->hw_scan_req) { 668 if (local->hw_scan_req) {
649 int rc = drv_hw_scan(local, sdata, local->hw_scan_req); 669 int rc = drv_hw_scan(local, sdata, local->hw_scan_req);
650 mutex_unlock(&local->scan_mtx); 670 mutex_unlock(&local->mtx);
651 if (rc) 671 if (rc)
652 ieee80211_scan_completed(&local->hw, true); 672 __ieee80211_scan_completed(&local->hw, true);
653 return; 673 return;
654 } 674 }
655 675
@@ -661,20 +681,20 @@ void ieee80211_scan_work(struct work_struct *work)
661 local->scan_sdata = NULL; 681 local->scan_sdata = NULL;
662 682
663 rc = __ieee80211_start_scan(sdata, req); 683 rc = __ieee80211_start_scan(sdata, req);
664 mutex_unlock(&local->scan_mtx); 684 mutex_unlock(&local->mtx);
665 685
666 if (rc) 686 if (rc)
667 ieee80211_scan_completed(&local->hw, true); 687 __ieee80211_scan_completed(&local->hw, true);
668 return; 688 return;
669 } 689 }
670 690
671 mutex_unlock(&local->scan_mtx); 691 mutex_unlock(&local->mtx);
672 692
673 /* 693 /*
674 * Avoid re-scheduling when the sdata is going away. 694 * Avoid re-scheduling when the sdata is going away.
675 */ 695 */
676 if (!ieee80211_sdata_running(sdata)) { 696 if (!ieee80211_sdata_running(sdata)) {
677 ieee80211_scan_completed(&local->hw, true); 697 __ieee80211_scan_completed(&local->hw, true);
678 return; 698 return;
679 } 699 }
680 700
@@ -711,9 +731,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
711{ 731{
712 int res; 732 int res;
713 733
714 mutex_lock(&sdata->local->scan_mtx); 734 mutex_lock(&sdata->local->mtx);
715 res = __ieee80211_start_scan(sdata, req); 735 res = __ieee80211_start_scan(sdata, req);
716 mutex_unlock(&sdata->local->scan_mtx); 736 mutex_unlock(&sdata->local->mtx);
717 737
718 return res; 738 return res;
719} 739}
@@ -726,7 +746,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
726 int ret = -EBUSY; 746 int ret = -EBUSY;
727 enum ieee80211_band band; 747 enum ieee80211_band band;
728 748
729 mutex_lock(&local->scan_mtx); 749 mutex_lock(&local->mtx);
730 750
731 /* busy scanning */ 751 /* busy scanning */
732 if (local->scan_req) 752 if (local->scan_req)
@@ -761,7 +781,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
761 781
762 ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req); 782 ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
763 unlock: 783 unlock:
764 mutex_unlock(&local->scan_mtx); 784 mutex_unlock(&local->mtx);
765 return ret; 785 return ret;
766} 786}
767 787
@@ -775,11 +795,11 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
775 * Only call this function when a scan can't be 795 * Only call this function when a scan can't be
776 * queued -- mostly at suspend under RTNL. 796 * queued -- mostly at suspend under RTNL.
777 */ 797 */
778 mutex_lock(&local->scan_mtx); 798 mutex_lock(&local->mtx);
779 abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) || 799 abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
780 (!local->scanning && local->scan_req); 800 (!local->scanning && local->scan_req);
781 mutex_unlock(&local->scan_mtx); 801 mutex_unlock(&local->mtx);
782 802
783 if (abortscan) 803 if (abortscan)
784 ieee80211_scan_completed(&local->hw, true); 804 __ieee80211_scan_completed(&local->hw, true);
785} 805}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 6d86f0c1ad04..687077e49dc6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -174,8 +174,7 @@ static void __sta_info_free(struct ieee80211_local *local,
174 } 174 }
175 175
176#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 176#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
177 printk(KERN_DEBUG "%s: Destroyed STA %pM\n", 177 wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
178 wiphy_name(local->hw.wiphy), sta->sta.addr);
179#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 178#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
180 179
181 kfree(sta); 180 kfree(sta);
@@ -262,8 +261,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
262 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 261 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
263 262
264#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 263#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
265 printk(KERN_DEBUG "%s: Allocated STA %pM\n", 264 wiphy_debug(local->hw.wiphy, "Allocated STA %pM\n", sta->sta.addr);
266 wiphy_name(local->hw.wiphy), sta->sta.addr);
267#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 265#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
268 266
269#ifdef CONFIG_MAC80211_MESH 267#ifdef CONFIG_MAC80211_MESH
@@ -300,8 +298,9 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
300 sta->uploaded = true; 298 sta->uploaded = true;
301#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 299#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
302 if (async) 300 if (async)
303 printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n", 301 wiphy_debug(local->hw.wiphy,
304 wiphy_name(local->hw.wiphy), sta->sta.addr); 302 "Finished adding IBSS STA %pM\n",
303 sta->sta.addr);
305#endif 304#endif
306 } 305 }
307 306
@@ -411,8 +410,8 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
411 spin_unlock_irqrestore(&local->sta_lock, flags); 410 spin_unlock_irqrestore(&local->sta_lock, flags);
412 411
413#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 412#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
414 printk(KERN_DEBUG "%s: Added IBSS STA %pM\n", 413 wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
415 wiphy_name(local->hw.wiphy), sta->sta.addr); 414 sta->sta.addr);
416#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 415#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
417 416
418 ieee80211_queue_work(&local->hw, &local->sta_finish_work); 417 ieee80211_queue_work(&local->hw, &local->sta_finish_work);
@@ -459,8 +458,7 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
459 } 458 }
460 459
461#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 460#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
462 printk(KERN_DEBUG "%s: Inserted STA %pM\n", 461 wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
463 wiphy_name(local->hw.wiphy), sta->sta.addr);
464#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 462#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
465 463
466 /* move reference to rcu-protected */ 464 /* move reference to rcu-protected */
@@ -690,8 +688,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
690#endif 688#endif
691 689
692#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 690#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
693 printk(KERN_DEBUG "%s: Removed STA %pM\n", 691 wiphy_debug(local->hw.wiphy, "Removed STA %pM\n", sta->sta.addr);
694 wiphy_name(local->hw.wiphy), sta->sta.addr);
695#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 692#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
696 cancel_work_sync(&sta->drv_unblock_wk); 693 cancel_work_sync(&sta->drv_unblock_wk);
697 694
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 54262e72376d..810c5ce98316 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -103,6 +103,7 @@ struct tid_ampdu_tx {
103 * @reorder_buf: buffer to reorder incoming aggregated MPDUs 103 * @reorder_buf: buffer to reorder incoming aggregated MPDUs
104 * @reorder_time: jiffies when skb was added 104 * @reorder_time: jiffies when skb was added
105 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) 105 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
106 * @reorder_timer: releases expired frames from the reorder buffer.
106 * @head_seq_num: head sequence number in reordering buffer. 107 * @head_seq_num: head sequence number in reordering buffer.
107 * @stored_mpdu_num: number of MPDUs in reordering buffer 108 * @stored_mpdu_num: number of MPDUs in reordering buffer
108 * @ssn: Starting Sequence Number expected to be aggregated. 109 * @ssn: Starting Sequence Number expected to be aggregated.
@@ -110,20 +111,25 @@ struct tid_ampdu_tx {
110 * @timeout: reset timer value (in TUs). 111 * @timeout: reset timer value (in TUs).
111 * @dialog_token: dialog token for aggregation session 112 * @dialog_token: dialog token for aggregation session
112 * @rcu_head: RCU head used for freeing this struct 113 * @rcu_head: RCU head used for freeing this struct
114 * @reorder_lock: serializes access to reorder buffer, see below.
113 * 115 *
114 * This structure is protected by RCU and the per-station 116 * This structure is protected by RCU and the per-station
115 * spinlock. Assignments to the array holding it must hold 117 * spinlock. Assignments to the array holding it must hold
116 * the spinlock, only the RX path can access it under RCU 118 * the spinlock.
117 * lock-free. The RX path, since it is single-threaded, 119 *
118 * can even modify the structure without locking since the 120 * The @reorder_lock is used to protect the variables and
119 * only other modifications to it are done when the struct 121 * arrays such as @reorder_buf, @reorder_time, @head_seq_num,
120 can not yet or no longer be found by the RX path. 122 * @stored_mpdu_num from being corrupted by
123 * concurrent access of the RX path and the expired frame
124 * release timer.
121 */ 125 */
122struct tid_ampdu_rx { 126struct tid_ampdu_rx {
123 struct rcu_head rcu_head; 127 struct rcu_head rcu_head;
128 spinlock_t reorder_lock;
124 struct sk_buff **reorder_buf; 129 struct sk_buff **reorder_buf;
125 unsigned long *reorder_time; 130 unsigned long *reorder_time;
126 struct timer_list session_timer; 131 struct timer_list session_timer;
132 struct timer_list reorder_timer;
127 u16 head_seq_num; 133 u16 head_seq_num;
128 u16 stored_mpdu_num; 134 u16 stored_mpdu_num;
129 u16 ssn; 135 u16 ssn;
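
With the release timer able to touch the reorder arrays concurrently with the RX path, the old single-threaded, lock-free assumption is gone; reorder_lock now serializes both sides. A reduced sketch of the layout and the rule every accessor must follow (a pthread mutex standing in for the kernel spinlock):

#include <pthread.h>

/* Reduced model of the per-TID RX aggregation state. */
struct tid_rx {
    pthread_mutex_t reorder_lock;   /* guards everything below */
    void **reorder_buf;             /* out-of-order MPDUs */
    unsigned long *reorder_time;    /* arrival time per slot */
    unsigned short head_seq_num;
    unsigned short stored_mpdu_num;
};

/* Both the RX path and the expiry timer must use this pattern. */
static void touch_reorder_state(struct tid_rx *tid)
{
    pthread_mutex_lock(&tid->reorder_lock);
    tid->head_seq_num++;            /* ...release/store frames... */
    pthread_mutex_unlock(&tid->reorder_lock);
}

int main(void)
{
    struct tid_rx tid = { .reorder_lock = PTHREAD_MUTEX_INITIALIZER };

    touch_reorder_state(&tid);
    return 0;
}
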
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 10caec5ea8fa..571b32bfc54c 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -114,11 +114,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
114 114
115#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 115#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
116 if (net_ratelimit()) 116 if (net_ratelimit())
117 printk(KERN_DEBUG "%s: dropped TX filtered frame, " 117 wiphy_debug(local->hw.wiphy,
118 "queue_len=%d PS=%d @%lu\n", 118 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
119 wiphy_name(local->hw.wiphy), 119 skb_queue_len(&sta->tx_filtered),
120 skb_queue_len(&sta->tx_filtered), 120 !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
121 !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
122#endif 121#endif
123 dev_kfree_skb(skb); 122 dev_kfree_skb(skb);
124} 123}
@@ -296,7 +295,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
296 } 295 }
297 296
298 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) 297 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX)
299 cfg80211_action_tx_status( 298 cfg80211_mgmt_tx_status(
300 skb->dev, (unsigned long) skb, skb->data, skb->len, 299 skb->dev, (unsigned long) skb, skb->data, skb->len,
301 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); 300 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
302 301
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c54db966926b..ccf373788ce9 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -351,8 +351,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
351 351
352 local->total_ps_buffered = total; 352 local->total_ps_buffered = total;
353#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 353#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
354 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", 354 wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
355 wiphy_name(local->hw.wiphy), purged); 355 purged);
356#endif 356#endif
357} 357}
358 358
@@ -509,6 +509,18 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
509} 509}
510 510
511static ieee80211_tx_result debug_noinline 511static ieee80211_tx_result debug_noinline
512ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
513{
514 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
515
516 if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
517 tx->sdata->control_port_no_encrypt))
518 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
519
520 return TX_CONTINUE;
521}
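
The new TX handler generalizes the old hard-coded ETH_P_PAE test: whatever ethertype is configured as the control port protocol gets IEEE80211_TX_INTFL_DONT_ENCRYPT when control_port_no_encrypt is set. A small model of the check (the struct and flag values here are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define DONT_ENCRYPT 0x1
#define ETH_P_PAE    0x888E   /* 802.1X EAPOL, the usual control port */

struct vif_cfg {
    uint16_t control_port_protocol;   /* configured by userspace */
    bool control_port_no_encrypt;
};

static unsigned tx_flags(const struct vif_cfg *cfg, uint16_t proto)
{
    unsigned flags = 0;

    if (proto == cfg->control_port_protocol && cfg->control_port_no_encrypt)
        flags |= DONT_ENCRYPT;
    return flags;
}

int main(void)
{
    struct vif_cfg cfg = { ETH_P_PAE, true };

    printf("EAPOL flags: %#x\n", tx_flags(&cfg, ETH_P_PAE));  /* 0x1 */
    printf("IPv4 flags:  %#x\n", tx_flags(&cfg, 0x0800));     /* 0 */
    return 0;
}
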
522
523static ieee80211_tx_result debug_noinline
512ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) 524ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
513{ 525{
514 struct ieee80211_key *key = NULL; 526 struct ieee80211_key *key = NULL;
@@ -527,7 +539,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
527 else if ((key = rcu_dereference(tx->sdata->default_key))) 539 else if ((key = rcu_dereference(tx->sdata->default_key)))
528 tx->key = key; 540 tx->key = key;
529 else if (tx->sdata->drop_unencrypted && 541 else if (tx->sdata->drop_unencrypted &&
530 (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) && 542 (tx->skb->protocol != tx->sdata->control_port_protocol) &&
531 !(info->flags & IEEE80211_TX_CTL_INJECTED) && 543 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
532 (!ieee80211_is_robust_mgmt_frame(hdr) || 544 (!ieee80211_is_robust_mgmt_frame(hdr) ||
533 (ieee80211_is_action(hdr->frame_control) && 545 (ieee80211_is_action(hdr->frame_control) &&
@@ -543,15 +555,16 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
543 tx->key->tx_rx_count++; 555 tx->key->tx_rx_count++;
544 /* TODO: add threshold stuff again */ 556 /* TODO: add threshold stuff again */
545 557
546 switch (tx->key->conf.alg) { 558 switch (tx->key->conf.cipher) {
547 case ALG_WEP: 559 case WLAN_CIPHER_SUITE_WEP40:
560 case WLAN_CIPHER_SUITE_WEP104:
548 if (ieee80211_is_auth(hdr->frame_control)) 561 if (ieee80211_is_auth(hdr->frame_control))
549 break; 562 break;
550 case ALG_TKIP: 563 case WLAN_CIPHER_SUITE_TKIP:
551 if (!ieee80211_is_data_present(hdr->frame_control)) 564 if (!ieee80211_is_data_present(hdr->frame_control))
552 tx->key = NULL; 565 tx->key = NULL;
553 break; 566 break;
554 case ALG_CCMP: 567 case WLAN_CIPHER_SUITE_CCMP:
555 if (!ieee80211_is_data_present(hdr->frame_control) && 568 if (!ieee80211_is_data_present(hdr->frame_control) &&
556 !ieee80211_use_mfp(hdr->frame_control, tx->sta, 569 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
557 tx->skb)) 570 tx->skb))
@@ -561,7 +574,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
561 IEEE80211_KEY_FLAG_SW_MGMT) && 574 IEEE80211_KEY_FLAG_SW_MGMT) &&
562 ieee80211_is_mgmt(hdr->frame_control); 575 ieee80211_is_mgmt(hdr->frame_control);
563 break; 576 break;
564 case ALG_AES_CMAC: 577 case WLAN_CIPHER_SUITE_AES_CMAC:
565 if (!ieee80211_is_mgmt(hdr->frame_control)) 578 if (!ieee80211_is_mgmt(hdr->frame_control))
566 tx->key = NULL; 579 tx->key = NULL;
567 break; 580 break;
@@ -946,22 +959,31 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
946static ieee80211_tx_result debug_noinline 959static ieee80211_tx_result debug_noinline
947ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) 960ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
948{ 961{
962 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
963
949 if (!tx->key) 964 if (!tx->key)
950 return TX_CONTINUE; 965 return TX_CONTINUE;
951 966
952 switch (tx->key->conf.alg) { 967 switch (tx->key->conf.cipher) {
953 case ALG_WEP: 968 case WLAN_CIPHER_SUITE_WEP40:
969 case WLAN_CIPHER_SUITE_WEP104:
954 return ieee80211_crypto_wep_encrypt(tx); 970 return ieee80211_crypto_wep_encrypt(tx);
955 case ALG_TKIP: 971 case WLAN_CIPHER_SUITE_TKIP:
956 return ieee80211_crypto_tkip_encrypt(tx); 972 return ieee80211_crypto_tkip_encrypt(tx);
957 case ALG_CCMP: 973 case WLAN_CIPHER_SUITE_CCMP:
958 return ieee80211_crypto_ccmp_encrypt(tx); 974 return ieee80211_crypto_ccmp_encrypt(tx);
959 case ALG_AES_CMAC: 975 case WLAN_CIPHER_SUITE_AES_CMAC:
960 return ieee80211_crypto_aes_cmac_encrypt(tx); 976 return ieee80211_crypto_aes_cmac_encrypt(tx);
977 default:
978 /* handle hw-only algorithm */
979 if (info->control.hw_key) {
980 ieee80211_tx_set_protected(tx);
981 return TX_CONTINUE;
982 }
983 break;
984
961 } 985 }
962 986
963 /* not reached */
964 WARN_ON(1);
965 return TX_DROP; 987 return TX_DROP;
966} 988}
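
After the ALG_* to WLAN_CIPHER_SUITE_* conversion, ieee80211_tx_h_encrypt can encounter cipher suites mac80211 has no software implementation for; the new default arm accepts them only when a hardware key is attached, instead of the old unconditional WARN_ON. A sketch of that dispatch shape; the GCMP case and suite constants are illustrative, not part of this patch:

#include <stdio.h>
#include <stdbool.h>

enum tx_result { TX_CONTINUE, TX_DROP };

/* A few IEEE 802.11 cipher suite selectors (00-0F-AC:x). */
#define SUITE_TKIP 0x000FAC02
#define SUITE_CCMP 0x000FAC04
#define SUITE_GCMP 0x000FAC08   /* no software path in this sketch */

static enum tx_result encrypt_frame(unsigned suite, bool has_hw_key)
{
    switch (suite) {
    case SUITE_TKIP:
        return TX_CONTINUE;     /* software TKIP would run here */
    case SUITE_CCMP:
        return TX_CONTINUE;     /* software CCMP would run here */
    default:
        /* hardware-only cipher: fine iff the hw holds the key */
        return has_hw_key ? TX_CONTINUE : TX_DROP;
    }
}

int main(void)
{
    printf("%d\n", encrypt_frame(SUITE_GCMP, true));   /* 0: continue */
    printf("%d\n", encrypt_frame(SUITE_GCMP, false));  /* 1: drop */
    return 0;
}
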
967 989
@@ -1339,6 +1361,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1339 CALL_TXH(ieee80211_tx_h_dynamic_ps); 1361 CALL_TXH(ieee80211_tx_h_dynamic_ps);
1340 CALL_TXH(ieee80211_tx_h_check_assoc); 1362 CALL_TXH(ieee80211_tx_h_check_assoc);
1341 CALL_TXH(ieee80211_tx_h_ps_buf); 1363 CALL_TXH(ieee80211_tx_h_ps_buf);
1364 CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
1342 CALL_TXH(ieee80211_tx_h_select_key); 1365 CALL_TXH(ieee80211_tx_h_select_key);
1343 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) 1366 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1344 CALL_TXH(ieee80211_tx_h_rate_ctrl); 1367 CALL_TXH(ieee80211_tx_h_rate_ctrl);
@@ -1511,8 +1534,8 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1511 I802_DEBUG_INC(local->tx_expand_skb_head); 1534 I802_DEBUG_INC(local->tx_expand_skb_head);
1512 1535
1513 if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { 1536 if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
1514 printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n", 1537 wiphy_debug(local->hw.wiphy,
1515 wiphy_name(local->hw.wiphy)); 1538 "failed to reallocate TX buffer\n");
1516 return -ENOMEM; 1539 return -ENOMEM;
1517 } 1540 }
1518 1541
@@ -1699,7 +1722,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1699 u16 ethertype, hdrlen, meshhdrlen = 0; 1722 u16 ethertype, hdrlen, meshhdrlen = 0;
1700 __le16 fc; 1723 __le16 fc;
1701 struct ieee80211_hdr hdr; 1724 struct ieee80211_hdr hdr;
1702 struct ieee80211s_hdr mesh_hdr; 1725 struct ieee80211s_hdr mesh_hdr __maybe_unused;
1703 const u8 *encaps_data; 1726 const u8 *encaps_data;
1704 int encaps_len, skip_header_bytes; 1727 int encaps_len, skip_header_bytes;
1705 int nh_pos, h_pos; 1728 int nh_pos, h_pos;
@@ -1816,7 +1839,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1816#endif 1839#endif
1817 case NL80211_IFTYPE_STATION: 1840 case NL80211_IFTYPE_STATION:
1818 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); 1841 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1819 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) { 1842 if (sdata->u.mgd.use_4addr &&
1843 cpu_to_be16(ethertype) != sdata->control_port_protocol) {
1820 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1844 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1821 /* RA TA DA SA */ 1845 /* RA TA DA SA */
1822 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); 1846 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
@@ -1869,7 +1893,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1869 if (!ieee80211_vif_is_mesh(&sdata->vif) && 1893 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
1870 unlikely(!is_multicast_ether_addr(hdr.addr1) && 1894 unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1871 !(sta_flags & WLAN_STA_AUTHORIZED) && 1895 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1872 !(ethertype == ETH_P_PAE && 1896 !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
1873 compare_ether_addr(sdata->vif.addr, 1897 compare_ether_addr(sdata->vif.addr,
1874 skb->data + ETH_ALEN) == 0))) { 1898 skb->data + ETH_ALEN) == 0))) {
1875#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1899#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -2068,8 +2092,7 @@ void ieee80211_tx_pending(unsigned long data)
2068 2092
2069 if (skb_queue_empty(&local->pending[i])) 2093 if (skb_queue_empty(&local->pending[i]))
2070 list_for_each_entry_rcu(sdata, &local->interfaces, list) 2094 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2071 netif_tx_wake_queue( 2095 netif_wake_subqueue(sdata->dev, i);
2072 netdev_get_tx_queue(sdata->dev, i));
2073 } 2096 }
2074 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 2097 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
2075 2098
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 748387d45bc0..bd40b11d5ab9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -283,8 +283,11 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
283 283
284 if (skb_queue_empty(&local->pending[queue])) { 284 if (skb_queue_empty(&local->pending[queue])) {
285 rcu_read_lock(); 285 rcu_read_lock();
286 list_for_each_entry_rcu(sdata, &local->interfaces, list) 286 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
287 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); 287 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
288 continue;
289 netif_wake_subqueue(sdata->dev, queue);
290 }
288 rcu_read_unlock(); 291 rcu_read_unlock();
289 } else 292 } else
290 tasklet_schedule(&local->tx_pending_tasklet); 293 tasklet_schedule(&local->tx_pending_tasklet);
@@ -323,7 +326,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
323 326
324 rcu_read_lock(); 327 rcu_read_lock();
325 list_for_each_entry_rcu(sdata, &local->interfaces, list) 328 list_for_each_entry_rcu(sdata, &local->interfaces, list)
326 netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue)); 329 netif_stop_subqueue(sdata->dev, queue);
327 rcu_read_unlock(); 330 rcu_read_unlock();
328} 331}
329 332
@@ -471,7 +474,7 @@ void ieee80211_iterate_active_interfaces(
471 474
472 list_for_each_entry(sdata, &local->interfaces, list) { 475 list_for_each_entry(sdata, &local->interfaces, list) {
473 switch (sdata->vif.type) { 476 switch (sdata->vif.type) {
474 case __NL80211_IFTYPE_AFTER_LAST: 477 case NUM_NL80211_IFTYPES:
475 case NL80211_IFTYPE_UNSPECIFIED: 478 case NL80211_IFTYPE_UNSPECIFIED:
476 case NL80211_IFTYPE_MONITOR: 479 case NL80211_IFTYPE_MONITOR:
477 case NL80211_IFTYPE_AP_VLAN: 480 case NL80211_IFTYPE_AP_VLAN:
@@ -505,7 +508,7 @@ void ieee80211_iterate_active_interfaces_atomic(
505 508
506 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 509 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
507 switch (sdata->vif.type) { 510 switch (sdata->vif.type) {
508 case __NL80211_IFTYPE_AFTER_LAST: 511 case NUM_NL80211_IFTYPES:
509 case NL80211_IFTYPE_UNSPECIFIED: 512 case NL80211_IFTYPE_UNSPECIFIED:
510 case NL80211_IFTYPE_MONITOR: 513 case NL80211_IFTYPE_MONITOR:
511 case NL80211_IFTYPE_AP_VLAN: 514 case NL80211_IFTYPE_AP_VLAN:
@@ -1189,7 +1192,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1189 /* ignore virtual */ 1192 /* ignore virtual */
1190 break; 1193 break;
1191 case NL80211_IFTYPE_UNSPECIFIED: 1194 case NL80211_IFTYPE_UNSPECIFIED:
1192 case __NL80211_IFTYPE_AFTER_LAST: 1195 case NUM_NL80211_IFTYPES:
1193 WARN_ON(1); 1196 WARN_ON(1);
1194 break; 1197 break;
1195 } 1198 }
@@ -1308,7 +1311,7 @@ void ieee80211_recalc_smps(struct ieee80211_local *local,
1308 */ 1311 */
1309 1312
1310 list_for_each_entry(sdata, &local->interfaces, list) { 1313 list_for_each_entry(sdata, &local->interfaces, list) {
1311 if (!netif_running(sdata->dev)) 1314 if (!ieee80211_sdata_running(sdata))
1312 continue; 1315 continue;
1313 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1316 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1314 goto set; 1317 goto set;
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 9ebc8d8a1f5b..f27484c22b9f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -240,7 +240,7 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local,
240 240
241 keyidx = skb->data[hdrlen + 3] >> 6; 241 keyidx = skb->data[hdrlen + 3] >> 6;
242 242
243 if (!key || keyidx != key->conf.keyidx || key->conf.alg != ALG_WEP) 243 if (!key || keyidx != key->conf.keyidx)
244 return -1; 244 return -1;
245 245
246 klen = 3 + key->conf.keylen; 246 klen = 3 + key->conf.keylen;
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 81d4ad64184a..ae344d1ba056 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -43,7 +43,7 @@ enum work_action {
43/* utils */ 43/* utils */
44static inline void ASSERT_WORK_MTX(struct ieee80211_local *local) 44static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
45{ 45{
46 WARN_ON(!mutex_is_locked(&local->work_mtx)); 46 lockdep_assert_held(&local->mtx);
47} 47}
48 48
49/* 49/*
@@ -757,7 +757,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
757 mgmt = (struct ieee80211_mgmt *) skb->data; 757 mgmt = (struct ieee80211_mgmt *) skb->data;
758 fc = le16_to_cpu(mgmt->frame_control); 758 fc = le16_to_cpu(mgmt->frame_control);
759 759
760 mutex_lock(&local->work_mtx); 760 mutex_lock(&local->mtx);
761 761
762 list_for_each_entry(wk, &local->work_list, list) { 762 list_for_each_entry(wk, &local->work_list, list) {
763 const u8 *bssid = NULL; 763 const u8 *bssid = NULL;
@@ -833,7 +833,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
833 WARN(1, "unexpected: %d", rma); 833 WARN(1, "unexpected: %d", rma);
834 } 834 }
835 835
836 mutex_unlock(&local->work_mtx); 836 mutex_unlock(&local->mtx);
837 837
838 if (rma != WORK_ACT_DONE) 838 if (rma != WORK_ACT_DONE)
839 goto out; 839 goto out;
@@ -845,9 +845,9 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
845 case WORK_DONE_REQUEUE: 845 case WORK_DONE_REQUEUE:
846 synchronize_rcu(); 846 synchronize_rcu();
847 wk->started = false; /* restart */ 847 wk->started = false; /* restart */
848 mutex_lock(&local->work_mtx); 848 mutex_lock(&local->mtx);
849 list_add_tail(&wk->list, &local->work_list); 849 list_add_tail(&wk->list, &local->work_list);
850 mutex_unlock(&local->work_mtx); 850 mutex_unlock(&local->mtx);
851 } 851 }
852 852
853 out: 853 out:
@@ -888,9 +888,9 @@ static void ieee80211_work_work(struct work_struct *work)
888 while ((skb = skb_dequeue(&local->work_skb_queue))) 888 while ((skb = skb_dequeue(&local->work_skb_queue)))
889 ieee80211_work_rx_queued_mgmt(local, skb); 889 ieee80211_work_rx_queued_mgmt(local, skb);
890 890
891 ieee80211_recalc_idle(local); 891 mutex_lock(&local->mtx);
892 892
893 mutex_lock(&local->work_mtx); 893 ieee80211_recalc_idle(local);
894 894
895 list_for_each_entry_safe(wk, tmp, &local->work_list, list) { 895 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
896 bool started = wk->started; 896 bool started = wk->started;
@@ -995,20 +995,16 @@ static void ieee80211_work_work(struct work_struct *work)
995 run_again(local, jiffies + HZ/2); 995 run_again(local, jiffies + HZ/2);
996 } 996 }
997 997
998 mutex_lock(&local->scan_mtx);
999
1000 if (list_empty(&local->work_list) && local->scan_req && 998 if (list_empty(&local->work_list) && local->scan_req &&
1001 !local->scanning) 999 !local->scanning)
1002 ieee80211_queue_delayed_work(&local->hw, 1000 ieee80211_queue_delayed_work(&local->hw,
1003 &local->scan_work, 1001 &local->scan_work,
1004 round_jiffies_relative(0)); 1002 round_jiffies_relative(0));
1005 1003
1006 mutex_unlock(&local->scan_mtx);
1007
1008 mutex_unlock(&local->work_mtx);
1009
1010 ieee80211_recalc_idle(local); 1004 ieee80211_recalc_idle(local);
1011 1005
1006 mutex_unlock(&local->mtx);
1007
1012 list_for_each_entry_safe(wk, tmp, &free_work, list) { 1008 list_for_each_entry_safe(wk, tmp, &free_work, list) {
1013 wk->done(wk, NULL); 1009 wk->done(wk, NULL);
1014 list_del(&wk->list); 1010 list_del(&wk->list);
@@ -1035,16 +1031,15 @@ void ieee80211_add_work(struct ieee80211_work *wk)
1035 wk->started = false; 1031 wk->started = false;
1036 1032
1037 local = wk->sdata->local; 1033 local = wk->sdata->local;
1038 mutex_lock(&local->work_mtx); 1034 mutex_lock(&local->mtx);
1039 list_add_tail(&wk->list, &local->work_list); 1035 list_add_tail(&wk->list, &local->work_list);
1040 mutex_unlock(&local->work_mtx); 1036 mutex_unlock(&local->mtx);
1041 1037
1042 ieee80211_queue_work(&local->hw, &local->work_work); 1038 ieee80211_queue_work(&local->hw, &local->work_work);
1043} 1039}
1044 1040
1045void ieee80211_work_init(struct ieee80211_local *local) 1041void ieee80211_work_init(struct ieee80211_local *local)
1046{ 1042{
1047 mutex_init(&local->work_mtx);
1048 INIT_LIST_HEAD(&local->work_list); 1043 INIT_LIST_HEAD(&local->work_list);
1049 setup_timer(&local->work_timer, ieee80211_work_timer, 1044 setup_timer(&local->work_timer, ieee80211_work_timer,
1050 (unsigned long)local); 1045 (unsigned long)local);
@@ -1057,7 +1052,7 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
1057 struct ieee80211_local *local = sdata->local; 1052 struct ieee80211_local *local = sdata->local;
1058 struct ieee80211_work *wk; 1053 struct ieee80211_work *wk;
1059 1054
1060 mutex_lock(&local->work_mtx); 1055 mutex_lock(&local->mtx);
1061 list_for_each_entry(wk, &local->work_list, list) { 1056 list_for_each_entry(wk, &local->work_list, list) {
1062 if (wk->sdata != sdata) 1057 if (wk->sdata != sdata)
1063 continue; 1058 continue;
@@ -1065,19 +1060,19 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
1065 wk->started = true; 1060 wk->started = true;
1066 wk->timeout = jiffies; 1061 wk->timeout = jiffies;
1067 } 1062 }
1068 mutex_unlock(&local->work_mtx); 1063 mutex_unlock(&local->mtx);
1069 1064
1070 /* run cleanups etc. */ 1065 /* run cleanups etc. */
1071 ieee80211_work_work(&local->work_work); 1066 ieee80211_work_work(&local->work_work);
1072 1067
1073 mutex_lock(&local->work_mtx); 1068 mutex_lock(&local->mtx);
1074 list_for_each_entry(wk, &local->work_list, list) { 1069 list_for_each_entry(wk, &local->work_list, list) {
1075 if (wk->sdata != sdata) 1070 if (wk->sdata != sdata)
1076 continue; 1071 continue;
1077 WARN_ON(1); 1072 WARN_ON(1);
1078 break; 1073 break;
1079 } 1074 }
1080 mutex_unlock(&local->work_mtx); 1075 mutex_unlock(&local->mtx);
1081} 1076}
1082 1077
1083ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata, 1078ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -1163,7 +1158,7 @@ int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1163 struct ieee80211_work *wk, *tmp; 1158 struct ieee80211_work *wk, *tmp;
1164 bool found = false; 1159 bool found = false;
1165 1160
1166 mutex_lock(&local->work_mtx); 1161 mutex_lock(&local->mtx);
1167 list_for_each_entry_safe(wk, tmp, &local->work_list, list) { 1162 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
1168 if ((unsigned long) wk == cookie) { 1163 if ((unsigned long) wk == cookie) {
1169 wk->timeout = jiffies; 1164 wk->timeout = jiffies;
@@ -1171,7 +1166,7 @@ int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1171 break; 1166 break;
1172 } 1167 }
1173 } 1168 }
1174 mutex_unlock(&local->work_mtx); 1169 mutex_unlock(&local->mtx);
1175 1170
1176 if (!found) 1171 if (!found)
1177 return -ENOENT; 1172 return -ENOENT;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 8d59d27d887e..43882b36da55 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -36,8 +36,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
36 int tail; 36 int tail;
37 37
38 hdr = (struct ieee80211_hdr *)skb->data; 38 hdr = (struct ieee80211_hdr *)skb->data;
39 if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || 39 if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
40 !ieee80211_is_data_present(hdr->frame_control)) 40 skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control))
41 return TX_CONTINUE; 41 return TX_CONTINUE;
42 42
43 hdrlen = ieee80211_hdrlen(hdr->frame_control); 43 hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -94,7 +94,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
94 if (status->flag & RX_FLAG_MMIC_STRIPPED) 94 if (status->flag & RX_FLAG_MMIC_STRIPPED)
95 return RX_CONTINUE; 95 return RX_CONTINUE;
96 96
97 if (!rx->key || rx->key->conf.alg != ALG_TKIP || 97 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
98 !ieee80211_has_protected(hdr->frame_control) || 98 !ieee80211_has_protected(hdr->frame_control) ||
99 !ieee80211_is_data_present(hdr->frame_control)) 99 !ieee80211_is_data_present(hdr->frame_control))
100 return RX_CONTINUE; 100 return RX_CONTINUE;
@@ -221,19 +221,13 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
221 if (!rx->sta || skb->len - hdrlen < 12) 221 if (!rx->sta || skb->len - hdrlen < 12)
222 return RX_DROP_UNUSABLE; 222 return RX_DROP_UNUSABLE;
223 223
224 if (status->flag & RX_FLAG_DECRYPTED) { 224 /*
225 if (status->flag & RX_FLAG_IV_STRIPPED) { 225 * Let TKIP code verify IV, but skip decryption.
226 /* 226 * In the case where hardware checks the IV as well,
227 * Hardware took care of all processing, including 227 * we don't even get here, see ieee80211_rx_h_decrypt()
228 * replay protection, and stripped the ICV/IV so 228 */
229 * we cannot do any checks here. 229 if (status->flag & RX_FLAG_DECRYPTED)
230 */
231 return RX_CONTINUE;
232 }
233
234 /* let TKIP code verify IV, but skip decryption */
235 hwaccel = 1; 230 hwaccel = 1;
236 }
237 231
238 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, 232 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
239 key, skb->data + hdrlen, 233 key, skb->data + hdrlen,
@@ -447,10 +441,6 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
447 if (!rx->sta || data_len < 0) 441 if (!rx->sta || data_len < 0)
448 return RX_DROP_UNUSABLE; 442 return RX_DROP_UNUSABLE;
449 443
450 if ((status->flag & RX_FLAG_DECRYPTED) &&
451 (status->flag & RX_FLAG_IV_STRIPPED))
452 return RX_CONTINUE;
453
454 ccmp_hdr2pn(pn, skb->data + hdrlen); 444 ccmp_hdr2pn(pn, skb->data + hdrlen);
455 445
456 queue = ieee80211_is_mgmt(hdr->frame_control) ? 446 queue = ieee80211_is_mgmt(hdr->frame_control) ?
@@ -564,10 +554,6 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
564 if (!ieee80211_is_mgmt(hdr->frame_control)) 554 if (!ieee80211_is_mgmt(hdr->frame_control))
565 return RX_CONTINUE; 555 return RX_CONTINUE;
566 556
567 if ((status->flag & RX_FLAG_DECRYPTED) &&
568 (status->flag & RX_FLAG_IV_STRIPPED))
569 return RX_CONTINUE;
570
571 if (skb->len < 24 + sizeof(*mmie)) 557 if (skb->len < 24 + sizeof(*mmie))
572 return RX_DROP_UNUSABLE; 558 return RX_DROP_UNUSABLE;
573 559
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4c2f89df5cce..0c043b6ce65e 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -40,6 +40,7 @@
40#include <net/udp.h> 40#include <net/udp.h>
41#include <net/icmp.h> /* for icmp_send */ 41#include <net/icmp.h> /* for icmp_send */
42#include <net/route.h> 42#include <net/route.h>
43#include <net/ip6_checksum.h>
43 44
44#include <linux/netfilter.h> 45#include <linux/netfilter.h>
45#include <linux/netfilter_ipv4.h> 46#include <linux/netfilter_ipv4.h>
@@ -637,10 +638,12 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
637 } 638 }
638 639
639 /* And finally the ICMP checksum */ 640 /* And finally the ICMP checksum */
640 icmph->icmp6_cksum = 0; 641 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
641 /* TODO IPv6: is this correct for ICMPv6? */ 642 skb->len - icmp_offset,
642 ip_vs_checksum_complete(skb, icmp_offset); 643 IPPROTO_ICMPV6, 0);
643 skb->ip_summed = CHECKSUM_UNNECESSARY; 644 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
645 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
646 skb->ip_summed = CHECKSUM_PARTIAL;
644 647
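
The corrected IPv6 path seeds icmp6_cksum with the complemented pseudo-header sum and points csum_start/csum_offset at the ICMPv6 checksum field, leaving the body sum to CHECKSUM_PARTIAL completion. A userspace sketch of the pseudo-header folding that csum_ipv6_magic performs; the helper names here are invented, only the RFC 2460 pseudo-header layout is fixed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One's-complement sum over a buffer, folded to 16 bits. */
static uint16_t csum_fold(const uint8_t *p, size_t len, uint32_t sum)
{
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)(p[i] << 8 | p[i + 1]);
    if (len & 1)
        sum += (uint32_t)(p[len - 1] << 8);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* IPv6 pseudo-header: src, dst, upper-layer length, next header. */
static uint16_t pseudo_hdr_sum(const uint8_t src[16], const uint8_t dst[16],
                               uint32_t len, uint8_t nexthdr)
{
    uint8_t ph[40] = { 0 };

    memcpy(ph, src, 16);
    memcpy(ph + 16, dst, 16);
    ph[32] = len >> 24; ph[33] = len >> 16;
    ph[34] = len >> 8;  ph[35] = len;
    ph[39] = nexthdr;
    return csum_fold(ph, sizeof(ph), 0);
}

int main(void)
{
    uint8_t src[16] = { 0xfe, 0x80 }, dst[16] = { 0xfe, 0x80, 1 };
    /* Seed value the device/stack completes over the ICMPv6 body. */
    uint16_t seed = (uint16_t)~pseudo_hdr_sum(src, dst, 64, 58);

    printf("pseudo-header seed: %#06x\n", seed);
    return 0;
}
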
645 if (inout) 648 if (inout)
646 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, 649 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
@@ -1381,8 +1384,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1381 if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) && 1384 if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1382 cp->protocol == IPPROTO_SCTP) { 1385 cp->protocol == IPPROTO_SCTP) {
1383 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED && 1386 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
1384 (atomic_read(&cp->in_pkts) % 1387 (pkts % sysctl_ip_vs_sync_threshold[1]
1385 sysctl_ip_vs_sync_threshold[1]
1386 == sysctl_ip_vs_sync_threshold[0])) || 1388 == sysctl_ip_vs_sync_threshold[0])) ||
1387 (cp->old_state != cp->state && 1389 (cp->old_state != cp->state &&
1388 ((cp->state == IP_VS_SCTP_S_CLOSED) || 1390 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
@@ -1393,7 +1395,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1393 } 1395 }
1394 } 1396 }
1395 1397
1396 if (af == AF_INET && 1398 /* Keep this block last: TCP and others with pp->num_states <= 1 */
1399 else if (af == AF_INET &&
1397 (ip_vs_sync_state & IP_VS_STATE_MASTER) && 1400 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1398 (((cp->protocol != IPPROTO_TCP || 1401 (((cp->protocol != IPPROTO_TCP ||
1399 cp->state == IP_VS_TCP_S_ESTABLISHED) && 1402 cp->state == IP_VS_TCP_S_ESTABLISHED) &&
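
The SCTP branch now reuses the pkts value read once earlier instead of re-reading cp->in_pkts, so both branches apply the same "sync every period-th packet, offset by a threshold" rule. A toy version of that test:

#include <stdio.h>

/* sync_threshold[0] = offset, sync_threshold[1] = period. */
static const int sync_threshold[2] = { 3, 50 };

static int needs_sync(int established, long pkts)
{
    return established && (pkts % sync_threshold[1] == sync_threshold[0]);
}

int main(void)
{
    long pkts;

    for (pkts = 1; pkts <= 120; pkts++)
        if (needs_sync(1, pkts))
            printf("sync at packet %ld\n", pkts);  /* 3, 53, 103 */
    return 0;
}
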
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 0f0c079c422a..ca8ec8c4f311 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -61,7 +61,7 @@ static DEFINE_RWLOCK(__ip_vs_svc_lock);
61static DEFINE_RWLOCK(__ip_vs_rs_lock); 61static DEFINE_RWLOCK(__ip_vs_rs_lock);
62 62
63/* lock for state and timeout tables */ 63/* lock for state and timeout tables */
64static DEFINE_RWLOCK(__ip_vs_securetcp_lock); 64static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
65 65
66/* lock for drop entry handling */ 66/* lock for drop entry handling */
67static DEFINE_SPINLOCK(__ip_vs_dropentry_lock); 67static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
@@ -204,7 +204,7 @@ static void update_defense_level(void)
204 spin_unlock(&__ip_vs_droppacket_lock); 204 spin_unlock(&__ip_vs_droppacket_lock);
205 205
206 /* secure_tcp */ 206 /* secure_tcp */
207 write_lock(&__ip_vs_securetcp_lock); 207 spin_lock(&ip_vs_securetcp_lock);
208 switch (sysctl_ip_vs_secure_tcp) { 208 switch (sysctl_ip_vs_secure_tcp) {
209 case 0: 209 case 0:
210 if (old_secure_tcp >= 2) 210 if (old_secure_tcp >= 2)
@@ -238,7 +238,7 @@ static void update_defense_level(void)
238 old_secure_tcp = sysctl_ip_vs_secure_tcp; 238 old_secure_tcp = sysctl_ip_vs_secure_tcp;
239 if (to_change >= 0) 239 if (to_change >= 0)
240 ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1); 240 ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
241 write_unlock(&__ip_vs_securetcp_lock); 241 spin_unlock(&ip_vs_securetcp_lock);
242 242
243 local_bh_enable(); 243 local_bh_enable();
244} 244}
@@ -843,7 +843,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
843 return -EINVAL; 843 return -EINVAL;
844 } 844 }
845 845
846 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 846 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
847 if (dest == NULL) { 847 if (dest == NULL) {
848 pr_err("%s(): no memory.\n", __func__); 848 pr_err("%s(): no memory.\n", __func__);
849 return -ENOMEM; 849 return -ENOMEM;
@@ -1177,7 +1177,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
1177 } 1177 }
1178#endif 1178#endif
1179 1179
1180 svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); 1180 svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL);
1181 if (svc == NULL) { 1181 if (svc == NULL) {
1182 IP_VS_DBG(1, "%s(): no memory\n", __func__); 1182 IP_VS_DBG(1, "%s(): no memory\n", __func__);
1183 ret = -ENOMEM; 1183 ret = -ENOMEM;
@@ -2155,7 +2155,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2155 if (cmd != IP_VS_SO_SET_ADD 2155 if (cmd != IP_VS_SO_SET_ADD
2156 && (svc == NULL || svc->protocol != usvc.protocol)) { 2156 && (svc == NULL || svc->protocol != usvc.protocol)) {
2157 ret = -ESRCH; 2157 ret = -ESRCH;
2158 goto out_unlock; 2158 goto out_drop_service;
2159 } 2159 }
2160 2160
2161 switch (cmd) { 2161 switch (cmd) {
@@ -2189,6 +2189,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2189 ret = -EINVAL; 2189 ret = -EINVAL;
2190 } 2190 }
2191 2191
2192out_drop_service:
2192 if (svc) 2193 if (svc)
2193 ip_vs_service_put(svc); 2194 ip_vs_service_put(svc);
2194 2195
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index bbc1ac795952..727e45b66953 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -35,7 +35,7 @@
35static LIST_HEAD(ip_vs_schedulers); 35static LIST_HEAD(ip_vs_schedulers);
36 36
37/* lock for service table */ 37/* lock for service table */
38static DEFINE_RWLOCK(__ip_vs_sched_lock); 38static DEFINE_SPINLOCK(ip_vs_sched_lock);
39 39
40 40
41/* 41/*
@@ -108,7 +108,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
108 108
109 IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name); 109 IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);
110 110
111 read_lock_bh(&__ip_vs_sched_lock); 111 spin_lock_bh(&ip_vs_sched_lock);
112 112
113 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { 113 list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
114 /* 114 /*
@@ -122,14 +122,14 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
122 } 122 }
123 if (strcmp(sched_name, sched->name)==0) { 123 if (strcmp(sched_name, sched->name)==0) {
124 /* HIT */ 124 /* HIT */
125 read_unlock_bh(&__ip_vs_sched_lock); 125 spin_unlock_bh(&ip_vs_sched_lock);
126 return sched; 126 return sched;
127 } 127 }
128 if (sched->module) 128 if (sched->module)
129 module_put(sched->module); 129 module_put(sched->module);
130 } 130 }
131 131
132 read_unlock_bh(&__ip_vs_sched_lock); 132 spin_unlock_bh(&ip_vs_sched_lock);
133 return NULL; 133 return NULL;
134} 134}
135 135
@@ -184,10 +184,10 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
184 /* increase the module use count */ 184 /* increase the module use count */
185 ip_vs_use_count_inc(); 185 ip_vs_use_count_inc();
186 186
187 write_lock_bh(&__ip_vs_sched_lock); 187 spin_lock_bh(&ip_vs_sched_lock);
188 188
189 if (!list_empty(&scheduler->n_list)) { 189 if (!list_empty(&scheduler->n_list)) {
190 write_unlock_bh(&__ip_vs_sched_lock); 190 spin_unlock_bh(&ip_vs_sched_lock);
191 ip_vs_use_count_dec(); 191 ip_vs_use_count_dec();
192 pr_err("%s(): [%s] scheduler already linked\n", 192 pr_err("%s(): [%s] scheduler already linked\n",
193 __func__, scheduler->name); 193 __func__, scheduler->name);
@@ -200,7 +200,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
200 */ 200 */
201 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { 201 list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
202 if (strcmp(scheduler->name, sched->name) == 0) { 202 if (strcmp(scheduler->name, sched->name) == 0) {
203 write_unlock_bh(&__ip_vs_sched_lock); 203 spin_unlock_bh(&ip_vs_sched_lock);
204 ip_vs_use_count_dec(); 204 ip_vs_use_count_dec();
205 pr_err("%s(): [%s] scheduler already existed " 205 pr_err("%s(): [%s] scheduler already existed "
206 "in the system\n", __func__, scheduler->name); 206 "in the system\n", __func__, scheduler->name);
@@ -211,7 +211,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
211 * Add it into the doubly-linked scheduler list 211 * Add it into the doubly-linked scheduler list
212 */ 212 */
213 list_add(&scheduler->n_list, &ip_vs_schedulers); 213 list_add(&scheduler->n_list, &ip_vs_schedulers);
214 write_unlock_bh(&__ip_vs_sched_lock); 214 spin_unlock_bh(&ip_vs_sched_lock);
215 215
216 pr_info("[%s] scheduler registered.\n", scheduler->name); 216 pr_info("[%s] scheduler registered.\n", scheduler->name);
217 217
@@ -229,9 +229,9 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
229 return -EINVAL; 229 return -EINVAL;
230 } 230 }
231 231
232 write_lock_bh(&__ip_vs_sched_lock); 232 spin_lock_bh(&ip_vs_sched_lock);
233 if (list_empty(&scheduler->n_list)) { 233 if (list_empty(&scheduler->n_list)) {
234 write_unlock_bh(&__ip_vs_sched_lock); 234 spin_unlock_bh(&ip_vs_sched_lock);
235 pr_err("%s(): [%s] scheduler is not in the list. failed\n", 235 pr_err("%s(): [%s] scheduler is not in the list. failed\n",
236 __func__, scheduler->name); 236 __func__, scheduler->name);
237 return -EINVAL; 237 return -EINVAL;
@@ -241,7 +241,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
241 * Remove it from the doubly-linked scheduler list 241 * Remove it from the doubly-linked scheduler list
242 */ 242 */
243 list_del(&scheduler->n_list); 243 list_del(&scheduler->n_list);
244 write_unlock_bh(&__ip_vs_sched_lock); 244 spin_unlock_bh(&ip_vs_sched_lock);
245 245
246 /* decrease the module use count */ 246 /* decrease the module use count */
247 ip_vs_use_count_dec(); 247 ip_vs_use_count_dec();
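
Every user of the old __ip_vs_sched_lock either took the write side or held it only for a short list walk, so the reader/writer split bought nothing; a plain spinlock is simpler and no slower. A sketch of the registration pattern under one exclusive lock (a userspace list with invented names):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct sched_entry {
    const char *name;
    struct sched_entry *next;
};

/* Single lock, since lookups and updates were both exclusive anyway. */
static pthread_spinlock_t sched_lock;
static struct sched_entry *schedulers;

static int register_sched(struct sched_entry *s)
{
    struct sched_entry *cur;

    pthread_spin_lock(&sched_lock);
    for (cur = schedulers; cur; cur = cur->next) {
        if (strcmp(cur->name, s->name) == 0) {
            pthread_spin_unlock(&sched_lock);
            return -1;    /* already registered */
        }
    }
    s->next = schedulers;
    schedulers = s;
    pthread_spin_unlock(&sched_lock);
    return 0;
}

int main(void)
{
    struct sched_entry rr = { "rr", NULL }, rr2 = { "rr", NULL };

    pthread_spin_init(&sched_lock, PTHREAD_PROCESS_PRIVATE);
    printf("%d\n", register_sched(&rr));   /*  0 */
    printf("%d\n", register_sched(&rr2));  /* -1 */
    return 0;
}
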
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index b46a8390896d..9228ee0dc11a 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -448,6 +448,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
448{ 448{
449 __be16 _ports[2], *ports; 449 __be16 _ports[2], *ports;
450 u8 nexthdr; 450 u8 nexthdr;
451 int poff;
451 452
452 memset(dst, 0, sizeof(*dst)); 453 memset(dst, 0, sizeof(*dst));
453 454
@@ -492,19 +493,13 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
492 return 0; 493 return 0;
493 } 494 }
494 495
495 switch (nexthdr) { 496 poff = proto_ports_offset(nexthdr);
496 case IPPROTO_TCP: 497 if (poff >= 0) {
497 case IPPROTO_UDP: 498 ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
498 case IPPROTO_UDPLITE:
499 case IPPROTO_SCTP:
500 case IPPROTO_DCCP:
501 ports = skb_header_pointer(skb, protoff, sizeof(_ports),
502 &_ports); 499 &_ports);
503 break; 500 } else {
504 default:
505 _ports[0] = _ports[1] = 0; 501 _ports[0] = _ports[1] = 0;
506 ports = _ports; 502 ports = _ports;
507 break;
508 } 503 }
509 if (!ports) 504 if (!ports)
510 return -1; 505 return -1;
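
The switch over transport protocols collapses into a call to proto_ports_offset(), which reports where the 16-bit port pair sits inside the transport header, or a negative value for port-less protocols. A simplified re-implementation for orientation only - the real helper also covers cases such as AH, so treat this as a sketch:

	#include <linux/errno.h>
	#include <linux/in.h>

	static int example_proto_ports_offset(int proto)
	{
		switch (proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			return 0;	/* ports open the transport header */
		default:
			return -EINVAL;	/* no ports to read */
		}
	}
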
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9a17f28b1253..3616f27b9d46 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -488,7 +488,7 @@ retry:
488 skb->dev = dev; 488 skb->dev = dev;
489 skb->priority = sk->sk_priority; 489 skb->priority = sk->sk_priority;
490 skb->mark = sk->sk_mark; 490 skb->mark = sk->sk_mark;
491 err = sock_tx_timestamp(msg, sk, skb_tx(skb)); 491 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
492 if (err < 0) 492 if (err < 0)
493 goto out_unlock; 493 goto out_unlock;
494 494
@@ -1209,7 +1209,7 @@ static int packet_snd(struct socket *sock,
1209 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); 1209 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1210 if (err) 1210 if (err)
1211 goto out_free; 1211 goto out_free;
1212 err = sock_tx_timestamp(msg, sk, skb_tx(skb)); 1212 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1213 if (err < 0) 1213 if (err < 0)
1214 goto out_free; 1214 goto out_free;
1215 1215
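
Both call sites adapt to the reworked sock_tx_timestamp(): the msghdr argument is gone and the flags now live in skb_shared_info rather than behind the old skb_tx() accessor. A hedged sketch of the updated call pattern:

	static int example_stamp(struct sock *sk, struct sk_buff *skb)
	{
		/* tx_flags moved into the skb's shared info area */
		return sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	}
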
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b2a3ae6cad78..04e34196c9de 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -834,6 +834,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
834{ 834{
835 struct pep_sock *pn = pep_sk(sk); 835 struct pep_sock *pn = pep_sk(sk);
836 struct pnpipehdr *ph; 836 struct pnpipehdr *ph;
837 int err;
837 838
838 if (pn_flow_safe(pn->tx_fc) && 839 if (pn_flow_safe(pn->tx_fc) &&
839 !atomic_add_unless(&pn->tx_credits, -1, 0)) { 840 !atomic_add_unless(&pn->tx_credits, -1, 0)) {
@@ -852,7 +853,10 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
852 ph->message_id = PNS_PIPE_DATA; 853 ph->message_id = PNS_PIPE_DATA;
853 ph->pipe_handle = pn->pipe_handle; 854 ph->pipe_handle = pn->pipe_handle;
854 855
855 return pn_skb_send(sk, skb, &pipe_srv); 856 err = pn_skb_send(sk, skb, &pipe_srv);
857 if (err && pn_flow_safe(pn->tx_fc))
858 atomic_inc(&pn->tx_credits);
859 return err;
856} 860}
857 861
858static int pep_sendmsg(struct kiocb *iocb, struct sock *sk, 862static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
@@ -872,7 +876,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
872 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, 876 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
873 flags & MSG_DONTWAIT, &err); 877 flags & MSG_DONTWAIT, &err);
874 if (!skb) 878 if (!skb)
875 return -ENOBUFS; 879 return err;
876 880
877 skb_reserve(skb, MAX_PHONET_HEADER + 3); 881 skb_reserve(skb, MAX_PHONET_HEADER + 3);
878 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 882 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
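
Two fixes in pep.c: pipe_skb_send() now hands its flow-control credit back when the transmit fails, and pep_sendmsg() propagates the real error from sock_alloc_send_skb() instead of a blanket -ENOBUFS. A sketch of the credit compensation pattern - example_transmit() is a hypothetical stand-in, and the patch additionally gates the refund on pn_flow_safe():

	static int example_send_with_credit(struct pep_sock *pn,
					    struct sk_buff *skb)
	{
		int err;

		/* take a credit, but never drop below zero */
		if (!atomic_add_unless(&pn->tx_credits, -1, 0))
			return -ENOBUFS;

		err = example_transmit(skb);	/* hypothetical helper */
		if (err)
			atomic_inc(&pn->tx_credits);	/* refund on failure */
		return err;
	}
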
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index b18e48fae975..d0a429459370 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -292,8 +292,7 @@ static void phonet_route_autodel(struct net_device *dev)
292 if (bitmap_empty(deleted, 64)) 292 if (bitmap_empty(deleted, 64))
293 return; /* short-circuit RCU */ 293 return; /* short-circuit RCU */
294 synchronize_rcu(); 294 synchronize_rcu();
295 for (i = find_first_bit(deleted, 64); i < 64; 295 for_each_set_bit(i, deleted, 64) {
296 i = find_next_bit(deleted, 64, i + 1)) {
297 rtm_phonet_notify(RTM_DELROUTE, dev, i); 296 rtm_phonet_notify(RTM_DELROUTE, dev, i);
298 dev_put(dev); 297 dev_put(dev);
299 } 298 }
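
The open-coded find_first_bit()/find_next_bit() loop becomes for_each_set_bit(), the canonical spelling for walking set bits. The two forms below are equivalent; handle() is a hypothetical stand-in for the loop body:

	DECLARE_BITMAP(deleted, 64);
	unsigned int i;

	/* open-coded form the hunk removes */
	for (i = find_first_bit(deleted, 64); i < 64;
	     i = find_next_bit(deleted, 64, i + 1))
		handle(i);

	/* canonical replacement */
	for_each_set_bit(i, deleted, 64)
		handle(i);
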
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 6e9848bf0370..7c91f739f138 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -281,7 +281,9 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
281 if (!mask && sk->sk_state == TCP_CLOSE_WAIT) 281 if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
282 return POLLHUP; 282 return POLLHUP;
283 283
284 if (sk->sk_state == TCP_ESTABLISHED && atomic_read(&pn->tx_credits)) 284 if (sk->sk_state == TCP_ESTABLISHED &&
285 atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
286 atomic_read(&pn->tx_credits))
285 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 287 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
286 288
287 return mask; 289 return mask;
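
Without the sk_wmem_alloc check, poll() could report the pipe writable while its send buffer was already exhausted, leaving a writer spinning on POLLOUT. The rationale is inferred from the diff rather than stated in it; the new condition requires all three of established state, send-buffer room, and a flow-control credit:

	if (sk->sk_state == TCP_ESTABLISHED &&
	    atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
	    atomic_read(&pn->tx_credits))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
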
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index aebfecbdb841..bb6ad81b671d 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -39,7 +39,15 @@
39#include <net/sock.h> 39#include <net/sock.h>
40 40
41#include "rds.h" 41#include "rds.h"
42#include "rdma.h" 42
43char *rds_str_array(char **array, size_t elements, size_t index)
44{
45 if ((index < elements) && array[index])
46 return array[index];
47 else
48 return "unknown";
49}
50EXPORT_SYMBOL(rds_str_array);
43 51
44/* this is just used for stats gathering :/ */ 52/* this is just used for stats gathering :/ */
45static DEFINE_SPINLOCK(rds_sock_lock); 53static DEFINE_SPINLOCK(rds_sock_lock);
@@ -62,7 +70,7 @@ static int rds_release(struct socket *sock)
62 struct rds_sock *rs; 70 struct rds_sock *rs;
63 unsigned long flags; 71 unsigned long flags;
64 72
65 if (sk == NULL) 73 if (!sk)
66 goto out; 74 goto out;
67 75
68 rs = rds_sk_to_rs(sk); 76 rs = rds_sk_to_rs(sk);
@@ -73,7 +81,15 @@ static int rds_release(struct socket *sock)
73 * with the socket. */ 81 * with the socket. */
74 rds_clear_recv_queue(rs); 82 rds_clear_recv_queue(rs);
75 rds_cong_remove_socket(rs); 83 rds_cong_remove_socket(rs);
84
85 /*
 86 * the binding lookup hash uses RCU; we need to
 87 * make sure we synchronize_rcu() before we free our
88 * entry
89 */
76 rds_remove_bound(rs); 90 rds_remove_bound(rs);
91 synchronize_rcu();
92
77 rds_send_drop_to(rs, NULL); 93 rds_send_drop_to(rs, NULL);
78 rds_rdma_drop_keys(rs); 94 rds_rdma_drop_keys(rs);
79 rds_notify_queue_get(rs, NULL); 95 rds_notify_queue_get(rs, NULL);
@@ -83,6 +99,8 @@ static int rds_release(struct socket *sock)
83 rds_sock_count--; 99 rds_sock_count--;
84 spin_unlock_irqrestore(&rds_sock_lock, flags); 100 spin_unlock_irqrestore(&rds_sock_lock, flags);
85 101
102 rds_trans_put(rs->rs_transport);
103
86 sock->sk = NULL; 104 sock->sk = NULL;
87 sock_put(sk); 105 sock_put(sk);
88out: 106out:
@@ -514,7 +532,7 @@ out:
514 spin_unlock_irqrestore(&rds_sock_lock, flags); 532 spin_unlock_irqrestore(&rds_sock_lock, flags);
515} 533}
516 534
517static void __exit rds_exit(void) 535static void rds_exit(void)
518{ 536{
519 sock_unregister(rds_family_ops.family); 537 sock_unregister(rds_family_ops.family);
520 proto_unregister(&rds_proto); 538 proto_unregister(&rds_proto);
@@ -529,7 +547,7 @@ static void __exit rds_exit(void)
529} 547}
530module_exit(rds_exit); 548module_exit(rds_exit);
531 549
532static int __init rds_init(void) 550static int rds_init(void)
533{ 551{
534 int ret; 552 int ret;
535 553
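
rds_str_array(), added above, is a bounds-checked table lookup that degrades to "unknown" rather than indexing off the end of the array. A usage sketch with a hypothetical table and index:

	static char *example_states[] = { "down", "connecting", "up" };

	pr_info("conn state: %s\n",
		rds_str_array(example_states, ARRAY_SIZE(example_states),
			      state));
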
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 5d95fc007f1a..2f6b3fcc79f8 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -34,45 +34,52 @@
34#include <net/sock.h> 34#include <net/sock.h>
35#include <linux/in.h> 35#include <linux/in.h>
36#include <linux/if_arp.h> 36#include <linux/if_arp.h>
37#include <linux/jhash.h>
37#include "rds.h" 38#include "rds.h"
38 39
39/* 40#define BIND_HASH_SIZE 1024
40 * XXX this probably still needs more work.. no INADDR_ANY, and rbtrees aren't 41static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
41 * particularly zippy.
42 *
43 * This is now called for every incoming frame so we arguably care much more
44 * about it than we used to.
45 */
46static DEFINE_SPINLOCK(rds_bind_lock); 42static DEFINE_SPINLOCK(rds_bind_lock);
47static struct rb_root rds_bind_tree = RB_ROOT;
48 43
49static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port, 44static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
50 struct rds_sock *insert) 45{
46 return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) &
47 (BIND_HASH_SIZE - 1));
48}
49
50static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
51 struct rds_sock *insert)
51{ 52{
52 struct rb_node **p = &rds_bind_tree.rb_node;
53 struct rb_node *parent = NULL;
54 struct rds_sock *rs; 53 struct rds_sock *rs;
54 struct hlist_node *node;
55 struct hlist_head *head = hash_to_bucket(addr, port);
55 u64 cmp; 56 u64 cmp;
56 u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port); 57 u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
57 58
58 while (*p) { 59 rcu_read_lock();
59 parent = *p; 60 hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
60 rs = rb_entry(parent, struct rds_sock, rs_bound_node);
61
62 cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) | 61 cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
63 be16_to_cpu(rs->rs_bound_port); 62 be16_to_cpu(rs->rs_bound_port);
64 63
65 if (needle < cmp) 64 if (cmp == needle) {
66 p = &(*p)->rb_left; 65 rcu_read_unlock();
67 else if (needle > cmp)
68 p = &(*p)->rb_right;
69 else
70 return rs; 66 return rs;
67 }
71 } 68 }
69 rcu_read_unlock();
72 70
73 if (insert) { 71 if (insert) {
74 rb_link_node(&insert->rs_bound_node, parent, p); 72 /*
75 rb_insert_color(&insert->rs_bound_node, &rds_bind_tree); 73 * make sure our addr and port are set before
 74 * we are added to the list; other RCU
 75 * readers will find us as soon as
 76 * hlist_add_head_rcu() is done
77 */
78 insert->rs_bound_addr = addr;
79 insert->rs_bound_port = port;
80 rds_sock_addref(insert);
81
82 hlist_add_head_rcu(&insert->rs_bound_node, head);
76 } 83 }
77 return NULL; 84 return NULL;
78} 85}
@@ -86,15 +93,13 @@ static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port,
86struct rds_sock *rds_find_bound(__be32 addr, __be16 port) 93struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
87{ 94{
88 struct rds_sock *rs; 95 struct rds_sock *rs;
89 unsigned long flags;
90 96
91 spin_lock_irqsave(&rds_bind_lock, flags); 97 rs = rds_bind_lookup(addr, port, NULL);
92 rs = rds_bind_tree_walk(addr, port, NULL); 98
93 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) 99 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
94 rds_sock_addref(rs); 100 rds_sock_addref(rs);
95 else 101 else
96 rs = NULL; 102 rs = NULL;
97 spin_unlock_irqrestore(&rds_bind_lock, flags);
98 103
99 rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr, 104 rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
100 ntohs(port)); 105 ntohs(port));
@@ -121,22 +126,15 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
121 do { 126 do {
122 if (rover == 0) 127 if (rover == 0)
123 rover++; 128 rover++;
124 if (rds_bind_tree_walk(addr, cpu_to_be16(rover), rs) == NULL) { 129 if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) {
125 *port = cpu_to_be16(rover); 130 *port = rs->rs_bound_port;
126 ret = 0; 131 ret = 0;
132 rdsdebug("rs %p binding to %pI4:%d\n",
133 rs, &addr, (int)ntohs(*port));
127 break; 134 break;
128 } 135 }
129 } while (rover++ != last); 136 } while (rover++ != last);
130 137
131 if (ret == 0) {
132 rs->rs_bound_addr = addr;
133 rs->rs_bound_port = *port;
134 rds_sock_addref(rs);
135
136 rdsdebug("rs %p binding to %pI4:%d\n",
137 rs, &addr, (int)ntohs(*port));
138 }
139
140 spin_unlock_irqrestore(&rds_bind_lock, flags); 138 spin_unlock_irqrestore(&rds_bind_lock, flags);
141 139
142 return ret; 140 return ret;
@@ -153,7 +151,7 @@ void rds_remove_bound(struct rds_sock *rs)
153 rs, &rs->rs_bound_addr, 151 rs, &rs->rs_bound_addr,
154 ntohs(rs->rs_bound_port)); 152 ntohs(rs->rs_bound_port));
155 153
156 rb_erase(&rs->rs_bound_node, &rds_bind_tree); 154 hlist_del_init_rcu(&rs->rs_bound_node);
157 rds_sock_put(rs); 155 rds_sock_put(rs);
158 rs->rs_bound_addr = 0; 156 rs->rs_bound_addr = 0;
159 } 157 }
@@ -184,7 +182,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
184 goto out; 182 goto out;
185 183
186 trans = rds_trans_get_preferred(sin->sin_addr.s_addr); 184 trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
187 if (trans == NULL) { 185 if (!trans) {
188 ret = -EADDRNOTAVAIL; 186 ret = -EADDRNOTAVAIL;
189 rds_remove_bound(rs); 187 rds_remove_bound(rs);
190 if (printk_ratelimit()) 188 if (printk_ratelimit())
@@ -198,5 +196,9 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
198 196
199out: 197out:
200 release_sock(sk); 198 release_sock(sk);
199
200 /* we might have called rds_remove_bound on error */
201 if (ret)
202 synchronize_rcu();
201 return ret; 203 return ret;
202} 204}
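
The bind table replaces the rbtree with a fixed 1024-bucket hash. Bucket selection mixes address and port with jhash_2words(); because the table size is a power of two, a mask substitutes for a modulo. A sketch mirroring hash_to_bucket() from the patch:

	#include <linux/jhash.h>

	#define BIND_HASH_SIZE 1024	/* must remain a power of two */
	static struct hlist_head bind_hash_table[BIND_HASH_SIZE];

	static struct hlist_head *example_bucket(__be32 addr, __be16 port)
	{
		u32 hash = jhash_2words((u32)addr, (u32)port, 0);

		return bind_hash_table + (hash & (BIND_HASH_SIZE - 1));
	}
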
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 0871a29f0780..75ea686f27d5 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -141,7 +141,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
141 unsigned long flags; 141 unsigned long flags;
142 142
143 map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL); 143 map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
144 if (map == NULL) 144 if (!map)
145 return NULL; 145 return NULL;
146 146
147 map->m_addr = addr; 147 map->m_addr = addr;
@@ -159,7 +159,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
159 ret = rds_cong_tree_walk(addr, map); 159 ret = rds_cong_tree_walk(addr, map);
160 spin_unlock_irqrestore(&rds_cong_lock, flags); 160 spin_unlock_irqrestore(&rds_cong_lock, flags);
161 161
162 if (ret == NULL) { 162 if (!ret) {
163 ret = map; 163 ret = map;
164 map = NULL; 164 map = NULL;
165 } 165 }
@@ -205,7 +205,7 @@ int rds_cong_get_maps(struct rds_connection *conn)
205 conn->c_lcong = rds_cong_from_addr(conn->c_laddr); 205 conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
206 conn->c_fcong = rds_cong_from_addr(conn->c_faddr); 206 conn->c_fcong = rds_cong_from_addr(conn->c_faddr);
207 207
208 if (conn->c_lcong == NULL || conn->c_fcong == NULL) 208 if (!(conn->c_lcong && conn->c_fcong))
209 return -ENOMEM; 209 return -ENOMEM;
210 210
211 return 0; 211 return 0;
@@ -221,7 +221,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
221 list_for_each_entry(conn, &map->m_conn_list, c_map_item) { 221 list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
222 if (!test_and_set_bit(0, &conn->c_map_queued)) { 222 if (!test_and_set_bit(0, &conn->c_map_queued)) {
223 rds_stats_inc(s_cong_update_queued); 223 rds_stats_inc(s_cong_update_queued);
224 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 224 rds_send_xmit(conn);
225 } 225 }
226 } 226 }
227 227
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7619b671ca28..870992e08cae 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -37,7 +37,6 @@
37 37
38#include "rds.h" 38#include "rds.h"
39#include "loop.h" 39#include "loop.h"
40#include "rdma.h"
41 40
42#define RDS_CONNECTION_HASH_BITS 12 41#define RDS_CONNECTION_HASH_BITS 12
43#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS) 42#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
@@ -63,18 +62,7 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
63 var |= RDS_INFO_CONNECTION_FLAG_##suffix; \ 62 var |= RDS_INFO_CONNECTION_FLAG_##suffix; \
64} while (0) 63} while (0)
65 64
66static inline int rds_conn_is_sending(struct rds_connection *conn) 65/* rcu read lock must be held or the connection spinlock */
67{
68 int ret = 0;
69
70 if (!mutex_trylock(&conn->c_send_lock))
71 ret = 1;
72 else
73 mutex_unlock(&conn->c_send_lock);
74
75 return ret;
76}
77
78static struct rds_connection *rds_conn_lookup(struct hlist_head *head, 66static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
79 __be32 laddr, __be32 faddr, 67 __be32 laddr, __be32 faddr,
80 struct rds_transport *trans) 68 struct rds_transport *trans)
@@ -82,7 +70,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
82 struct rds_connection *conn, *ret = NULL; 70 struct rds_connection *conn, *ret = NULL;
83 struct hlist_node *pos; 71 struct hlist_node *pos;
84 72
85 hlist_for_each_entry(conn, pos, head, c_hash_node) { 73 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
86 if (conn->c_faddr == faddr && conn->c_laddr == laddr && 74 if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
87 conn->c_trans == trans) { 75 conn->c_trans == trans) {
88 ret = conn; 76 ret = conn;
@@ -129,10 +117,11 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
129{ 117{
130 struct rds_connection *conn, *parent = NULL; 118 struct rds_connection *conn, *parent = NULL;
131 struct hlist_head *head = rds_conn_bucket(laddr, faddr); 119 struct hlist_head *head = rds_conn_bucket(laddr, faddr);
120 struct rds_transport *loop_trans;
132 unsigned long flags; 121 unsigned long flags;
133 int ret; 122 int ret;
134 123
135 spin_lock_irqsave(&rds_conn_lock, flags); 124 rcu_read_lock();
136 conn = rds_conn_lookup(head, laddr, faddr, trans); 125 conn = rds_conn_lookup(head, laddr, faddr, trans);
137 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && 126 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
138 !is_outgoing) { 127 !is_outgoing) {
@@ -143,12 +132,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
143 parent = conn; 132 parent = conn;
144 conn = parent->c_passive; 133 conn = parent->c_passive;
145 } 134 }
146 spin_unlock_irqrestore(&rds_conn_lock, flags); 135 rcu_read_unlock();
147 if (conn) 136 if (conn)
148 goto out; 137 goto out;
149 138
150 conn = kmem_cache_zalloc(rds_conn_slab, gfp); 139 conn = kmem_cache_zalloc(rds_conn_slab, gfp);
151 if (conn == NULL) { 140 if (!conn) {
152 conn = ERR_PTR(-ENOMEM); 141 conn = ERR_PTR(-ENOMEM);
153 goto out; 142 goto out;
154 } 143 }
@@ -159,7 +148,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
159 spin_lock_init(&conn->c_lock); 148 spin_lock_init(&conn->c_lock);
160 conn->c_next_tx_seq = 1; 149 conn->c_next_tx_seq = 1;
161 150
162 mutex_init(&conn->c_send_lock); 151 init_waitqueue_head(&conn->c_waitq);
163 INIT_LIST_HEAD(&conn->c_send_queue); 152 INIT_LIST_HEAD(&conn->c_send_queue);
164 INIT_LIST_HEAD(&conn->c_retrans); 153 INIT_LIST_HEAD(&conn->c_retrans);
165 154
@@ -175,7 +164,9 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
175 * can bind to the destination address then we'd rather the messages 164 * can bind to the destination address then we'd rather the messages
176 * flow through loopback rather than either transport. 165 * flow through loopback rather than either transport.
177 */ 166 */
178 if (rds_trans_get_preferred(faddr)) { 167 loop_trans = rds_trans_get_preferred(faddr);
168 if (loop_trans) {
169 rds_trans_put(loop_trans);
179 conn->c_loopback = 1; 170 conn->c_loopback = 1;
180 if (is_outgoing && trans->t_prefer_loopback) { 171 if (is_outgoing && trans->t_prefer_loopback) {
181 /* "outgoing" connection - and the transport 172 /* "outgoing" connection - and the transport
@@ -238,7 +229,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
238 kmem_cache_free(rds_conn_slab, conn); 229 kmem_cache_free(rds_conn_slab, conn);
239 conn = found; 230 conn = found;
240 } else { 231 } else {
241 hlist_add_head(&conn->c_hash_node, head); 232 hlist_add_head_rcu(&conn->c_hash_node, head);
242 rds_cong_add_conn(conn); 233 rds_cong_add_conn(conn);
243 rds_conn_count++; 234 rds_conn_count++;
244 } 235 }
@@ -263,21 +254,91 @@ struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
263} 254}
264EXPORT_SYMBOL_GPL(rds_conn_create_outgoing); 255EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
265 256
257void rds_conn_shutdown(struct rds_connection *conn)
258{
259 /* shut it down unless it's down already */
260 if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
261 /*
262 * Quiesce the connection mgmt handlers before we start tearing
263 * things down. We don't hold the mutex for the entire
 264 * duration of the shutdown operation, else we may
 265 * deadlock with the CM handler. Instead, the CM event
266 * handler is supposed to check for state DISCONNECTING
267 */
268 mutex_lock(&conn->c_cm_lock);
269 if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
270 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
271 rds_conn_error(conn, "shutdown called in state %d\n",
272 atomic_read(&conn->c_state));
273 mutex_unlock(&conn->c_cm_lock);
274 return;
275 }
276 mutex_unlock(&conn->c_cm_lock);
277
278 wait_event(conn->c_waitq,
279 !test_bit(RDS_IN_XMIT, &conn->c_flags));
280
281 conn->c_trans->conn_shutdown(conn);
282 rds_conn_reset(conn);
283
284 if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
 285 /* This can happen - e.g. when we're in the middle of tearing
 286 * down the connection, and someone unloads the rds module.
 287 * Quite reproducible with loopback connections.
288 * Mostly harmless.
289 */
290 rds_conn_error(conn,
291 "%s: failed to transition to state DOWN, "
292 "current state is %d\n",
293 __func__,
294 atomic_read(&conn->c_state));
295 return;
296 }
297 }
298
299 /* Then reconnect if it's still live.
300 * The passive side of an IB loopback connection is never added
301 * to the conn hash, so we never trigger a reconnect on this
302 * conn - the reconnect is always triggered by the active peer. */
303 cancel_delayed_work_sync(&conn->c_conn_w);
304 rcu_read_lock();
305 if (!hlist_unhashed(&conn->c_hash_node)) {
306 rcu_read_unlock();
307 rds_queue_reconnect(conn);
308 } else {
309 rcu_read_unlock();
310 }
311}
312
313/*
314 * Stop and free a connection.
315 *
316 * This can only be used in very limited circumstances. It assumes that once
317 * the conn has been shutdown that no one else is referencing the connection.
318 * We can only ensure this in the rmmod path in the current code.
319 */
266void rds_conn_destroy(struct rds_connection *conn) 320void rds_conn_destroy(struct rds_connection *conn)
267{ 321{
268 struct rds_message *rm, *rtmp; 322 struct rds_message *rm, *rtmp;
323 unsigned long flags;
269 324
270 rdsdebug("freeing conn %p for %pI4 -> " 325 rdsdebug("freeing conn %p for %pI4 -> "
271 "%pI4\n", conn, &conn->c_laddr, 326 "%pI4\n", conn, &conn->c_laddr,
272 &conn->c_faddr); 327 &conn->c_faddr);
273 328
274 hlist_del_init(&conn->c_hash_node); 329 /* Ensure conn will not be scheduled for reconnect */
330 spin_lock_irq(&rds_conn_lock);
331 hlist_del_init_rcu(&conn->c_hash_node);
332 spin_unlock_irq(&rds_conn_lock);
333 synchronize_rcu();
275 334
276 /* wait for the rds thread to shut it down */ 335 /* shut the connection down */
277 atomic_set(&conn->c_state, RDS_CONN_ERROR); 336 rds_conn_drop(conn);
278 cancel_delayed_work(&conn->c_conn_w); 337 flush_work(&conn->c_down_w);
279 queue_work(rds_wq, &conn->c_down_w); 338
280 flush_workqueue(rds_wq); 339 /* make sure lingering queued work won't try to ref the conn */
340 cancel_delayed_work_sync(&conn->c_send_w);
341 cancel_delayed_work_sync(&conn->c_recv_w);
281 342
282 /* tear down queued messages */ 343 /* tear down queued messages */
283 list_for_each_entry_safe(rm, rtmp, 344 list_for_each_entry_safe(rm, rtmp,
@@ -302,7 +363,9 @@ void rds_conn_destroy(struct rds_connection *conn)
302 BUG_ON(!list_empty(&conn->c_retrans)); 363 BUG_ON(!list_empty(&conn->c_retrans));
303 kmem_cache_free(rds_conn_slab, conn); 364 kmem_cache_free(rds_conn_slab, conn);
304 365
366 spin_lock_irqsave(&rds_conn_lock, flags);
305 rds_conn_count--; 367 rds_conn_count--;
368 spin_unlock_irqrestore(&rds_conn_lock, flags);
306} 369}
307EXPORT_SYMBOL_GPL(rds_conn_destroy); 370EXPORT_SYMBOL_GPL(rds_conn_destroy);
308 371
@@ -316,23 +379,23 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
316 struct list_head *list; 379 struct list_head *list;
317 struct rds_connection *conn; 380 struct rds_connection *conn;
318 struct rds_message *rm; 381 struct rds_message *rm;
319 unsigned long flags;
320 unsigned int total = 0; 382 unsigned int total = 0;
383 unsigned long flags;
321 size_t i; 384 size_t i;
322 385
323 len /= sizeof(struct rds_info_message); 386 len /= sizeof(struct rds_info_message);
324 387
325 spin_lock_irqsave(&rds_conn_lock, flags); 388 rcu_read_lock();
326 389
327 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 390 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
328 i++, head++) { 391 i++, head++) {
329 hlist_for_each_entry(conn, pos, head, c_hash_node) { 392 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
330 if (want_send) 393 if (want_send)
331 list = &conn->c_send_queue; 394 list = &conn->c_send_queue;
332 else 395 else
333 list = &conn->c_retrans; 396 list = &conn->c_retrans;
334 397
335 spin_lock(&conn->c_lock); 398 spin_lock_irqsave(&conn->c_lock, flags);
336 399
337 /* XXX too lazy to maintain counts.. */ 400 /* XXX too lazy to maintain counts.. */
338 list_for_each_entry(rm, list, m_conn_item) { 401 list_for_each_entry(rm, list, m_conn_item) {
@@ -343,11 +406,10 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
343 conn->c_faddr, 0); 406 conn->c_faddr, 0);
344 } 407 }
345 408
346 spin_unlock(&conn->c_lock); 409 spin_unlock_irqrestore(&conn->c_lock, flags);
347 } 410 }
348 } 411 }
349 412 rcu_read_unlock();
350 spin_unlock_irqrestore(&rds_conn_lock, flags);
351 413
352 lens->nr = total; 414 lens->nr = total;
353 lens->each = sizeof(struct rds_info_message); 415 lens->each = sizeof(struct rds_info_message);
@@ -377,19 +439,17 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
377 uint64_t buffer[(item_len + 7) / 8]; 439 uint64_t buffer[(item_len + 7) / 8];
378 struct hlist_head *head; 440 struct hlist_head *head;
379 struct hlist_node *pos; 441 struct hlist_node *pos;
380 struct hlist_node *tmp;
381 struct rds_connection *conn; 442 struct rds_connection *conn;
382 unsigned long flags;
383 size_t i; 443 size_t i;
384 444
385 spin_lock_irqsave(&rds_conn_lock, flags); 445 rcu_read_lock();
386 446
387 lens->nr = 0; 447 lens->nr = 0;
388 lens->each = item_len; 448 lens->each = item_len;
389 449
390 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 450 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
391 i++, head++) { 451 i++, head++) {
392 hlist_for_each_entry_safe(conn, pos, tmp, head, c_hash_node) { 452 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
393 453
394 /* XXX no c_lock usage.. */ 454 /* XXX no c_lock usage.. */
395 if (!visitor(conn, buffer)) 455 if (!visitor(conn, buffer))
@@ -405,8 +465,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
405 lens->nr++; 465 lens->nr++;
406 } 466 }
407 } 467 }
408 468 rcu_read_unlock();
409 spin_unlock_irqrestore(&rds_conn_lock, flags);
410} 469}
411EXPORT_SYMBOL_GPL(rds_for_each_conn_info); 470EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
412 471
@@ -423,8 +482,8 @@ static int rds_conn_info_visitor(struct rds_connection *conn,
423 sizeof(cinfo->transport)); 482 sizeof(cinfo->transport));
424 cinfo->flags = 0; 483 cinfo->flags = 0;
425 484
426 rds_conn_info_set(cinfo->flags, 485 rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
427 rds_conn_is_sending(conn), SENDING); 486 SENDING);
428 /* XXX Future: return the state rather than these funky bits */ 487 /* XXX Future: return the state rather than these funky bits */
429 rds_conn_info_set(cinfo->flags, 488 rds_conn_info_set(cinfo->flags,
430 atomic_read(&conn->c_state) == RDS_CONN_CONNECTING, 489 atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
@@ -444,12 +503,12 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
444 sizeof(struct rds_info_connection)); 503 sizeof(struct rds_info_connection));
445} 504}
446 505
447int __init rds_conn_init(void) 506int rds_conn_init(void)
448{ 507{
449 rds_conn_slab = kmem_cache_create("rds_connection", 508 rds_conn_slab = kmem_cache_create("rds_connection",
450 sizeof(struct rds_connection), 509 sizeof(struct rds_connection),
451 0, 0, NULL); 510 0, 0, NULL);
452 if (rds_conn_slab == NULL) 511 if (!rds_conn_slab)
453 return -ENOMEM; 512 return -ENOMEM;
454 513
455 rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); 514 rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
@@ -487,6 +546,18 @@ void rds_conn_drop(struct rds_connection *conn)
487EXPORT_SYMBOL_GPL(rds_conn_drop); 546EXPORT_SYMBOL_GPL(rds_conn_drop);
488 547
489/* 548/*
549 * If the connection is down, trigger a connect. We may have scheduled a
550 * delayed reconnect however - in this case we should not interfere.
551 */
552void rds_conn_connect_if_down(struct rds_connection *conn)
553{
554 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
555 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
556 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
557}
558EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
559
560/*
490 * An error occurred on the connection 561 * An error occurred on the connection
491 */ 562 */
492void 563void
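
The connection hash follows the same recipe as the bind hash: readers walk buckets under rcu_read_lock() with the _rcu iterators, writers mutate under rds_conn_lock with the _rcu list helpers, and teardown waits out a grace period before freeing. A condensed sketch of the discipline - match() is a hypothetical predicate, the primitives mirror the patch:

	struct rds_connection *conn;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(conn, pos, head, c_hash_node)
		if (match(conn))	/* hypothetical */
			break;
	rcu_read_unlock();

	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();	/* no reader can still see conn */
	kmem_cache_free(rds_conn_slab, conn);
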
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 8f2d6dd7700a..b12a3951167d 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -53,12 +53,71 @@ MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
53module_param(rds_ib_retry_count, int, 0444); 53module_param(rds_ib_retry_count, int, 0444);
54MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error"); 54MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
55 55
56/*
57 * we have a clumsy combination of RCU and a rwsem protecting this list
58 * because it is used both in the get_mr fast path and while blocking in
59 * the FMR flushing path.
60 */
61DECLARE_RWSEM(rds_ib_devices_lock);
56struct list_head rds_ib_devices; 62struct list_head rds_ib_devices;
57 63
58/* NOTE: if also grabbing ibdev lock, grab this first */ 64/* NOTE: if also grabbing ibdev lock, grab this first */
59DEFINE_SPINLOCK(ib_nodev_conns_lock); 65DEFINE_SPINLOCK(ib_nodev_conns_lock);
60LIST_HEAD(ib_nodev_conns); 66LIST_HEAD(ib_nodev_conns);
61 67
68void rds_ib_nodev_connect(void)
69{
70 struct rds_ib_connection *ic;
71
72 spin_lock(&ib_nodev_conns_lock);
73 list_for_each_entry(ic, &ib_nodev_conns, ib_node)
74 rds_conn_connect_if_down(ic->conn);
75 spin_unlock(&ib_nodev_conns_lock);
76}
77
78void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
79{
80 struct rds_ib_connection *ic;
81 unsigned long flags;
82
83 spin_lock_irqsave(&rds_ibdev->spinlock, flags);
84 list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
85 rds_conn_drop(ic->conn);
86 spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
87}
88
89/*
 90 * rds_ib_destroy_mr_pool() blocks on a few things and MRs drop references
 91 * from interrupt context, so we push freeing off into a work struct in krdsd.
92 */
93static void rds_ib_dev_free(struct work_struct *work)
94{
95 struct rds_ib_ipaddr *i_ipaddr, *i_next;
96 struct rds_ib_device *rds_ibdev = container_of(work,
97 struct rds_ib_device, free_work);
98
99 if (rds_ibdev->mr_pool)
100 rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
101 if (rds_ibdev->mr)
102 ib_dereg_mr(rds_ibdev->mr);
103 if (rds_ibdev->pd)
104 ib_dealloc_pd(rds_ibdev->pd);
105
106 list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
107 list_del(&i_ipaddr->list);
108 kfree(i_ipaddr);
109 }
110
111 kfree(rds_ibdev);
112}
113
114void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
115{
116 BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
117 if (atomic_dec_and_test(&rds_ibdev->refcount))
118 queue_work(rds_wq, &rds_ibdev->free_work);
119}
120
62void rds_ib_add_one(struct ib_device *device) 121void rds_ib_add_one(struct ib_device *device)
63{ 122{
64 struct rds_ib_device *rds_ibdev; 123 struct rds_ib_device *rds_ibdev;
@@ -77,11 +136,14 @@ void rds_ib_add_one(struct ib_device *device)
77 goto free_attr; 136 goto free_attr;
78 } 137 }
79 138
80 rds_ibdev = kmalloc(sizeof *rds_ibdev, GFP_KERNEL); 139 rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
140 ibdev_to_node(device));
81 if (!rds_ibdev) 141 if (!rds_ibdev)
82 goto free_attr; 142 goto free_attr;
83 143
84 spin_lock_init(&rds_ibdev->spinlock); 144 spin_lock_init(&rds_ibdev->spinlock);
145 atomic_set(&rds_ibdev->refcount, 1);
146 INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
85 147
86 rds_ibdev->max_wrs = dev_attr->max_qp_wr; 148 rds_ibdev->max_wrs = dev_attr->max_qp_wr;
87 rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE); 149 rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
@@ -91,68 +153,107 @@ void rds_ib_add_one(struct ib_device *device)
91 min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) : 153 min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
92 fmr_pool_size; 154 fmr_pool_size;
93 155
156 rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
157 rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
158
94 rds_ibdev->dev = device; 159 rds_ibdev->dev = device;
95 rds_ibdev->pd = ib_alloc_pd(device); 160 rds_ibdev->pd = ib_alloc_pd(device);
96 if (IS_ERR(rds_ibdev->pd)) 161 if (IS_ERR(rds_ibdev->pd)) {
97 goto free_dev; 162 rds_ibdev->pd = NULL;
163 goto put_dev;
164 }
98 165
99 rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, 166 rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
100 IB_ACCESS_LOCAL_WRITE); 167 if (IS_ERR(rds_ibdev->mr)) {
101 if (IS_ERR(rds_ibdev->mr)) 168 rds_ibdev->mr = NULL;
102 goto err_pd; 169 goto put_dev;
170 }
103 171
104 rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev); 172 rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
105 if (IS_ERR(rds_ibdev->mr_pool)) { 173 if (IS_ERR(rds_ibdev->mr_pool)) {
106 rds_ibdev->mr_pool = NULL; 174 rds_ibdev->mr_pool = NULL;
107 goto err_mr; 175 goto put_dev;
108 } 176 }
109 177
110 INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); 178 INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
111 INIT_LIST_HEAD(&rds_ibdev->conn_list); 179 INIT_LIST_HEAD(&rds_ibdev->conn_list);
112 list_add_tail(&rds_ibdev->list, &rds_ib_devices); 180
181 down_write(&rds_ib_devices_lock);
182 list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
183 up_write(&rds_ib_devices_lock);
184 atomic_inc(&rds_ibdev->refcount);
113 185
114 ib_set_client_data(device, &rds_ib_client, rds_ibdev); 186 ib_set_client_data(device, &rds_ib_client, rds_ibdev);
187 atomic_inc(&rds_ibdev->refcount);
115 188
116 goto free_attr; 189 rds_ib_nodev_connect();
117 190
118err_mr: 191put_dev:
119 ib_dereg_mr(rds_ibdev->mr); 192 rds_ib_dev_put(rds_ibdev);
120err_pd:
121 ib_dealloc_pd(rds_ibdev->pd);
122free_dev:
123 kfree(rds_ibdev);
124free_attr: 193free_attr:
125 kfree(dev_attr); 194 kfree(dev_attr);
126} 195}
127 196
197/*
198 * New connections use this to find the device to associate with the
199 * connection. It's not in the fast path so we're not concerned about the
200 * performance of the IB call. (As of this writing, it uses an interrupt
201 * blocking spinlock to serialize walking a per-device list of all registered
202 * clients.)
203 *
204 * RCU is used to handle incoming connections racing with device teardown.
205 * Rather than use a lock to serialize removal from the client_data and
206 * getting a new reference, we use an RCU grace period. The destruction
207 * path removes the device from client_data and then waits for all RCU
208 * readers to finish.
209 *
 210 * A new connection can get NULL from this if it's arriving on a
211 * device that is in the process of being removed.
212 */
213struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
214{
215 struct rds_ib_device *rds_ibdev;
216
217 rcu_read_lock();
218 rds_ibdev = ib_get_client_data(device, &rds_ib_client);
219 if (rds_ibdev)
220 atomic_inc(&rds_ibdev->refcount);
221 rcu_read_unlock();
222 return rds_ibdev;
223}
224
225/*
226 * The IB stack is letting us know that a device is going away. This can
227 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
228 * the pci function, for example.
229 *
230 * This can be called at any time and can be racing with any other RDS path.
231 */
128void rds_ib_remove_one(struct ib_device *device) 232void rds_ib_remove_one(struct ib_device *device)
129{ 233{
130 struct rds_ib_device *rds_ibdev; 234 struct rds_ib_device *rds_ibdev;
131 struct rds_ib_ipaddr *i_ipaddr, *i_next;
132 235
133 rds_ibdev = ib_get_client_data(device, &rds_ib_client); 236 rds_ibdev = ib_get_client_data(device, &rds_ib_client);
134 if (!rds_ibdev) 237 if (!rds_ibdev)
135 return; 238 return;
136 239
137 list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) { 240 rds_ib_dev_shutdown(rds_ibdev);
138 list_del(&i_ipaddr->list);
139 kfree(i_ipaddr);
140 }
141 241
142 rds_ib_destroy_conns(rds_ibdev); 242 /* stop connection attempts from getting a reference to this device. */
243 ib_set_client_data(device, &rds_ib_client, NULL);
143 244
144 if (rds_ibdev->mr_pool) 245 down_write(&rds_ib_devices_lock);
145 rds_ib_destroy_mr_pool(rds_ibdev->mr_pool); 246 list_del_rcu(&rds_ibdev->list);
146 247 up_write(&rds_ib_devices_lock);
147 ib_dereg_mr(rds_ibdev->mr);
148
149 while (ib_dealloc_pd(rds_ibdev->pd)) {
150 rdsdebug("Failed to dealloc pd %p\n", rds_ibdev->pd);
151 msleep(1);
152 }
153 248
154 list_del(&rds_ibdev->list); 249 /*
 155 kfree(rds_ibdev); 250 * This synchronize_rcu() waits for readers of both the ib
251 * client data and the devices list to finish before we drop
252 * both of those references.
253 */
254 synchronize_rcu();
255 rds_ib_dev_put(rds_ibdev);
256 rds_ib_dev_put(rds_ibdev);
156} 257}
157 258
158struct ib_client rds_ib_client = { 259struct ib_client rds_ib_client = {
@@ -186,7 +287,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
186 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); 287 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
187 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); 288 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
188 289
189 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); 290 rds_ibdev = ic->rds_ibdev;
190 iinfo->max_send_wr = ic->i_send_ring.w_nr; 291 iinfo->max_send_wr = ic->i_send_ring.w_nr;
191 iinfo->max_recv_wr = ic->i_recv_ring.w_nr; 292 iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
192 iinfo->max_send_sge = rds_ibdev->max_sge; 293 iinfo->max_send_sge = rds_ibdev->max_sge;
@@ -248,29 +349,36 @@ static int rds_ib_laddr_check(__be32 addr)
248 return ret; 349 return ret;
249} 350}
250 351
352static void rds_ib_unregister_client(void)
353{
354 ib_unregister_client(&rds_ib_client);
355 /* wait for rds_ib_dev_free() to complete */
356 flush_workqueue(rds_wq);
357}
358
251void rds_ib_exit(void) 359void rds_ib_exit(void)
252{ 360{
253 rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); 361 rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
362 rds_ib_unregister_client();
254 rds_ib_destroy_nodev_conns(); 363 rds_ib_destroy_nodev_conns();
255 ib_unregister_client(&rds_ib_client);
256 rds_ib_sysctl_exit(); 364 rds_ib_sysctl_exit();
257 rds_ib_recv_exit(); 365 rds_ib_recv_exit();
258 rds_trans_unregister(&rds_ib_transport); 366 rds_trans_unregister(&rds_ib_transport);
367 rds_ib_fmr_exit();
259} 368}
260 369
261struct rds_transport rds_ib_transport = { 370struct rds_transport rds_ib_transport = {
262 .laddr_check = rds_ib_laddr_check, 371 .laddr_check = rds_ib_laddr_check,
263 .xmit_complete = rds_ib_xmit_complete, 372 .xmit_complete = rds_ib_xmit_complete,
264 .xmit = rds_ib_xmit, 373 .xmit = rds_ib_xmit,
265 .xmit_cong_map = NULL,
266 .xmit_rdma = rds_ib_xmit_rdma, 374 .xmit_rdma = rds_ib_xmit_rdma,
375 .xmit_atomic = rds_ib_xmit_atomic,
267 .recv = rds_ib_recv, 376 .recv = rds_ib_recv,
268 .conn_alloc = rds_ib_conn_alloc, 377 .conn_alloc = rds_ib_conn_alloc,
269 .conn_free = rds_ib_conn_free, 378 .conn_free = rds_ib_conn_free,
270 .conn_connect = rds_ib_conn_connect, 379 .conn_connect = rds_ib_conn_connect,
271 .conn_shutdown = rds_ib_conn_shutdown, 380 .conn_shutdown = rds_ib_conn_shutdown,
272 .inc_copy_to_user = rds_ib_inc_copy_to_user, 381 .inc_copy_to_user = rds_ib_inc_copy_to_user,
273 .inc_purge = rds_ib_inc_purge,
274 .inc_free = rds_ib_inc_free, 382 .inc_free = rds_ib_inc_free,
275 .cm_initiate_connect = rds_ib_cm_initiate_connect, 383 .cm_initiate_connect = rds_ib_cm_initiate_connect,
276 .cm_handle_connect = rds_ib_cm_handle_connect, 384 .cm_handle_connect = rds_ib_cm_handle_connect,
@@ -286,16 +394,20 @@ struct rds_transport rds_ib_transport = {
286 .t_type = RDS_TRANS_IB 394 .t_type = RDS_TRANS_IB
287}; 395};
288 396
289int __init rds_ib_init(void) 397int rds_ib_init(void)
290{ 398{
291 int ret; 399 int ret;
292 400
293 INIT_LIST_HEAD(&rds_ib_devices); 401 INIT_LIST_HEAD(&rds_ib_devices);
294 402
295 ret = ib_register_client(&rds_ib_client); 403 ret = rds_ib_fmr_init();
296 if (ret) 404 if (ret)
297 goto out; 405 goto out;
298 406
407 ret = ib_register_client(&rds_ib_client);
408 if (ret)
409 goto out_fmr_exit;
410
299 ret = rds_ib_sysctl_init(); 411 ret = rds_ib_sysctl_init();
300 if (ret) 412 if (ret)
301 goto out_ibreg; 413 goto out_ibreg;
@@ -317,7 +429,9 @@ out_recv:
317out_sysctl: 429out_sysctl:
318 rds_ib_sysctl_exit(); 430 rds_ib_sysctl_exit();
319out_ibreg: 431out_ibreg:
320 ib_unregister_client(&rds_ib_client); 432 rds_ib_unregister_client();
433out_fmr_exit:
434 rds_ib_fmr_exit();
321out: 435out:
322 return ret; 436 return ret;
323} 437}
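
rds_ib_device gains a refcount with its free path pushed to a work struct, so the final put may come from interrupt context while the actual teardown (MR pool destruction and friends) blocks safely in krdsd. The typical caller pattern, per the new helpers:

	struct rds_ib_device *rds_ibdev;

	rds_ibdev = rds_ib_get_client_data(device);
	if (!rds_ibdev)
		return;		/* device already being removed */

	/* ... use rds_ibdev; the reference pins it ... */

	rds_ib_dev_put(rds_ibdev);	/* last put queues rds_ib_dev_free() */
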
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 64df4e79b29f..7ad3d57e06a5 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -3,11 +3,13 @@
3 3
4#include <rdma/ib_verbs.h> 4#include <rdma/ib_verbs.h>
5#include <rdma/rdma_cm.h> 5#include <rdma/rdma_cm.h>
6#include <linux/pci.h>
7#include <linux/slab.h>
6#include "rds.h" 8#include "rds.h"
7#include "rdma_transport.h" 9#include "rdma_transport.h"
8 10
9#define RDS_FMR_SIZE 256 11#define RDS_FMR_SIZE 256
10#define RDS_FMR_POOL_SIZE 4096 12#define RDS_FMR_POOL_SIZE 8192
11 13
12#define RDS_IB_MAX_SGE 8 14#define RDS_IB_MAX_SGE 8
13#define RDS_IB_RECV_SGE 2 15#define RDS_IB_RECV_SGE 2
@@ -19,6 +21,9 @@
19 21
20#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */ 22#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */
21 23
24#define RDS_IB_RECYCLE_BATCH_COUNT 32
25
26extern struct rw_semaphore rds_ib_devices_lock;
22extern struct list_head rds_ib_devices; 27extern struct list_head rds_ib_devices;
23 28
24/* 29/*
@@ -26,20 +31,29 @@ extern struct list_head rds_ib_devices;
26 * try and minimize the amount of memory tied up both the device and 31 * try and minimize the amount of memory tied up both the device and
27 * socket receive queues. 32 * socket receive queues.
28 */ 33 */
29/* page offset of the final full frag that fits in the page */
30#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
31struct rds_page_frag { 34struct rds_page_frag {
32 struct list_head f_item; 35 struct list_head f_item;
33 struct page *f_page; 36 struct list_head f_cache_entry;
34 unsigned long f_offset; 37 struct scatterlist f_sg;
35 dma_addr_t f_mapped;
36}; 38};
37 39
38struct rds_ib_incoming { 40struct rds_ib_incoming {
39 struct list_head ii_frags; 41 struct list_head ii_frags;
42 struct list_head ii_cache_entry;
40 struct rds_incoming ii_inc; 43 struct rds_incoming ii_inc;
41}; 44};
42 45
46struct rds_ib_cache_head {
47 struct list_head *first;
48 unsigned long count;
49};
50
51struct rds_ib_refill_cache {
52 struct rds_ib_cache_head *percpu;
53 struct list_head *xfer;
54 struct list_head *ready;
55};
56
43struct rds_ib_connect_private { 57struct rds_ib_connect_private {
44 /* Add new fields at the end, and don't permute existing fields. */ 58 /* Add new fields at the end, and don't permute existing fields. */
45 __be32 dp_saddr; 59 __be32 dp_saddr;
@@ -53,8 +67,7 @@ struct rds_ib_connect_private {
53}; 67};
54 68
55struct rds_ib_send_work { 69struct rds_ib_send_work {
56 struct rds_message *s_rm; 70 void *s_op;
57 struct rds_rdma_op *s_op;
58 struct ib_send_wr s_wr; 71 struct ib_send_wr s_wr;
59 struct ib_sge s_sge[RDS_IB_MAX_SGE]; 72 struct ib_sge s_sge[RDS_IB_MAX_SGE];
60 unsigned long s_queued; 73 unsigned long s_queued;
@@ -92,10 +105,11 @@ struct rds_ib_connection {
92 105
93 /* tx */ 106 /* tx */
94 struct rds_ib_work_ring i_send_ring; 107 struct rds_ib_work_ring i_send_ring;
95 struct rds_message *i_rm; 108 struct rm_data_op *i_data_op;
96 struct rds_header *i_send_hdrs; 109 struct rds_header *i_send_hdrs;
97 u64 i_send_hdrs_dma; 110 u64 i_send_hdrs_dma;
98 struct rds_ib_send_work *i_sends; 111 struct rds_ib_send_work *i_sends;
112 atomic_t i_signaled_sends;
99 113
100 /* rx */ 114 /* rx */
101 struct tasklet_struct i_recv_tasklet; 115 struct tasklet_struct i_recv_tasklet;
@@ -106,8 +120,9 @@ struct rds_ib_connection {
106 struct rds_header *i_recv_hdrs; 120 struct rds_header *i_recv_hdrs;
107 u64 i_recv_hdrs_dma; 121 u64 i_recv_hdrs_dma;
108 struct rds_ib_recv_work *i_recvs; 122 struct rds_ib_recv_work *i_recvs;
109 struct rds_page_frag i_frag;
110 u64 i_ack_recv; /* last ACK received */ 123 u64 i_ack_recv; /* last ACK received */
124 struct rds_ib_refill_cache i_cache_incs;
125 struct rds_ib_refill_cache i_cache_frags;
111 126
112 /* sending acks */ 127 /* sending acks */
113 unsigned long i_ack_flags; 128 unsigned long i_ack_flags;
@@ -138,7 +153,6 @@ struct rds_ib_connection {
138 153
139 /* Batched completions */ 154 /* Batched completions */
140 unsigned int i_unsignaled_wrs; 155 unsigned int i_unsignaled_wrs;
141 long i_unsignaled_bytes;
142}; 156};
143 157
144/* This assumes that atomic_t is at least 32 bits */ 158/* This assumes that atomic_t is at least 32 bits */
@@ -164,9 +178,17 @@ struct rds_ib_device {
164 unsigned int max_fmrs; 178 unsigned int max_fmrs;
165 int max_sge; 179 int max_sge;
166 unsigned int max_wrs; 180 unsigned int max_wrs;
181 unsigned int max_initiator_depth;
182 unsigned int max_responder_resources;
167 spinlock_t spinlock; /* protect the above */ 183 spinlock_t spinlock; /* protect the above */
184 atomic_t refcount;
185 struct work_struct free_work;
168}; 186};
169 187
188#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
189#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
190#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
191
170/* bits for i_ack_flags */ 192/* bits for i_ack_flags */
171#define IB_ACK_IN_FLIGHT 0 193#define IB_ACK_IN_FLIGHT 0
172#define IB_ACK_REQUESTED 1 194#define IB_ACK_REQUESTED 1
@@ -202,6 +224,8 @@ struct rds_ib_statistics {
202 uint64_t s_ib_rdma_mr_pool_flush; 224 uint64_t s_ib_rdma_mr_pool_flush;
203 uint64_t s_ib_rdma_mr_pool_wait; 225 uint64_t s_ib_rdma_mr_pool_wait;
204 uint64_t s_ib_rdma_mr_pool_depleted; 226 uint64_t s_ib_rdma_mr_pool_depleted;
227 uint64_t s_ib_atomic_cswp;
228 uint64_t s_ib_atomic_fadd;
205}; 229};
206 230
207extern struct workqueue_struct *rds_ib_wq; 231extern struct workqueue_struct *rds_ib_wq;
@@ -243,6 +267,8 @@ static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
243extern struct rds_transport rds_ib_transport; 267extern struct rds_transport rds_ib_transport;
244extern void rds_ib_add_one(struct ib_device *device); 268extern void rds_ib_add_one(struct ib_device *device);
245extern void rds_ib_remove_one(struct ib_device *device); 269extern void rds_ib_remove_one(struct ib_device *device);
270struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
271void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
246extern struct ib_client rds_ib_client; 272extern struct ib_client rds_ib_client;
247 273
248extern unsigned int fmr_pool_size; 274extern unsigned int fmr_pool_size;
@@ -258,7 +284,7 @@ void rds_ib_conn_free(void *arg);
258int rds_ib_conn_connect(struct rds_connection *conn); 284int rds_ib_conn_connect(struct rds_connection *conn);
259void rds_ib_conn_shutdown(struct rds_connection *conn); 285void rds_ib_conn_shutdown(struct rds_connection *conn);
260void rds_ib_state_change(struct sock *sk); 286void rds_ib_state_change(struct sock *sk);
261int __init rds_ib_listen_init(void); 287int rds_ib_listen_init(void);
262void rds_ib_listen_stop(void); 288void rds_ib_listen_stop(void);
263void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...); 289void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
264int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, 290int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -275,15 +301,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn,
275int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr); 301int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
276void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); 302void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
277void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); 303void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
278void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock); 304void rds_ib_destroy_nodev_conns(void);
279static inline void rds_ib_destroy_nodev_conns(void)
280{
281 __rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
282}
283static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
284{
285 __rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
286}
287struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *); 305struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
288void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo); 306void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
289void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); 307void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
@@ -292,14 +310,16 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
292void rds_ib_sync_mr(void *trans_private, int dir); 310void rds_ib_sync_mr(void *trans_private, int dir);
293void rds_ib_free_mr(void *trans_private, int invalidate); 311void rds_ib_free_mr(void *trans_private, int invalidate);
294void rds_ib_flush_mrs(void); 312void rds_ib_flush_mrs(void);
313int rds_ib_fmr_init(void);
314void rds_ib_fmr_exit(void);
295 315
296/* ib_recv.c */ 316/* ib_recv.c */
297int __init rds_ib_recv_init(void); 317int rds_ib_recv_init(void);
298void rds_ib_recv_exit(void); 318void rds_ib_recv_exit(void);
299int rds_ib_recv(struct rds_connection *conn); 319int rds_ib_recv(struct rds_connection *conn);
300int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, 320int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
301 gfp_t page_gfp, int prefill); 321void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
302void rds_ib_inc_purge(struct rds_incoming *inc); 322void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
303void rds_ib_inc_free(struct rds_incoming *inc); 323void rds_ib_inc_free(struct rds_incoming *inc);
304int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 324int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
305 size_t size); 325 size_t size);
@@ -325,17 +345,19 @@ u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
325extern wait_queue_head_t rds_ib_ring_empty_wait; 345extern wait_queue_head_t rds_ib_ring_empty_wait;
326 346
327/* ib_send.c */ 347/* ib_send.c */
348char *rds_ib_wc_status_str(enum ib_wc_status status);
328void rds_ib_xmit_complete(struct rds_connection *conn); 349void rds_ib_xmit_complete(struct rds_connection *conn);
329int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, 350int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
330 unsigned int hdr_off, unsigned int sg, unsigned int off); 351 unsigned int hdr_off, unsigned int sg, unsigned int off);
331void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context); 352void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
332void rds_ib_send_init_ring(struct rds_ib_connection *ic); 353void rds_ib_send_init_ring(struct rds_ib_connection *ic);
333void rds_ib_send_clear_ring(struct rds_ib_connection *ic); 354void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
334int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op); 355int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
335void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits); 356void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
336void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted); 357void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
337int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, 358int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
338 u32 *adv_credits, int need_posted, int max_posted); 359 u32 *adv_credits, int need_posted, int max_posted);
360int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
339 361
340/* ib_stats.c */ 362/* ib_stats.c */
341DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); 363DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
@@ -344,7 +366,7 @@ unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
344 unsigned int avail); 366 unsigned int avail);
345 367
346/* ib_sysctl.c */ 368/* ib_sysctl.c */
347int __init rds_ib_sysctl_init(void); 369int rds_ib_sysctl_init(void);
348void rds_ib_sysctl_exit(void); 370void rds_ib_sysctl_exit(void);
349extern unsigned long rds_ib_sysctl_max_send_wr; 371extern unsigned long rds_ib_sysctl_max_send_wr;
350extern unsigned long rds_ib_sysctl_max_recv_wr; 372extern unsigned long rds_ib_sysctl_max_recv_wr;
@@ -354,28 +376,4 @@ extern unsigned long rds_ib_sysctl_max_recv_allocation;
354extern unsigned int rds_ib_sysctl_flow_control; 376extern unsigned int rds_ib_sysctl_flow_control;
355extern ctl_table rds_ib_sysctl_table[]; 377extern ctl_table rds_ib_sysctl_table[];
356 378
357/*
358 * Helper functions for getting/setting the header and data SGEs in
359 * RDS packets (not RDMA)
360 *
361 * From version 3.1 onwards, header is in front of data in the sge.
362 */
363static inline struct ib_sge *
364rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
365{
366 if (ic->conn->c_version > RDS_PROTOCOL_3_0)
367 return &sge[0];
368 else
369 return &sge[1];
370}
371
372static inline struct ib_sge *
373rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
374{
375 if (ic->conn->c_version > RDS_PROTOCOL_3_0)
376 return &sge[1];
377 else
378 return &sge[0];
379}
380
381#endif 379#endif
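
The header also introduces rds_ib_refill_cache, per-CPU recycling for receive fragments and incs. A hedged reading of the layout: each CPU accumulates freed items on its own head, and batches of RDS_IB_RECYCLE_BATCH_COUNT migrate to the shared xfer/ready lists for the refill path. The allocation below is an assumed sketch, not code from this patch:

	static int example_alloc_cache(struct rds_ib_refill_cache *cache)
	{
		/* one cache head per CPU; assumed allocation scheme */
		cache->percpu = alloc_percpu(struct rds_ib_cache_head);
		if (!cache->percpu)
			return -ENOMEM;
		cache->xfer = NULL;
		cache->ready = NULL;
		return 0;
	}
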
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f68832798db2..bc3dbc1ba61f 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -38,6 +38,36 @@
38#include "rds.h" 38#include "rds.h"
39#include "ib.h" 39#include "ib.h"
40 40
41static char *rds_ib_event_type_strings[] = {
42#define RDS_IB_EVENT_STRING(foo) \
43 [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
44 RDS_IB_EVENT_STRING(CQ_ERR),
45 RDS_IB_EVENT_STRING(QP_FATAL),
46 RDS_IB_EVENT_STRING(QP_REQ_ERR),
47 RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
48 RDS_IB_EVENT_STRING(COMM_EST),
49 RDS_IB_EVENT_STRING(SQ_DRAINED),
50 RDS_IB_EVENT_STRING(PATH_MIG),
51 RDS_IB_EVENT_STRING(PATH_MIG_ERR),
52 RDS_IB_EVENT_STRING(DEVICE_FATAL),
53 RDS_IB_EVENT_STRING(PORT_ACTIVE),
54 RDS_IB_EVENT_STRING(PORT_ERR),
55 RDS_IB_EVENT_STRING(LID_CHANGE),
56 RDS_IB_EVENT_STRING(PKEY_CHANGE),
57 RDS_IB_EVENT_STRING(SM_CHANGE),
58 RDS_IB_EVENT_STRING(SRQ_ERR),
59 RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
60 RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
61 RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
62#undef RDS_IB_EVENT_STRING
63};
64
65static char *rds_ib_event_str(enum ib_event_type type)
66{
67 return rds_str_array(rds_ib_event_type_strings,
68 ARRAY_SIZE(rds_ib_event_type_strings), type);
69};
70
41/* 71/*
42 * Set the selected protocol version 72 * Set the selected protocol version
43 */ 73 */
@@ -95,7 +125,6 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
95{ 125{
96 const struct rds_ib_connect_private *dp = NULL; 126 const struct rds_ib_connect_private *dp = NULL;
97 struct rds_ib_connection *ic = conn->c_transport_data; 127 struct rds_ib_connection *ic = conn->c_transport_data;
98 struct rds_ib_device *rds_ibdev;
99 struct ib_qp_attr qp_attr; 128 struct ib_qp_attr qp_attr;
100 int err; 129 int err;
101 130
@@ -111,11 +140,21 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
111 } 140 }
112 } 141 }
113 142
114 printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n", 143 if (conn->c_version < RDS_PROTOCOL(3,1)) {
115 &conn->c_faddr, 144 printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
116 RDS_PROTOCOL_MAJOR(conn->c_version), 145 " no longer supported\n",
117 RDS_PROTOCOL_MINOR(conn->c_version), 146 &conn->c_faddr,
118 ic->i_flowctl ? ", flow control" : ""); 147 RDS_PROTOCOL_MAJOR(conn->c_version),
148 RDS_PROTOCOL_MINOR(conn->c_version));
149 rds_conn_destroy(conn);
150 return;
151 } else {
152 printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
153 &conn->c_faddr,
154 RDS_PROTOCOL_MAJOR(conn->c_version),
155 RDS_PROTOCOL_MINOR(conn->c_version),
156 ic->i_flowctl ? ", flow control" : "");
157 }
119 158
120 /* 159 /*
121 * Init rings and fill recv. this needs to wait until protocol negotiation 160 * Init rings and fill recv. this needs to wait until protocol negotiation
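
The RDS_PROTOCOL(3,1) comparison above works because RDS packs the negotiated version into a single value with the major number in the high byte. The macros come from rds.h rather than this diff; they are approximately:

	/* Assumed from rds.h: packed protocol version encoding. */
	#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
	#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
	#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
	/* e.g. RDS_PROTOCOL(3, 1) == 0x0301, so any 3.0 peer compares lower. */
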
@@ -125,7 +164,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
125 rds_ib_recv_init_ring(ic); 164 rds_ib_recv_init_ring(ic);
126 /* Post receive buffers - as a side effect, this will update 165 /* Post receive buffers - as a side effect, this will update
127 * the posted credit count. */ 166 * the posted credit count. */
128 rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1); 167 rds_ib_recv_refill(conn, 1);
129 168
130 /* Tune RNR behavior */ 169 /* Tune RNR behavior */
131 rds_ib_tune_rnr(ic, &qp_attr); 170 rds_ib_tune_rnr(ic, &qp_attr);
@@ -135,12 +174,11 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
135 if (err) 174 if (err)
136 printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err); 175 printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
137 176
138 /* update ib_device with this local ipaddr & conn */ 177 /* update ib_device with this local ipaddr */
139 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); 178 err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
140 err = rds_ib_update_ipaddr(rds_ibdev, conn->c_laddr);
141 if (err) 179 if (err)
142 printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err); 180 printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
143 rds_ib_add_conn(rds_ibdev, conn); 181 err);
144 182
145 /* If the peer gave us the last packet it saw, process this as if 183 /* If the peer gave us the last packet it saw, process this as if
146 * we had received a regular ACK. */ 184 * we had received a regular ACK. */
@@ -153,18 +191,23 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
153static void rds_ib_cm_fill_conn_param(struct rds_connection *conn, 191static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
154 struct rdma_conn_param *conn_param, 192 struct rdma_conn_param *conn_param,
155 struct rds_ib_connect_private *dp, 193 struct rds_ib_connect_private *dp,
156 u32 protocol_version) 194 u32 protocol_version,
195 u32 max_responder_resources,
196 u32 max_initiator_depth)
157{ 197{
198 struct rds_ib_connection *ic = conn->c_transport_data;
199 struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
200
158 memset(conn_param, 0, sizeof(struct rdma_conn_param)); 201 memset(conn_param, 0, sizeof(struct rdma_conn_param));
159 /* XXX tune these? */ 202
160 conn_param->responder_resources = 1; 203 conn_param->responder_resources =
161 conn_param->initiator_depth = 1; 204 min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
205 conn_param->initiator_depth =
206 min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
162 conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7); 207 conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
163 conn_param->rnr_retry_count = 7; 208 conn_param->rnr_retry_count = 7;
164 209
165 if (dp) { 210 if (dp) {
166 struct rds_ib_connection *ic = conn->c_transport_data;
167
168 memset(dp, 0, sizeof(*dp)); 211 memset(dp, 0, sizeof(*dp));
169 dp->dp_saddr = conn->c_laddr; 212 dp->dp_saddr = conn->c_laddr;
170 dp->dp_daddr = conn->c_faddr; 213 dp->dp_daddr = conn->c_faddr;
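
The min_t() clamps above negotiate RDMA read depth per connection: the passive side limits whatever the peer advertised to what the local HCA can honour, while the active side (rds_ib_cm_initiate_connect below) passes UINT_MAX so that only the local caps bite. Those per-device caps are filled in by the device-probe path, outside this diff; a hypothetical sketch of where they would come from:

	/* Hypothetical: capture the HCA's RDMA-read limits at device-add
	 * time; these are the values clamped against above. */
	static void rds_ib_capture_rd_atom(struct rds_ib_device *rds_ibdev,
					   struct ib_device_attr *dev_attr)
	{
		rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
		rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
	}
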
@@ -189,7 +232,8 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
189 232
190static void rds_ib_cq_event_handler(struct ib_event *event, void *data) 233static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
191{ 234{
192 rdsdebug("event %u data %p\n", event->event, data); 235 rdsdebug("event %u (%s) data %p\n",
236 event->event, rds_ib_event_str(event->event), data);
193} 237}
194 238
195static void rds_ib_qp_event_handler(struct ib_event *event, void *data) 239static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
@@ -197,16 +241,18 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
197 struct rds_connection *conn = data; 241 struct rds_connection *conn = data;
198 struct rds_ib_connection *ic = conn->c_transport_data; 242 struct rds_ib_connection *ic = conn->c_transport_data;
199 243
200 rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event); 244 rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
245 rds_ib_event_str(event->event));
201 246
202 switch (event->event) { 247 switch (event->event) {
203 case IB_EVENT_COMM_EST: 248 case IB_EVENT_COMM_EST:
204 rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); 249 rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
205 break; 250 break;
206 default: 251 default:
207 rdsdebug("Fatal QP Event %u " 252 rdsdebug("Fatal QP Event %u (%s) "
208 "- connection %pI4->%pI4, reconnecting\n", 253 "- connection %pI4->%pI4, reconnecting\n",
209 event->event, &conn->c_laddr, &conn->c_faddr); 254 event->event, rds_ib_event_str(event->event),
255 &conn->c_laddr, &conn->c_faddr);
210 rds_conn_drop(conn); 256 rds_conn_drop(conn);
211 break; 257 break;
212 } 258 }
@@ -224,18 +270,16 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
224 struct rds_ib_device *rds_ibdev; 270 struct rds_ib_device *rds_ibdev;
225 int ret; 271 int ret;
226 272
227 /* rds_ib_add_one creates a rds_ib_device object per IB device, 273 /*
228 * and allocates a protection domain, memory range and FMR pool 274 * It's normal to see a null device if an incoming connection races
229 * for each. If that fails for any reason, it will not register 275 * with device removal, so we don't print a warning.
230 * the rds_ibdev at all.
231 */ 276 */
232 rds_ibdev = ib_get_client_data(dev, &rds_ib_client); 277 rds_ibdev = rds_ib_get_client_data(dev);
233 if (rds_ibdev == NULL) { 278 if (!rds_ibdev)
234 if (printk_ratelimit())
235 printk(KERN_NOTICE "RDS/IB: No client_data for device %s\n",
236 dev->name);
237 return -EOPNOTSUPP; 279 return -EOPNOTSUPP;
238 } 280
281 /* add the conn now so that connection establishment has the dev */
282 rds_ib_add_conn(rds_ibdev, conn);
239 283
240 if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1) 284 if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
241 rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1); 285 rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
@@ -306,7 +350,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
306 ic->i_send_ring.w_nr * 350 ic->i_send_ring.w_nr *
307 sizeof(struct rds_header), 351 sizeof(struct rds_header),
308 &ic->i_send_hdrs_dma, GFP_KERNEL); 352 &ic->i_send_hdrs_dma, GFP_KERNEL);
309 if (ic->i_send_hdrs == NULL) { 353 if (!ic->i_send_hdrs) {
310 ret = -ENOMEM; 354 ret = -ENOMEM;
311 rdsdebug("ib_dma_alloc_coherent send failed\n"); 355 rdsdebug("ib_dma_alloc_coherent send failed\n");
312 goto out; 356 goto out;
@@ -316,7 +360,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
316 ic->i_recv_ring.w_nr * 360 ic->i_recv_ring.w_nr *
317 sizeof(struct rds_header), 361 sizeof(struct rds_header),
318 &ic->i_recv_hdrs_dma, GFP_KERNEL); 362 &ic->i_recv_hdrs_dma, GFP_KERNEL);
319 if (ic->i_recv_hdrs == NULL) { 363 if (!ic->i_recv_hdrs) {
320 ret = -ENOMEM; 364 ret = -ENOMEM;
321 rdsdebug("ib_dma_alloc_coherent recv failed\n"); 365 rdsdebug("ib_dma_alloc_coherent recv failed\n");
322 goto out; 366 goto out;
@@ -324,22 +368,24 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
324 368
325 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), 369 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
326 &ic->i_ack_dma, GFP_KERNEL); 370 &ic->i_ack_dma, GFP_KERNEL);
327 if (ic->i_ack == NULL) { 371 if (!ic->i_ack) {
328 ret = -ENOMEM; 372 ret = -ENOMEM;
329 rdsdebug("ib_dma_alloc_coherent ack failed\n"); 373 rdsdebug("ib_dma_alloc_coherent ack failed\n");
330 goto out; 374 goto out;
331 } 375 }
332 376
333 ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work)); 377 ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
334 if (ic->i_sends == NULL) { 378 ibdev_to_node(dev));
379 if (!ic->i_sends) {
335 ret = -ENOMEM; 380 ret = -ENOMEM;
336 rdsdebug("send allocation failed\n"); 381 rdsdebug("send allocation failed\n");
337 goto out; 382 goto out;
338 } 383 }
339 memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work)); 384 memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
340 385
341 ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work)); 386 ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
342 if (ic->i_recvs == NULL) { 387 ibdev_to_node(dev));
388 if (!ic->i_recvs) {
343 ret = -ENOMEM; 389 ret = -ENOMEM;
344 rdsdebug("recv allocation failed\n"); 390 rdsdebug("recv allocation failed\n");
345 goto out; 391 goto out;
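
Switching to vmalloc_node() places the send/recv work arrays on the memory node closest to the HCA instead of wherever the connecting thread happens to be running. The node helpers are assumed from RDS's ib.h in this same series; a sketch consistent with their use here and in ib_rdma.c below:

	/* Assumed helpers (ib.h): resolve the NUMA node behind an IB
	 * device via its underlying DMA device. */
	#define ibdev_to_node(ibdev)		dev_to_node((ibdev)->dma_device)
	#define rdsibdev_to_node(rds_ibdev)	ibdev_to_node((rds_ibdev)->dev)
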
@@ -352,6 +398,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
352 ic->i_send_cq, ic->i_recv_cq); 398 ic->i_send_cq, ic->i_recv_cq);
353 399
354out: 400out:
401 rds_ib_dev_put(rds_ibdev);
355 return ret; 402 return ret;
356} 403}
357 404
@@ -409,7 +456,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
409 struct rds_ib_connection *ic = NULL; 456 struct rds_ib_connection *ic = NULL;
410 struct rdma_conn_param conn_param; 457 struct rdma_conn_param conn_param;
411 u32 version; 458 u32 version;
412 int err, destroy = 1; 459 int err = 1, destroy = 1;
413 460
414 /* Check whether the remote protocol version matches ours. */ 461 /* Check whether the remote protocol version matches ours. */
415 version = rds_ib_protocol_compatible(event); 462 version = rds_ib_protocol_compatible(event);
@@ -448,7 +495,6 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
448 /* Wait and see - our connect may still be succeeding */ 495 /* Wait and see - our connect may still be succeeding */
449 rds_ib_stats_inc(s_ib_connect_raced); 496 rds_ib_stats_inc(s_ib_connect_raced);
450 } 497 }
451 mutex_unlock(&conn->c_cm_lock);
452 goto out; 498 goto out;
453 } 499 }
454 500
@@ -479,20 +525,20 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
479 goto out; 525 goto out;
480 } 526 }
481 527
482 rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version); 528 rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
529 event->param.conn.responder_resources,
530 event->param.conn.initiator_depth);
483 531
484 /* rdma_accept() calls rdma_reject() internally if it fails */ 532 /* rdma_accept() calls rdma_reject() internally if it fails */
485 err = rdma_accept(cm_id, &conn_param); 533 err = rdma_accept(cm_id, &conn_param);
486 mutex_unlock(&conn->c_cm_lock); 534 if (err)
487 if (err) {
488 rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err); 535 rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
489 goto out;
490 }
491
492 return 0;
493 536
494out: 537out:
495 rdma_reject(cm_id, NULL, 0); 538 if (conn)
539 mutex_unlock(&conn->c_cm_lock);
540 if (err)
541 rdma_reject(cm_id, NULL, 0);
496 return destroy; 542 return destroy;
497} 543}
498 544
@@ -516,8 +562,8 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
516 goto out; 562 goto out;
517 } 563 }
518 564
519 rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION); 565 rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
520 566 UINT_MAX, UINT_MAX);
521 ret = rdma_connect(cm_id, &conn_param); 567 ret = rdma_connect(cm_id, &conn_param);
522 if (ret) 568 if (ret)
523 rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret); 569 rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
@@ -601,9 +647,19 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
601 ic->i_cm_id, err); 647 ic->i_cm_id, err);
602 } 648 }
603 649
650 /*
651 * We want to wait for tx and rx completion to finish
652 * before we tear down the connection, but we have to be
653 * careful not to get stuck waiting on a send ring that
 654 * only has unsignaled sends in it. We've shut down new
 655 * sends before getting here, so by waiting for signaled
656 * sends to complete we're ensured that there will be no
657 * more tx processing.
658 */
604 wait_event(rds_ib_ring_empty_wait, 659 wait_event(rds_ib_ring_empty_wait,
605 rds_ib_ring_empty(&ic->i_send_ring) && 660 rds_ib_ring_empty(&ic->i_recv_ring) &&
606 rds_ib_ring_empty(&ic->i_recv_ring)); 661 (atomic_read(&ic->i_signaled_sends) == 0));
662 tasklet_kill(&ic->i_recv_tasklet);
607 663
608 if (ic->i_send_hdrs) 664 if (ic->i_send_hdrs)
609 ib_dma_free_coherent(dev, 665 ib_dma_free_coherent(dev,
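
This wait only terminates if the send-completion path keeps i_signaled_sends accurate and kicks the waitqueue when the count reaches zero. That bookkeeping lives in ib_send.c, outside this diff; a sketch of the counterpart the wait_event() above is assumed to pair with:

	/* Assumed counterpart in the send completion handler: retire nr
	 * signaled sends and wake rds_ib_conn_shutdown() on the last one. */
	static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
	{
		if (atomic_sub_return(nr, &ic->i_signaled_sends) == 0 &&
		    waitqueue_active(&rds_ib_ring_empty_wait))
			wake_up(&rds_ib_ring_empty_wait);
		BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
	}
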
@@ -654,9 +710,12 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
654 BUG_ON(ic->rds_ibdev); 710 BUG_ON(ic->rds_ibdev);
655 711
656 /* Clear pending transmit */ 712 /* Clear pending transmit */
657 if (ic->i_rm) { 713 if (ic->i_data_op) {
658 rds_message_put(ic->i_rm); 714 struct rds_message *rm;
659 ic->i_rm = NULL; 715
716 rm = container_of(ic->i_data_op, struct rds_message, data);
717 rds_message_put(rm);
718 ic->i_data_op = NULL;
660 } 719 }
661 720
662 /* Clear the ACK state */ 721 /* Clear the ACK state */
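
The container_of() above works because i_data_op no longer points at a whole rds_message but at a data operation embedded inside one; recovering the owning message is just a member-offset subtraction. Illustrative layout, with member names partly assumed from the wider series:

	/* Illustrative: ic->i_data_op == &rm->data, so
	 * container_of(ic->i_data_op, struct rds_message, data) == rm. */
	struct rds_message {
		atomic_t		m_refcount;
		/* ... */
		struct rm_data_op	data;	/* embedded data op */
	};
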
@@ -690,12 +749,19 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
690{ 749{
691 struct rds_ib_connection *ic; 750 struct rds_ib_connection *ic;
692 unsigned long flags; 751 unsigned long flags;
752 int ret;
693 753
694 /* XXX too lazy? */ 754 /* XXX too lazy? */
695 ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL); 755 ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
696 if (ic == NULL) 756 if (!ic)
697 return -ENOMEM; 757 return -ENOMEM;
698 758
759 ret = rds_ib_recv_alloc_caches(ic);
760 if (ret) {
761 kfree(ic);
762 return ret;
763 }
764
699 INIT_LIST_HEAD(&ic->ib_node); 765 INIT_LIST_HEAD(&ic->ib_node);
700 tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn, 766 tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
701 (unsigned long) ic); 767 (unsigned long) ic);
@@ -703,6 +769,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
703#ifndef KERNEL_HAS_ATOMIC64 769#ifndef KERNEL_HAS_ATOMIC64
704 spin_lock_init(&ic->i_ack_lock); 770 spin_lock_init(&ic->i_ack_lock);
705#endif 771#endif
772 atomic_set(&ic->i_signaled_sends, 0);
706 773
707 /* 774 /*
708 * rds_ib_conn_shutdown() waits for these to be emptied so they 775 * rds_ib_conn_shutdown() waits for these to be emptied so they
@@ -744,6 +811,8 @@ void rds_ib_conn_free(void *arg)
744 list_del(&ic->ib_node); 811 list_del(&ic->ib_node);
745 spin_unlock_irq(lock_ptr); 812 spin_unlock_irq(lock_ptr);
746 813
814 rds_ib_recv_free_caches(ic);
815
747 kfree(ic); 816 kfree(ic);
748} 817}
749 818
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a54cd63f9e35..8f6e221c9f78 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -32,11 +32,16 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/rculist.h>
35 36
36#include "rds.h" 37#include "rds.h"
37#include "rdma.h"
38#include "ib.h" 38#include "ib.h"
39#include "xlist.h"
39 40
41struct workqueue_struct *rds_ib_fmr_wq;
42
43static DEFINE_PER_CPU(unsigned long, clean_list_grace);
44#define CLEAN_LIST_BUSY_BIT 0
40 45
41/* 46/*
42 * This is stored as mr->r_trans_private. 47 * This is stored as mr->r_trans_private.
@@ -45,7 +50,11 @@ struct rds_ib_mr {
45 struct rds_ib_device *device; 50 struct rds_ib_device *device;
46 struct rds_ib_mr_pool *pool; 51 struct rds_ib_mr_pool *pool;
47 struct ib_fmr *fmr; 52 struct ib_fmr *fmr;
48 struct list_head list; 53
54 struct xlist_head xlist;
55
56 /* unmap_list is for freeing */
57 struct list_head unmap_list;
49 unsigned int remap_count; 58 unsigned int remap_count;
50 59
51 struct scatterlist *sg; 60 struct scatterlist *sg;
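
The MR now carries two links because it lives on two kinds of list: xlist is a minimal lock-free LIFO (a single next pointer manipulated with cmpxchg()) used for the hot free/drop/clean lists, while unmap_list is a conventional list_head used for batched flushing under the pool mutex. The primitives are assumed from the new net/rds/xlist.h; a sketch of the core push and pop, where the tail argument lets a pre-chained run of nodes be spliced in with one cmpxchg():

	/* Assumed shape of the xlist primitives (net/rds/xlist.h). */
	struct xlist_head {
		struct xlist_head *next;
	};

	static inline void xlist_add(struct xlist_head *new,
				     struct xlist_head *tail,
				     struct xlist_head *head)
	{
		struct xlist_head *cur, *check;

		while (1) {
			cur = head->next;
			tail->next = cur;	/* chain our run in front */
			check = cmpxchg(&head->next, cur, new);
			if (check == cur)	/* nobody raced us */
				break;
		}
	}

	static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
	{
		struct xlist_head *cur, *check, *next;

		while (1) {
			cur = head->next;
			if (!cur)
				return NULL;
			next = cur->next;	/* racy read; see the grace
						 * machinery in this file */
			check = cmpxchg(&head->next, cur, next);
			if (check == cur)
				return cur;
		}
	}
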
@@ -59,14 +68,16 @@ struct rds_ib_mr {
59 */ 68 */
60struct rds_ib_mr_pool { 69struct rds_ib_mr_pool {
61 struct mutex flush_lock; /* serialize fmr invalidate */ 70 struct mutex flush_lock; /* serialize fmr invalidate */
62 struct work_struct flush_worker; /* flush worker */ 71 struct delayed_work flush_worker; /* flush worker */
63 72
64 spinlock_t list_lock; /* protect variables below */
65 atomic_t item_count; /* total # of MRs */ 73 atomic_t item_count; /* total # of MRs */
66 atomic_t dirty_count; /* # dirty of MRs */ 74 atomic_t dirty_count; /* # dirty of MRs */
67 struct list_head drop_list; /* MRs that have reached their max_maps limit */ 75
68 struct list_head free_list; /* unused MRs */ 76 struct xlist_head drop_list; /* MRs that have reached their max_maps limit */
69 struct list_head clean_list; /* unused & unamapped MRs */ 77 struct xlist_head free_list; /* unused MRs */
78 struct xlist_head clean_list; /* global unused & unamapped MRs */
79 wait_queue_head_t flush_wait;
80
70 atomic_t free_pinned; /* memory pinned by free MRs */ 81 atomic_t free_pinned; /* memory pinned by free MRs */
71 unsigned long max_items; 82 unsigned long max_items;
72 unsigned long max_items_soft; 83 unsigned long max_items_soft;
@@ -74,7 +85,7 @@ struct rds_ib_mr_pool {
74 struct ib_fmr_attr fmr_attr; 85 struct ib_fmr_attr fmr_attr;
75}; 86};
76 87
77static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all); 88static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
78static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr); 89static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
79static void rds_ib_mr_pool_flush_worker(struct work_struct *work); 90static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
80 91
@@ -83,16 +94,17 @@ static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
83 struct rds_ib_device *rds_ibdev; 94 struct rds_ib_device *rds_ibdev;
84 struct rds_ib_ipaddr *i_ipaddr; 95 struct rds_ib_ipaddr *i_ipaddr;
85 96
86 list_for_each_entry(rds_ibdev, &rds_ib_devices, list) { 97 rcu_read_lock();
87 spin_lock_irq(&rds_ibdev->spinlock); 98 list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
88 list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) { 99 list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
89 if (i_ipaddr->ipaddr == ipaddr) { 100 if (i_ipaddr->ipaddr == ipaddr) {
90 spin_unlock_irq(&rds_ibdev->spinlock); 101 atomic_inc(&rds_ibdev->refcount);
102 rcu_read_unlock();
91 return rds_ibdev; 103 return rds_ibdev;
92 } 104 }
93 } 105 }
94 spin_unlock_irq(&rds_ibdev->spinlock);
95 } 106 }
107 rcu_read_unlock();
96 108
97 return NULL; 109 return NULL;
98} 110}
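
rds_ib_get_device() is now lockless on the read side: it walks both lists under rcu_read_lock() and pins the result with a refcount before leaving the critical section, so concurrent device removal cannot free the struct while the caller still holds it. The matching release is outside this diff; a sketch of the assumed put side:

	/* Assumed release path: the last reference frees the device out of
	 * line, so a put is safe from contexts that hold other RDS locks.
	 * The free_work member is assumed from the companion patches. */
	void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
	{
		BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
		if (atomic_dec_and_test(&rds_ibdev->refcount))
			queue_work(rds_wq, &rds_ibdev->free_work);
	}
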
@@ -108,7 +120,7 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
108 i_ipaddr->ipaddr = ipaddr; 120 i_ipaddr->ipaddr = ipaddr;
109 121
110 spin_lock_irq(&rds_ibdev->spinlock); 122 spin_lock_irq(&rds_ibdev->spinlock);
111 list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list); 123 list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
112 spin_unlock_irq(&rds_ibdev->spinlock); 124 spin_unlock_irq(&rds_ibdev->spinlock);
113 125
114 return 0; 126 return 0;
@@ -116,17 +128,24 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
116 128
117static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) 129static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
118{ 130{
119 struct rds_ib_ipaddr *i_ipaddr, *next; 131 struct rds_ib_ipaddr *i_ipaddr;
132 struct rds_ib_ipaddr *to_free = NULL;
133
120 134
121 spin_lock_irq(&rds_ibdev->spinlock); 135 spin_lock_irq(&rds_ibdev->spinlock);
122 list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) { 136 list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
123 if (i_ipaddr->ipaddr == ipaddr) { 137 if (i_ipaddr->ipaddr == ipaddr) {
124 list_del(&i_ipaddr->list); 138 list_del_rcu(&i_ipaddr->list);
125 kfree(i_ipaddr); 139 to_free = i_ipaddr;
126 break; 140 break;
127 } 141 }
128 } 142 }
129 spin_unlock_irq(&rds_ibdev->spinlock); 143 spin_unlock_irq(&rds_ibdev->spinlock);
144
145 if (to_free) {
146 synchronize_rcu();
147 kfree(to_free);
148 }
130} 149}
131 150
132int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) 151int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
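
rds_ib_remove_ipaddr() above is the removal half of the same RCU pattern: after list_del_rcu() no new reader can find the entry, and synchronize_rcu() blocks until every reader that might still hold it has left its read-side critical section, which makes the kfree() safe. On later kernels the blocking grace period could be traded for a deferred free:

	/* Alternative sketch: kfree_rcu() postdates this patch and would
	 * need a struct rcu_head member (here assumed to be named 'rcu')
	 * added to struct rds_ib_ipaddr. */
	if (to_free)
		kfree_rcu(to_free, rcu);	/* no synchronous wait */
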
@@ -134,8 +153,10 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
134 struct rds_ib_device *rds_ibdev_old; 153 struct rds_ib_device *rds_ibdev_old;
135 154
136 rds_ibdev_old = rds_ib_get_device(ipaddr); 155 rds_ibdev_old = rds_ib_get_device(ipaddr);
137 if (rds_ibdev_old) 156 if (rds_ibdev_old) {
138 rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr); 157 rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
158 rds_ib_dev_put(rds_ibdev_old);
159 }
139 160
140 return rds_ib_add_ipaddr(rds_ibdev, ipaddr); 161 return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
141} 162}
@@ -156,6 +177,7 @@ void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *con
156 spin_unlock_irq(&ib_nodev_conns_lock); 177 spin_unlock_irq(&ib_nodev_conns_lock);
157 178
158 ic->rds_ibdev = rds_ibdev; 179 ic->rds_ibdev = rds_ibdev;
180 atomic_inc(&rds_ibdev->refcount);
159} 181}
160 182
161void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn) 183void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
@@ -175,18 +197,18 @@ void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *
175 spin_unlock(&ib_nodev_conns_lock); 197 spin_unlock(&ib_nodev_conns_lock);
176 198
177 ic->rds_ibdev = NULL; 199 ic->rds_ibdev = NULL;
200 rds_ib_dev_put(rds_ibdev);
178} 201}
179 202
180void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock) 203void rds_ib_destroy_nodev_conns(void)
181{ 204{
182 struct rds_ib_connection *ic, *_ic; 205 struct rds_ib_connection *ic, *_ic;
183 LIST_HEAD(tmp_list); 206 LIST_HEAD(tmp_list);
184 207
185 /* avoid calling conn_destroy with irqs off */ 208 /* avoid calling conn_destroy with irqs off */
186 spin_lock_irq(list_lock); 209 spin_lock_irq(&ib_nodev_conns_lock);
187 list_splice(list, &tmp_list); 210 list_splice(&ib_nodev_conns, &tmp_list);
188 INIT_LIST_HEAD(list); 211 spin_unlock_irq(&ib_nodev_conns_lock);
189 spin_unlock_irq(list_lock);
190 212
191 list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) 213 list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
192 rds_conn_destroy(ic->conn); 214 rds_conn_destroy(ic->conn);
@@ -200,12 +222,12 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
200 if (!pool) 222 if (!pool)
201 return ERR_PTR(-ENOMEM); 223 return ERR_PTR(-ENOMEM);
202 224
203 INIT_LIST_HEAD(&pool->free_list); 225 INIT_XLIST_HEAD(&pool->free_list);
204 INIT_LIST_HEAD(&pool->drop_list); 226 INIT_XLIST_HEAD(&pool->drop_list);
205 INIT_LIST_HEAD(&pool->clean_list); 227 INIT_XLIST_HEAD(&pool->clean_list);
206 mutex_init(&pool->flush_lock); 228 mutex_init(&pool->flush_lock);
207 spin_lock_init(&pool->list_lock); 229 init_waitqueue_head(&pool->flush_wait);
208 INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker); 230 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
209 231
210 pool->fmr_attr.max_pages = fmr_message_size; 232 pool->fmr_attr.max_pages = fmr_message_size;
211 pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; 233 pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
@@ -233,34 +255,60 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
233 255
234void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) 256void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
235{ 257{
236 flush_workqueue(rds_wq); 258 cancel_delayed_work_sync(&pool->flush_worker);
237 rds_ib_flush_mr_pool(pool, 1); 259 rds_ib_flush_mr_pool(pool, 1, NULL);
238 WARN_ON(atomic_read(&pool->item_count)); 260 WARN_ON(atomic_read(&pool->item_count));
239 WARN_ON(atomic_read(&pool->free_pinned)); 261 WARN_ON(atomic_read(&pool->free_pinned));
240 kfree(pool); 262 kfree(pool);
241} 263}
242 264
265static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
266 struct rds_ib_mr **ibmr_ret)
267{
268 struct xlist_head *ibmr_xl;
269 ibmr_xl = xlist_del_head_fast(xl);
270 *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
271}
272
243static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool) 273static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
244{ 274{
245 struct rds_ib_mr *ibmr = NULL; 275 struct rds_ib_mr *ibmr = NULL;
246 unsigned long flags; 276 struct xlist_head *ret;
277 unsigned long *flag;
247 278
248 spin_lock_irqsave(&pool->list_lock, flags); 279 preempt_disable();
249 if (!list_empty(&pool->clean_list)) { 280 flag = &__get_cpu_var(clean_list_grace);
250 ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list); 281 set_bit(CLEAN_LIST_BUSY_BIT, flag);
251 list_del_init(&ibmr->list); 282 ret = xlist_del_head(&pool->clean_list);
252 } 283 if (ret)
253 spin_unlock_irqrestore(&pool->list_lock, flags); 284 ibmr = list_entry(ret, struct rds_ib_mr, xlist);
254 285
286 clear_bit(CLEAN_LIST_BUSY_BIT, flag);
287 preempt_enable();
255 return ibmr; 288 return ibmr;
256} 289}
257 290
291static inline void wait_clean_list_grace(void)
292{
293 int cpu;
294 unsigned long *flag;
295
296 for_each_online_cpu(cpu) {
297 flag = &per_cpu(clean_list_grace, cpu);
298 while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
299 cpu_relax();
300 }
301}
302
258static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev) 303static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
259{ 304{
260 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; 305 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
261 struct rds_ib_mr *ibmr = NULL; 306 struct rds_ib_mr *ibmr = NULL;
262 int err = 0, iter = 0; 307 int err = 0, iter = 0;
263 308
309 if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
310 queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
311
264 while (1) { 312 while (1) {
265 ibmr = rds_ib_reuse_fmr(pool); 313 ibmr = rds_ib_reuse_fmr(pool);
266 if (ibmr) 314 if (ibmr)
@@ -287,19 +335,24 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
287 335
288 /* We do have some empty MRs. Flush them out. */ 336 /* We do have some empty MRs. Flush them out. */
289 rds_ib_stats_inc(s_ib_rdma_mr_pool_wait); 337 rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
290 rds_ib_flush_mr_pool(pool, 0); 338 rds_ib_flush_mr_pool(pool, 0, &ibmr);
339 if (ibmr)
340 return ibmr;
291 } 341 }
292 342
293 ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL); 343 ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
294 if (!ibmr) { 344 if (!ibmr) {
295 err = -ENOMEM; 345 err = -ENOMEM;
296 goto out_no_cigar; 346 goto out_no_cigar;
297 } 347 }
298 348
349 memset(ibmr, 0, sizeof(*ibmr));
350
299 ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd, 351 ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
300 (IB_ACCESS_LOCAL_WRITE | 352 (IB_ACCESS_LOCAL_WRITE |
301 IB_ACCESS_REMOTE_READ | 353 IB_ACCESS_REMOTE_READ |
302 IB_ACCESS_REMOTE_WRITE), 354 IB_ACCESS_REMOTE_WRITE|
355 IB_ACCESS_REMOTE_ATOMIC),
303 &pool->fmr_attr); 356 &pool->fmr_attr);
304 if (IS_ERR(ibmr->fmr)) { 357 if (IS_ERR(ibmr->fmr)) {
305 err = PTR_ERR(ibmr->fmr); 358 err = PTR_ERR(ibmr->fmr);
@@ -367,7 +420,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
367 if (page_cnt > fmr_message_size) 420 if (page_cnt > fmr_message_size)
368 return -EINVAL; 421 return -EINVAL;
369 422
370 dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC); 423 dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
424 rdsibdev_to_node(rds_ibdev));
371 if (!dma_pages) 425 if (!dma_pages)
372 return -ENOMEM; 426 return -ENOMEM;
373 427
@@ -441,7 +495,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
441 495
442 /* FIXME we need a way to tell a r/w MR 496 /* FIXME we need a way to tell a r/w MR
443 * from a r/o MR */ 497 * from a r/o MR */
444 BUG_ON(in_interrupt()); 498 BUG_ON(irqs_disabled());
445 set_page_dirty(page); 499 set_page_dirty(page);
446 put_page(page); 500 put_page(page);
447 } 501 }
@@ -477,33 +531,109 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
477} 531}
478 532
479/* 533/*
534 * given an xlist of mrs, put them all into the list_head for more processing
535 */
536static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
537{
538 struct rds_ib_mr *ibmr;
539 struct xlist_head splice;
540 struct xlist_head *cur;
541 struct xlist_head *next;
542
543 splice.next = NULL;
544 xlist_splice(xlist, &splice);
545 cur = splice.next;
546 while (cur) {
547 next = cur->next;
548 ibmr = list_entry(cur, struct rds_ib_mr, xlist);
549 list_add_tail(&ibmr->unmap_list, list);
550 cur = next;
551 }
552}
553
554/*
 555 * this takes a list head of mrs and chains them through their
 556 * xlist entries, returning the tail so the whole run can be
 557 * spliced onto the clean list for reuse with one xlist_add().
558 */
559static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
560 struct list_head *list, struct xlist_head *xlist,
561 struct xlist_head **tail_ret)
562{
563 struct rds_ib_mr *ibmr;
564 struct xlist_head *cur_mr = xlist;
565 struct xlist_head *tail_mr = NULL;
566
567 list_for_each_entry(ibmr, list, unmap_list) {
568 tail_mr = &ibmr->xlist;
569 tail_mr->next = NULL;
570 cur_mr->next = tail_mr;
571 cur_mr = tail_mr;
572 }
573 *tail_ret = tail_mr;
574}
575
576/*
480 * Flush our pool of MRs. 577 * Flush our pool of MRs.
481 * At a minimum, all currently unused MRs are unmapped. 578 * At a minimum, all currently unused MRs are unmapped.
482 * If the number of MRs allocated exceeds the limit, we also try 579 * If the number of MRs allocated exceeds the limit, we also try
483 * to free as many MRs as needed to get back to this limit. 580 * to free as many MRs as needed to get back to this limit.
484 */ 581 */
485static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all) 582static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
583 int free_all, struct rds_ib_mr **ibmr_ret)
486{ 584{
487 struct rds_ib_mr *ibmr, *next; 585 struct rds_ib_mr *ibmr, *next;
586 struct xlist_head clean_xlist;
587 struct xlist_head *clean_tail;
488 LIST_HEAD(unmap_list); 588 LIST_HEAD(unmap_list);
489 LIST_HEAD(fmr_list); 589 LIST_HEAD(fmr_list);
490 unsigned long unpinned = 0; 590 unsigned long unpinned = 0;
491 unsigned long flags;
492 unsigned int nfreed = 0, ncleaned = 0, free_goal; 591 unsigned int nfreed = 0, ncleaned = 0, free_goal;
493 int ret = 0; 592 int ret = 0;
494 593
495 rds_ib_stats_inc(s_ib_rdma_mr_pool_flush); 594 rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
496 595
497 mutex_lock(&pool->flush_lock); 596 if (ibmr_ret) {
597 DEFINE_WAIT(wait);
 598 while (!mutex_trylock(&pool->flush_lock)) {
599 ibmr = rds_ib_reuse_fmr(pool);
600 if (ibmr) {
601 *ibmr_ret = ibmr;
602 finish_wait(&pool->flush_wait, &wait);
603 goto out_nolock;
604 }
605
606 prepare_to_wait(&pool->flush_wait, &wait,
607 TASK_UNINTERRUPTIBLE);
608 if (xlist_empty(&pool->clean_list))
609 schedule();
610
611 ibmr = rds_ib_reuse_fmr(pool);
612 if (ibmr) {
613 *ibmr_ret = ibmr;
614 finish_wait(&pool->flush_wait, &wait);
615 goto out_nolock;
616 }
617 }
618 finish_wait(&pool->flush_wait, &wait);
619 } else
620 mutex_lock(&pool->flush_lock);
621
622 if (ibmr_ret) {
623 ibmr = rds_ib_reuse_fmr(pool);
624 if (ibmr) {
625 *ibmr_ret = ibmr;
626 goto out;
627 }
628 }
498 629
499 spin_lock_irqsave(&pool->list_lock, flags);
500 /* Get the list of all MRs to be dropped. Ordering matters - 630 /* Get the list of all MRs to be dropped. Ordering matters -
501 * we want to put drop_list ahead of free_list. */ 631 * we want to put drop_list ahead of free_list.
502 list_splice_init(&pool->free_list, &unmap_list); 632 */
503 list_splice_init(&pool->drop_list, &unmap_list); 633 xlist_append_to_list(&pool->drop_list, &unmap_list);
634 xlist_append_to_list(&pool->free_list, &unmap_list);
504 if (free_all) 635 if (free_all)
505 list_splice_init(&pool->clean_list, &unmap_list); 636 xlist_append_to_list(&pool->clean_list, &unmap_list);
506 spin_unlock_irqrestore(&pool->list_lock, flags);
507 637
508 free_goal = rds_ib_flush_goal(pool, free_all); 638 free_goal = rds_ib_flush_goal(pool, free_all);
509 639
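
The trylock loop above elects exactly one flusher; every other caller either grabs an MR that some CPU just cleaned or sleeps on flush_wait. Note that the clean list is re-checked only after prepare_to_wait(): setting the task state before the final test is what closes the lost-wakeup window, since a wake_up() that lands between the test and schedule() merely leaves the task runnable. The canonical shape of the idiom:

	/* Generic sketch of the sleep/wake pattern used above; wq and
	 * condition are placeholders. */
	DEFINE_WAIT(wait);

	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE); /* state first */
	if (!condition)
		schedule();	/* a racing wake_up() makes this a no-op */
	finish_wait(&wq, &wait);
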
@@ -511,19 +641,20 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
511 goto out; 641 goto out;
512 642
513 /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */ 643 /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
514 list_for_each_entry(ibmr, &unmap_list, list) 644 list_for_each_entry(ibmr, &unmap_list, unmap_list)
515 list_add(&ibmr->fmr->list, &fmr_list); 645 list_add(&ibmr->fmr->list, &fmr_list);
646
516 ret = ib_unmap_fmr(&fmr_list); 647 ret = ib_unmap_fmr(&fmr_list);
517 if (ret) 648 if (ret)
518 printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret); 649 printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);
519 650
520 /* Now we can destroy the DMA mapping and unpin any pages */ 651 /* Now we can destroy the DMA mapping and unpin any pages */
521 list_for_each_entry_safe(ibmr, next, &unmap_list, list) { 652 list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
522 unpinned += ibmr->sg_len; 653 unpinned += ibmr->sg_len;
523 __rds_ib_teardown_mr(ibmr); 654 __rds_ib_teardown_mr(ibmr);
524 if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) { 655 if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
525 rds_ib_stats_inc(s_ib_rdma_mr_free); 656 rds_ib_stats_inc(s_ib_rdma_mr_free);
526 list_del(&ibmr->list); 657 list_del(&ibmr->unmap_list);
527 ib_dealloc_fmr(ibmr->fmr); 658 ib_dealloc_fmr(ibmr->fmr);
528 kfree(ibmr); 659 kfree(ibmr);
529 nfreed++; 660 nfreed++;
@@ -531,9 +662,27 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
531 ncleaned++; 662 ncleaned++;
532 } 663 }
533 664
534 spin_lock_irqsave(&pool->list_lock, flags); 665 if (!list_empty(&unmap_list)) {
535 list_splice(&unmap_list, &pool->clean_list); 666 /* we have to make sure that none of the things we're about
536 spin_unlock_irqrestore(&pool->list_lock, flags); 667 * to put on the clean list would race with other cpus trying
668 * to pull items off. The xlist would explode if we managed to
669 * remove something from the clean list and then add it back again
670 * while another CPU was spinning on that same item in xlist_del_head.
671 *
 672 * This is pretty unlikely, but just in case, wait for an xlist grace period
673 * here before adding anything back into the clean list.
674 */
675 wait_clean_list_grace();
676
677 list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
678 if (ibmr_ret)
679 refill_local(pool, &clean_xlist, ibmr_ret);
680
681 /* refill_local may have emptied our list */
682 if (!xlist_empty(&clean_xlist))
683 xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
684
685 }
537 686
538 atomic_sub(unpinned, &pool->free_pinned); 687 atomic_sub(unpinned, &pool->free_pinned);
539 atomic_sub(ncleaned, &pool->dirty_count); 688 atomic_sub(ncleaned, &pool->dirty_count);
@@ -541,14 +690,35 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
541 690
542out: 691out:
543 mutex_unlock(&pool->flush_lock); 692 mutex_unlock(&pool->flush_lock);
693 if (waitqueue_active(&pool->flush_wait))
694 wake_up(&pool->flush_wait);
695out_nolock:
544 return ret; 696 return ret;
545} 697}
546 698
699int rds_ib_fmr_init(void)
700{
701 rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
702 if (!rds_ib_fmr_wq)
703 return -ENOMEM;
704 return 0;
705}
706
707/*
708 * By the time this is called all the IB devices should have been torn down and
709 * had their pools freed. As each pool is freed its work struct is waited on,
710 * so the pool flushing work queue should be idle by the time we get here.
711 */
712void rds_ib_fmr_exit(void)
713{
714 destroy_workqueue(rds_ib_fmr_wq);
715}
716
547static void rds_ib_mr_pool_flush_worker(struct work_struct *work) 717static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
548{ 718{
549 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker); 719 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
550 720
551 rds_ib_flush_mr_pool(pool, 0); 721 rds_ib_flush_mr_pool(pool, 0, NULL);
552} 722}
553 723
554void rds_ib_free_mr(void *trans_private, int invalidate) 724void rds_ib_free_mr(void *trans_private, int invalidate)
@@ -556,47 +726,49 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
556 struct rds_ib_mr *ibmr = trans_private; 726 struct rds_ib_mr *ibmr = trans_private;
557 struct rds_ib_device *rds_ibdev = ibmr->device; 727 struct rds_ib_device *rds_ibdev = ibmr->device;
558 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; 728 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
559 unsigned long flags;
560 729
561 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len); 730 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
562 731
563 /* Return it to the pool's free list */ 732 /* Return it to the pool's free list */
564 spin_lock_irqsave(&pool->list_lock, flags);
565 if (ibmr->remap_count >= pool->fmr_attr.max_maps) 733 if (ibmr->remap_count >= pool->fmr_attr.max_maps)
566 list_add(&ibmr->list, &pool->drop_list); 734 xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
567 else 735 else
568 list_add(&ibmr->list, &pool->free_list); 736 xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
569 737
570 atomic_add(ibmr->sg_len, &pool->free_pinned); 738 atomic_add(ibmr->sg_len, &pool->free_pinned);
571 atomic_inc(&pool->dirty_count); 739 atomic_inc(&pool->dirty_count);
572 spin_unlock_irqrestore(&pool->list_lock, flags);
573 740
574 /* If we've pinned too many pages, request a flush */ 741 /* If we've pinned too many pages, request a flush */
575 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || 742 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
576 atomic_read(&pool->dirty_count) >= pool->max_items / 10) 743 atomic_read(&pool->dirty_count) >= pool->max_items / 10)
577 queue_work(rds_wq, &pool->flush_worker); 744 queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
578 745
579 if (invalidate) { 746 if (invalidate) {
580 if (likely(!in_interrupt())) { 747 if (likely(!in_interrupt())) {
581 rds_ib_flush_mr_pool(pool, 0); 748 rds_ib_flush_mr_pool(pool, 0, NULL);
582 } else { 749 } else {
583 /* We get here if the user created a MR marked 750 /* We get here if the user created a MR marked
584 * as use_once and invalidate at the same time. */ 751 * as use_once and invalidate at the same time. */
585 queue_work(rds_wq, &pool->flush_worker); 752 queue_delayed_work(rds_ib_fmr_wq,
753 &pool->flush_worker, 10);
586 } 754 }
587 } 755 }
756
757 rds_ib_dev_put(rds_ibdev);
588} 758}
589 759
590void rds_ib_flush_mrs(void) 760void rds_ib_flush_mrs(void)
591{ 761{
592 struct rds_ib_device *rds_ibdev; 762 struct rds_ib_device *rds_ibdev;
593 763
764 down_read(&rds_ib_devices_lock);
594 list_for_each_entry(rds_ibdev, &rds_ib_devices, list) { 765 list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
595 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; 766 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
596 767
597 if (pool) 768 if (pool)
598 rds_ib_flush_mr_pool(pool, 0); 769 rds_ib_flush_mr_pool(pool, 0, NULL);
599 } 770 }
771 up_read(&rds_ib_devices_lock);
600} 772}
601 773
602void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, 774void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
@@ -628,6 +800,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
628 printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret); 800 printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);
629 801
630 ibmr->device = rds_ibdev; 802 ibmr->device = rds_ibdev;
803 rds_ibdev = NULL;
631 804
632 out: 805 out:
633 if (ret) { 806 if (ret) {
@@ -635,5 +808,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
635 rds_ib_free_mr(ibmr, 0); 808 rds_ib_free_mr(ibmr, 0);
636 ibmr = ERR_PTR(ret); 809 ibmr = ERR_PTR(ret);
637 } 810 }
811 if (rds_ibdev)
812 rds_ib_dev_put(rds_ibdev);
638 return ibmr; 813 return ibmr;
639} 814}
815
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c74e9904a6b2..e29e0ca32f74 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -43,42 +43,6 @@ static struct kmem_cache *rds_ib_incoming_slab;
43static struct kmem_cache *rds_ib_frag_slab; 43static struct kmem_cache *rds_ib_frag_slab;
44static atomic_t rds_ib_allocation = ATOMIC_INIT(0); 44static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
45 45
46static void rds_ib_frag_drop_page(struct rds_page_frag *frag)
47{
48 rdsdebug("frag %p page %p\n", frag, frag->f_page);
49 __free_page(frag->f_page);
50 frag->f_page = NULL;
51}
52
53static void rds_ib_frag_free(struct rds_page_frag *frag)
54{
55 rdsdebug("frag %p page %p\n", frag, frag->f_page);
56 BUG_ON(frag->f_page != NULL);
57 kmem_cache_free(rds_ib_frag_slab, frag);
58}
59
60/*
61 * We map a page at a time. Its fragments are posted in order. This
62 * is called in fragment order as the fragments get send completion events.
63 * Only the last frag in the page performs the unmapping.
64 *
65 * It's OK for ring cleanup to call this in whatever order it likes because
66 * DMA is not in flight and so we can unmap while other ring entries still
67 * hold page references in their frags.
68 */
69static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
70 struct rds_ib_recv_work *recv)
71{
72 struct rds_page_frag *frag = recv->r_frag;
73
74 rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
75 if (frag->f_mapped)
76 ib_dma_unmap_page(ic->i_cm_id->device,
77 frag->f_mapped,
78 RDS_FRAG_SIZE, DMA_FROM_DEVICE);
79 frag->f_mapped = 0;
80}
81
82void rds_ib_recv_init_ring(struct rds_ib_connection *ic) 46void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
83{ 47{
84 struct rds_ib_recv_work *recv; 48 struct rds_ib_recv_work *recv;
@@ -95,16 +59,161 @@ void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
95 recv->r_wr.sg_list = recv->r_sge; 59 recv->r_wr.sg_list = recv->r_sge;
96 recv->r_wr.num_sge = RDS_IB_RECV_SGE; 60 recv->r_wr.num_sge = RDS_IB_RECV_SGE;
97 61
98 sge = rds_ib_data_sge(ic, recv->r_sge); 62 sge = &recv->r_sge[0];
63 sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
64 sge->length = sizeof(struct rds_header);
65 sge->lkey = ic->i_mr->lkey;
66
67 sge = &recv->r_sge[1];
99 sge->addr = 0; 68 sge->addr = 0;
100 sge->length = RDS_FRAG_SIZE; 69 sge->length = RDS_FRAG_SIZE;
101 sge->lkey = ic->i_mr->lkey; 70 sge->lkey = ic->i_mr->lkey;
71 }
72}
102 73
103 sge = rds_ib_header_sge(ic, recv->r_sge); 74/*
104 sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header)); 75 * The entire 'from' list, including the from element itself, is put on
105 sge->length = sizeof(struct rds_header); 76 * to the tail of the 'to' list.
106 sge->lkey = ic->i_mr->lkey; 77 */
78static void list_splice_entire_tail(struct list_head *from,
79 struct list_head *to)
80{
81 struct list_head *from_last = from->prev;
82
83 list_splice_tail(from_last, to);
84 list_add_tail(from_last, to);
85}
86
87static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
88{
89 struct list_head *tmp;
90
91 tmp = xchg(&cache->xfer, NULL);
92 if (tmp) {
93 if (cache->ready)
94 list_splice_entire_tail(tmp, cache->ready);
95 else
96 cache->ready = tmp;
97 }
98}
99
100static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
101{
102 struct rds_ib_cache_head *head;
103 int cpu;
104
105 cache->percpu = alloc_percpu(struct rds_ib_cache_head);
106 if (!cache->percpu)
107 return -ENOMEM;
108
109 for_each_possible_cpu(cpu) {
110 head = per_cpu_ptr(cache->percpu, cpu);
111 head->first = NULL;
112 head->count = 0;
113 }
114 cache->xfer = NULL;
115 cache->ready = NULL;
116
117 return 0;
118}
119
120int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
121{
122 int ret;
123
124 ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
125 if (!ret) {
126 ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
127 if (ret)
128 free_percpu(ic->i_cache_incs.percpu);
107 } 129 }
130
131 return ret;
132}
133
134static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
135 struct list_head *caller_list)
136{
137 struct rds_ib_cache_head *head;
138 int cpu;
139
140 for_each_possible_cpu(cpu) {
141 head = per_cpu_ptr(cache->percpu, cpu);
142 if (head->first) {
143 list_splice_entire_tail(head->first, caller_list);
144 head->first = NULL;
145 }
146 }
147
148 if (cache->ready) {
149 list_splice_entire_tail(cache->ready, caller_list);
150 cache->ready = NULL;
151 }
152}
153
154void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
155{
156 struct rds_ib_incoming *inc;
157 struct rds_ib_incoming *inc_tmp;
158 struct rds_page_frag *frag;
159 struct rds_page_frag *frag_tmp;
160 LIST_HEAD(list);
161
162 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
163 rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
164 free_percpu(ic->i_cache_incs.percpu);
165
166 list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
167 list_del(&inc->ii_cache_entry);
168 WARN_ON(!list_empty(&inc->ii_frags));
169 kmem_cache_free(rds_ib_incoming_slab, inc);
170 }
171
172 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
173 rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
174 free_percpu(ic->i_cache_frags.percpu);
175
176 list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
177 list_del(&frag->f_cache_entry);
178 WARN_ON(!list_empty(&frag->f_item));
179 kmem_cache_free(rds_ib_frag_slab, frag);
180 }
181}
182
183/* fwd decl */
184static void rds_ib_recv_cache_put(struct list_head *new_item,
185 struct rds_ib_refill_cache *cache);
186static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
187
188
189/* Recycle frag and attached recv buffer f_sg */
190static void rds_ib_frag_free(struct rds_ib_connection *ic,
191 struct rds_page_frag *frag)
192{
193 rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
194
195 rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
196}
197
198/* Recycle inc after freeing attached frags */
199void rds_ib_inc_free(struct rds_incoming *inc)
200{
201 struct rds_ib_incoming *ibinc;
202 struct rds_page_frag *frag;
203 struct rds_page_frag *pos;
204 struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
205
206 ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
207
208 /* Free attached frags */
209 list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
210 list_del_init(&frag->f_item);
211 rds_ib_frag_free(ic, frag);
212 }
213 BUG_ON(!list_empty(&ibinc->ii_frags));
214
215 rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
216 rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
108} 217}
109 218
110static void rds_ib_recv_clear_one(struct rds_ib_connection *ic, 219static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
@@ -115,10 +224,8 @@ static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
115 recv->r_ibinc = NULL; 224 recv->r_ibinc = NULL;
116 } 225 }
117 if (recv->r_frag) { 226 if (recv->r_frag) {
118 rds_ib_recv_unmap_page(ic, recv); 227 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
119 if (recv->r_frag->f_page) 228 rds_ib_frag_free(ic, recv->r_frag);
120 rds_ib_frag_drop_page(recv->r_frag);
121 rds_ib_frag_free(recv->r_frag);
122 recv->r_frag = NULL; 229 recv->r_frag = NULL;
123 } 230 }
124} 231}
@@ -129,84 +236,111 @@ void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
129 236
130 for (i = 0; i < ic->i_recv_ring.w_nr; i++) 237 for (i = 0; i < ic->i_recv_ring.w_nr; i++)
131 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]); 238 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
132
133 if (ic->i_frag.f_page)
134 rds_ib_frag_drop_page(&ic->i_frag);
135} 239}
136 240
137static int rds_ib_recv_refill_one(struct rds_connection *conn, 241static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
138 struct rds_ib_recv_work *recv, 242 gfp_t slab_mask)
139 gfp_t kptr_gfp, gfp_t page_gfp)
140{ 243{
141 struct rds_ib_connection *ic = conn->c_transport_data; 244 struct rds_ib_incoming *ibinc;
142 dma_addr_t dma_addr; 245 struct list_head *cache_item;
143 struct ib_sge *sge; 246 int avail_allocs;
144 int ret = -ENOMEM;
145 247
146 if (recv->r_ibinc == NULL) { 248 cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
147 if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) { 249 if (cache_item) {
250 ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
251 } else {
252 avail_allocs = atomic_add_unless(&rds_ib_allocation,
253 1, rds_ib_sysctl_max_recv_allocation);
254 if (!avail_allocs) {
148 rds_ib_stats_inc(s_ib_rx_alloc_limit); 255 rds_ib_stats_inc(s_ib_rx_alloc_limit);
149 goto out; 256 return NULL;
150 } 257 }
151 recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, 258 ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
152 kptr_gfp); 259 if (!ibinc) {
153 if (recv->r_ibinc == NULL) {
154 atomic_dec(&rds_ib_allocation); 260 atomic_dec(&rds_ib_allocation);
155 goto out; 261 return NULL;
156 } 262 }
157 INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
158 rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
159 } 263 }
264 INIT_LIST_HEAD(&ibinc->ii_frags);
265 rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
160 266
161 if (recv->r_frag == NULL) { 267 return ibinc;
162 recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp); 268}
163 if (recv->r_frag == NULL) 269
164 goto out; 270static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
165 INIT_LIST_HEAD(&recv->r_frag->f_item); 271 gfp_t slab_mask, gfp_t page_mask)
166 recv->r_frag->f_page = NULL; 272{
273 struct rds_page_frag *frag;
274 struct list_head *cache_item;
275 int ret;
276
277 cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
278 if (cache_item) {
279 frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
280 } else {
281 frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
282 if (!frag)
283 return NULL;
284
285 sg_init_table(&frag->f_sg, 1);
286 ret = rds_page_remainder_alloc(&frag->f_sg,
287 RDS_FRAG_SIZE, page_mask);
288 if (ret) {
289 kmem_cache_free(rds_ib_frag_slab, frag);
290 return NULL;
291 }
167 } 292 }
168 293
169 if (ic->i_frag.f_page == NULL) { 294 INIT_LIST_HEAD(&frag->f_item);
170 ic->i_frag.f_page = alloc_page(page_gfp); 295
171 if (ic->i_frag.f_page == NULL) 296 return frag;
172 goto out; 297}
173 ic->i_frag.f_offset = 0; 298
299static int rds_ib_recv_refill_one(struct rds_connection *conn,
300 struct rds_ib_recv_work *recv, int prefill)
301{
302 struct rds_ib_connection *ic = conn->c_transport_data;
303 struct ib_sge *sge;
304 int ret = -ENOMEM;
305 gfp_t slab_mask = GFP_NOWAIT;
306 gfp_t page_mask = GFP_NOWAIT;
307
308 if (prefill) {
309 slab_mask = GFP_KERNEL;
310 page_mask = GFP_HIGHUSER;
174 } 311 }
175 312
176 dma_addr = ib_dma_map_page(ic->i_cm_id->device, 313 if (!ic->i_cache_incs.ready)
177 ic->i_frag.f_page, 314 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
178 ic->i_frag.f_offset, 315 if (!ic->i_cache_frags.ready)
179 RDS_FRAG_SIZE, 316 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
180 DMA_FROM_DEVICE);
181 if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
182 goto out;
183 317
184 /* 318 /*
185 * Once we get the RDS_PAGE_LAST_OFF frag then rds_ib_frag_unmap() 319 * ibinc was taken from recv if recv contained the start of a message.
186 * must be called on this recv. This happens as completions hit 320 * recvs that were continuations will still have this allocated.
187 * in order or on connection shutdown.
188 */ 321 */
189 recv->r_frag->f_page = ic->i_frag.f_page; 322 if (!recv->r_ibinc) {
190 recv->r_frag->f_offset = ic->i_frag.f_offset; 323 recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
191 recv->r_frag->f_mapped = dma_addr; 324 if (!recv->r_ibinc)
325 goto out;
326 }
192 327
193 sge = rds_ib_data_sge(ic, recv->r_sge); 328 WARN_ON(recv->r_frag); /* leak! */
194 sge->addr = dma_addr; 329 recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
195 sge->length = RDS_FRAG_SIZE; 330 if (!recv->r_frag)
331 goto out;
332
333 ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
334 1, DMA_FROM_DEVICE);
335 WARN_ON(ret != 1);
196 336
197 sge = rds_ib_header_sge(ic, recv->r_sge); 337 sge = &recv->r_sge[0];
198 sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header); 338 sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
199 sge->length = sizeof(struct rds_header); 339 sge->length = sizeof(struct rds_header);
200 340
201 get_page(recv->r_frag->f_page); 341 sge = &recv->r_sge[1];
202 342 sge->addr = sg_dma_address(&recv->r_frag->f_sg);
203 if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) { 343 sge->length = sg_dma_len(&recv->r_frag->f_sg);
204 ic->i_frag.f_offset += RDS_FRAG_SIZE;
205 } else {
206 put_page(ic->i_frag.f_page);
207 ic->i_frag.f_page = NULL;
208 ic->i_frag.f_offset = 0;
209 }
210 344
211 ret = 0; 345 ret = 0;
212out: 346out:
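
Dropping the gfp_t arguments moves the sleep/no-sleep decision inside rds_ib_recv_refill_one(), keyed off prefill: the one-time prefill runs in process context when the connection comes up and may sleep (GFP_KERNEL/GFP_HIGHUSER), while steady-state refills run from the recv completion tasklet and must not (GFP_NOWAIT, leaning on the percpu caches instead). The assumed call sites, for illustration:

	/* Process context, connection establishment (shown earlier):
	 * may sleep while filling the whole ring. */
	rds_ib_recv_refill(conn, 1);

	/* Recv tasklet (outside this diff): atomic context, GFP_NOWAIT;
	 * a failed refill is simply retried on a later completion. */
	rds_ib_recv_refill(conn, 0);
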
@@ -216,13 +350,11 @@ out:
216/* 350/*
217 * This tries to allocate and post unused work requests after making sure that 351 * This tries to allocate and post unused work requests after making sure that
218 * they have all the allocations they need to queue received fragments into 352 * they have all the allocations they need to queue received fragments into
219 * sockets. The i_recv_mutex is held here so that ring_alloc and _unalloc 353 * sockets.
220 * pairs don't go unmatched.
221 * 354 *
222 * -1 is returned if posting fails due to temporary resource exhaustion. 355 * -1 is returned if posting fails due to temporary resource exhaustion.
223 */ 356 */
224int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, 357void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
225 gfp_t page_gfp, int prefill)
226{ 358{
227 struct rds_ib_connection *ic = conn->c_transport_data; 359 struct rds_ib_connection *ic = conn->c_transport_data;
228 struct rds_ib_recv_work *recv; 360 struct rds_ib_recv_work *recv;
@@ -236,28 +368,25 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
236 if (pos >= ic->i_recv_ring.w_nr) { 368 if (pos >= ic->i_recv_ring.w_nr) {
237 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", 369 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
238 pos); 370 pos);
239 ret = -EINVAL;
240 break; 371 break;
241 } 372 }
242 373
243 recv = &ic->i_recvs[pos]; 374 recv = &ic->i_recvs[pos];
244 ret = rds_ib_recv_refill_one(conn, recv, kptr_gfp, page_gfp); 375 ret = rds_ib_recv_refill_one(conn, recv, prefill);
245 if (ret) { 376 if (ret) {
246 ret = -1;
247 break; 377 break;
248 } 378 }
249 379
250 /* XXX when can this fail? */ 380 /* XXX when can this fail? */
251 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); 381 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
252 rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv, 382 rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
253 recv->r_ibinc, recv->r_frag->f_page, 383 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
254 (long) recv->r_frag->f_mapped, ret); 384 (long) sg_dma_address(&recv->r_frag->f_sg), ret);
255 if (ret) { 385 if (ret) {
256 rds_ib_conn_error(conn, "recv post on " 386 rds_ib_conn_error(conn, "recv post on "
257 "%pI4 returned %d, disconnecting and " 387 "%pI4 returned %d, disconnecting and "
258 "reconnecting\n", &conn->c_faddr, 388 "reconnecting\n", &conn->c_faddr,
259 ret); 389 ret);
260 ret = -1;
261 break; 390 break;
262 } 391 }
263 392
@@ -270,37 +399,73 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
270 399
271 if (ret) 400 if (ret)
272 rds_ib_ring_unalloc(&ic->i_recv_ring, 1); 401 rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
273 return ret;
274} 402}
 
-void rds_ib_inc_purge(struct rds_incoming *inc)
+/*
+ * We want to recycle several types of recv allocations, like incs and frags.
+ * To use this, the *_free() function passes in the ptr to a list_head within
+ * the recyclee, as well as the cache to put it on.
+ *
+ * First, we put the memory on a percpu list. When this reaches a certain size,
+ * We move it to an intermediate non-percpu list in a lockless manner, with some
+ * xchg/compxchg wizardry.
+ *
+ * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
+ * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
+ * list_empty() will return true with one element is actually present.
+ */
+static void rds_ib_recv_cache_put(struct list_head *new_item,
+				  struct rds_ib_refill_cache *cache)
 {
-	struct rds_ib_incoming *ibinc;
-	struct rds_page_frag *frag;
-	struct rds_page_frag *pos;
+	unsigned long flags;
+	struct rds_ib_cache_head *chp;
+	struct list_head *old;
 
-	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
-	rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);
+	local_irq_save(flags);
 
-	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
-		list_del_init(&frag->f_item);
-		rds_ib_frag_drop_page(frag);
-		rds_ib_frag_free(frag);
-	}
+	chp = per_cpu_ptr(cache->percpu, smp_processor_id());
+	if (!chp->first)
+		INIT_LIST_HEAD(new_item);
+	else /* put on front */
+		list_add_tail(new_item, chp->first);
+	chp->first = new_item;
+	chp->count++;
+
+	if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
+		goto end;
+
+	/*
+	 * Return our per-cpu first list to the cache's xfer by atomically
+	 * grabbing the current xfer list, appending it to our per-cpu list,
+	 * and then atomically returning that entire list back to the
+	 * cache's xfer list as long as it's still empty.
+	 */
+	do {
+		old = xchg(&cache->xfer, NULL);
+		if (old)
+			list_splice_entire_tail(old, chp->first);
+		old = cmpxchg(&cache->xfer, NULL, chp->first);
+	} while (old);
+
+	chp->first = NULL;
+	chp->count = 0;
+end:
+	local_irq_restore(flags);
 }
 
-void rds_ib_inc_free(struct rds_incoming *inc)
+static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
 {
-	struct rds_ib_incoming *ibinc;
+	struct list_head *head = cache->ready;
 
-	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
+	if (head) {
+		if (!list_empty(head)) {
+			cache->ready = head->next;
+			list_del_init(head);
+		} else
+			cache->ready = NULL;
+	}
 
-	rds_ib_inc_purge(inc);
-	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
-	BUG_ON(!list_empty(&ibinc->ii_frags));
-	kmem_cache_free(rds_ib_incoming_slab, ibinc);
-	atomic_dec(&rds_ib_allocation);
-	BUG_ON(atomic_read(&rds_ib_allocation) < 0);
+	return head;
 }
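The single-pointer anchor plus the xchg/cmpxchg hand-off above is easy to get wrong, so here is a minimal userspace model of the same idiom, assuming GCC/Clang __atomic builtins and a plain singly linked list in place of the kernel's circular list_head. All names here (node, cache_xfer, publish_batch) are illustrative stand-ins, not part of the patch:

/* cache_xfer_demo.c - model of the percpu -> xfer list hand-off above. */
#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; int val; };

static struct node *cache_xfer;   /* like cache->xfer: NULL means empty */

static void publish_batch(struct node *batch)
{
    struct node *old;

    do {
        /* Empty the shared slot; we now own whatever was there. */
        old = __atomic_exchange_n(&cache_xfer, NULL, __ATOMIC_ACQ_REL);
        if (old) {
            /* Merge the stolen list onto the tail of our batch. */
            struct node *t = batch;
            while (t->next)
                t = t->next;
            t->next = old;
        }
        /* Publish the merged batch only if the slot is still empty;
         * if another thread raced in, loop and merge its list too. */
        old = NULL;
        __atomic_compare_exchange_n(&cache_xfer, &old, batch, 0,
                                    __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
    } while (old);
}

int main(void)
{
    struct node a = { NULL, 1 }, b = { &a, 2 };   /* private batch: b -> a */

    publish_batch(&b);
    for (struct node *n = cache_xfer; n; n = n->next)
        printf("%d\n", n->val);
    return 0;
}

As in the kernel version, the exchange/compare-exchange pair lets a producer drain and republish the shared list without a lock, at the cost of retrying when another producer wins the race.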
 
 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
@@ -336,13 +501,13 @@ int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
 		to_copy = min_t(unsigned long, to_copy, len - copied);
 
 		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
-			 "[%p, %lu] + %lu\n",
+			 "[%p, %u] + %lu\n",
 			 to_copy, iov->iov_base, iov->iov_len, iov_off,
-			 frag->f_page, frag->f_offset, frag_off);
+			 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
 
 		/* XXX needs + offset for multiple recvs per page */
-		ret = rds_page_copy_to_user(frag->f_page,
-					    frag->f_offset + frag_off,
+		ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
+					    frag->f_sg.offset + frag_off,
 					    iov->iov_base + iov_off,
 					    to_copy);
 		if (ret) {
@@ -557,47 +722,6 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
 	return rds_ib_get_ack(ic);
 }
 
-static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
-					    struct rds_ib_recv_work *recv,
-					    u32 data_len)
-{
-	struct rds_ib_connection *ic = conn->c_transport_data;
-	void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
-	void *addr;
-	u32 misplaced_hdr_bytes;
-
-	/*
-	 * Support header at the front (RDS 3.1+) as well as header-at-end.
-	 *
-	 * Cases:
-	 * 1) header all in header buff (great!)
-	 * 2) header all in data page (copy all to header buff)
-	 * 3) header split across hdr buf + data page
-	 *    (move bit in hdr buff to end before copying other bit from data page)
-	 */
-	if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
-		return hdr_buff;
-
-	if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
-		addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
-		memcpy(hdr_buff,
-		       addr + recv->r_frag->f_offset + data_len,
-		       sizeof(struct rds_header));
-		kunmap_atomic(addr, KM_SOFTIRQ0);
-		return hdr_buff;
-	}
-
-	misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
-
-	memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
-
-	addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
-	memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
-	       sizeof(struct rds_header) - misplaced_hdr_bytes);
-	kunmap_atomic(addr, KM_SOFTIRQ0);
-	return hdr_buff;
-}
-
 /*
  * It's kind of lame that we're copying from the posted receive pages into
  * long-lived bitmaps. We could have posted the bitmaps and rdma written into
@@ -639,7 +763,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
 		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
 
-		addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);
+		addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);
 
 		src = addr + frag_off;
 		dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -710,7 +834,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 	}
 	data_len -= sizeof(struct rds_header);
 
-	ihdr = rds_ib_get_header(conn, recv, data_len);
+	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
 
 	/* Validate the checksum. */
 	if (!rds_message_verify_checksum(ihdr)) {
@@ -742,12 +866,12 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 		 * the inc is freed. We don't go that route, so we have to drop the
 		 * page ref ourselves. We can't just leave the page on the recv
 		 * because that confuses the dma mapping of pages and each recv's use
-		 * of a partial page. We can leave the frag, though, it will be
-		 * reused.
+		 * of a partial page.
 		 *
 		 * FIXME: Fold this into the code path below.
 		 */
-		rds_ib_frag_drop_page(recv->r_frag);
+		rds_ib_frag_free(ic, recv->r_frag);
+		recv->r_frag = NULL;
 		return;
 	}
 
@@ -757,7 +881,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 	 * into the inc and save the inc so we can hang upcoming fragments
 	 * off its list.
 	 */
-	if (ibinc == NULL) {
+	if (!ibinc) {
 		ibinc = recv->r_ibinc;
 		recv->r_ibinc = NULL;
 		ic->i_ibinc = ibinc;
@@ -842,32 +966,38 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
 	struct rds_ib_recv_work *recv;
 
 	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
-		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
-			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
+		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+			 (unsigned long long)wc.wr_id, wc.status,
+			 rds_ib_wc_status_str(wc.status), wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
 		rds_ib_stats_inc(s_ib_rx_cq_event);
 
 		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
 
-		rds_ib_recv_unmap_page(ic, recv);
+		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
 
 		/*
 		 * Also process recvs in connecting state because it is possible
 		 * to get a recv completion _before_ the rdmacm ESTABLISHED
 		 * event is processed.
 		 */
-		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
+		if (wc.status == IB_WC_SUCCESS) {
+			rds_ib_process_recv(conn, recv, wc.byte_len, state);
+		} else {
 			/* We expect errors as the qp is drained during shutdown */
-			if (wc.status == IB_WC_SUCCESS) {
-				rds_ib_process_recv(conn, recv, wc.byte_len, state);
-			} else {
-				rds_ib_conn_error(conn, "recv completion on "
-				       "%pI4 had status %u, disconnecting and "
-				       "reconnecting\n", &conn->c_faddr,
-				       wc.status);
-			}
+			if (rds_conn_up(conn) || rds_conn_connecting(conn))
+				rds_ib_conn_error(conn, "recv completion on %pI4 had "
+						  "status %u (%s), disconnecting and "
+						  "reconnecting\n", &conn->c_faddr,
+						  wc.status,
+						  rds_ib_wc_status_str(wc.status));
 		}
 
+		/*
+		 * It's very important that we only free this ring entry if we've truly
+		 * freed the resources allocated to the entry. The refilling path can
+		 * leak if we don't.
+		 */
 		rds_ib_ring_free(&ic->i_recv_ring, 1);
 	}
 }
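The ordering in that poll loop is the whole point: unmap the buffer, handle or discard it, and only then hand the ring slot back to the refill path. A schematic standalone model of that discipline, with every type and helper here a stand-in for the real verbs API, might look like this:

/* cq_drain_model.c - ordering of the receive-CQ drain loop above. */
#include <stdio.h>
#include <stdbool.h>

struct completion { int status; unsigned len; };
enum { WC_SUCCESS = 0 };

static bool poll_cq(struct completion *wc)   /* stand-in for ib_poll_cq() */
{
    static int remaining = 3;
    if (remaining-- <= 0)
        return false;
    wc->status = WC_SUCCESS;
    wc->len = 4096;
    return true;
}

static void unmap_frag(int slot)            { printf("unmap slot %d\n", slot); }
static void process(int slot, unsigned len) { printf("process slot %d, %u bytes\n", slot, len); }
static void report_error(int status)        { printf("error status %d\n", status); }
static void ring_free(int slot)             { printf("free slot %d\n", slot); }

int main(void)
{
    struct completion wc;
    int oldest = 0;

    while (poll_cq(&wc)) {
        int slot = oldest++;          /* oldest posted slot completes first */

        unmap_frag(slot);             /* always release the DMA mapping */

        if (wc.status == WC_SUCCESS)
            process(slot, wc.len);    /* hand the fragment up the stack */
        else
            report_error(wc.status);  /* expected while draining a dying QP */

        ring_free(slot);              /* only now may refill reuse the slot */
    }
    return 0;
}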
@@ -897,11 +1027,8 @@ void rds_ib_recv_tasklet_fn(unsigned long data)
 	if (rds_ib_ring_empty(&ic->i_recv_ring))
 		rds_ib_stats_inc(s_ib_rx_ring_empty);
 
-	/*
-	 * If the ring is running low, then schedule the thread to refill.
-	 */
 	if (rds_ib_ring_low(&ic->i_recv_ring))
-		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+		rds_ib_recv_refill(conn, 0);
 }
 
 int rds_ib_recv(struct rds_connection *conn)
@@ -910,25 +1037,13 @@ int rds_ib_recv(struct rds_connection *conn)
 	int ret = 0;
 
 	rdsdebug("conn %p\n", conn);
-
-	/*
-	 * If we get a temporary posting failure in this context then
-	 * we're really low and we want the caller to back off for a bit.
-	 */
-	mutex_lock(&ic->i_recv_mutex);
-	if (rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
-		ret = -ENOMEM;
-	else
-		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
-	mutex_unlock(&ic->i_recv_mutex);
-
 	if (rds_conn_up(conn))
 		rds_ib_attempt_ack(ic);
 
 	return ret;
 }
 
-int __init rds_ib_recv_init(void)
+int rds_ib_recv_init(void)
 {
 	struct sysinfo si;
 	int ret = -ENOMEM;
@@ -939,14 +1054,14 @@ int __init rds_ib_recv_init(void)
 
 	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
 					sizeof(struct rds_ib_incoming),
-					0, 0, NULL);
-	if (rds_ib_incoming_slab == NULL)
+					0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!rds_ib_incoming_slab)
 		goto out;
 
 	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
 					sizeof(struct rds_page_frag),
-					0, 0, NULL);
-	if (rds_ib_frag_slab == NULL)
+					0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!rds_ib_frag_slab)
 		kmem_cache_destroy(rds_ib_incoming_slab);
 	else
 		ret = 0;
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 17fa80803ab0..71f373c421bc 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -36,11 +36,49 @@
 #include <linux/dmapool.h>
 
 #include "rds.h"
-#include "rdma.h"
 #include "ib.h"
 
-static void rds_ib_send_rdma_complete(struct rds_message *rm,
-				      int wc_status)
+static char *rds_ib_wc_status_strings[] = {
+#define RDS_IB_WC_STATUS_STR(foo) \
+		[IB_WC_##foo] = __stringify(IB_WC_##foo)
+	RDS_IB_WC_STATUS_STR(SUCCESS),
+	RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
+	RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
+	RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
+	RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
+	RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
+	RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
+	RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
+	RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
+	RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
+	RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
+	RDS_IB_WC_STATUS_STR(REM_OP_ERR),
+	RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
+	RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
+	RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
+	RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
+	RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
+	RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
+	RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
+	RDS_IB_WC_STATUS_STR(FATAL_ERR),
+	RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
+	RDS_IB_WC_STATUS_STR(GENERAL_ERR),
+#undef RDS_IB_WC_STATUS_STR
+};
+
+char *rds_ib_wc_status_str(enum ib_wc_status status)
+{
+	return rds_str_array(rds_ib_wc_status_strings,
+			     ARRAY_SIZE(rds_ib_wc_status_strings), status);
+}
+
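The table above combines two idioms worth knowing: __stringify turns the enum token into its own name at compile time, and the designated initializer keeps each string slot aligned with its enum value even if the enum grows or is sparse. A self-contained userspace version, recreating the kernel helpers locally with simplified illustrative names, looks like this:

/* status_str_demo.c - the stringification table idiom, standalone. */
#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)
#define ARRAY_SIZE(a)    (sizeof(a) / sizeof((a)[0]))

enum wc_status { WC_SUCCESS, WC_LOC_LEN_ERR, WC_WR_FLUSH_ERR };

/* Designated initializers pin each string to its enum slot. */
static const char *wc_status_strings[] = {
#define WC_STATUS_STR(foo) [WC_##foo] = __stringify(WC_##foo)
	WC_STATUS_STR(SUCCESS),
	WC_STATUS_STR(LOC_LEN_ERR),
	WC_STATUS_STR(WR_FLUSH_ERR),
#undef WC_STATUS_STR
};

/* Bounds-checked lookup, in the spirit of rds_str_array(): never NULL. */
static const char *wc_status_str(unsigned status)
{
	if (status < ARRAY_SIZE(wc_status_strings) && wc_status_strings[status])
		return wc_status_strings[status];
	return "unknown";
}

int main(void)
{
	printf("%s\n", wc_status_str(WC_WR_FLUSH_ERR)); /* WC_WR_FLUSH_ERR */
	printf("%s\n", wc_status_str(42));              /* unknown */
	return 0;
}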
+/*
+ * Convert IB-specific error message to RDS error message and call core
+ * completion handler.
+ */
+static void rds_ib_send_complete(struct rds_message *rm,
+				 int wc_status,
+				 void (*complete)(struct rds_message *rm, int status))
 {
 	int notify_status;
 
@@ -60,69 +98,125 @@ static void rds_ib_send_rdma_complete(struct rds_message *rm,
 		notify_status = RDS_RDMA_OTHER_ERROR;
 		break;
 	}
-	rds_rdma_send_complete(rm, notify_status);
+	complete(rm, notify_status);
+}
+
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+				   struct rm_data_op *op,
+				   int wc_status)
+{
+	if (op->op_nents)
+		ib_dma_unmap_sg(ic->i_cm_id->device,
+				op->op_sg, op->op_nents,
+				DMA_TO_DEVICE);
 }
 
 static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
-				   struct rds_rdma_op *op)
+				   struct rm_rdma_op *op,
+				   int wc_status)
 {
-	if (op->r_mapped) {
+	if (op->op_mapped) {
 		ib_dma_unmap_sg(ic->i_cm_id->device,
-				op->r_sg, op->r_nents,
-				op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		op->r_mapped = 0;
+				op->op_sg, op->op_nents,
+				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		op->op_mapped = 0;
 	}
+
+	/* If the user asked for a completion notification on this
+	 * message, we can implement three different semantics:
+	 * 1. Notify when we received the ACK on the RDS message
+	 *    that was queued with the RDMA. This provides reliable
+	 *    notification of RDMA status at the expense of a one-way
+	 *    packet delay.
+	 * 2. Notify when the IB stack gives us the completion event for
+	 *    the RDMA operation.
+	 * 3. Notify when the IB stack gives us the completion event for
+	 *    the accompanying RDS messages.
+	 * Here, we implement approach #3. To implement approach #2,
+	 * we would need to take an event for the rdma WR. To implement #1,
+	 * don't call rds_rdma_send_complete at all, and fall back to the notify
+	 * handling in the ACK processing code.
+	 *
+	 * Note: There's no need to explicitly sync any RDMA buffers using
+	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
+	 * operation itself unmapped the RDMA buffers, which takes care
+	 * of synching.
+	 */
+	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
+			     wc_status, rds_rdma_send_complete);
+
+	if (op->op_write)
+		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
+	else
+		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
 }
 
-static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
-				 struct rds_ib_send_work *send,
+static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
+				     struct rm_atomic_op *op,
 				 int wc_status)
 {
-	struct rds_message *rm = send->s_rm;
-
-	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
-
-	ib_dma_unmap_sg(ic->i_cm_id->device,
-			rm->m_sg, rm->m_nents,
-			DMA_TO_DEVICE);
-
-	if (rm->m_rdma_op != NULL) {
-		rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
-
-		/* If the user asked for a completion notification on this
-		 * message, we can implement three different semantics:
-		 * 1. Notify when we received the ACK on the RDS message
-		 *    that was queued with the RDMA. This provides reliable
-		 *    notification of RDMA status at the expense of a one-way
-		 *    packet delay.
-		 * 2. Notify when the IB stack gives us the completion event for
-		 *    the RDMA operation.
-		 * 3. Notify when the IB stack gives us the completion event for
-		 *    the accompanying RDS messages.
-		 * Here, we implement approach #3. To implement approach #2,
-		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
-		 * don't call rds_rdma_send_complete at all, and fall back to the notify
-		 * handling in the ACK processing code.
-		 *
-		 * Note: There's no need to explicitly sync any RDMA buffers using
-		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
-		 * operation itself unmapped the RDMA buffers, which takes care
-		 * of synching.
-		 */
-		rds_ib_send_rdma_complete(rm, wc_status);
-
-		if (rm->m_rdma_op->r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
-		else
-			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+	/* unmap atomic recvbuf */
+	if (op->op_mapped) {
+		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
+				DMA_FROM_DEVICE);
+		op->op_mapped = 0;
+	}
+
+	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
+			     wc_status, rds_atomic_send_complete);
+
+	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
+		rds_ib_stats_inc(s_ib_atomic_cswp);
+	else
+		rds_ib_stats_inc(s_ib_atomic_fadd);
+}
+
+/*
+ * Unmap the resources associated with a struct send_work.
+ *
+ * Returns the rm for no good reason other than it is unobtainable
+ * other than by switching on wr.opcode, currently, and the caller,
+ * the event handler, needs it.
+ */
+static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
+						struct rds_ib_send_work *send,
+						int wc_status)
+{
+	struct rds_message *rm = NULL;
+
+	/* In the error case, wc.opcode sometimes contains garbage */
+	switch (send->s_wr.opcode) {
+	case IB_WR_SEND:
+		if (send->s_op) {
+			rm = container_of(send->s_op, struct rds_message, data);
+			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
+		}
+		break;
+	case IB_WR_RDMA_WRITE:
+	case IB_WR_RDMA_READ:
+		if (send->s_op) {
+			rm = container_of(send->s_op, struct rds_message, rdma);
+			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
+		}
+		break;
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		if (send->s_op) {
+			rm = container_of(send->s_op, struct rds_message, atomic);
+			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
+		break;
+	default:
+		if (printk_ratelimit())
+			printk(KERN_NOTICE
+			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
+			       __func__, send->s_wr.opcode);
+		break;
 	}
 
-	/* If anyone waited for this message to get flushed out, wake
-	 * them up now */
-	rds_message_unmapped(rm);
+	send->s_wr.opcode = 0xdead;
 
-	rds_message_put(rm);
-	send->s_rm = NULL;
+	return rm;
 }
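The switch above leans on one trick: each op struct is embedded inside struct rds_message, so a pointer to the embedded member can be walked back to the containing message with container_of. A minimal standalone sketch of that recovery, with simplified stand-in types for the rds structures:

/* container_of_demo.c - recovering the containing struct, as unmap_op does. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rm_data_op { int op_nents; };
struct rm_rdma_op { int op_mapped; };

struct rds_message {
	int seq;
	struct rm_data_op data;   /* an IB_WR_SEND completion points here */
	struct rm_rdma_op rdma;   /* an RDMA_READ/WRITE completion points here */
};

int main(void)
{
	struct rds_message msg = { .seq = 7 };
	struct rm_rdma_op *op = &msg.rdma;   /* what send->s_op would hold */

	/* Walk back from the member to the enclosing message. */
	struct rds_message *rm = container_of(op, struct rds_message, rdma);
	printf("recovered message seq=%d\n", rm->seq);   /* prints 7 */
	return 0;
}

The opcode switch is what tells the handler which member the pointer refers to, which is why the comment calls the returned rm "unobtainable other than by switching on wr.opcode".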
 
 void rds_ib_send_init_ring(struct rds_ib_connection *ic)
@@ -133,23 +227,18 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic)
 	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
 		struct ib_sge *sge;
 
-		send->s_rm = NULL;
 		send->s_op = NULL;
 
 		send->s_wr.wr_id = i;
 		send->s_wr.sg_list = send->s_sge;
-		send->s_wr.num_sge = 1;
-		send->s_wr.opcode = IB_WR_SEND;
-		send->s_wr.send_flags = 0;
 		send->s_wr.ex.imm_data = 0;
 
-		sge = rds_ib_data_sge(ic, send->s_sge);
-		sge->lkey = ic->i_mr->lkey;
-
-		sge = rds_ib_header_sge(ic, send->s_sge);
+		sge = &send->s_sge[0];
 		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
 		sge->length = sizeof(struct rds_header);
 		sge->lkey = ic->i_mr->lkey;
+
+		send->s_sge[1].lkey = ic->i_mr->lkey;
 	}
 }
 
@@ -159,16 +248,24 @@ void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
 	u32 i;
 
 	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
-		if (send->s_wr.opcode == 0xdead)
-			continue;
-		if (send->s_rm)
-			rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
-		if (send->s_op)
-			rds_ib_send_unmap_rdma(ic, send->s_op);
+		if (send->s_op && send->s_wr.opcode != 0xdead)
+			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
 	}
 }
 
 /*
+ * The only fast path caller always has a non-zero nr, so we don't
+ * bother testing nr before performing the atomic sub.
+ */
+static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
+{
+	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
+	    waitqueue_active(&rds_ib_ring_empty_wait))
+		wake_up(&rds_ib_ring_empty_wait);
+	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
+}
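The counter being decremented here tracks how many signaled (completion-generating) sends are still in flight, so that shutdown can sleep until the last one is reaped. A rough userspace analogue of that pattern, using POSIX threads and a condition variable as a stand-in for the kernel waitqueue (all names illustrative):

/* signaled_sends_demo.c - drain accounting like i_signaled_sends above. */
#include <pthread.h>
#include <stdio.h>

static int signaled_sends;                /* like ic->i_signaled_sends */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  empty = PTHREAD_COND_INITIALIZER;

static void add_signaled(int nr)          /* post path */
{
	pthread_mutex_lock(&lock);
	signaled_sends += nr;
	pthread_mutex_unlock(&lock);
}

static void sub_signaled(int nr)          /* completion path */
{
	pthread_mutex_lock(&lock);
	signaled_sends -= nr;
	if (signaled_sends == 0)
		pthread_cond_broadcast(&empty);   /* like wake_up() */
	pthread_mutex_unlock(&lock);
}

static void *completer(void *arg)
{
	(void)arg;
	sub_signaled(2);                  /* two signaled WRs completed */
	return NULL;
}

int main(void)
{
	pthread_t t;

	add_signaled(2);                  /* post two signaled sends */
	pthread_create(&t, NULL, completer, NULL);

	pthread_mutex_lock(&lock);        /* shutdown: wait for drain */
	while (signaled_sends != 0)
		pthread_cond_wait(&empty, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("all signaled sends drained");
	return 0;
}

Build with -lpthread. The kernel version gets the same effect lock-free via atomic_sub_return plus waitqueue_active.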
+
+/*
  * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
  * operations performed in the send path. As the sender allocs and potentially
  * unallocs the next free entry in the ring it doesn't alter which is
@@ -178,12 +275,14 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 {
 	struct rds_connection *conn = context;
 	struct rds_ib_connection *ic = conn->c_transport_data;
+	struct rds_message *rm = NULL;
 	struct ib_wc wc;
 	struct rds_ib_send_work *send;
 	u32 completed;
 	u32 oldest;
 	u32 i = 0;
 	int ret;
+	int nr_sig = 0;
 
 	rdsdebug("cq %p conn %p\n", cq, conn);
 	rds_ib_stats_inc(s_ib_tx_cq_call);
@@ -192,8 +291,9 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
-			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
+		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+			 (unsigned long long)wc.wr_id, wc.status,
+			 rds_ib_wc_status_str(wc.status), wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
 		rds_ib_stats_inc(s_ib_tx_cq_event);
 
@@ -210,51 +310,30 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
 		for (i = 0; i < completed; i++) {
 			send = &ic->i_sends[oldest];
+			if (send->s_wr.send_flags & IB_SEND_SIGNALED)
+				nr_sig++;
 
-			/* In the error case, wc.opcode sometimes contains garbage */
-			switch (send->s_wr.opcode) {
-			case IB_WR_SEND:
-				if (send->s_rm)
-					rds_ib_send_unmap_rm(ic, send, wc.status);
-				break;
-			case IB_WR_RDMA_WRITE:
-			case IB_WR_RDMA_READ:
-				/* Nothing to be done - the SG list will be unmapped
-				 * when the SEND completes. */
-				break;
-			default:
-				if (printk_ratelimit())
-					printk(KERN_NOTICE
-					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
-					       __func__, send->s_wr.opcode);
-				break;
-			}
+			rm = rds_ib_send_unmap_op(ic, send, wc.status);
 
-			send->s_wr.opcode = 0xdead;
-			send->s_wr.num_sge = 1;
 			if (send->s_queued + HZ/2 < jiffies)
 				rds_ib_stats_inc(s_ib_tx_stalled);
 
-			/* If a RDMA operation produced an error, signal this right
-			 * away. If we don't, the subsequent SEND that goes with this
-			 * RDMA will be canceled with ERR_WFLUSH, and the application
-			 * never learn that the RDMA failed. */
-			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
-				struct rds_message *rm;
-
-				rm = rds_send_get_message(conn, send->s_op);
-				if (rm) {
-					if (rm->m_rdma_op)
-						rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
-					rds_ib_send_rdma_complete(rm, wc.status);
-					rds_message_put(rm);
-				}
+			if (send->s_op) {
+				if (send->s_op == rm->m_final_op) {
+					/* If anyone waited for this message to get flushed out, wake
+					 * them up now */
+					rds_message_unmapped(rm);
+				}
+				rds_message_put(rm);
+				send->s_op = NULL;
 			}
 
 			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
 		}
 
 		rds_ib_ring_free(&ic->i_send_ring, completed);
+		rds_ib_sub_signaled(ic, nr_sig);
+		nr_sig = 0;
 
 		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
 		    test_bit(0, &conn->c_map_queued))
@@ -262,10 +341,10 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
 		/* We expect errors as the qp is drained during shutdown */
 		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
-			rds_ib_conn_error(conn,
-				"send completion on %pI4 "
-				"had status %u, disconnecting and reconnecting\n",
-				&conn->c_faddr, wc.status);
+			rds_ib_conn_error(conn, "send completion on %pI4 had status "
+					  "%u (%s), disconnecting and reconnecting\n",
+					  &conn->c_faddr, wc.status,
+					  rds_ib_wc_status_str(wc.status));
 		}
 	}
 }
@@ -294,7 +373,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
  * credits (see rds_ib_send_add_credits below).
  *
  * The RDS send code is essentially single-threaded; rds_send_xmit
- * grabs c_send_lock to ensure exclusive access to the send ring.
+ * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
  * However, the ACK sending code is independent and can race with
  * message SENDs.
 *
@@ -413,40 +492,21 @@ void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
 	set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 }
 
-static inline void
-rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
-			struct rds_ib_send_work *send, unsigned int pos,
-			unsigned long buffer, unsigned int length,
-			int send_flags)
+static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
+					     struct rds_ib_send_work *send,
+					     bool notify)
 {
-	struct ib_sge *sge;
-
-	WARN_ON(pos != send - ic->i_sends);
-
-	send->s_wr.send_flags = send_flags;
-	send->s_wr.opcode = IB_WR_SEND;
-	send->s_wr.num_sge = 2;
-	send->s_wr.next = NULL;
-	send->s_queued = jiffies;
-	send->s_op = NULL;
-
-	if (length != 0) {
-		sge = rds_ib_data_sge(ic, send->s_sge);
-		sge->addr = buffer;
-		sge->length = length;
-		sge->lkey = ic->i_mr->lkey;
-
-		sge = rds_ib_header_sge(ic, send->s_sge);
-	} else {
-		/* We're sending a packet with no payload. There is only
-		 * one SGE */
-		send->s_wr.num_sge = 1;
-		sge = &send->s_sge[0];
+	/*
+	 * We want to delay signaling completions just enough to get
+	 * the batching benefits but not so much that we create dead time
+	 * on the wire.
+	 */
+	if (ic->i_unsignaled_wrs-- == 0 || notify) {
+		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
+		send->s_wr.send_flags |= IB_SEND_SIGNALED;
+		return 1;
 	}
-
-	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
-	sge->length = sizeof(struct rds_header);
-	sge->lkey = ic->i_mr->lkey;
+	return 0;
 }
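The policy above asks the HCA for a completion event only on every Nth work request (or when the caller demands notification), so completions are reaped in batches instead of one interrupt per send. A standalone sketch of the same countdown, where MAX_UNSIG_WRS and the flag bit are illustrative values rather than the real sysctl:

/* signal_batch_demo.c - the completion-batching countdown, standalone. */
#include <stdio.h>
#include <stdbool.h>

#define SEND_SIGNALED 0x1
#define MAX_UNSIG_WRS 16          /* mirrors rds_ib_sysctl_max_unsig_wrs */

struct send_work { unsigned send_flags; };

static int unsignaled_wrs = MAX_UNSIG_WRS;

/* Returns 1 if this WR was marked signaled, 0 otherwise. */
static int set_wr_signal_state(struct send_work *send, bool notify)
{
	/* Delay signaling long enough to batch completions, but not so
	 * long that the send queue fills with unreaped entries. */
	if (unsignaled_wrs-- == 0 || notify) {
		unsignaled_wrs = MAX_UNSIG_WRS;
		send->send_flags |= SEND_SIGNALED;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct send_work send;
	int nr_sig = 0;

	for (int i = 0; i < 40; i++) {
		send.send_flags = 0;
		nr_sig += set_wr_signal_state(&send, false);
	}
	/* roughly one signaled WR per MAX_UNSIG_WRS+1 posts */
	printf("signaled %d of 40 posts\n", nr_sig);
	return 0;
}

The return value matters: callers accumulate it into nr_sig so that rds_ib_sub_signaled can balance the books when the completions arrive.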
 
 /*
@@ -475,13 +535,14 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	u32 pos;
 	u32 i;
 	u32 work_alloc;
-	u32 credit_alloc;
+	u32 credit_alloc = 0;
 	u32 posted;
 	u32 adv_credits = 0;
 	int send_flags = 0;
-	int sent;
+	int bytes_sent = 0;
 	int ret;
 	int flow_controlled = 0;
+	int nr_sig = 0;
 
 	BUG_ON(off % RDS_FRAG_SIZE);
 	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
@@ -507,14 +568,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		goto out;
 	}
 
-	credit_alloc = work_alloc;
 	if (ic->i_flowctl) {
 		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
 		adv_credits += posted;
 		if (credit_alloc < work_alloc) {
 			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
 			work_alloc = credit_alloc;
-			flow_controlled++;
+			flow_controlled = 1;
 		}
 		if (work_alloc == 0) {
 			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
@@ -525,31 +585,25 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}
 
 	/* map the message the first time we see it */
-	if (ic->i_rm == NULL) {
-		/*
-		printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n",
-			be16_to_cpu(rm->m_inc.i_hdr.h_dport),
-			rm->m_inc.i_hdr.h_flags,
-			be32_to_cpu(rm->m_inc.i_hdr.h_len));
-		*/
-		if (rm->m_nents) {
-			rm->m_count = ib_dma_map_sg(dev,
-					rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
-			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
-			if (rm->m_count == 0) {
+	if (!ic->i_data_op) {
+		if (rm->data.op_nents) {
+			rm->data.op_count = ib_dma_map_sg(dev,
+							  rm->data.op_sg,
+							  rm->data.op_nents,
+							  DMA_TO_DEVICE);
+			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+			if (rm->data.op_count == 0) {
 				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 				ret = -ENOMEM; /* XXX ? */
 				goto out;
 			}
 		} else {
-			rm->m_count = 0;
+			rm->data.op_count = 0;
 		}
 
-		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
-		ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
 		rds_message_addref(rm);
-		ic->i_rm = rm;
+		ic->i_data_op = &rm->data;
 
 		/* Finalize the header */
 		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
@@ -559,10 +613,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 		/* If it has a RDMA op, tell the peer we did it. This is
 		 * used by the peer to release use-once RDMA MRs. */
-		if (rm->m_rdma_op) {
+		if (rm->rdma.op_active) {
 			struct rds_ext_header_rdma ext_hdr;
 
-			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 			rds_message_add_extension(&rm->m_inc.i_hdr,
 					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 		}
@@ -582,99 +636,77 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		/*
 		 * Update adv_credits since we reset the ACK_REQUIRED bit.
 		 */
-		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
-		adv_credits += posted;
-		BUG_ON(adv_credits > 255);
+		if (ic->i_flowctl) {
+			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
+			adv_credits += posted;
+			BUG_ON(adv_credits > 255);
+		}
 	}
 
-	send = &ic->i_sends[pos];
-	first = send;
-	prev = NULL;
-	scat = &rm->m_sg[sg];
-	sent = 0;
-	i = 0;
-
 	/* Sometimes you want to put a fence between an RDMA
 	 * READ and the following SEND.
 	 * We could either do this all the time
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 
-	/*
-	 * We could be copying the header into the unused tail of the page.
-	 * That would need to be changed in the future when those pages might
-	 * be mapped userspace pages or page cache pages. So instead we always
-	 * use a second sge and our long-lived ring of mapped headers. We send
-	 * the header after the data so that the data payload can be aligned on
-	 * the receiver.
-	 */
-
-	/* handle a 0-len message */
-	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
-		rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
-		goto add_header;
-	}
+	/* Each frag gets a header. Msgs may be 0 bytes */
+	send = &ic->i_sends[pos];
+	first = send;
+	prev = NULL;
+	scat = &ic->i_data_op->op_sg[sg];
+	i = 0;
+	do {
+		unsigned int len = 0;
 
-	/* if there's data reference it with a chain of work reqs */
-	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
-		unsigned int len;
+		/* Set up the header */
+		send->s_wr.send_flags = send_flags;
+		send->s_wr.opcode = IB_WR_SEND;
+		send->s_wr.num_sge = 1;
+		send->s_wr.next = NULL;
+		send->s_queued = jiffies;
+		send->s_op = NULL;
 
-		send = &ic->i_sends[pos];
+		send->s_sge[0].addr = ic->i_send_hdrs_dma
+			+ (pos * sizeof(struct rds_header));
+		send->s_sge[0].length = sizeof(struct rds_header);
 
-		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
-		rds_ib_xmit_populate_wr(ic, send, pos,
-				ib_sg_dma_address(dev, scat) + off, len,
-				send_flags);
+		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
 
-		/*
-		 * We want to delay signaling completions just enough to get
-		 * the batching benefits but not so much that we create dead time
-		 * on the wire.
-		 */
-		if (ic->i_unsignaled_wrs-- == 0) {
-			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
-			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
-		}
+		/* Set up the data, if present */
+		if (i < work_alloc
+		    && scat != &rm->data.op_sg[rm->data.op_count]) {
+			len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
+			send->s_wr.num_sge = 2;
 
-		ic->i_unsignaled_bytes -= len;
-		if (ic->i_unsignaled_bytes <= 0) {
-			ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
-			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+			send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
+			send->s_sge[1].length = len;
+
+			bytes_sent += len;
+			off += len;
+			if (off == ib_sg_dma_len(dev, scat)) {
+				scat++;
+				off = 0;
+			}
 		}
 
+		rds_ib_set_wr_signal_state(ic, send, 0);
+
 		/*
 		 * Always signal the last one if we're stopping due to flow control.
 		 */
-		if (flow_controlled && i == (work_alloc-1))
+		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
 			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 
+		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
+			nr_sig++;
+
 		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
 
-		sent += len;
-		off += len;
-		if (off == ib_sg_dma_len(dev, scat)) {
-			scat++;
-			off = 0;
-		}
-
-add_header:
-		/* Tack on the header after the data. The header SGE should already
-		 * have been set up to point to the right header buffer. */
-		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
-
-		if (0) {
-			struct rds_header *hdr = &ic->i_send_hdrs[pos];
-
-			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
-				be16_to_cpu(hdr->h_dport),
-				hdr->h_flags,
-				be32_to_cpu(hdr->h_len));
-		}
-		if (adv_credits) {
+		if (ic->i_flowctl && adv_credits) {
 			struct rds_header *hdr = &ic->i_send_hdrs[pos];
 
 			/* add credit and redo the header checksum */
@@ -689,20 +721,25 @@ add_header:
 		prev = send;
 
 		pos = (pos + 1) % ic->i_send_ring.w_nr;
-	}
+		send = &ic->i_sends[pos];
+		i++;
+
+	} while (i < work_alloc
+		 && scat != &rm->data.op_sg[rm->data.op_count]);
 
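The do/while above walks the scatterlist one fragment at a time: every pass posts one work request with the header in sge[0] and up to RDS_FRAG_SIZE bytes of payload in sge[1], advancing to the next sg entry once the current one is consumed. Reduced to its arithmetic, with arbitrary sample sg lengths, the walk is just:

/* frag_walk_demo.c - the per-fragment scatterlist walk, standalone. */
#include <stdio.h>

#define FRAG_SIZE 4096u

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned sg_len[] = { 6000, 2000, 9000 };   /* dma_len of each entry */
	unsigned nsg = 3, scat = 0, off = 0, frag = 0;

	while (scat < nsg) {
		/* One work request per fragment: header plus up to
		 * FRAG_SIZE bytes taken from the current sg entry. */
		unsigned len = min_u(FRAG_SIZE, sg_len[scat] - off);

		printf("frag %u: sg %u offset %u len %u\n", frag++, scat, off, len);

		off += len;
		if (off == sg_len[scat]) {   /* entry consumed, move on */
			scat++;
			off = 0;
		}
	}
	return 0;
}

Note that a fragment can be shorter than FRAG_SIZE when an sg entry runs out mid-fragment, which is why the receive side trusts the header's length rather than the fragment count.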
 	/* Account the RDS header in the number of bytes we sent, but just once.
 	 * The caller has no concept of fragmentation. */
 	if (hdr_off == 0)
-		sent += sizeof(struct rds_header);
+		bytes_sent += sizeof(struct rds_header);
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &rm->m_sg[rm->m_count]) {
-		prev->s_rm = ic->i_rm;
-		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
-		ic->i_rm = NULL;
+	if (scat == &rm->data.op_sg[rm->data.op_count]) {
+		prev->s_op = ic->i_data_op;
+		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
+		ic->i_data_op = NULL;
 	}
 
+	/* Put back wrs & credits we didn't use */
 	if (i < work_alloc) {
 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
 		work_alloc = i;
@@ -710,6 +747,9 @@ add_header:
 	if (ic->i_flowctl && i < credit_alloc)
 		rds_ib_send_add_credits(conn, credit_alloc - i);
 
+	if (nr_sig)
+		atomic_add(nr_sig, &ic->i_signaled_sends);
+
 	/* XXX need to worry about failed_wr and partial sends. */
 	failed_wr = &first->s_wr;
 	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
@@ -720,32 +760,127 @@ add_header:
 		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
 		       "returned %d\n", &conn->c_faddr, ret);
 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
-		if (prev->s_rm) {
-			ic->i_rm = prev->s_rm;
-			prev->s_rm = NULL;
+		rds_ib_sub_signaled(ic, nr_sig);
+		if (prev->s_op) {
+			ic->i_data_op = prev->s_op;
+			prev->s_op = NULL;
 		}
 
 		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
 		goto out;
 	}
 
-	ret = sent;
+	ret = bytes_sent;
 out:
 	BUG_ON(adv_credits);
 	return ret;
 }
 
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+/*
+ * Issue atomic operation.
+ * A simplified version of the rdma case, we always map 1 SG, and
+ * only 8 bytes, for the return value from the atomic operation.
+ */
+int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
+{
+	struct rds_ib_connection *ic = conn->c_transport_data;
+	struct rds_ib_send_work *send = NULL;
+	struct ib_send_wr *failed_wr;
+	struct rds_ib_device *rds_ibdev;
+	u32 pos;
+	u32 work_alloc;
+	int ret;
+	int nr_sig = 0;
+
+	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
+
+	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
+	if (work_alloc != 1) {
+		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+		rds_ib_stats_inc(s_ib_tx_ring_full);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* address of send request in ring */
+	send = &ic->i_sends[pos];
+	send->s_queued = jiffies;
+
+	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
+		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
+		send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
+		send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
+		send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
+		send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
+	} else { /* FADD */
+		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+		send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
+		send->s_wr.wr.atomic.swap = 0;
+		send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
+		send->s_wr.wr.atomic.swap_mask = 0;
+	}
+	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
+	send->s_wr.num_sge = 1;
+	send->s_wr.next = NULL;
+	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
+	send->s_wr.wr.atomic.rkey = op->op_rkey;
+	send->s_op = op;
+	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
+
+	/* map 8 byte retval buffer to the device */
+	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
+	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
+	if (ret != 1) {
+		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
+		ret = -ENOMEM; /* XXX ? */
+		goto out;
+	}
+
+	/* Convert our struct scatterlist to struct ib_sge */
+	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
+	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
+	send->s_sge[0].lkey = ic->i_mr->lkey;
+
+	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
+		 send->s_sge[0].addr, send->s_sge[0].length);
+
+	if (nr_sig)
+		atomic_add(nr_sig, &ic->i_signaled_sends);
+
+	failed_wr = &send->s_wr;
+	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
+	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
+		 send, &send->s_wr, ret, failed_wr);
+	BUG_ON(failed_wr != &send->s_wr);
+	if (ret) {
+		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
+		       "returned %d\n", &conn->c_faddr, ret);
+		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+		rds_ib_sub_signaled(ic, nr_sig);
+		goto out;
+	}
+
+	if (unlikely(failed_wr != &send->s_wr)) {
+		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
+		BUG_ON(failed_wr != &send->s_wr);
+	}
+
+out:
+	return ret;
+}
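For readers unfamiliar with the masked-atomic verbs extension used above: a masked compare-and-swap compares only the bits selected by compare_add_mask and replaces only the bits selected by swap_mask, leaving the rest of the 64-bit word untouched. A userspace model of that CSWP case, following the usual reading of the extension (consult the verbs specification before relying on details):

/* masked_cswp_model.c - what the CSWP fields above ask the HCA to do. */
#include <stdio.h>
#include <stdint.h>

/* Returns the prior value; *target changes only if the masked compare hits. */
static uint64_t masked_cswp(uint64_t *target, uint64_t compare, uint64_t swap,
			    uint64_t compare_mask, uint64_t swap_mask)
{
	uint64_t old = *target;

	/* Compare only the bits selected by compare_mask... */
	if (((old ^ compare) & compare_mask) == 0)
		/* ...and replace only the bits selected by swap_mask. */
		*target = (old & ~swap_mask) | (swap & swap_mask);
	return old;
}

int main(void)
{
	uint64_t word = 0xAABB;

	/* Swap the low byte iff the low byte is 0xBB; high bits untouched. */
	uint64_t prior = masked_cswp(&word, 0xBB, 0x11, 0xFF, 0xFF);
	printf("prior=%#llx now=%#llx\n",
	       (unsigned long long)prior,
	       (unsigned long long)word);   /* 0xaabb -> 0xaa11 */
	return 0;
}

The FADD variant is analogous: nocarry_mask marks bit positions across which the add must not carry, letting several independent counters share one 64-bit word.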
+
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_send_work *send = NULL;
 	struct rds_ib_send_work *first;
 	struct rds_ib_send_work *prev;
 	struct ib_send_wr *failed_wr;
-	struct rds_ib_device *rds_ibdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
+	u32 max_sge = ic->rds_ibdev->max_sge;
 	u32 pos;
 	u32 work_alloc;
 	u32 i;
@@ -753,29 +888,28 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	int sent;
 	int ret;
 	int num_sge;
-
-	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
-
-	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents, (op->r_write) ?
-					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+	int nr_sig = 0;
+
+	/* map the op the first time we see it */
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+					     op->op_sg, op->op_nents, (op->op_write) ?
+					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
 
 	/*
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_ibdev->max_sge);
+	i = ceil(op->op_count, max_sge);
 
 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -788,30 +922,24 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
+	num_sge = op->op_count;
 
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
-		/*
-		 * We want to delay signaling completions just enough to get
-		 * the batching benefits but not so much that we create dead time on the wire.
-		 */
-		if (ic->i_unsignaled_wrs-- == 0) {
-			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
-			send->s_wr.send_flags = IB_SEND_SIGNALED;
-		}
+		send->s_op = NULL;
+
+		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
 
-		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
+		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
-		send->s_op = op;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 
-		if (num_sge > rds_ibdev->max_sge) {
-			send->s_wr.num_sge = rds_ibdev->max_sge;
-			num_sge -= rds_ibdev->max_sge;
+		if (num_sge > max_sge) {
+			send->s_wr.num_sge = max_sge;
+			num_sge -= max_sge;
 		} else {
 			send->s_wr.num_sge = num_sge;
 		}
@@ -821,7 +949,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
 
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 			send->s_sge[j].addr =
 				 ib_sg_dma_address(ic->i_cm_id->device, scat);
@@ -843,15 +971,20 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 			send = ic->i_sends;
 		}
 
-	/* if we finished the message then send completion owns it */
-	if (scat == &op->r_sg[op->r_count])
-		prev->s_wr.send_flags = IB_SEND_SIGNALED;
+	/* give a reference to the last op */
+	if (scat == &op->op_sg[op->op_count]) {
+		prev->s_op = op;
+		rds_message_addref(container_of(op, struct rds_message, rdma));
+	}
 
 	if (i < work_alloc) {
 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
 		work_alloc = i;
 	}
 
+	if (nr_sig)
+		atomic_add(nr_sig, &ic->i_signaled_sends);
+
 	failed_wr = &first->s_wr;
 	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
 	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
@@ -861,6 +994,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
 		       "returned %d\n", &conn->c_faddr, ret);
 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+		rds_ib_sub_signaled(ic, nr_sig);
 		goto out;
 	}
 
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index d2c904dd6fbc..2d5965d6e97c 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -67,6 +67,8 @@ static const char *const rds_ib_stat_names[] = {
 	"ib_rdma_mr_pool_flush",
 	"ib_rdma_mr_pool_wait",
 	"ib_rdma_mr_pool_depleted",
+	"ib_atomic_cswp",
+	"ib_atomic_fadd",
 };
 
 unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 03f01cb4e0fe..fc3da37220fd 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -49,10 +49,6 @@ unsigned long rds_ib_sysctl_max_unsig_wrs = 16;
 static unsigned long rds_ib_sysctl_max_unsig_wr_min = 1;
 static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64;
 
-unsigned long rds_ib_sysctl_max_unsig_bytes = (16 << 20);
-static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1;
-static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL;
-
 /*
  * This sysctl does nothing.
 *
@@ -94,15 +90,6 @@ ctl_table rds_ib_sysctl_table[] = {
 		.extra2		= &rds_ib_sysctl_max_unsig_wr_max,
 	},
 	{
-		.procname	= "max_unsignaled_bytes",
-		.data		= &rds_ib_sysctl_max_unsig_bytes,
-		.maxlen		= sizeof(unsigned long),
-		.mode		= 0644,
-		.proc_handler	= proc_doulongvec_minmax,
-		.extra1		= &rds_ib_sysctl_max_unsig_bytes_min,
-		.extra2		= &rds_ib_sysctl_max_unsig_bytes_max,
-	},
-	{
 		.procname = "max_recv_allocation",
 		.data = &rds_ib_sysctl_max_recv_allocation,
 		.maxlen = sizeof(unsigned long),
@@ -132,10 +119,10 @@ void rds_ib_sysctl_exit(void)
 	unregister_sysctl_table(rds_ib_sysctl_hdr);
 }
 
-int __init rds_ib_sysctl_init(void)
+int rds_ib_sysctl_init(void)
 {
 	rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table);
-	if (rds_ib_sysctl_hdr == NULL)
+	if (!rds_ib_sysctl_hdr)
 		return -ENOMEM;
 	return 0;
 }
diff --git a/net/rds/info.c b/net/rds/info.c
index c45c4173a44d..4fdf1b6e84ff 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -76,7 +76,7 @@ void rds_info_register_func(int optname, rds_info_func func)
 	BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST);
 
 	spin_lock(&rds_info_lock);
-	BUG_ON(rds_info_funcs[offset] != NULL);
+	BUG_ON(rds_info_funcs[offset]);
 	rds_info_funcs[offset] = func;
 	spin_unlock(&rds_info_lock);
 }
@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(rds_info_deregister_func);
 */
 void rds_info_iter_unmap(struct rds_info_iterator *iter)
 {
-	if (iter->addr != NULL) {
+	if (iter->addr) {
 		kunmap_atomic(iter->addr, KM_USER0);
 		iter->addr = NULL;
 	}
@@ -117,7 +117,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
 	unsigned long this;
 
 	while (bytes) {
-		if (iter->addr == NULL)
+		if (!iter->addr)
 			iter->addr = kmap_atomic(*iter->pages, KM_USER0);
 
 		this = min(bytes, PAGE_SIZE - iter->offset);
@@ -188,7 +188,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
 			>> PAGE_SHIFT;
 
 	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
-	if (pages == NULL) {
+	if (!pages) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -206,7 +206,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
 
 call_func:
 	func = rds_info_funcs[optname - RDS_INFO_FIRST];
-	if (func == NULL) {
+	if (!func) {
 		ret = -ENOPROTOOPT;
 		goto out;
 	}
@@ -234,7 +234,7 @@ call_func:
 		ret = -EFAULT;
 
 out:
-	for (i = 0; pages != NULL && i < nr_pages; i++)
+	for (i = 0; pages && i < nr_pages; i++)
 		put_page(pages[i]);
 	kfree(pages);
 
diff --git a/net/rds/iw.c b/net/rds/iw.c
index c8f3d3525cb9..56808cac0fc7 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -264,7 +264,6 @@ struct rds_transport rds_iw_transport = {
264 .laddr_check = rds_iw_laddr_check, 264 .laddr_check = rds_iw_laddr_check,
265 .xmit_complete = rds_iw_xmit_complete, 265 .xmit_complete = rds_iw_xmit_complete,
266 .xmit = rds_iw_xmit, 266 .xmit = rds_iw_xmit,
267 .xmit_cong_map = NULL,
268 .xmit_rdma = rds_iw_xmit_rdma, 267 .xmit_rdma = rds_iw_xmit_rdma,
269 .recv = rds_iw_recv, 268 .recv = rds_iw_recv,
270 .conn_alloc = rds_iw_conn_alloc, 269 .conn_alloc = rds_iw_conn_alloc,
@@ -272,7 +271,6 @@ struct rds_transport rds_iw_transport = {
272 .conn_connect = rds_iw_conn_connect, 271 .conn_connect = rds_iw_conn_connect,
273 .conn_shutdown = rds_iw_conn_shutdown, 272 .conn_shutdown = rds_iw_conn_shutdown,
274 .inc_copy_to_user = rds_iw_inc_copy_to_user, 273 .inc_copy_to_user = rds_iw_inc_copy_to_user,
275 .inc_purge = rds_iw_inc_purge,
276 .inc_free = rds_iw_inc_free, 274 .inc_free = rds_iw_inc_free,
277 .cm_initiate_connect = rds_iw_cm_initiate_connect, 275 .cm_initiate_connect = rds_iw_cm_initiate_connect,
278 .cm_handle_connect = rds_iw_cm_handle_connect, 276 .cm_handle_connect = rds_iw_cm_handle_connect,
@@ -289,7 +287,7 @@ struct rds_transport rds_iw_transport = {
289 .t_prefer_loopback = 1, 287 .t_prefer_loopback = 1,
290}; 288};
291 289
292int __init rds_iw_init(void) 290int rds_iw_init(void)
293{ 291{
294 int ret; 292 int ret;
295 293
diff --git a/net/rds/iw.h b/net/rds/iw.h
index eef2f0c28476..543e665fafe3 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -70,7 +70,7 @@ struct rds_iw_send_work {
70 struct rds_message *s_rm; 70 struct rds_message *s_rm;
71 71
72 /* We should really put these into a union: */ 72 /* We should really put these into a union: */
73 struct rds_rdma_op *s_op; 73 struct rm_rdma_op *s_op;
74 struct rds_iw_mapping *s_mapping; 74 struct rds_iw_mapping *s_mapping;
75 struct ib_mr *s_mr; 75 struct ib_mr *s_mr;
76 struct ib_fast_reg_page_list *s_page_list; 76 struct ib_fast_reg_page_list *s_page_list;
@@ -284,7 +284,7 @@ void rds_iw_conn_free(void *arg);
284int rds_iw_conn_connect(struct rds_connection *conn); 284int rds_iw_conn_connect(struct rds_connection *conn);
285void rds_iw_conn_shutdown(struct rds_connection *conn); 285void rds_iw_conn_shutdown(struct rds_connection *conn);
286void rds_iw_state_change(struct sock *sk); 286void rds_iw_state_change(struct sock *sk);
287int __init rds_iw_listen_init(void); 287int rds_iw_listen_init(void);
288void rds_iw_listen_stop(void); 288void rds_iw_listen_stop(void);
289void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...); 289void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
290int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id, 290int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -321,12 +321,11 @@ void rds_iw_flush_mrs(void);
321void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id); 321void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
322 322
323/* ib_recv.c */ 323/* ib_recv.c */
324int __init rds_iw_recv_init(void); 324int rds_iw_recv_init(void);
325void rds_iw_recv_exit(void); 325void rds_iw_recv_exit(void);
326int rds_iw_recv(struct rds_connection *conn); 326int rds_iw_recv(struct rds_connection *conn);
327int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, 327int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
328 gfp_t page_gfp, int prefill); 328 gfp_t page_gfp, int prefill);
329void rds_iw_inc_purge(struct rds_incoming *inc);
330void rds_iw_inc_free(struct rds_incoming *inc); 329void rds_iw_inc_free(struct rds_incoming *inc);
331int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 330int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
332 size_t size); 331 size_t size);
@@ -358,7 +357,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
358void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context); 357void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
359void rds_iw_send_init_ring(struct rds_iw_connection *ic); 358void rds_iw_send_init_ring(struct rds_iw_connection *ic);
360void rds_iw_send_clear_ring(struct rds_iw_connection *ic); 359void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
361int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op); 360int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
362void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits); 361void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
363void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted); 362void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
364int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted, 363int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
@@ -371,7 +370,7 @@ unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
371 unsigned int avail); 370 unsigned int avail);
372 371
373/* ib_sysctl.c */ 372/* ib_sysctl.c */
374int __init rds_iw_sysctl_init(void); 373int rds_iw_sysctl_init(void);
375void rds_iw_sysctl_exit(void); 374void rds_iw_sysctl_exit(void);
376extern unsigned long rds_iw_sysctl_max_send_wr; 375extern unsigned long rds_iw_sysctl_max_send_wr;
377extern unsigned long rds_iw_sysctl_max_recv_wr; 376extern unsigned long rds_iw_sysctl_max_recv_wr;
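The prototype churn in this header is one rename: struct rds_rdma_op becomes struct rm_rdma_op, reflecting that the op is no longer a separately allocated object hung off rm->m_rdma_op but a member embedded in struct rds_message (the message.c and rdma.c hunks below do the embedding). A sketch of where this is headed; the member names are taken from the hunks, the exact layout is an assumption:

#include <linux/scatterlist.h>
#include <linux/types.h>

struct rds_notifier;

/* Sketch only: an embedded op carries op_active, so callers test a
 * flag instead of a NULL pointer. */
struct rm_rdma_op {
	u32			op_rkey;
	u64			op_remote_addr;
	unsigned int		op_write:1,
				op_fence:1,
				op_notify:1,
				op_recverr:1,
				op_mapped:1,
				op_silent:1,
				op_active:1;
	unsigned int		op_bytes;
	unsigned int		op_nents;	/* sg entries filled */
	unsigned int		op_count;	/* sg entries after DMA mapping */
	struct scatterlist	*op_sg;
	struct rds_notifier	*op_notifier;
};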
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index b5dd6ac39be8..712cf2d1f28e 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -257,7 +257,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
257 * the rds_iwdev at all. 257 * the rds_iwdev at all.
258 */ 258 */
259 rds_iwdev = ib_get_client_data(dev, &rds_iw_client); 259 rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
260 if (rds_iwdev == NULL) { 260 if (!rds_iwdev) {
261 if (printk_ratelimit()) 261 if (printk_ratelimit())
262 printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n", 262 printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
263 dev->name); 263 dev->name);
@@ -292,7 +292,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
292 ic->i_send_ring.w_nr * 292 ic->i_send_ring.w_nr *
293 sizeof(struct rds_header), 293 sizeof(struct rds_header),
294 &ic->i_send_hdrs_dma, GFP_KERNEL); 294 &ic->i_send_hdrs_dma, GFP_KERNEL);
295 if (ic->i_send_hdrs == NULL) { 295 if (!ic->i_send_hdrs) {
296 ret = -ENOMEM; 296 ret = -ENOMEM;
297 rdsdebug("ib_dma_alloc_coherent send failed\n"); 297 rdsdebug("ib_dma_alloc_coherent send failed\n");
298 goto out; 298 goto out;
@@ -302,7 +302,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
302 ic->i_recv_ring.w_nr * 302 ic->i_recv_ring.w_nr *
303 sizeof(struct rds_header), 303 sizeof(struct rds_header),
304 &ic->i_recv_hdrs_dma, GFP_KERNEL); 304 &ic->i_recv_hdrs_dma, GFP_KERNEL);
305 if (ic->i_recv_hdrs == NULL) { 305 if (!ic->i_recv_hdrs) {
306 ret = -ENOMEM; 306 ret = -ENOMEM;
307 rdsdebug("ib_dma_alloc_coherent recv failed\n"); 307 rdsdebug("ib_dma_alloc_coherent recv failed\n");
308 goto out; 308 goto out;
@@ -310,14 +310,14 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
310 310
311 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), 311 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
312 &ic->i_ack_dma, GFP_KERNEL); 312 &ic->i_ack_dma, GFP_KERNEL);
313 if (ic->i_ack == NULL) { 313 if (!ic->i_ack) {
314 ret = -ENOMEM; 314 ret = -ENOMEM;
315 rdsdebug("ib_dma_alloc_coherent ack failed\n"); 315 rdsdebug("ib_dma_alloc_coherent ack failed\n");
316 goto out; 316 goto out;
317 } 317 }
318 318
319 ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work)); 319 ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
320 if (ic->i_sends == NULL) { 320 if (!ic->i_sends) {
321 ret = -ENOMEM; 321 ret = -ENOMEM;
322 rdsdebug("send allocation failed\n"); 322 rdsdebug("send allocation failed\n");
323 goto out; 323 goto out;
@@ -325,7 +325,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
325 rds_iw_send_init_ring(ic); 325 rds_iw_send_init_ring(ic);
326 326
327 ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work)); 327 ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
328 if (ic->i_recvs == NULL) { 328 if (!ic->i_recvs) {
329 ret = -ENOMEM; 329 ret = -ENOMEM;
330 rdsdebug("recv allocation failed\n"); 330 rdsdebug("recv allocation failed\n");
331 goto out; 331 goto out;
@@ -696,7 +696,7 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
696 696
697 /* XXX too lazy? */ 697 /* XXX too lazy? */
698 ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL); 698 ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL);
699 if (ic == NULL) 699 if (!ic)
700 return -ENOMEM; 700 return -ENOMEM;
701 701
702 INIT_LIST_HEAD(&ic->iw_node); 702 INIT_LIST_HEAD(&ic->iw_node);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 13dc1862d862..4e152e2daa3d 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -34,7 +34,6 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35 35
36#include "rds.h" 36#include "rds.h"
37#include "rdma.h"
38#include "iw.h" 37#include "iw.h"
39 38
40 39
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 3d479067d54d..5e57347f49ff 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -53,7 +53,7 @@ static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
53static void rds_iw_frag_free(struct rds_page_frag *frag) 53static void rds_iw_frag_free(struct rds_page_frag *frag)
54{ 54{
55 rdsdebug("frag %p page %p\n", frag, frag->f_page); 55 rdsdebug("frag %p page %p\n", frag, frag->f_page);
56 BUG_ON(frag->f_page != NULL); 56 BUG_ON(frag->f_page);
57 kmem_cache_free(rds_iw_frag_slab, frag); 57 kmem_cache_free(rds_iw_frag_slab, frag);
58} 58}
59 59
@@ -143,14 +143,14 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
143 struct ib_sge *sge; 143 struct ib_sge *sge;
144 int ret = -ENOMEM; 144 int ret = -ENOMEM;
145 145
146 if (recv->r_iwinc == NULL) { 146 if (!recv->r_iwinc) {
147 if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) { 147 if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
148 rds_iw_stats_inc(s_iw_rx_alloc_limit); 148 rds_iw_stats_inc(s_iw_rx_alloc_limit);
149 goto out; 149 goto out;
150 } 150 }
151 recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab, 151 recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
152 kptr_gfp); 152 kptr_gfp);
153 if (recv->r_iwinc == NULL) { 153 if (!recv->r_iwinc) {
154 atomic_dec(&rds_iw_allocation); 154 atomic_dec(&rds_iw_allocation);
155 goto out; 155 goto out;
156 } 156 }
@@ -158,17 +158,17 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
158 rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr); 158 rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
159 } 159 }
160 160
161 if (recv->r_frag == NULL) { 161 if (!recv->r_frag) {
162 recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp); 162 recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
163 if (recv->r_frag == NULL) 163 if (!recv->r_frag)
164 goto out; 164 goto out;
165 INIT_LIST_HEAD(&recv->r_frag->f_item); 165 INIT_LIST_HEAD(&recv->r_frag->f_item);
166 recv->r_frag->f_page = NULL; 166 recv->r_frag->f_page = NULL;
167 } 167 }
168 168
169 if (ic->i_frag.f_page == NULL) { 169 if (!ic->i_frag.f_page) {
170 ic->i_frag.f_page = alloc_page(page_gfp); 170 ic->i_frag.f_page = alloc_page(page_gfp);
171 if (ic->i_frag.f_page == NULL) 171 if (!ic->i_frag.f_page)
172 goto out; 172 goto out;
173 ic->i_frag.f_offset = 0; 173 ic->i_frag.f_offset = 0;
174 } 174 }
@@ -273,7 +273,7 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
273 return ret; 273 return ret;
274} 274}
275 275
276void rds_iw_inc_purge(struct rds_incoming *inc) 276static void rds_iw_inc_purge(struct rds_incoming *inc)
277{ 277{
278 struct rds_iw_incoming *iwinc; 278 struct rds_iw_incoming *iwinc;
279 struct rds_page_frag *frag; 279 struct rds_page_frag *frag;
@@ -716,7 +716,7 @@ static void rds_iw_process_recv(struct rds_connection *conn,
716 * into the inc and save the inc so we can hang upcoming fragments 716 * into the inc and save the inc so we can hang upcoming fragments
717 * off its list. 717 * off its list.
718 */ 718 */
719 if (iwinc == NULL) { 719 if (!iwinc) {
720 iwinc = recv->r_iwinc; 720 iwinc = recv->r_iwinc;
721 recv->r_iwinc = NULL; 721 recv->r_iwinc = NULL;
722 ic->i_iwinc = iwinc; 722 ic->i_iwinc = iwinc;
@@ -887,7 +887,7 @@ int rds_iw_recv(struct rds_connection *conn)
887 return ret; 887 return ret;
888} 888}
889 889
890int __init rds_iw_recv_init(void) 890int rds_iw_recv_init(void)
891{ 891{
892 struct sysinfo si; 892 struct sysinfo si;
893 int ret = -ENOMEM; 893 int ret = -ENOMEM;
@@ -899,13 +899,13 @@ int __init rds_iw_recv_init(void)
899 rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming", 899 rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
900 sizeof(struct rds_iw_incoming), 900 sizeof(struct rds_iw_incoming),
901 0, 0, NULL); 901 0, 0, NULL);
902 if (rds_iw_incoming_slab == NULL) 902 if (!rds_iw_incoming_slab)
903 goto out; 903 goto out;
904 904
905 rds_iw_frag_slab = kmem_cache_create("rds_iw_frag", 905 rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
906 sizeof(struct rds_page_frag), 906 sizeof(struct rds_page_frag),
907 0, 0, NULL); 907 0, 0, NULL);
908 if (rds_iw_frag_slab == NULL) 908 if (!rds_iw_frag_slab)
909 kmem_cache_destroy(rds_iw_incoming_slab); 909 kmem_cache_destroy(rds_iw_incoming_slab);
910 else 910 else
911 ret = 0; 911 ret = 0;
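rds_iw_inc_purge() goes static above because its only remaining caller is rds_iw_inc_free() in this same file; generic code now reaches teardown through the single inc_free hook. A sketch of the likely pairing (the purge body is elided; the slab free and allocation-counter decrement are assumptions based on the refill path above):

static void rds_iw_inc_purge(struct rds_incoming *inc);	/* drops page frags */

void rds_iw_inc_free(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);

	rds_iw_inc_purge(inc);				/* release fragments first */
	kmem_cache_free(rds_iw_incoming_slab, iwinc);	/* assumption */
	atomic_dec(&rds_iw_allocation);			/* assumption: undo refill count */
}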
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 52182ff7519e..6280ea020d4e 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -36,7 +36,6 @@
36#include <linux/dmapool.h> 36#include <linux/dmapool.h>
37 37
38#include "rds.h" 38#include "rds.h"
39#include "rdma.h"
40#include "iw.h" 39#include "iw.h"
41 40
42static void rds_iw_send_rdma_complete(struct rds_message *rm, 41static void rds_iw_send_rdma_complete(struct rds_message *rm,
@@ -64,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
64} 63}
65 64
66static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic, 65static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
67 struct rds_rdma_op *op) 66 struct rm_rdma_op *op)
68{ 67{
69 if (op->r_mapped) { 68 if (op->op_mapped) {
70 ib_dma_unmap_sg(ic->i_cm_id->device, 69 ib_dma_unmap_sg(ic->i_cm_id->device,
71 op->r_sg, op->r_nents, 70 op->op_sg, op->op_nents,
72 op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 71 op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
73 op->r_mapped = 0; 72 op->op_mapped = 0;
74 } 73 }
75} 74}
76 75
@@ -83,11 +82,11 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
83 rdsdebug("ic %p send %p rm %p\n", ic, send, rm); 82 rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
84 83
85 ib_dma_unmap_sg(ic->i_cm_id->device, 84 ib_dma_unmap_sg(ic->i_cm_id->device,
86 rm->m_sg, rm->m_nents, 85 rm->data.op_sg, rm->data.op_nents,
87 DMA_TO_DEVICE); 86 DMA_TO_DEVICE);
88 87
89 if (rm->m_rdma_op != NULL) { 88 if (rm->rdma.op_active) {
90 rds_iw_send_unmap_rdma(ic, rm->m_rdma_op); 89 rds_iw_send_unmap_rdma(ic, &rm->rdma);
91 90
92 /* If the user asked for a completion notification on this 91 /* If the user asked for a completion notification on this
93 * message, we can implement three different semantics: 92 * message, we can implement three different semantics:
@@ -111,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
111 */ 110 */
112 rds_iw_send_rdma_complete(rm, wc_status); 111 rds_iw_send_rdma_complete(rm, wc_status);
113 112
114 if (rm->m_rdma_op->r_write) 113 if (rm->rdma.op_write)
115 rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes); 114 rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
116 else 115 else
117 rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes); 116 rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
118 } 117 }
119 118
120 /* If anyone waited for this message to get flushed out, wake 119 /* If anyone waited for this message to get flushed out, wake
@@ -556,25 +555,27 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
556 } 555 }
557 556
558 /* map the message the first time we see it */ 557 /* map the message the first time we see it */
559 if (ic->i_rm == NULL) { 558 if (!ic->i_rm) {
560 /* 559 /*
561 printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n", 560 printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
562 be16_to_cpu(rm->m_inc.i_hdr.h_dport), 561 be16_to_cpu(rm->m_inc.i_hdr.h_dport),
563 rm->m_inc.i_hdr.h_flags, 562 rm->m_inc.i_hdr.h_flags,
564 be32_to_cpu(rm->m_inc.i_hdr.h_len)); 563 be32_to_cpu(rm->m_inc.i_hdr.h_len));
565 */ 564 */
566 if (rm->m_nents) { 565 if (rm->data.op_nents) {
567 rm->m_count = ib_dma_map_sg(dev, 566 rm->data.op_count = ib_dma_map_sg(dev,
568 rm->m_sg, rm->m_nents, DMA_TO_DEVICE); 567 rm->data.op_sg,
569 rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count); 568 rm->data.op_nents,
570 if (rm->m_count == 0) { 569 DMA_TO_DEVICE);
570 rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
571 if (rm->data.op_count == 0) {
571 rds_iw_stats_inc(s_iw_tx_sg_mapping_failure); 572 rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
572 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc); 573 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
573 ret = -ENOMEM; /* XXX ? */ 574 ret = -ENOMEM; /* XXX ? */
574 goto out; 575 goto out;
575 } 576 }
576 } else { 577 } else {
577 rm->m_count = 0; 578 rm->data.op_count = 0;
578 } 579 }
579 580
580 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; 581 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
@@ -590,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
590 591
591 /* If it has a RDMA op, tell the peer we did it. This is 592 /* If it has a RDMA op, tell the peer we did it. This is
592 * used by the peer to release use-once RDMA MRs. */ 593 * used by the peer to release use-once RDMA MRs. */
593 if (rm->m_rdma_op) { 594 if (rm->rdma.op_active) {
594 struct rds_ext_header_rdma ext_hdr; 595 struct rds_ext_header_rdma ext_hdr;
595 596
596 ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key); 597 ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
597 rds_message_add_extension(&rm->m_inc.i_hdr, 598 rds_message_add_extension(&rm->m_inc.i_hdr,
598 RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr)); 599 RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
599 } 600 }
@@ -621,7 +622,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
621 send = &ic->i_sends[pos]; 622 send = &ic->i_sends[pos];
622 first = send; 623 first = send;
623 prev = NULL; 624 prev = NULL;
624 scat = &rm->m_sg[sg]; 625 scat = &rm->data.op_sg[sg];
625 sent = 0; 626 sent = 0;
626 i = 0; 627 i = 0;
627 628
@@ -631,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
631 * or when requested by the user. Right now, we let 632 * or when requested by the user. Right now, we let
632 * the application choose. 633 * the application choose.
633 */ 634 */
634 if (rm->m_rdma_op && rm->m_rdma_op->r_fence) 635 if (rm->rdma.op_active && rm->rdma.op_fence)
635 send_flags = IB_SEND_FENCE; 636 send_flags = IB_SEND_FENCE;
636 637
637 /* 638 /*
@@ -650,7 +651,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
650 } 651 }
651 652
652 /* if there's data reference it with a chain of work reqs */ 653 /* if there's data reference it with a chain of work reqs */
653 for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) { 654 for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
654 unsigned int len; 655 unsigned int len;
655 656
656 send = &ic->i_sends[pos]; 657 send = &ic->i_sends[pos];
@@ -728,7 +729,7 @@ add_header:
728 sent += sizeof(struct rds_header); 729 sent += sizeof(struct rds_header);
729 730
730 /* if we finished the message then send completion owns it */ 731 /* if we finished the message then send completion owns it */
731 if (scat == &rm->m_sg[rm->m_count]) { 732 if (scat == &rm->data.op_sg[rm->data.op_count]) {
732 prev->s_rm = ic->i_rm; 733 prev->s_rm = ic->i_rm;
733 prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 734 prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
734 ic->i_rm = NULL; 735 ic->i_rm = NULL;
@@ -784,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
784 ib_update_fast_reg_key(send->s_mr, send->s_remap_count++); 785 ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
785} 786}
786 787
787int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op) 788int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
788{ 789{
789 struct rds_iw_connection *ic = conn->c_transport_data; 790 struct rds_iw_connection *ic = conn->c_transport_data;
790 struct rds_iw_send_work *send = NULL; 791 struct rds_iw_send_work *send = NULL;
@@ -794,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
794 struct rds_iw_device *rds_iwdev; 795 struct rds_iw_device *rds_iwdev;
795 struct scatterlist *scat; 796 struct scatterlist *scat;
796 unsigned long len; 797 unsigned long len;
797 u64 remote_addr = op->r_remote_addr; 798 u64 remote_addr = op->op_remote_addr;
798 u32 pos, fr_pos; 799 u32 pos, fr_pos;
799 u32 work_alloc; 800 u32 work_alloc;
800 u32 i; 801 u32 i;
@@ -806,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
806 rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); 807 rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
807 808
808 /* map the message the first time we see it */ 809 /* map the message the first time we see it */
809 if (!op->r_mapped) { 810 if (!op->op_mapped) {
810 op->r_count = ib_dma_map_sg(ic->i_cm_id->device, 811 op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
811 op->r_sg, op->r_nents, (op->r_write) ? 812 op->op_sg, op->op_nents, (op->op_write) ?
812 DMA_TO_DEVICE : DMA_FROM_DEVICE); 813 DMA_TO_DEVICE : DMA_FROM_DEVICE);
813 rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count); 814 rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
814 if (op->r_count == 0) { 815 if (op->op_count == 0) {
815 rds_iw_stats_inc(s_iw_tx_sg_mapping_failure); 816 rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
816 ret = -ENOMEM; /* XXX ? */ 817 ret = -ENOMEM; /* XXX ? */
817 goto out; 818 goto out;
818 } 819 }
819 820
820 op->r_mapped = 1; 821 op->op_mapped = 1;
821 } 822 }
822 823
823 if (!op->r_write) { 824 if (!op->op_write) {
824 /* Alloc space on the send queue for the fastreg */ 825 /* Alloc space on the send queue for the fastreg */
825 work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos); 826 work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
826 if (work_alloc != 1) { 827 if (work_alloc != 1) {
@@ -835,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
835 * Instead of knowing how to return a partial rdma read/write we insist that there 836 * Instead of knowing how to return a partial rdma read/write we insist that there
836 * be enough work requests to send the entire message. 837 * be enough work requests to send the entire message.
837 */ 838 */
838 i = ceil(op->r_count, rds_iwdev->max_sge); 839 i = ceil(op->op_count, rds_iwdev->max_sge);
839 840
840 work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos); 841 work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
841 if (work_alloc != i) { 842 if (work_alloc != i) {
@@ -846,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
846 } 847 }
847 848
848 send = &ic->i_sends[pos]; 849 send = &ic->i_sends[pos];
849 if (!op->r_write) { 850 if (!op->op_write) {
850 first = prev = &ic->i_sends[fr_pos]; 851 first = prev = &ic->i_sends[fr_pos];
851 } else { 852 } else {
852 first = send; 853 first = send;
853 prev = NULL; 854 prev = NULL;
854 } 855 }
855 scat = &op->r_sg[0]; 856 scat = &op->op_sg[0];
856 sent = 0; 857 sent = 0;
857 num_sge = op->r_count; 858 num_sge = op->op_count;
858 859
859 for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) { 860 for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
860 send->s_wr.send_flags = 0; 861 send->s_wr.send_flags = 0;
861 send->s_queued = jiffies; 862 send->s_queued = jiffies;
862 863
@@ -873,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
873 * for local access after RDS is finished with it, using 874 * for local access after RDS is finished with it, using
874 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed. 875 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
875 */ 876 */
876 if (op->r_write) 877 if (op->op_write)
877 send->s_wr.opcode = IB_WR_RDMA_WRITE; 878 send->s_wr.opcode = IB_WR_RDMA_WRITE;
878 else 879 else
879 send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV; 880 send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
880 881
881 send->s_wr.wr.rdma.remote_addr = remote_addr; 882 send->s_wr.wr.rdma.remote_addr = remote_addr;
882 send->s_wr.wr.rdma.rkey = op->r_key; 883 send->s_wr.wr.rdma.rkey = op->op_rkey;
883 send->s_op = op; 884 send->s_op = op;
884 885
885 if (num_sge > rds_iwdev->max_sge) { 886 if (num_sge > rds_iwdev->max_sge) {
@@ -893,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
893 if (prev) 894 if (prev)
894 prev->s_wr.next = &send->s_wr; 895 prev->s_wr.next = &send->s_wr;
895 896
896 for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) { 897 for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
897 len = ib_sg_dma_len(ic->i_cm_id->device, scat); 898 len = ib_sg_dma_len(ic->i_cm_id->device, scat);
898 899
899 if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) 900 if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -927,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
927 } 928 }
928 929
929 /* if we finished the message then send completion owns it */ 930 /* if we finished the message then send completion owns it */
930 if (scat == &op->r_sg[op->r_count]) 931 if (scat == &op->op_sg[op->op_count])
931 first->s_wr.send_flags = IB_SEND_SIGNALED; 932 first->s_wr.send_flags = IB_SEND_SIGNALED;
932 933
933 if (i < work_alloc) { 934 if (i < work_alloc) {
@@ -941,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
941 * adapters do not allow using the lkey for this at all. To bypass this use a 942 * adapters do not allow using the lkey for this at all. To bypass this use a
942 * fastreg_mr (or possibly a dma_mr) 943 * fastreg_mr (or possibly a dma_mr)
943 */ 944 */
944 if (!op->r_write) { 945 if (!op->op_write) {
945 rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos], 946 rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
946 op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr); 947 op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
947 work_alloc++; 948 work_alloc++;
948 } 949 }
949 950
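Worth keeping straight while reading the renames in this file: op_nents counts the scatterlist entries that were populated with pages, while op_count is whatever ib_dma_map_sg() returned, which can be smaller when the IOMMU coalesces adjacent entries. That is why every post-mapping loop above bounds on op_count, never op_nents. The map-once idiom from rds_iw_xmit_rdma(), reduced to its core (error back-out elided):

/* Map on first use; op_mapped remembers it so a requeued op is not
 * mapped twice. Direction depends on whether we RDMA-write to or
 * RDMA-read from the peer. */
if (!op->op_mapped) {
	op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
				     op->op_sg, op->op_nents,
				     op->op_write ? DMA_TO_DEVICE
						  : DMA_FROM_DEVICE);
	if (op->op_count == 0)
		return -ENOMEM;	/* nothing mapped; caller backs out */
	op->op_mapped = 1;
}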
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index 1c4428a61a02..23e3a9a26aaf 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -122,10 +122,10 @@ void rds_iw_sysctl_exit(void)
122 unregister_sysctl_table(rds_iw_sysctl_hdr); 122 unregister_sysctl_table(rds_iw_sysctl_hdr);
123} 123}
124 124
125int __init rds_iw_sysctl_init(void) 125int rds_iw_sysctl_init(void)
126{ 126{
127 rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table); 127 rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
128 if (rds_iw_sysctl_hdr == NULL) 128 if (!rds_iw_sysctl_hdr)
129 return -ENOMEM; 129 return -ENOMEM;
130 return 0; 130 return 0;
131} 131}
diff --git a/net/rds/loop.c b/net/rds/loop.c
index dd9879379457..c390156b426f 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,17 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
61 unsigned int hdr_off, unsigned int sg, 61 unsigned int hdr_off, unsigned int sg,
62 unsigned int off) 62 unsigned int off)
63{ 63{
64 /* Do not send cong updates to loopback */
65 if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
66 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
67 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
68 }
69
64 BUG_ON(hdr_off || sg || off); 70 BUG_ON(hdr_off || sg || off);
65 71
66 rds_inc_init(&rm->m_inc, conn, conn->c_laddr); 72 rds_inc_init(&rm->m_inc, conn, conn->c_laddr);
67 rds_message_addref(rm); /* for the inc */ 73 /* For the embedded inc. Matching put is in loop_inc_free() */
74 rds_message_addref(rm);
68 75
69 rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc, 76 rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
70 GFP_KERNEL, KM_USER0); 77 GFP_KERNEL, KM_USER0);
@@ -77,16 +84,14 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
77 return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); 84 return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
78} 85}
79 86
80static int rds_loop_xmit_cong_map(struct rds_connection *conn, 87/*
81 struct rds_cong_map *map, 88 * See rds_loop_xmit(). Since our inc is embedded in the rm, we
82 unsigned long offset) 89 * make sure the rm lives at least until the inc is done.
90 */
91static void rds_loop_inc_free(struct rds_incoming *inc)
83{ 92{
84 BUG_ON(offset); 93 struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
85 BUG_ON(map != conn->c_lcong); 94 rds_message_put(rm);
86
87 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
88
89 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
90} 95}
91 96
92/* we need to at least give the thread something to succeed */ 97/* we need to at least give the thread something to succeed */
@@ -112,7 +117,7 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
112 unsigned long flags; 117 unsigned long flags;
113 118
114 lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL); 119 lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL);
115 if (lc == NULL) 120 if (!lc)
116 return -ENOMEM; 121 return -ENOMEM;
117 122
118 INIT_LIST_HEAD(&lc->loop_node); 123 INIT_LIST_HEAD(&lc->loop_node);
@@ -169,14 +174,12 @@ void rds_loop_exit(void)
169 */ 174 */
170struct rds_transport rds_loop_transport = { 175struct rds_transport rds_loop_transport = {
171 .xmit = rds_loop_xmit, 176 .xmit = rds_loop_xmit,
172 .xmit_cong_map = rds_loop_xmit_cong_map,
173 .recv = rds_loop_recv, 177 .recv = rds_loop_recv,
174 .conn_alloc = rds_loop_conn_alloc, 178 .conn_alloc = rds_loop_conn_alloc,
175 .conn_free = rds_loop_conn_free, 179 .conn_free = rds_loop_conn_free,
176 .conn_connect = rds_loop_conn_connect, 180 .conn_connect = rds_loop_conn_connect,
177 .conn_shutdown = rds_loop_conn_shutdown, 181 .conn_shutdown = rds_loop_conn_shutdown,
178 .inc_copy_to_user = rds_message_inc_copy_to_user, 182 .inc_copy_to_user = rds_message_inc_copy_to_user,
179 .inc_purge = rds_message_inc_purge, 183 .inc_free = rds_loop_inc_free,
180 .inc_free = rds_message_inc_free,
181 .t_name = "loopback", 184 .t_name = "loopback",
182}; 185};
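Two related lifetime changes land in loop.c. With the xmit_cong_map hook gone, a congestion-map update reaches rds_loop_xmit() as an ordinary message flagged RDS_FLAG_CONG_BITMAP, and loopback answers it inline by marking the whole peer map updated. And because the rds_incoming is embedded in the rds_message, the addref taken at xmit must be paired with a put once the receive side is done with the inc, which is what the new rds_loop_inc_free() provides. The pairing as a sketch, with example_* names:

static int example_loop_xmit(struct rds_message *rm)
{
	/* Keep rm alive for as long as its embedded m_inc is in flight. */
	rds_message_addref(rm);
	/* ...hand rm->m_inc to rds_recv_incoming()... */
	return 0;
}

static void example_loop_inc_free(struct rds_incoming *inc)
{
	/* Receive side is done with the inc; drop the xmit-side ref. */
	rds_message_put(container_of(inc, struct rds_message, m_inc));
}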
diff --git a/net/rds/message.c b/net/rds/message.c
index 9a1d67e001ba..84f937f11d47 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -34,9 +34,6 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35 35
36#include "rds.h" 36#include "rds.h"
37#include "rdma.h"
38
39static DECLARE_WAIT_QUEUE_HEAD(rds_message_flush_waitq);
40 37
41static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = { 38static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
42[RDS_EXTHDR_NONE] = 0, 39[RDS_EXTHDR_NONE] = 0,
@@ -63,29 +60,31 @@ static void rds_message_purge(struct rds_message *rm)
63 if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags))) 60 if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
64 return; 61 return;
65 62
66 for (i = 0; i < rm->m_nents; i++) { 63 for (i = 0; i < rm->data.op_nents; i++) {
67 rdsdebug("putting data page %p\n", (void *)sg_page(&rm->m_sg[i])); 64 rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
68 /* XXX will have to put_page for page refs */ 65 /* XXX will have to put_page for page refs */
69 __free_page(sg_page(&rm->m_sg[i])); 66 __free_page(sg_page(&rm->data.op_sg[i]));
70 } 67 }
71 rm->m_nents = 0; 68 rm->data.op_nents = 0;
72 69
73 if (rm->m_rdma_op) 70 if (rm->rdma.op_active)
74 rds_rdma_free_op(rm->m_rdma_op); 71 rds_rdma_free_op(&rm->rdma);
75 if (rm->m_rdma_mr) 72 if (rm->rdma.op_rdma_mr)
76 rds_mr_put(rm->m_rdma_mr); 73 rds_mr_put(rm->rdma.op_rdma_mr);
77}
78 74
79void rds_message_inc_purge(struct rds_incoming *inc) 75 if (rm->atomic.op_active)
80{ 76 rds_atomic_free_op(&rm->atomic);
81 struct rds_message *rm = container_of(inc, struct rds_message, m_inc); 77 if (rm->atomic.op_rdma_mr)
82 rds_message_purge(rm); 78 rds_mr_put(rm->atomic.op_rdma_mr);
83} 79}
84 80
85void rds_message_put(struct rds_message *rm) 81void rds_message_put(struct rds_message *rm)
86{ 82{
87 rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount)); 83 rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
88 84 if (atomic_read(&rm->m_refcount) == 0) {
 85 printk(KERN_CRIT "danger refcount zero on %p\n", rm);
 86 WARN_ON(1);
87 }
89 if (atomic_dec_and_test(&rm->m_refcount)) { 88 if (atomic_dec_and_test(&rm->m_refcount)) {
90 BUG_ON(!list_empty(&rm->m_sock_item)); 89 BUG_ON(!list_empty(&rm->m_sock_item));
91 BUG_ON(!list_empty(&rm->m_conn_item)); 90 BUG_ON(!list_empty(&rm->m_conn_item));
@@ -96,12 +95,6 @@ void rds_message_put(struct rds_message *rm)
96} 95}
97EXPORT_SYMBOL_GPL(rds_message_put); 96EXPORT_SYMBOL_GPL(rds_message_put);
98 97
99void rds_message_inc_free(struct rds_incoming *inc)
100{
101 struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
102 rds_message_put(rm);
103}
104
105void rds_message_populate_header(struct rds_header *hdr, __be16 sport, 98void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
106 __be16 dport, u64 seq) 99 __be16 dport, u64 seq)
107{ 100{
@@ -214,41 +207,68 @@ int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 o
214} 207}
215EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension); 208EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
216 209
217struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp) 210/*
211 * Each rds_message is allocated with extra space for the scatterlist entries
212 * rds ops will need. This is to minimize memory allocation count. Then, each rds op
213 * can grab SGs when initializing its part of the rds_message.
214 */
215struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
218{ 216{
219 struct rds_message *rm; 217 struct rds_message *rm;
220 218
221 rm = kzalloc(sizeof(struct rds_message) + 219 rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
222 (nents * sizeof(struct scatterlist)), gfp);
223 if (!rm) 220 if (!rm)
224 goto out; 221 goto out;
225 222
226 if (nents) 223 rm->m_used_sgs = 0;
227 sg_init_table(rm->m_sg, nents); 224 rm->m_total_sgs = extra_len / sizeof(struct scatterlist);
225
228 atomic_set(&rm->m_refcount, 1); 226 atomic_set(&rm->m_refcount, 1);
229 INIT_LIST_HEAD(&rm->m_sock_item); 227 INIT_LIST_HEAD(&rm->m_sock_item);
230 INIT_LIST_HEAD(&rm->m_conn_item); 228 INIT_LIST_HEAD(&rm->m_conn_item);
231 spin_lock_init(&rm->m_rs_lock); 229 spin_lock_init(&rm->m_rs_lock);
230 init_waitqueue_head(&rm->m_flush_wait);
232 231
233out: 232out:
234 return rm; 233 return rm;
235} 234}
236 235
236/*
237 * RDS ops use this to grab SG entries from the rm's sg pool.
238 */
239struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
240{
241 struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
242 struct scatterlist *sg_ret;
243
244 WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
245 WARN_ON(!nents);
246
247 sg_ret = &sg_first[rm->m_used_sgs];
248 sg_init_table(sg_ret, nents);
249 rm->m_used_sgs += nents;
250
251 return sg_ret;
252}
253
237struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len) 254struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
238{ 255{
239 struct rds_message *rm; 256 struct rds_message *rm;
240 unsigned int i; 257 unsigned int i;
258 int num_sgs = ceil(total_len, PAGE_SIZE);
259 int extra_bytes = num_sgs * sizeof(struct scatterlist);
241 260
242 rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL); 261 rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
243 if (rm == NULL) 262 if (!rm)
244 return ERR_PTR(-ENOMEM); 263 return ERR_PTR(-ENOMEM);
245 264
246 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); 265 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
247 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 266 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
248 rm->m_nents = ceil(total_len, PAGE_SIZE); 267 rm->data.op_nents = ceil(total_len, PAGE_SIZE);
268 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
249 269
250 for (i = 0; i < rm->m_nents; ++i) { 270 for (i = 0; i < rm->data.op_nents; ++i) {
251 sg_set_page(&rm->m_sg[i], 271 sg_set_page(&rm->data.op_sg[i],
252 virt_to_page(page_addrs[i]), 272 virt_to_page(page_addrs[i]),
253 PAGE_SIZE, 0); 273 PAGE_SIZE, 0);
254 } 274 }
@@ -256,40 +276,33 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
256 return rm; 276 return rm;
257} 277}
258 278
259struct rds_message *rds_message_copy_from_user(struct iovec *first_iov, 279int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
260 size_t total_len) 280 size_t total_len)
261{ 281{
262 unsigned long to_copy; 282 unsigned long to_copy;
263 unsigned long iov_off; 283 unsigned long iov_off;
264 unsigned long sg_off; 284 unsigned long sg_off;
265 struct rds_message *rm;
266 struct iovec *iov; 285 struct iovec *iov;
267 struct scatterlist *sg; 286 struct scatterlist *sg;
268 int ret; 287 int ret = 0;
269
270 rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
271 if (rm == NULL) {
272 ret = -ENOMEM;
273 goto out;
274 }
275 288
276 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 289 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
277 290
278 /* 291 /*
279 * now allocate and copy in the data payload. 292 * now allocate and copy in the data payload.
280 */ 293 */
281 sg = rm->m_sg; 294 sg = rm->data.op_sg;
282 iov = first_iov; 295 iov = first_iov;
283 iov_off = 0; 296 iov_off = 0;
284 sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */ 297 sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
285 298
286 while (total_len) { 299 while (total_len) {
287 if (sg_page(sg) == NULL) { 300 if (!sg_page(sg)) {
288 ret = rds_page_remainder_alloc(sg, total_len, 301 ret = rds_page_remainder_alloc(sg, total_len,
289 GFP_HIGHUSER); 302 GFP_HIGHUSER);
290 if (ret) 303 if (ret)
291 goto out; 304 goto out;
292 rm->m_nents++; 305 rm->data.op_nents++;
293 sg_off = 0; 306 sg_off = 0;
294 } 307 }
295 308
@@ -320,14 +333,8 @@ struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
320 sg++; 333 sg++;
321 } 334 }
322 335
323 ret = 0;
324out: 336out:
325 if (ret) { 337 return ret;
326 if (rm)
327 rds_message_put(rm);
328 rm = ERR_PTR(ret);
329 }
330 return rm;
331} 338}
332 339
333int rds_message_inc_copy_to_user(struct rds_incoming *inc, 340int rds_message_inc_copy_to_user(struct rds_incoming *inc,
@@ -348,7 +355,7 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
348 355
349 iov = first_iov; 356 iov = first_iov;
350 iov_off = 0; 357 iov_off = 0;
351 sg = rm->m_sg; 358 sg = rm->data.op_sg;
352 vec_off = 0; 359 vec_off = 0;
353 copied = 0; 360 copied = 0;
354 361
@@ -394,15 +401,14 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
394 */ 401 */
395void rds_message_wait(struct rds_message *rm) 402void rds_message_wait(struct rds_message *rm)
396{ 403{
397 wait_event(rds_message_flush_waitq, 404 wait_event_interruptible(rm->m_flush_wait,
398 !test_bit(RDS_MSG_MAPPED, &rm->m_flags)); 405 !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
399} 406}
400 407
401void rds_message_unmapped(struct rds_message *rm) 408void rds_message_unmapped(struct rds_message *rm)
402{ 409{
403 clear_bit(RDS_MSG_MAPPED, &rm->m_flags); 410 clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
404 if (waitqueue_active(&rds_message_flush_waitq)) 411 wake_up_interruptible(&rm->m_flush_wait);
405 wake_up(&rds_message_flush_waitq);
406} 412}
407EXPORT_SYMBOL_GPL(rds_message_unmapped); 413EXPORT_SYMBOL_GPL(rds_message_unmapped);
408 414
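The allocator change above is the heart of the series: rds_message_alloc() now takes a byte count of extra tail space rather than an sg count, and each op (data, rdma, atomic) carves its scatterlist entries out of that tail with rds_message_alloc_sgs(), so a single kzalloc covers the message and all of its sg arrays. A usage sketch mirroring rds_message_map_pages() above (ceil() is the rds.h helper macro the hunks already use):

int num_sgs = ceil(total_len, PAGE_SIZE);
struct rds_message *rm;

/* One allocation: struct rds_message plus tail room for num_sgs
 * scatterlist entries. */
rm = rds_message_alloc(num_sgs * sizeof(struct scatterlist), GFP_KERNEL);
if (!rm)
	return -ENOMEM;

/* The data op claims its share of the tail; a later call (say, for an
 * rdma op) carves from what remains, bounded by m_total_sgs. */
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
rm->data.op_nents = num_sgs;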
diff --git a/net/rds/page.c b/net/rds/page.c
index 595a952d4b17..5e44f5ae7898 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -116,7 +116,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
116 /* jump straight to allocation if we're trying for a huge page */ 116 /* jump straight to allocation if we're trying for a huge page */
117 if (bytes >= PAGE_SIZE) { 117 if (bytes >= PAGE_SIZE) {
118 page = alloc_page(gfp); 118 page = alloc_page(gfp);
119 if (page == NULL) { 119 if (!page) {
120 ret = -ENOMEM; 120 ret = -ENOMEM;
121 } else { 121 } else {
122 sg_set_page(scat, page, PAGE_SIZE, 0); 122 sg_set_page(scat, page, PAGE_SIZE, 0);
@@ -162,7 +162,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
162 rem = &per_cpu(rds_page_remainders, get_cpu()); 162 rem = &per_cpu(rds_page_remainders, get_cpu());
163 local_irq_save(flags); 163 local_irq_save(flags);
164 164
165 if (page == NULL) { 165 if (!page) {
166 ret = -ENOMEM; 166 ret = -ENOMEM;
167 break; 167 break;
168 } 168 }
@@ -186,6 +186,7 @@ out:
186 ret ? 0 : scat->length); 186 ret ? 0 : scat->length);
187 return ret; 187 return ret;
188} 188}
189EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);
189 190
190static int rds_page_remainder_cpu_notify(struct notifier_block *self, 191static int rds_page_remainder_cpu_notify(struct notifier_block *self,
191 unsigned long action, void *hcpu) 192 unsigned long action, void *hcpu)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 75fd13bb631b..48064673fc76 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -35,7 +35,7 @@
35#include <linux/rbtree.h> 35#include <linux/rbtree.h>
36#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ 36#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
37 37
38#include "rdma.h" 38#include "rds.h"
39 39
40/* 40/*
41 * XXX 41 * XXX
@@ -130,14 +130,22 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
130{ 130{
131 struct rds_mr *mr; 131 struct rds_mr *mr;
132 struct rb_node *node; 132 struct rb_node *node;
133 unsigned long flags;
133 134
134 /* Release any MRs associated with this socket */ 135 /* Release any MRs associated with this socket */
136 spin_lock_irqsave(&rs->rs_rdma_lock, flags);
135 while ((node = rb_first(&rs->rs_rdma_keys))) { 137 while ((node = rb_first(&rs->rs_rdma_keys))) {
136 mr = container_of(node, struct rds_mr, r_rb_node); 138 mr = container_of(node, struct rds_mr, r_rb_node);
137 if (mr->r_trans == rs->rs_transport) 139 if (mr->r_trans == rs->rs_transport)
138 mr->r_invalidate = 0; 140 mr->r_invalidate = 0;
141 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
142 RB_CLEAR_NODE(&mr->r_rb_node);
143 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
144 rds_destroy_mr(mr);
139 rds_mr_put(mr); 145 rds_mr_put(mr);
146 spin_lock_irqsave(&rs->rs_rdma_lock, flags);
140 } 147 }
148 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
141 149
142 if (rs->rs_transport && rs->rs_transport->flush_mrs) 150 if (rs->rs_transport && rs->rs_transport->flush_mrs)
143 rs->rs_transport->flush_mrs(); 151 rs->rs_transport->flush_mrs();
@@ -181,7 +189,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
181 goto out; 189 goto out;
182 } 190 }
183 191
184 if (rs->rs_transport->get_mr == NULL) { 192 if (!rs->rs_transport->get_mr) {
185 ret = -EOPNOTSUPP; 193 ret = -EOPNOTSUPP;
186 goto out; 194 goto out;
187 } 195 }
@@ -197,13 +205,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
197 205
198 /* XXX clamp nr_pages to limit the size of this alloc? */ 206 /* XXX clamp nr_pages to limit the size of this alloc? */
199 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); 207 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
200 if (pages == NULL) { 208 if (!pages) {
201 ret = -ENOMEM; 209 ret = -ENOMEM;
202 goto out; 210 goto out;
203 } 211 }
204 212
205 mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL); 213 mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
206 if (mr == NULL) { 214 if (!mr) {
207 ret = -ENOMEM; 215 ret = -ENOMEM;
208 goto out; 216 goto out;
209 } 217 }
@@ -230,13 +238,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
230 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to 238 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
231 * the zero page. 239 * the zero page.
232 */ 240 */
233 ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1); 241 ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
234 if (ret < 0) 242 if (ret < 0)
235 goto out; 243 goto out;
236 244
237 nents = ret; 245 nents = ret;
238 sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); 246 sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
239 if (sg == NULL) { 247 if (!sg) {
240 ret = -ENOMEM; 248 ret = -ENOMEM;
241 goto out; 249 goto out;
242 } 250 }
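One subtle change in the hunk above: the caller now passes the raw user address to rds_pin_pages() rather than pre-masking it with PAGE_MASK. The plausible reading, and it is an assumption since rds_pin_pages() itself is outside this diff, is that the helper now page-aligns the start internally, so the start address and the page count are derived from the same unmasked value. A sketch of such a helper (hypothetical body; get_user_pages_fast() is the real API of this era):

static int example_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			     struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr & PAGE_MASK, nr_pages,
				  write, pages);
	if (ret >= 0 && ret < nr_pages) {
		/* Partial pin: release what we got and report a fault. */
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}
	return ret;
}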
@@ -406,68 +414,127 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
406 414
407 spin_lock_irqsave(&rs->rs_rdma_lock, flags); 415 spin_lock_irqsave(&rs->rs_rdma_lock, flags);
408 mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); 416 mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
409 if (mr && (mr->r_use_once || force)) { 417 if (!mr) {
418 printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
419 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
420 return;
421 }
422
423 if (mr->r_use_once || force) {
410 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); 424 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
411 RB_CLEAR_NODE(&mr->r_rb_node); 425 RB_CLEAR_NODE(&mr->r_rb_node);
412 zot_me = 1; 426 zot_me = 1;
413 } else if (mr) 427 }
414 atomic_inc(&mr->r_refcount);
415 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); 428 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
416 429
417 /* May have to issue a dma_sync on this memory region. 430 /* May have to issue a dma_sync on this memory region.
418 * Note we could avoid this if the operation was a RDMA READ, 431 * Note we could avoid this if the operation was a RDMA READ,
419 * but at this point we can't tell. */ 432 * but at this point we can't tell. */
420 if (mr != NULL) { 433 if (mr->r_trans->sync_mr)
421 if (mr->r_trans->sync_mr) 434 mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
422 mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE); 435
423 436 /* If the MR was marked as invalidate, this will
424 /* If the MR was marked as invalidate, this will 437 * trigger an async flush. */
425 * trigger an async flush. */ 438 if (zot_me)
426 if (zot_me) 439 rds_destroy_mr(mr);
427 rds_destroy_mr(mr); 440 rds_mr_put(mr);
428 rds_mr_put(mr);
429 }
430} 441}
431 442
432void rds_rdma_free_op(struct rds_rdma_op *ro) 443void rds_rdma_free_op(struct rm_rdma_op *ro)
433{ 444{
434 unsigned int i; 445 unsigned int i;
435 446
436 for (i = 0; i < ro->r_nents; i++) { 447 for (i = 0; i < ro->op_nents; i++) {
437 struct page *page = sg_page(&ro->r_sg[i]); 448 struct page *page = sg_page(&ro->op_sg[i]);
438 449
439 /* Mark page dirty if it was possibly modified, which 450 /* Mark page dirty if it was possibly modified, which
440 * is the case for a RDMA_READ which copies from remote 451 * is the case for a RDMA_READ which copies from remote
441 * to local memory */ 452 * to local memory */
442 if (!ro->r_write) { 453 if (!ro->op_write) {
443 BUG_ON(in_interrupt()); 454 BUG_ON(irqs_disabled());
444 set_page_dirty(page); 455 set_page_dirty(page);
445 } 456 }
446 put_page(page); 457 put_page(page);
447 } 458 }
448 459
449 kfree(ro->r_notifier); 460 kfree(ro->op_notifier);
450 kfree(ro); 461 ro->op_notifier = NULL;
462 ro->op_active = 0;
463}
464
465void rds_atomic_free_op(struct rm_atomic_op *ao)
466{
467 struct page *page = sg_page(ao->op_sg);
468
469 /* Mark page dirty if it was possibly modified, which
470 * is the case for a RDMA_READ which copies from remote
471 * to local memory */
472 set_page_dirty(page);
473 put_page(page);
474
475 kfree(ao->op_notifier);
476 ao->op_notifier = NULL;
477 ao->op_active = 0;
478}
479
480
481/*
482 * Count the number of pages needed to describe an incoming iovec.
483 */
484static int rds_rdma_pages(struct rds_rdma_args *args)
485{
486 struct rds_iovec vec;
487 struct rds_iovec __user *local_vec;
488 unsigned int tot_pages = 0;
489 unsigned int nr_pages;
490 unsigned int i;
491
492 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
493
494 /* figure out the number of pages in the vector */
495 for (i = 0; i < args->nr_local; i++) {
496 if (copy_from_user(&vec, &local_vec[i],
497 sizeof(struct rds_iovec)))
498 return -EFAULT;
499
500 nr_pages = rds_pages_in_vec(&vec);
501 if (nr_pages == 0)
502 return -EINVAL;
503
504 tot_pages += nr_pages;
505 }
506
507 return tot_pages;
508}
509
510int rds_rdma_extra_size(struct rds_rdma_args *args)
511{
512 return rds_rdma_pages(args) * sizeof(struct scatterlist);
451} 513}
452 514
453/* 515/*
454 * args is a pointer to an in-kernel copy in the sendmsg cmsg. 516 * The application asks for a RDMA transfer.
517 * Extract all arguments and set up the rdma_op
455 */ 518 */
456static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs, 519int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
457 struct rds_rdma_args *args) 520 struct cmsghdr *cmsg)
458{ 521{
522 struct rds_rdma_args *args;
459 struct rds_iovec vec; 523 struct rds_iovec vec;
460 struct rds_rdma_op *op = NULL; 524 struct rm_rdma_op *op = &rm->rdma;
461 unsigned int nr_pages; 525 unsigned int nr_pages;
462 unsigned int max_pages;
463 unsigned int nr_bytes; 526 unsigned int nr_bytes;
464 struct page **pages = NULL; 527 struct page **pages = NULL;
465 struct rds_iovec __user *local_vec; 528 struct rds_iovec __user *local_vec;
466 struct scatterlist *sg;
467 unsigned int nr; 529 unsigned int nr;
468 unsigned int i, j; 530 unsigned int i, j;
469 int ret; 531 int ret = 0;
532
533 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
534 || rm->rdma.op_active)
535 return -EINVAL;
470 536
537 args = CMSG_DATA(cmsg);
471 538
472 if (rs->rs_bound_addr == 0) { 539 if (rs->rs_bound_addr == 0) {
473 ret = -ENOTCONN; /* XXX not a great errno */ 540 ret = -ENOTCONN; /* XXX not a great errno */
@@ -479,61 +546,38 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
479 goto out; 546 goto out;
480 } 547 }
481 548
482 nr_pages = 0; 549 nr_pages = rds_rdma_pages(args);
483 max_pages = 0; 550 if (nr_pages < 0)
484
485 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
486
487 /* figure out the number of pages in the vector */
488 for (i = 0; i < args->nr_local; i++) {
489 if (copy_from_user(&vec, &local_vec[i],
490 sizeof(struct rds_iovec))) {
491 ret = -EFAULT;
492 goto out;
493 }
494
495 nr = rds_pages_in_vec(&vec);
496 if (nr == 0) {
497 ret = -EINVAL;
498 goto out;
499 }
500
501 max_pages = max(nr, max_pages);
502 nr_pages += nr;
503 }
504
505 pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
506 if (pages == NULL) {
507 ret = -ENOMEM;
508 goto out; 551 goto out;
509 }
510 552
511 op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL); 553 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
512 if (op == NULL) { 554 if (!pages) {
513 ret = -ENOMEM; 555 ret = -ENOMEM;
514 goto out; 556 goto out;
515 } 557 }
516 558
517 op->r_write = !!(args->flags & RDS_RDMA_READWRITE); 559 op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
518 op->r_fence = !!(args->flags & RDS_RDMA_FENCE); 560 op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
519 op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); 561 op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
520 op->r_recverr = rs->rs_recverr; 562 op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
563 op->op_active = 1;
564 op->op_recverr = rs->rs_recverr;
521 WARN_ON(!nr_pages); 565 WARN_ON(!nr_pages);
522 sg_init_table(op->r_sg, nr_pages); 566 op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
523 567
524 if (op->r_notify || op->r_recverr) { 568 if (op->op_notify || op->op_recverr) {
525 /* We allocate an uninitialized notifier here, because 569 /* We allocate an uninitialized notifier here, because
526 * we don't want to do that in the completion handler. We 570 * we don't want to do that in the completion handler. We
527 * would have to use GFP_ATOMIC there, and don't want to deal 571 * would have to use GFP_ATOMIC there, and don't want to deal
528 * with failed allocations. 572 * with failed allocations.
529 */ 573 */
530 op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL); 574 op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
531 if (!op->r_notifier) { 575 if (!op->op_notifier) {
532 ret = -ENOMEM; 576 ret = -ENOMEM;
533 goto out; 577 goto out;
534 } 578 }
535 op->r_notifier->n_user_token = args->user_token; 579 op->op_notifier->n_user_token = args->user_token;
536 op->r_notifier->n_status = RDS_RDMA_SUCCESS; 580 op->op_notifier->n_status = RDS_RDMA_SUCCESS;
537 } 581 }
538 582
539 /* The cookie contains the R_Key of the remote memory region, and 583 /* The cookie contains the R_Key of the remote memory region, and
@@ -543,15 +587,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 	 * destination address (which is really an offset into the MR)
 	 * FIXME: We may want to move this into ib_rdma.c
 	 */
-	op->r_key = rds_rdma_cookie_key(args->cookie);
-	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
+	op->op_rkey = rds_rdma_cookie_key(args->cookie);
+	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
 
 	nr_bytes = 0;
 
 	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
 	       (unsigned long long)args->nr_local,
 	       (unsigned long long)args->remote_vec.addr,
-	       op->r_key);
+	       op->op_rkey);
+
+	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
 
 	for (i = 0; i < args->nr_local; i++) {
 		if (copy_from_user(&vec, &local_vec[i],
@@ -569,15 +615,10 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 		rs->rs_user_addr = vec.addr;
 		rs->rs_user_bytes = vec.bytes;
 
-		/* did the user change the vec under us? */
-		if (nr > max_pages || op->r_nents + nr > nr_pages) {
-			ret = -EINVAL;
-			goto out;
-		}
 		/* If it's a WRITE operation, we want to pin the pages for reading.
 		 * If it's a READ operation, we need to pin the pages for writing.
 		 */
-		ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
+		ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
 		if (ret < 0)
 			goto out;
 
@@ -588,8 +629,9 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 
 		for (j = 0; j < nr; j++) {
 			unsigned int offset = vec.addr & ~PAGE_MASK;
+			struct scatterlist *sg;
 
-			sg = &op->r_sg[op->r_nents + j];
+			sg = &op->op_sg[op->op_nents + j];
 			sg_set_page(sg, pages[j],
 					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
 					offset);
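A worked example of the chunking done by the sg_set_page() loop above (not part of the patch; assumes PAGE_SIZE == 4096):

/*
 * vec.addr & ~PAGE_MASK == 0x100, vec.bytes == 10000:
 *   entry 0: length = 4096 - 0x100 = 3840, offset = 0x100
 *   entry 1: length = 4096,                offset = 0
 *   entry 2: length = 2064,                offset = 0
 * 3840 + 4096 + 2064 == 10000, so the vec is covered exactly; only the
 * first entry has a non-zero offset because vec.addr advances by
 * sg->length on each pass.
 */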
@@ -601,10 +643,9 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 			vec.bytes -= sg->length;
 		}
 
-		op->r_nents += nr;
+		op->op_nents += nr;
 	}
 
-
 	if (nr_bytes > args->remote_vec.bytes) {
 		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
 				nr_bytes,
@@ -612,38 +653,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 		ret = -EINVAL;
 		goto out;
 	}
-	op->r_bytes = nr_bytes;
+	op->op_bytes = nr_bytes;
 
 	ret = 0;
 out:
 	kfree(pages);
-	if (ret) {
-		if (op)
-			rds_rdma_free_op(op);
-		op = ERR_PTR(ret);
-	}
-	return op;
-}
-
-/*
- * The application asks for a RDMA transfer.
- * Extract all arguments and set up the rdma_op
- */
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-			struct cmsghdr *cmsg)
-{
-	struct rds_rdma_op *op;
-
-	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
-	    rm->m_rdma_op != NULL)
-		return -EINVAL;
+	if (ret)
+		rds_rdma_free_op(op);
 
-	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
-	if (IS_ERR(op))
-		return PTR_ERR(op);
 	rds_stats_inc(s_send_rdma);
-	rm->m_rdma_op = op;
-	return 0;
+
+	return ret;
 }
 
 /*
@@ -673,7 +693,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 
 	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
-	if (mr == NULL)
+	if (!mr)
 		err = -EINVAL;	/* invalid r_key */
 	else
 		atomic_inc(&mr->r_refcount);
@@ -681,7 +701,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 
 	if (mr) {
 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
-		rm->m_rdma_mr = mr;
+		rm->rdma.op_rdma_mr = mr;
 	}
 	return err;
 }
@@ -699,5 +719,98 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 	    rm->m_rdma_cookie != 0)
 		return -EINVAL;
 
-	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
+	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
723}
724
725/*
726 * Fill in rds_message for an atomic request.
727 */
728int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
729 struct cmsghdr *cmsg)
730{
731 struct page *page = NULL;
732 struct rds_atomic_args *args;
733 int ret = 0;
734
735 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
736 || rm->atomic.op_active)
737 return -EINVAL;
738
739 args = CMSG_DATA(cmsg);
740
741 /* Nonmasked & masked cmsg ops converted to masked hw ops */
742 switch (cmsg->cmsg_type) {
743 case RDS_CMSG_ATOMIC_FADD:
744 rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
745 rm->atomic.op_m_fadd.add = args->fadd.add;
746 rm->atomic.op_m_fadd.nocarry_mask = 0;
747 break;
748 case RDS_CMSG_MASKED_ATOMIC_FADD:
749 rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
750 rm->atomic.op_m_fadd.add = args->m_fadd.add;
751 rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
752 break;
753 case RDS_CMSG_ATOMIC_CSWP:
754 rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
755 rm->atomic.op_m_cswp.compare = args->cswp.compare;
756 rm->atomic.op_m_cswp.swap = args->cswp.swap;
757 rm->atomic.op_m_cswp.compare_mask = ~0;
758 rm->atomic.op_m_cswp.swap_mask = ~0;
759 break;
760 case RDS_CMSG_MASKED_ATOMIC_CSWP:
761 rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
762 rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
763 rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
764 rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
765 rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
766 break;
767 default:
768 BUG(); /* should never happen */
769 }
770
771 rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
772 rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
773 rm->atomic.op_active = 1;
774 rm->atomic.op_recverr = rs->rs_recverr;
775 rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
776
777 /* verify 8 byte-aligned */
778 if (args->local_addr & 0x7) {
779 ret = -EFAULT;
780 goto err;
781 }
782
783 ret = rds_pin_pages(args->local_addr, 1, &page, 1);
784 if (ret != 1)
785 goto err;
786 ret = 0;
787
788 sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
789
790 if (rm->atomic.op_notify || rm->atomic.op_recverr) {
791 /* We allocate an uninitialized notifier here, because
792 * we don't want to do that in the completion handler. We
793 * would have to use GFP_ATOMIC there, and don't want to deal
794 * with failed allocations.
795 */
796 rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
797 if (!rm->atomic.op_notifier) {
798 ret = -ENOMEM;
799 goto err;
800 }
801
802 rm->atomic.op_notifier->n_user_token = args->user_token;
803 rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
804 }
805
806 rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
807 rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
808
809 return ret;
810err:
811 if (page)
812 put_page(page);
813 kfree(rm->atomic.op_notifier);
814
815 return ret;
703} 816}
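The switch in rds_cmsg_atomic() above folds the plain atomics into their masked hardware forms: a plain fetch-and-add is a masked one with nocarry_mask == 0, and a plain compare-and-swap uses all-ones compare/swap masks. A sketch of the masked semantics these ops are generally defined with (a model only; the actual operation is performed by the transport hardware, not by this file):

#include <stdint.h>

/* masked compare-and-swap: compare only the masked bits and, on a match,
 * replace only the bits selected by swap_mask */
static uint64_t masked_cswp(uint64_t val, uint64_t compare, uint64_t swap,
			    uint64_t compare_mask, uint64_t swap_mask)
{
	if ((val & compare_mask) == (compare & compare_mask))
		val = (val & ~swap_mask) | (swap & swap_mask);
	return val;
}

/* masked fetch-and-add, modeled bit-serially: a set bit in nocarry_mask
 * marks a field boundary where the carry is squashed, letting independent
 * sub-fields of one 64-bit word be added to without interfering.  With
 * nocarry_mask == 0 this is ordinary 64-bit addition. */
static uint64_t masked_fadd(uint64_t val, uint64_t add, uint64_t nocarry_mask)
{
	uint64_t sum = 0, carry = 0;
	int i;

	for (i = 0; i < 64; i++) {
		uint64_t a = (val >> i) & 1, b = (add >> i) & 1;

		sum |= (a ^ b ^ carry) << i;
		carry = (a & b) | (a & carry) | (b & carry);
		if ((nocarry_mask >> i) & 1)
			carry = 0;
	}
	return sum;
}

With those definitions, RDS_CMSG_ATOMIC_FADD and RDS_CMSG_ATOMIC_CSWP are exactly the degenerate cases the conversion above produces.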
diff --git a/net/rds/rdma.h b/net/rds/rdma.h
deleted file mode 100644
index 909c39835a5d..000000000000
--- a/net/rds/rdma.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef _RDS_RDMA_H
2#define _RDS_RDMA_H
3
4#include <linux/rbtree.h>
5#include <linux/spinlock.h>
6#include <linux/scatterlist.h>
7
8#include "rds.h"
9
10struct rds_mr {
11 struct rb_node r_rb_node;
12 atomic_t r_refcount;
13 u32 r_key;
14
15 /* A copy of the creation flags */
16 unsigned int r_use_once:1;
17 unsigned int r_invalidate:1;
18 unsigned int r_write:1;
19
20 /* This is for RDS_MR_DEAD.
21 * It would be nice & consistent to make this part of the above
22 * bit field here, but we need to use test_and_set_bit.
23 */
24 unsigned long r_state;
25 struct rds_sock *r_sock; /* back pointer to the socket that owns us */
26 struct rds_transport *r_trans;
27 void *r_trans_private;
28};
29
30/* Flags for mr->r_state */
31#define RDS_MR_DEAD 0
32
33struct rds_rdma_op {
34 u32 r_key;
35 u64 r_remote_addr;
36 unsigned int r_write:1;
37 unsigned int r_fence:1;
38 unsigned int r_notify:1;
39 unsigned int r_recverr:1;
40 unsigned int r_mapped:1;
41 struct rds_notifier *r_notifier;
42 unsigned int r_bytes;
43 unsigned int r_nents;
44 unsigned int r_count;
45 struct scatterlist r_sg[0];
46};
47
48static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
49{
50 return r_key | (((u64) offset) << 32);
51}
52
53static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
54{
55 return cookie;
56}
57
58static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
59{
60 return cookie >> 32;
61}
62
63int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
64int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
65int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
66void rds_rdma_drop_keys(struct rds_sock *rs);
67int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
68 struct cmsghdr *cmsg);
69int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
70 struct cmsghdr *cmsg);
71int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
72 struct cmsghdr *cmsg);
73int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
74 struct cmsghdr *cmsg);
75void rds_rdma_free_op(struct rds_rdma_op *ro);
76void rds_rdma_send_complete(struct rds_message *rm, int);
77
78extern void __rds_put_mr_final(struct rds_mr *mr);
79static inline void rds_mr_put(struct rds_mr *mr)
80{
81 if (atomic_dec_and_test(&mr->r_refcount))
82 __rds_put_mr_final(mr);
83}
84
85#endif
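The cookie helpers deleted here move into rds.h below unchanged: the r_key sits in the low 32 bits of the u64 cookie and the MR offset in the high 32. A quick round-trip check of that layout (illustration only):

	rds_rdma_cookie_t cookie = rds_rdma_make_cookie(0x1234abcd, 8192);

	BUG_ON(rds_rdma_cookie_key(cookie) != 0x1234abcd);	/* low 32 bits */
	BUG_ON(rds_rdma_cookie_offset(cookie) != 8192);		/* high 32 bits */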
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index e599ba2f950d..e6ed10aee190 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -36,6 +36,34 @@
36 36
37static struct rdma_cm_id *rds_rdma_listen_id; 37static struct rdma_cm_id *rds_rdma_listen_id;
38 38
39static char *rds_cm_event_strings[] = {
40#define RDS_CM_EVENT_STRING(foo) \
41 [RDMA_CM_EVENT_##foo] = __stringify(RDMA_CM_EVENT_##foo)
42 RDS_CM_EVENT_STRING(ADDR_RESOLVED),
43 RDS_CM_EVENT_STRING(ADDR_ERROR),
44 RDS_CM_EVENT_STRING(ROUTE_RESOLVED),
45 RDS_CM_EVENT_STRING(ROUTE_ERROR),
46 RDS_CM_EVENT_STRING(CONNECT_REQUEST),
47 RDS_CM_EVENT_STRING(CONNECT_RESPONSE),
48 RDS_CM_EVENT_STRING(CONNECT_ERROR),
49 RDS_CM_EVENT_STRING(UNREACHABLE),
50 RDS_CM_EVENT_STRING(REJECTED),
51 RDS_CM_EVENT_STRING(ESTABLISHED),
52 RDS_CM_EVENT_STRING(DISCONNECTED),
53 RDS_CM_EVENT_STRING(DEVICE_REMOVAL),
54 RDS_CM_EVENT_STRING(MULTICAST_JOIN),
55 RDS_CM_EVENT_STRING(MULTICAST_ERROR),
56 RDS_CM_EVENT_STRING(ADDR_CHANGE),
57 RDS_CM_EVENT_STRING(TIMEWAIT_EXIT),
58#undef RDS_CM_EVENT_STRING
59};
60
61static char *rds_cm_event_str(enum rdma_cm_event_type type)
62{
63 return rds_str_array(rds_cm_event_strings,
64 ARRAY_SIZE(rds_cm_event_strings), type);
65};
66
39int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, 67int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
40 struct rdma_cm_event *event) 68 struct rdma_cm_event *event)
41{ 69{
@@ -44,8 +72,8 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
44 struct rds_transport *trans; 72 struct rds_transport *trans;
45 int ret = 0; 73 int ret = 0;
46 74
-	rdsdebug("conn %p id %p handling event %u\n", conn, cm_id,
-		 event->event);
+	rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
+		 event->event, rds_cm_event_str(event->event));
49 77
50 if (cm_id->device->node_type == RDMA_NODE_RNIC) 78 if (cm_id->device->node_type == RDMA_NODE_RNIC)
51 trans = &rds_iw_transport; 79 trans = &rds_iw_transport;
@@ -109,7 +137,8 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
109 137
110 default: 138 default:
111 /* things like device disconnect? */ 139 /* things like device disconnect? */
-		printk(KERN_ERR "RDS: unknown event %u!\n", event->event);
+		printk(KERN_ERR "RDS: unknown event %u (%s)!\n",
+		       event->event, rds_cm_event_str(event->event));
113 break; 142 break;
114 } 143 }
115 144
@@ -117,12 +146,13 @@ out:
117 if (conn) 146 if (conn)
118 mutex_unlock(&conn->c_cm_lock); 147 mutex_unlock(&conn->c_cm_lock);
119 148
-	rdsdebug("id %p event %u handling ret %d\n", cm_id, event->event, ret);
+	rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event,
+		 rds_cm_event_str(event->event), ret);
121 151
122 return ret; 152 return ret;
123} 153}
124 154
-static int __init rds_rdma_listen_init(void)
+static int rds_rdma_listen_init(void)
126{ 156{
127 struct sockaddr_in sin; 157 struct sockaddr_in sin;
128 struct rdma_cm_id *cm_id; 158 struct rdma_cm_id *cm_id;
@@ -177,7 +207,7 @@ static void rds_rdma_listen_stop(void)
177 } 207 }
178} 208}
179 209
-int __init rds_rdma_init(void)
+int rds_rdma_init(void)
181{ 211{
182 int ret; 212 int ret;
183 213
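rds_cm_event_str() above depends on rds_str_array(), whose declaration is added to rds.h further down but whose body is not in this diff. A plausible implementation is assumed to be no more than a bounds check with a fallback; the NULL test would cover holes the designated initializers leave in a sparse table:

char *rds_str_array(char **array, size_t elements, size_t index)
{
	if ((index < elements) && array[index])
		return array[index];
	else
		return "unknown";
}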
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c224b5bb3ba9..8103dcf8b976 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -80,6 +80,7 @@ enum {
80/* Bits for c_flags */ 80/* Bits for c_flags */
81#define RDS_LL_SEND_FULL 0 81#define RDS_LL_SEND_FULL 0
82#define RDS_RECONNECT_PENDING 1 82#define RDS_RECONNECT_PENDING 1
83#define RDS_IN_XMIT 2
83 84
84struct rds_connection { 85struct rds_connection {
85 struct hlist_node c_hash_node; 86 struct hlist_node c_hash_node;
@@ -91,12 +92,13 @@ struct rds_connection {
91 struct rds_cong_map *c_lcong; 92 struct rds_cong_map *c_lcong;
92 struct rds_cong_map *c_fcong; 93 struct rds_cong_map *c_fcong;
93 94
94 struct mutex c_send_lock; /* protect send ring */
95 struct rds_message *c_xmit_rm; 95 struct rds_message *c_xmit_rm;
96 unsigned long c_xmit_sg; 96 unsigned long c_xmit_sg;
97 unsigned int c_xmit_hdr_off; 97 unsigned int c_xmit_hdr_off;
98 unsigned int c_xmit_data_off; 98 unsigned int c_xmit_data_off;
99 unsigned int c_xmit_atomic_sent;
99 unsigned int c_xmit_rdma_sent; 100 unsigned int c_xmit_rdma_sent;
101 unsigned int c_xmit_data_sent;
100 102
101 spinlock_t c_lock; /* protect msg queues */ 103 spinlock_t c_lock; /* protect msg queues */
102 u64 c_next_tx_seq; 104 u64 c_next_tx_seq;
@@ -116,11 +118,10 @@ struct rds_connection {
116 struct delayed_work c_conn_w; 118 struct delayed_work c_conn_w;
117 struct work_struct c_down_w; 119 struct work_struct c_down_w;
118 struct mutex c_cm_lock; /* protect conn state & cm */ 120 struct mutex c_cm_lock; /* protect conn state & cm */
121 wait_queue_head_t c_waitq;
119 122
120 struct list_head c_map_item; 123 struct list_head c_map_item;
121 unsigned long c_map_queued; 124 unsigned long c_map_queued;
122 unsigned long c_map_offset;
123 unsigned long c_map_bytes;
124 125
125 unsigned int c_unacked_packets; 126 unsigned int c_unacked_packets;
126 unsigned int c_unacked_bytes; 127 unsigned int c_unacked_bytes;
@@ -206,6 +207,48 @@ struct rds_incoming {
206 rds_rdma_cookie_t i_rdma_cookie; 207 rds_rdma_cookie_t i_rdma_cookie;
207}; 208};
208 209
210struct rds_mr {
211 struct rb_node r_rb_node;
212 atomic_t r_refcount;
213 u32 r_key;
214
215 /* A copy of the creation flags */
216 unsigned int r_use_once:1;
217 unsigned int r_invalidate:1;
218 unsigned int r_write:1;
219
220 /* This is for RDS_MR_DEAD.
221 * It would be nice & consistent to make this part of the above
222 * bit field here, but we need to use test_and_set_bit.
223 */
224 unsigned long r_state;
225 struct rds_sock *r_sock; /* back pointer to the socket that owns us */
226 struct rds_transport *r_trans;
227 void *r_trans_private;
228};
229
230/* Flags for mr->r_state */
231#define RDS_MR_DEAD 0
232
233static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
234{
235 return r_key | (((u64) offset) << 32);
236}
237
238static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
239{
240 return cookie;
241}
242
243static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
244{
245 return cookie >> 32;
246}
247
248/* atomic operation types */
249#define RDS_ATOMIC_TYPE_CSWP 0
250#define RDS_ATOMIC_TYPE_FADD 1
251
209/* 252/*
210 * m_sock_item and m_conn_item are on lists that are serialized under 253 * m_sock_item and m_conn_item are on lists that are serialized under
211 * conn->c_lock. m_sock_item has additional meaning in that once it is empty 254 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
@@ -258,13 +301,71 @@ struct rds_message {
 	 * -> rs->rs_lock
 	 */
 	spinlock_t		m_rs_lock;
+	wait_queue_head_t	m_flush_wait;
+
 	struct rds_sock		*m_rs;
-	struct rds_rdma_op	*m_rdma_op;
+
+	/* cookie to send to remote, in rds header */
 	rds_rdma_cookie_t	m_rdma_cookie;
-	struct rds_mr		*m_rdma_mr;
-	unsigned int		m_nents;
-	unsigned int		m_count;
-	struct scatterlist	m_sg[0];
+
+	unsigned int		m_used_sgs;
+	unsigned int		m_total_sgs;
+
+	void			*m_final_op;
+
316 struct {
317 struct rm_atomic_op {
318 int op_type;
319 union {
320 struct {
321 uint64_t compare;
322 uint64_t swap;
323 uint64_t compare_mask;
324 uint64_t swap_mask;
325 } op_m_cswp;
326 struct {
327 uint64_t add;
328 uint64_t nocarry_mask;
329 } op_m_fadd;
330 };
331
332 u32 op_rkey;
333 u64 op_remote_addr;
334 unsigned int op_notify:1;
335 unsigned int op_recverr:1;
336 unsigned int op_mapped:1;
337 unsigned int op_silent:1;
338 unsigned int op_active:1;
339 struct scatterlist *op_sg;
340 struct rds_notifier *op_notifier;
341
342 struct rds_mr *op_rdma_mr;
343 } atomic;
344 struct rm_rdma_op {
345 u32 op_rkey;
346 u64 op_remote_addr;
347 unsigned int op_write:1;
348 unsigned int op_fence:1;
349 unsigned int op_notify:1;
350 unsigned int op_recverr:1;
351 unsigned int op_mapped:1;
352 unsigned int op_silent:1;
353 unsigned int op_active:1;
354 unsigned int op_bytes;
355 unsigned int op_nents;
356 unsigned int op_count;
357 struct scatterlist *op_sg;
358 struct rds_notifier *op_notifier;
359
360 struct rds_mr *op_rdma_mr;
361 } rdma;
362 struct rm_data_op {
363 unsigned int op_active:1;
364 unsigned int op_nents;
365 unsigned int op_count;
366 struct scatterlist *op_sg;
367 } data;
368 };
268}; 369};
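With the embedded ops above, a message can carry an rdma op, an atomic op and a data payload at once, each gated by its own op_active bit rather than by a pointer being non-NULL. A minimal sketch of how a sender walks them (rds_send_xmit() in send.c below is the real version, with per-op sent-state tracking):

static int example_xmit(struct rds_connection *conn, struct rds_message *rm)
{
	int ret = 0;

	if (rm->rdma.op_active)
		ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
	if (!ret && rm->atomic.op_active)
		ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
	if (!ret && rm->data.op_active)
		ret = conn->c_trans->xmit(conn, rm, 0, 0, 0);
	return ret;
}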
269 370
270/* 371/*
@@ -305,10 +406,6 @@ struct rds_notifier {
305 * transport is responsible for other serialization, including 406 * transport is responsible for other serialization, including
306 * rds_recv_incoming(). This is called in process context but 407 * rds_recv_incoming(). This is called in process context but
307 * should try hard not to block. 408 * should try hard not to block.
308 *
309 * @xmit_cong_map: This asks the transport to send the local bitmap down the
310 * given connection. XXX get a better story about the bitmap
311 * flag and header.
312 */ 409 */
313 410
314#define RDS_TRANS_IB 0 411#define RDS_TRANS_IB 0
@@ -332,13 +429,11 @@ struct rds_transport {
 	void (*xmit_complete)(struct rds_connection *conn);
 	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
 		    unsigned int hdr_off, unsigned int sg, unsigned int off);
-	int (*xmit_cong_map)(struct rds_connection *conn,
-			     struct rds_cong_map *map, unsigned long offset);
-	int (*xmit_rdma)(struct rds_connection *conn, struct rds_rdma_op *op);
+	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
+	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
 	int (*recv)(struct rds_connection *conn);
 	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
 				size_t size);
-	void (*inc_purge)(struct rds_incoming *inc);
 	void (*inc_free)(struct rds_incoming *inc);
 
 	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
@@ -367,17 +462,11 @@ struct rds_sock {
 	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
 	 * support.
 	 */
-	struct rb_node		rs_bound_node;
+	struct hlist_node	rs_bound_node;
 	__be32			rs_bound_addr;
 	__be32			rs_conn_addr;
 	__be16			rs_bound_port;
 	__be16			rs_conn_port;
-
-	/*
-	 * This is only used to communicate the transport between bind and
-	 * initiating connections. All other trans use is referenced through
-	 * the connection.
-	 */
 	struct rds_transport	*rs_transport;
 
 	/*
@@ -466,8 +555,8 @@ struct rds_statistics {
 	uint64_t	s_recv_ping;
 	uint64_t	s_send_queue_empty;
 	uint64_t	s_send_queue_full;
-	uint64_t	s_send_sem_contention;
-	uint64_t	s_send_sem_queue_raced;
+	uint64_t	s_send_lock_contention;
+	uint64_t	s_send_lock_queue_raced;
 	uint64_t	s_send_immediate_retry;
 	uint64_t	s_send_delayed_retry;
 	uint64_t	s_send_drop_acked;
@@ -487,6 +576,7 @@ struct rds_statistics {
487}; 576};
488 577
489/* af_rds.c */ 578/* af_rds.c */
579char *rds_str_array(char **array, size_t elements, size_t index);
490void rds_sock_addref(struct rds_sock *rs); 580void rds_sock_addref(struct rds_sock *rs);
491void rds_sock_put(struct rds_sock *rs); 581void rds_sock_put(struct rds_sock *rs);
492void rds_wake_sk_sleep(struct rds_sock *rs); 582void rds_wake_sk_sleep(struct rds_sock *rs);
@@ -521,15 +611,17 @@ void rds_cong_exit(void);
521struct rds_message *rds_cong_update_alloc(struct rds_connection *conn); 611struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
522 612
523/* conn.c */ 613/* conn.c */
-int __init rds_conn_init(void);
+int rds_conn_init(void);
525void rds_conn_exit(void); 615void rds_conn_exit(void);
526struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr, 616struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
527 struct rds_transport *trans, gfp_t gfp); 617 struct rds_transport *trans, gfp_t gfp);
528struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr, 618struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
529 struct rds_transport *trans, gfp_t gfp); 619 struct rds_transport *trans, gfp_t gfp);
620void rds_conn_shutdown(struct rds_connection *conn);
530void rds_conn_destroy(struct rds_connection *conn); 621void rds_conn_destroy(struct rds_connection *conn);
531void rds_conn_reset(struct rds_connection *conn); 622void rds_conn_reset(struct rds_connection *conn);
532void rds_conn_drop(struct rds_connection *conn); 623void rds_conn_drop(struct rds_connection *conn);
624void rds_conn_connect_if_down(struct rds_connection *conn);
533void rds_for_each_conn_info(struct socket *sock, unsigned int len, 625void rds_for_each_conn_info(struct socket *sock, unsigned int len,
534 struct rds_info_iterator *iter, 626 struct rds_info_iterator *iter,
535 struct rds_info_lengths *lens, 627 struct rds_info_lengths *lens,
@@ -566,7 +658,8 @@ rds_conn_connecting(struct rds_connection *conn)
566 658
567/* message.c */ 659/* message.c */
568struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp); 660struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
569struct rds_message *rds_message_copy_from_user(struct iovec *first_iov, 661struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
662int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
570 size_t total_len); 663 size_t total_len);
571struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len); 664struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
572void rds_message_populate_header(struct rds_header *hdr, __be16 sport, 665void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
@@ -580,7 +673,6 @@ int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *vers
580int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset); 673int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
581int rds_message_inc_copy_to_user(struct rds_incoming *inc, 674int rds_message_inc_copy_to_user(struct rds_incoming *inc,
582 struct iovec *first_iov, size_t size); 675 struct iovec *first_iov, size_t size);
583void rds_message_inc_purge(struct rds_incoming *inc);
584void rds_message_inc_free(struct rds_incoming *inc); 676void rds_message_inc_free(struct rds_incoming *inc);
585void rds_message_addref(struct rds_message *rm); 677void rds_message_addref(struct rds_message *rm);
586void rds_message_put(struct rds_message *rm); 678void rds_message_put(struct rds_message *rm);
@@ -636,14 +728,39 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
636typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack); 728typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
637void rds_send_drop_acked(struct rds_connection *conn, u64 ack, 729void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
638 is_acked_func is_acked); 730 is_acked_func is_acked);
639int rds_send_acked_before(struct rds_connection *conn, u64 seq);
640void rds_send_remove_from_sock(struct list_head *messages, int status); 731void rds_send_remove_from_sock(struct list_head *messages, int status);
641int rds_send_pong(struct rds_connection *conn, __be16 dport); 732int rds_send_pong(struct rds_connection *conn, __be16 dport);
642struct rds_message *rds_send_get_message(struct rds_connection *, 733struct rds_message *rds_send_get_message(struct rds_connection *,
643 struct rds_rdma_op *); 734 struct rm_rdma_op *);
644 735
645/* rdma.c */ 736/* rdma.c */
646void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force); 737void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
738int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
739int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
740int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
741void rds_rdma_drop_keys(struct rds_sock *rs);
742int rds_rdma_extra_size(struct rds_rdma_args *args);
743int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
744 struct cmsghdr *cmsg);
745int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
746 struct cmsghdr *cmsg);
747int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
748 struct cmsghdr *cmsg);
749int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
750 struct cmsghdr *cmsg);
751void rds_rdma_free_op(struct rm_rdma_op *ro);
752void rds_atomic_free_op(struct rm_atomic_op *ao);
753void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
754void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
755int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
756 struct cmsghdr *cmsg);
757
758extern void __rds_put_mr_final(struct rds_mr *mr);
759static inline void rds_mr_put(struct rds_mr *mr)
760{
761 if (atomic_dec_and_test(&mr->r_refcount))
762 __rds_put_mr_final(mr);
763}
647 764
648/* stats.c */ 765/* stats.c */
649DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats); 766DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
@@ -657,14 +774,14 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
657 put_cpu(); \ 774 put_cpu(); \
658} while (0) 775} while (0)
659#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count) 776#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
-int __init rds_stats_init(void);
+int rds_stats_init(void);
661void rds_stats_exit(void); 778void rds_stats_exit(void);
662void rds_stats_info_copy(struct rds_info_iterator *iter, 779void rds_stats_info_copy(struct rds_info_iterator *iter,
663 uint64_t *values, const char *const *names, 780 uint64_t *values, const char *const *names,
664 size_t nr); 781 size_t nr);
665 782
666/* sysctl.c */ 783/* sysctl.c */
-int __init rds_sysctl_init(void);
+int rds_sysctl_init(void);
668void rds_sysctl_exit(void); 785void rds_sysctl_exit(void);
669extern unsigned long rds_sysctl_sndbuf_min; 786extern unsigned long rds_sysctl_sndbuf_min;
670extern unsigned long rds_sysctl_sndbuf_default; 787extern unsigned long rds_sysctl_sndbuf_default;
@@ -678,9 +795,10 @@ extern unsigned long rds_sysctl_trace_flags;
678extern unsigned int rds_sysctl_trace_level; 795extern unsigned int rds_sysctl_trace_level;
679 796
680/* threads.c */ 797/* threads.c */
-int __init rds_threads_init(void);
+int rds_threads_init(void);
682void rds_threads_exit(void); 799void rds_threads_exit(void);
683extern struct workqueue_struct *rds_wq; 800extern struct workqueue_struct *rds_wq;
801void rds_queue_reconnect(struct rds_connection *conn);
684void rds_connect_worker(struct work_struct *); 802void rds_connect_worker(struct work_struct *);
685void rds_shutdown_worker(struct work_struct *); 803void rds_shutdown_worker(struct work_struct *);
686void rds_send_worker(struct work_struct *); 804void rds_send_worker(struct work_struct *);
@@ -691,9 +809,10 @@ void rds_connect_complete(struct rds_connection *conn);
691int rds_trans_register(struct rds_transport *trans); 809int rds_trans_register(struct rds_transport *trans);
692void rds_trans_unregister(struct rds_transport *trans); 810void rds_trans_unregister(struct rds_transport *trans);
693struct rds_transport *rds_trans_get_preferred(__be32 addr); 811struct rds_transport *rds_trans_get_preferred(__be32 addr);
812void rds_trans_put(struct rds_transport *trans);
694unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, 813unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
695 unsigned int avail); 814 unsigned int avail);
-int __init rds_trans_init(void);
+int rds_trans_init(void);
697void rds_trans_exit(void); 816void rds_trans_exit(void);
698 817
699#endif 818#endif
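One usage note on the MR helpers now exported from rds.h: rds_mr_put() is the usual last-put-frees idiom, so a lookup site takes a reference under rs_rdma_lock and drops it when done. A hypothetical caller, modeled on rds_cmsg_rdma_dest() above:

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr)
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		/* ... use the MR ... */
		rds_mr_put(mr);	/* last put calls __rds_put_mr_final() */
	}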
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c93588c2d553..68800f02aa30 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -36,7 +36,6 @@
36#include <linux/in.h> 36#include <linux/in.h>
37 37
38#include "rds.h" 38#include "rds.h"
39#include "rdma.h"
40 39
41void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, 40void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
42 __be32 saddr) 41 __be32 saddr)
@@ -210,7 +209,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
210 } 209 }
211 210
212 rs = rds_find_bound(daddr, inc->i_hdr.h_dport); 211 rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
-	if (rs == NULL) {
+	if (!rs) {
214 rds_stats_inc(s_recv_drop_no_sock); 213 rds_stats_inc(s_recv_drop_no_sock);
215 goto out; 214 goto out;
216 } 215 }
@@ -251,7 +250,7 @@ static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
251{ 250{
252 unsigned long flags; 251 unsigned long flags;
253 252
-	if (*inc == NULL) {
+	if (!*inc) {
255 read_lock_irqsave(&rs->rs_recv_lock, flags); 254 read_lock_irqsave(&rs->rs_recv_lock, flags);
256 if (!list_empty(&rs->rs_recv_queue)) { 255 if (!list_empty(&rs->rs_recv_queue)) {
257 *inc = list_entry(rs->rs_recv_queue.next, 256 *inc = list_entry(rs->rs_recv_queue.next,
@@ -334,10 +333,10 @@ int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
334 333
335 if (msghdr) { 334 if (msghdr) {
336 cmsg.user_token = notifier->n_user_token; 335 cmsg.user_token = notifier->n_user_token;
337 cmsg.status = notifier->n_status; 336 cmsg.status = notifier->n_status;
338 337
339 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS, 338 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
340 sizeof(cmsg), &cmsg); 339 sizeof(cmsg), &cmsg);
341 if (err) 340 if (err)
342 break; 341 break;
343 } 342 }
diff --git a/net/rds/send.c b/net/rds/send.c
index 9c1c6bcaa6c9..9b951a0ab6b7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -37,7 +37,6 @@
37#include <linux/list.h> 37#include <linux/list.h>
38 38
39#include "rds.h" 39#include "rds.h"
40#include "rdma.h"
41 40
42/* When transmitting messages in rds_send_xmit, we need to emerge from 41/* When transmitting messages in rds_send_xmit, we need to emerge from
43 * time to time and briefly release the CPU. Otherwise the softlock watchdog 42 * time to time and briefly release the CPU. Otherwise the softlock watchdog
@@ -54,7 +53,8 @@ module_param(send_batch_count, int, 0444);
54MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue"); 53MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
55 54
56/* 55/*
- * Reset the send state.  Caller must hold c_send_lock when calling here.
+ * Reset the send state.  Callers must ensure that this doesn't race with
+ * rds_send_xmit().
58 */ 58 */
59void rds_send_reset(struct rds_connection *conn) 59void rds_send_reset(struct rds_connection *conn)
60{ 60{
@@ -62,18 +62,22 @@ void rds_send_reset(struct rds_connection *conn)
62 unsigned long flags; 62 unsigned long flags;
63 63
 	if (conn->c_xmit_rm) {
+		rm = conn->c_xmit_rm;
+		conn->c_xmit_rm = NULL;
 		/* Tell the user the RDMA op is no longer mapped by the
 		 * transport. This isn't entirely true (it's flushed out
 		 * independently) but as the connection is down, there's
 		 * no ongoing RDMA to/from that memory */
-		rds_message_unmapped(conn->c_xmit_rm);
-		rds_message_put(conn->c_xmit_rm);
-		conn->c_xmit_rm = NULL;
+		rds_message_unmapped(rm);
+		rds_message_put(rm);
 	}
+
 	conn->c_xmit_sg = 0;
 	conn->c_xmit_hdr_off = 0;
 	conn->c_xmit_data_off = 0;
+	conn->c_xmit_atomic_sent = 0;
 	conn->c_xmit_rdma_sent = 0;
+	conn->c_xmit_data_sent = 0;
77 81
78 conn->c_map_queued = 0; 82 conn->c_map_queued = 0;
79 83
@@ -90,6 +94,25 @@ void rds_send_reset(struct rds_connection *conn)
90 spin_unlock_irqrestore(&conn->c_lock, flags); 94 spin_unlock_irqrestore(&conn->c_lock, flags);
91} 95}
92 96
97static int acquire_in_xmit(struct rds_connection *conn)
98{
99 return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
100}
101
102static void release_in_xmit(struct rds_connection *conn)
103{
104 clear_bit(RDS_IN_XMIT, &conn->c_flags);
105 smp_mb__after_clear_bit();
106 /*
107 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
108 * hot path and finding waiters is very rare. We don't want to walk
109 * the system-wide hashed waitqueue buckets in the fast path only to
110 * almost never find waiters.
111 */
112 if (waitqueue_active(&conn->c_waitq))
113 wake_up_all(&conn->c_waitq);
114}
115
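	/*
	 * The waiter these wakeups pair with is not in this hunk; the
	 * shutdown path is assumed to sleep on the new c_waitq until the
	 * bit clears, roughly:
	 *
	 *	wait_event(conn->c_waitq,
	 *		   !test_bit(RDS_IN_XMIT, &conn->c_flags));
	 */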
93/* 116/*
94 * We're making the concious trade-off here to only send one message 117 * We're making the concious trade-off here to only send one message
95 * down the connection at a time. 118 * down the connection at a time.
@@ -109,102 +132,69 @@ int rds_send_xmit(struct rds_connection *conn)
 	struct rds_message *rm;
 	unsigned long flags;
 	unsigned int tmp;
-	unsigned int send_quota = send_batch_count;
 	struct scatterlist *sg;
 	int ret = 0;
-	int was_empty = 0;
 	LIST_HEAD(to_be_dropped);
 
+restart:
+
 	/*
 	 * sendmsg calls here after having queued its message on the send
 	 * queue.  We only have one task feeding the connection at a time.  If
 	 * another thread is already feeding the queue then we back off.  This
 	 * avoids blocking the caller and trading per-connection data between
 	 * caches per message.
-	 *
-	 * The sem holder will issue a retry if they notice that someone queued
-	 * a message after they stopped walking the send queue but before they
-	 * dropped the sem.
 	 */
-	if (!mutex_trylock(&conn->c_send_lock)) {
-		rds_stats_inc(s_send_sem_contention);
+	if (!acquire_in_xmit(conn)) {
+		rds_stats_inc(s_send_lock_contention);
 		ret = -ENOMEM;
 		goto out;
 	}
 
+	/*
+	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
+	 * we do the opposite to avoid races.
+	 */
+	if (!rds_conn_up(conn)) {
+		release_in_xmit(conn);
+		ret = 0;
+		goto out;
+	}
+
 	if (conn->c_trans->xmit_prepare)
 		conn->c_trans->xmit_prepare(conn);
 
 	/*
 	 * spin trying to push headers and data down the connection until
-	 * the connection doens't make forward progress.
+	 * the connection doesn't make forward progress.
 	 */
-	while (--send_quota) {
-		/*
-		 * See if need to send a congestion map update if we're
-		 * between sending messages.  The send_sem protects our sole
-		 * use of c_map_offset and _bytes.
-		 * Note this is used only by transports that define a special
-		 * xmit_cong_map function.  For all others, we create allocate
-		 * a cong_map message and treat it just like any other send.
-		 */
-		if (conn->c_map_bytes) {
-			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
-							   conn->c_map_offset);
-			if (ret <= 0)
-				break;
+	while (1) {
 
-			conn->c_map_offset += ret;
-			conn->c_map_bytes -= ret;
-			if (conn->c_map_bytes)
-				continue;
-		}
-
-		/* If we're done sending the current message, clear the
-		 * offset and S/G temporaries.
-		 */
 		rm = conn->c_xmit_rm;
-		if (rm != NULL &&
-		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
-		    conn->c_xmit_sg == rm->m_nents) {
-			conn->c_xmit_rm = NULL;
-			conn->c_xmit_sg = 0;
-			conn->c_xmit_hdr_off = 0;
-			conn->c_xmit_data_off = 0;
-			conn->c_xmit_rdma_sent = 0;
 
-			/* Release the reference to the previous message. */
-			rds_message_put(rm);
-			rm = NULL;
-		}
-
-		/* If we're asked to send a cong map update, do so.
+		/*
+		 * If between sending messages, we can send a pending congestion
+		 * map update.
 		 */
-		if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
-			if (conn->c_trans->xmit_cong_map != NULL) {
-				conn->c_map_offset = 0;
-				conn->c_map_bytes = sizeof(struct rds_header) +
-					RDS_CONG_MAP_BYTES;
-				continue;
-			}
-
+		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 			rm = rds_cong_update_alloc(conn);
 			if (IS_ERR(rm)) {
 				ret = PTR_ERR(rm);
 				break;
 			}
+			rm->data.op_active = 1;
 
 			conn->c_xmit_rm = rm;
 		}
 
 		/*
-		 * Grab the next message from the send queue, if there is one.
+		 * If not already working on one, grab the next message.
 		 *
 		 * c_xmit_rm holds a ref while we're sending this message down
 		 * the connction.  We can use this ref while holding the
 		 * send_sem.. rds_send_reset() is serialized with it.
 		 */
-		if (rm == NULL) {
+		if (!rm) {
 			unsigned int len;
 
 			spin_lock_irqsave(&conn->c_lock, flags);
@@ -224,10 +214,8 @@ int rds_send_xmit(struct rds_connection *conn)
 
 			spin_unlock_irqrestore(&conn->c_lock, flags);
 
-			if (rm == NULL) {
-				was_empty = 1;
+			if (!rm)
 				break;
-			}
 
 			/* Unfortunately, the way Infiniband deals with
 			 * RDMA to a bad MR key is by moving the entire
@@ -236,13 +224,12 @@ int rds_send_xmit(struct rds_connection *conn)
 			 * connection.
 			 * Therefore, we never retransmit messages with RDMA ops.
 			 */
-			if (rm->m_rdma_op &&
+			if (rm->rdma.op_active &&
 			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 				spin_lock_irqsave(&conn->c_lock, flags);
 				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 					list_move(&rm->m_conn_item, &to_be_dropped);
 				spin_unlock_irqrestore(&conn->c_lock, flags);
-				rds_message_put(rm);
 				continue;
 			}
 
@@ -263,23 +250,55 @@ int rds_send_xmit(struct rds_connection *conn)
 			conn->c_xmit_rm = rm;
 		}
 
-		/*
-		 * Try and send an rdma message.  Let's see if we can
-		 * keep this simple and require that the transport either
-		 * send the whole rdma or none of it.
-		 */
-		if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
-			ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
+		/* The transport either sends the whole rdma or none of it */
+		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
+			rm->m_final_op = &rm->rdma;
+			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 			if (ret)
 				break;
 			conn->c_xmit_rdma_sent = 1;
+
 			/* The transport owns the mapped memory for now.
 			 * You can't unmap it while it's on the send queue */
 			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 		}
 
-		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
-		    conn->c_xmit_sg < rm->m_nents) {
+		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
+			rm->m_final_op = &rm->atomic;
+			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
+			if (ret)
+				break;
+			conn->c_xmit_atomic_sent = 1;
+
+			/* The transport owns the mapped memory for now.
+			 * You can't unmap it while it's on the send queue */
+			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
+		}
+
+		/*
+		 * A number of cases require an RDS header to be sent
+		 * even if there is no data.
+		 * We permit 0-byte sends; rds-ping depends on this.
+		 * However, if there are exclusively attached silent ops,
+		 * we skip the hdr/data send, to enable silent operation.
+		 */
+		if (rm->data.op_nents == 0) {
+			int ops_present;
+			int all_ops_are_silent = 1;
+
+			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
+			if (rm->atomic.op_active && !rm->atomic.op_silent)
+				all_ops_are_silent = 0;
+			if (rm->rdma.op_active && !rm->rdma.op_silent)
+				all_ops_are_silent = 0;
+
+			if (ops_present && all_ops_are_silent
+			    && !rm->m_rdma_cookie)
+				rm->data.op_active = 0;
+		}
+
+		if (rm->data.op_active && !conn->c_xmit_data_sent) {
+			rm->m_final_op = &rm->data;
 			ret = conn->c_trans->xmit(conn, rm,
 						  conn->c_xmit_hdr_off,
 						  conn->c_xmit_sg,
@@ -295,7 +314,7 @@ int rds_send_xmit(struct rds_connection *conn)
 				ret -= tmp;
 			}
 
-			sg = &rm->m_sg[conn->c_xmit_sg];
+			sg = &rm->data.op_sg[conn->c_xmit_sg];
 			while (ret) {
 				tmp = min_t(int, ret, sg->length -
 						      conn->c_xmit_data_off);
@@ -306,49 +325,63 @@ int rds_send_xmit(struct rds_connection *conn)
 					sg++;
 					conn->c_xmit_sg++;
 					BUG_ON(ret != 0 &&
-					       conn->c_xmit_sg == rm->m_nents);
+					       conn->c_xmit_sg == rm->data.op_nents);
 				}
 			}
+
+			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
+			    (conn->c_xmit_sg == rm->data.op_nents))
+				conn->c_xmit_data_sent = 1;
 		}
-	}
 
-	/* Nuke any messages we decided not to retransmit. */
-	if (!list_empty(&to_be_dropped))
-		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
+		/*
+		 * A rm will only take multiple times through this loop
+		 * if there is a data op. Thus, if the data is sent (or there was
+		 * none), then we're done with the rm.
+		 */
+		if (!rm->data.op_active || conn->c_xmit_data_sent) {
+			conn->c_xmit_rm = NULL;
+			conn->c_xmit_sg = 0;
+			conn->c_xmit_hdr_off = 0;
+			conn->c_xmit_data_off = 0;
+			conn->c_xmit_rdma_sent = 0;
+			conn->c_xmit_atomic_sent = 0;
+			conn->c_xmit_data_sent = 0;
+
+			rds_message_put(rm);
+		}
+	}
 
 	if (conn->c_trans->xmit_complete)
 		conn->c_trans->xmit_complete(conn);
 
-	/*
-	 * We might be racing with another sender who queued a message but
-	 * backed off on noticing that we held the c_send_lock.  If we check
-	 * for queued messages after dropping the sem then either we'll
-	 * see the queued message or the queuer will get the sem.  If we
-	 * notice the queued message then we trigger an immediate retry.
-	 *
-	 * We need to be careful only to do this when we stopped processing
-	 * the send queue because it was empty.  It's the only way we
-	 * stop processing the loop when the transport hasn't taken
-	 * responsibility for forward progress.
-	 */
-	mutex_unlock(&conn->c_send_lock);
+	release_in_xmit(conn);
 
-	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
-		/* We exhausted the send quota, but there's work left to
-		 * do. Return and (re-)schedule the send worker.
-		 */
-		ret = -EAGAIN;
+	/* Nuke any messages we decided not to retransmit. */
+	if (!list_empty(&to_be_dropped)) {
+		/* irqs on here, so we can put(), unlike above */
+		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
+			rds_message_put(rm);
+		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 	}
 
-	if (ret == 0 && was_empty) {
-		/* A simple bit test would be way faster than taking the
-		 * spin lock */
-		spin_lock_irqsave(&conn->c_lock, flags);
+	/*
+	 * Other senders can queue a message after we last test the send queue
+	 * but before we clear RDS_IN_XMIT. In that case they'd back off and
+	 * not try and send their newly queued message.  We need to check the
+	 * send queue after having cleared RDS_IN_XMIT so that their message
+	 * doesn't get stuck on the send queue.
+	 *
+	 * If the transport cannot continue (i.e ret != 0), then it must
+	 * call us when more room is available, such as from the tx
+	 * completion handler.
+	 */
+	if (ret == 0) {
+		smp_mb();
 		if (!list_empty(&conn->c_send_queue)) {
-			rds_stats_inc(s_send_sem_queue_raced);
-			ret = -EAGAIN;
+			rds_stats_inc(s_send_lock_queue_raced);
+			goto restart;
 		}
-		spin_unlock_irqrestore(&conn->c_lock, flags);
 	}
 out:
 	return ret;
@@ -376,52 +409,60 @@ static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 }
 
 /*
- * Returns true if there are no messages on the send and retransmit queues
- * which have a sequence number greater than or equal to the given sequence
- * number.
+ * This is pretty similar to what happens below in the ACK
+ * handling code - except that we call here as soon as we get
+ * the IB send completion on the RDMA op and the accompanying
+ * message.
  */
-int rds_send_acked_before(struct rds_connection *conn, u64 seq)
+void rds_rdma_send_complete(struct rds_message *rm, int status)
 {
-	struct rds_message *rm, *tmp;
-	int ret = 1;
+	struct rds_sock *rs = NULL;
+	struct rm_rdma_op *ro;
+	struct rds_notifier *notifier;
+	unsigned long flags;
 
-	spin_lock(&conn->c_lock);
+	spin_lock_irqsave(&rm->m_rs_lock, flags);
 
-	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
-		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
-			ret = 0;
-		break;
-	}
+	ro = &rm->rdma;
+	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
+	    ro->op_active && ro->op_notify && ro->op_notifier) {
+		notifier = ro->op_notifier;
+		rs = rm->m_rs;
+		sock_hold(rds_rs_to_sk(rs));
 
-	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
-		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
-			ret = 0;
-		break;
+		notifier->n_status = status;
+		spin_lock(&rs->rs_lock);
+		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
+		spin_unlock(&rs->rs_lock);
+
+		ro->op_notifier = NULL;
 	}
 
-	spin_unlock(&conn->c_lock);
+	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 
-	return ret;
+	if (rs) {
+		rds_wake_sk_sleep(rs);
+		sock_put(rds_rs_to_sk(rs));
+	}
 }
+EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 
 /*
- * This is pretty similar to what happens below in the ACK
- * handling code - except that we call here as soon as we get
- * the IB send completion on the RDMA op and the accompanying
- * message.
+ * Just like above, except looks at atomic op
  */
-void rds_rdma_send_complete(struct rds_message *rm, int status)
+void rds_atomic_send_complete(struct rds_message *rm, int status)
 {
 	struct rds_sock *rs = NULL;
-	struct rds_rdma_op *ro;
+	struct rm_atomic_op *ao;
 	struct rds_notifier *notifier;
+	unsigned long flags;
 
-	spin_lock(&rm->m_rs_lock);
+	spin_lock_irqsave(&rm->m_rs_lock, flags);
 
-	ro = rm->m_rdma_op;
-	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
-	    ro && ro->r_notify && ro->r_notifier) {
-		notifier = ro->r_notifier;
+	ao = &rm->atomic;
+	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
+	    && ao->op_active && ao->op_notify && ao->op_notifier) {
+		notifier = ao->op_notifier;
 		rs = rm->m_rs;
 		sock_hold(rds_rs_to_sk(rs));
 
@@ -430,17 +471,17 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
 		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 		spin_unlock(&rs->rs_lock);
 
-		ro->r_notifier = NULL;
+		ao->op_notifier = NULL;
 	}
 
-	spin_unlock(&rm->m_rs_lock);
+	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 
 	if (rs) {
 		rds_wake_sk_sleep(rs);
 		sock_put(rds_rs_to_sk(rs));
 	}
 }
-EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
+EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 
 /*
  * This is the same as rds_rdma_send_complete except we
@@ -448,15 +489,23 @@ EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
  * socket, socket lock) and can just move the notifier.
  */
 static inline void
-__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
+__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 {
-	struct rds_rdma_op *ro;
+	struct rm_rdma_op *ro;
+	struct rm_atomic_op *ao;
+
+	ro = &rm->rdma;
+	if (ro->op_active && ro->op_notify && ro->op_notifier) {
+		ro->op_notifier->n_status = status;
+		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
+		ro->op_notifier = NULL;
+	}
 
-	ro = rm->m_rdma_op;
-	if (ro && ro->r_notify && ro->r_notifier) {
-		ro->r_notifier->n_status = status;
-		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
-		ro->r_notifier = NULL;
+	ao = &rm->atomic;
+	if (ao->op_active && ao->op_notify && ao->op_notifier) {
+		ao->op_notifier->n_status = status;
+		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
+		ao->op_notifier = NULL;
 	}
 
 	/* No need to wake the app - caller does this */
@@ -468,7 +517,7 @@ __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status
  * So speed is not an issue here.
  */
 struct rds_message *rds_send_get_message(struct rds_connection *conn,
-					 struct rds_rdma_op *op)
+					 struct rm_rdma_op *op)
 {
 	struct rds_message *rm, *tmp, *found = NULL;
 	unsigned long flags;
@@ -476,7 +525,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
 	spin_lock_irqsave(&conn->c_lock, flags);
 
 	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
-		if (rm->m_rdma_op == op) {
+		if (&rm->rdma == op) {
 			atomic_inc(&rm->m_refcount);
 			found = rm;
 			goto out;
@@ -484,7 +533,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
 	}
 
 	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
-		if (rm->m_rdma_op == op) {
+		if (&rm->rdma == op) {
 			atomic_inc(&rm->m_refcount);
 			found = rm;
 			break;
@@ -544,19 +593,20 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
 		spin_lock(&rs->rs_lock);
 
 		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
-			struct rds_rdma_op *ro = rm->m_rdma_op;
+			struct rm_rdma_op *ro = &rm->rdma;
 			struct rds_notifier *notifier;
 
 			list_del_init(&rm->m_sock_item);
 			rds_send_sndbuf_remove(rs, rm);
 
-			if (ro && ro->r_notifier && (status || ro->r_notify)) {
-				notifier = ro->r_notifier;
+			if (ro->op_active && ro->op_notifier &&
+			       (ro->op_notify || (ro->op_recverr && status))) {
+				notifier = ro->op_notifier;
 				list_add_tail(&notifier->n_list,
 						&rs->rs_notify_queue);
 				if (!notifier->n_status)
 					notifier->n_status = status;
-				rm->m_rdma_op->r_notifier = NULL;
+				rm->rdma.op_notifier = NULL;
 			}
 			was_on_sock = 1;
 			rm->m_rs = NULL;
@@ -619,9 +669,8 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 {
 	struct rds_message *rm, *tmp;
 	struct rds_connection *conn;
-	unsigned long flags, flags2;
+	unsigned long flags;
 	LIST_HEAD(list);
-	int wake = 0;
 
 	/* get all the messages we're dropping under the rs lock */
 	spin_lock_irqsave(&rs->rs_lock, flags);
@@ -631,59 +680,54 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
631 dest->sin_port != rm->m_inc.i_hdr.h_dport)) 680 dest->sin_port != rm->m_inc.i_hdr.h_dport))
632 continue; 681 continue;
633 682
634 wake = 1;
635 list_move(&rm->m_sock_item, &list); 683 list_move(&rm->m_sock_item, &list);
636 rds_send_sndbuf_remove(rs, rm); 684 rds_send_sndbuf_remove(rs, rm);
637 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); 685 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
638 } 686 }
639 687
640 /* order flag updates with the rs lock */ 688 /* order flag updates with the rs lock */
641 if (wake) 689 smp_mb__after_clear_bit();
642 smp_mb__after_clear_bit();
643 690
644 spin_unlock_irqrestore(&rs->rs_lock, flags); 691 spin_unlock_irqrestore(&rs->rs_lock, flags);
645 692
646 conn = NULL; 693 if (list_empty(&list))
694 return;
647 695
648 /* now remove the messages from the conn list as needed */ 696 /* Remove the messages from the conn */
649 list_for_each_entry(rm, &list, m_sock_item) { 697 list_for_each_entry(rm, &list, m_sock_item) {
650 /* We do this here rather than in the loop above, so that
651 * we don't have to nest m_rs_lock under rs->rs_lock */
652 spin_lock_irqsave(&rm->m_rs_lock, flags2);
653 /* If this is a RDMA operation, notify the app. */
654 spin_lock(&rs->rs_lock);
655 __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
656 spin_unlock(&rs->rs_lock);
657 rm->m_rs = NULL;
658 spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
659 698
699 conn = rm->m_inc.i_conn;
700
701 spin_lock_irqsave(&conn->c_lock, flags);
660 /* 702 /*
661 * If we see this flag cleared then we're *sure* that someone 703 * Maybe someone else beat us to removing rm from the conn.
662 * else beat us to removing it from the conn. If we race 704 * If we race with their flag update we'll get the lock and
663 * with their flag update we'll get the lock and then really 705 * then really see that the flag has been cleared.
664 * see that the flag has been cleared.
665 */ 706 */
666 if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags)) 707 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
708 spin_unlock_irqrestore(&conn->c_lock, flags);
667 continue; 709 continue;
668
669 if (conn != rm->m_inc.i_conn) {
670 if (conn)
671 spin_unlock_irqrestore(&conn->c_lock, flags);
672 conn = rm->m_inc.i_conn;
673 spin_lock_irqsave(&conn->c_lock, flags);
674 } 710 }
711 list_del_init(&rm->m_conn_item);
712 spin_unlock_irqrestore(&conn->c_lock, flags);
675 713
676 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { 714 /*
677 list_del_init(&rm->m_conn_item); 715 * Couldn't grab m_rs_lock in top loop (lock ordering),
678 rds_message_put(rm); 716 * but we can now.
679 } 717 */
680 } 718 spin_lock_irqsave(&rm->m_rs_lock, flags);
681 719
682 if (conn) 720 spin_lock(&rs->rs_lock);
683 spin_unlock_irqrestore(&conn->c_lock, flags); 721 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
722 spin_unlock(&rs->rs_lock);
684 723
685 if (wake) 724 rm->m_rs = NULL;
686 rds_wake_sk_sleep(rs); 725 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
726
727 rds_message_put(rm);
728 }
729
730 rds_wake_sk_sleep(rs);
687 731
688 while (!list_empty(&list)) { 732 while (!list_empty(&list)) {
689 rm = list_entry(list.next, struct rds_message, m_sock_item); 733 rm = list_entry(list.next, struct rds_message, m_sock_item);
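The rewritten rds_send_drop_to() above leans on an ownership idiom: whichever path sees RDS_MSG_ON_CONN set and clears it atomically under c_lock is the one that unlinks the message, while a racing path simply drops the lock and moves on. A minimal sketch of that idiom in isolation (the struct, flag, and function names here are illustrative, not from the patch):

	#include <linux/bitops.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	enum { ITEM_ON_LIST };			/* illustrative flag bit */

	struct example_item {
		unsigned long flags;
		struct list_head link;
	};

	/*
	 * Whoever clears the bit (atomically, under the list lock) owns
	 * the removal; everyone else backs off.
	 */
	static bool example_try_remove(struct example_item *item, spinlock_t *lock)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		if (!test_and_clear_bit(ITEM_ON_LIST, &item->flags)) {
			spin_unlock_irqrestore(lock, flags);
			return false;	/* someone else beat us to it */
		}
		list_del_init(&item->link);
		spin_unlock_irqrestore(lock, flags);
		return true;
	}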
@@ -763,6 +807,63 @@ out:
763 return *queued; 807 return *queued;
764} 808}
765 809
810/*
811 * rds_message is getting to be quite complicated, and we'd like to allocate
812 * it all in one go. This figures out how big it needs to be up front.
813 */
814static int rds_rm_size(struct msghdr *msg, int data_len)
815{
816 struct cmsghdr *cmsg;
817 int size = 0;
818 int cmsg_groups = 0;
819 int retval;
820
821 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
822 if (!CMSG_OK(msg, cmsg))
823 return -EINVAL;
824
825 if (cmsg->cmsg_level != SOL_RDS)
826 continue;
827
828 switch (cmsg->cmsg_type) {
829 case RDS_CMSG_RDMA_ARGS:
830 cmsg_groups |= 1;
831 retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
832 if (retval < 0)
833 return retval;
834 size += retval;
835
836 break;
837
838 case RDS_CMSG_RDMA_DEST:
839 case RDS_CMSG_RDMA_MAP:
840 cmsg_groups |= 2;
 841			/* these are valid but do not add any size */
842 break;
843
844 case RDS_CMSG_ATOMIC_CSWP:
845 case RDS_CMSG_ATOMIC_FADD:
846 case RDS_CMSG_MASKED_ATOMIC_CSWP:
847 case RDS_CMSG_MASKED_ATOMIC_FADD:
848 cmsg_groups |= 1;
849 size += sizeof(struct scatterlist);
850 break;
851
852 default:
853 return -EINVAL;
854 }
855
856 }
857
858 size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
859
860 /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
861 if (cmsg_groups == 3)
862 return -EINVAL;
863
864 return size;
865}
866
766static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, 867static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
767 struct msghdr *msg, int *allocated_mr) 868 struct msghdr *msg, int *allocated_mr)
768{ 869{
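A quick sanity check of the sizing rule in rds_rm_size() above (a sketch; ceil() is assumed to be the usual round-up division as defined in net/rds/rds.h, and example_data_size() is an illustrative name):

	#include <linux/scatterlist.h>

	/* Round-up division; assumed to match ceil() in net/rds/rds.h. */
	#define ceil(x, y) (((x) + (y) - 1) / (y))

	/*
	 * Data-only case of the sizing rule: a 9000-byte payload on
	 * 4096-byte pages needs ceil(9000, 4096) = 3 scatterlists, so
	 * this returns 3 * sizeof(struct scatterlist).  Each atomic
	 * cmsg adds one more scatterlist, and RDS_CMSG_RDMA_ARGS adds
	 * whatever rds_rdma_extra_size() reports for the user's vector.
	 */
	static int example_data_size(int data_len)
	{
		return ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
	}

The cmsg_groups bitmask then rejects any message that mixes the (RDMA_DEST, RDMA_MAP) group with the (RDMA_ARGS, atomic) group.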
@@ -777,7 +878,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
777 continue; 878 continue;
778 879
779 /* As a side effect, RDMA_DEST and RDMA_MAP will set 880 /* As a side effect, RDMA_DEST and RDMA_MAP will set
780 * rm->m_rdma_cookie and rm->m_rdma_mr. 881 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
781 */ 882 */
782 switch (cmsg->cmsg_type) { 883 switch (cmsg->cmsg_type) {
783 case RDS_CMSG_RDMA_ARGS: 884 case RDS_CMSG_RDMA_ARGS:
@@ -793,6 +894,12 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
793 if (!ret) 894 if (!ret)
794 *allocated_mr = 1; 895 *allocated_mr = 1;
795 break; 896 break;
897 case RDS_CMSG_ATOMIC_CSWP:
898 case RDS_CMSG_ATOMIC_FADD:
899 case RDS_CMSG_MASKED_ATOMIC_CSWP:
900 case RDS_CMSG_MASKED_ATOMIC_FADD:
901 ret = rds_cmsg_atomic(rs, rm, cmsg);
902 break;
796 903
797 default: 904 default:
798 return -EINVAL; 905 return -EINVAL;
@@ -850,13 +957,26 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
850 goto out; 957 goto out;
851 } 958 }
852 959
853 rm = rds_message_copy_from_user(msg->msg_iov, payload_len); 960 /* size of rm including all sgs */
854 if (IS_ERR(rm)) { 961 ret = rds_rm_size(msg, payload_len);
855 ret = PTR_ERR(rm); 962 if (ret < 0)
856 rm = NULL; 963 goto out;
964
965 rm = rds_message_alloc(ret, GFP_KERNEL);
966 if (!rm) {
967 ret = -ENOMEM;
857 goto out; 968 goto out;
858 } 969 }
859 970
971 /* Attach data to the rm */
972 if (payload_len) {
973 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
974 ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
975 if (ret)
976 goto out;
977 }
978 rm->data.op_active = 1;
979
860 rm->m_daddr = daddr; 980 rm->m_daddr = daddr;
861 981
862 /* rds_conn_create has a spinlock that runs with IRQ off. 982 /* rds_conn_create has a spinlock that runs with IRQ off.
@@ -879,22 +999,23 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
879 if (ret) 999 if (ret)
880 goto out; 1000 goto out;
881 1001
882 if ((rm->m_rdma_cookie || rm->m_rdma_op) && 1002 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
883 conn->c_trans->xmit_rdma == NULL) {
884 if (printk_ratelimit()) 1003 if (printk_ratelimit())
885 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", 1004 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
886 rm->m_rdma_op, conn->c_trans->xmit_rdma); 1005 &rm->rdma, conn->c_trans->xmit_rdma);
887 ret = -EOPNOTSUPP; 1006 ret = -EOPNOTSUPP;
888 goto out; 1007 goto out;
889 } 1008 }
890 1009
891 /* If the connection is down, trigger a connect. We may 1010 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
892 * have scheduled a delayed reconnect however - in this case 1011 if (printk_ratelimit())
893 * we should not interfere. 1012 printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
894 */ 1013 &rm->atomic, conn->c_trans->xmit_atomic);
895 if (rds_conn_state(conn) == RDS_CONN_DOWN && 1014 ret = -EOPNOTSUPP;
896 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) 1015 goto out;
897 queue_delayed_work(rds_wq, &conn->c_conn_w, 0); 1016 }
1017
1018 rds_conn_connect_if_down(conn);
898 1019
899 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); 1020 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
900 if (ret) { 1021 if (ret) {
@@ -938,7 +1059,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
938 rds_stats_inc(s_send_queued); 1059 rds_stats_inc(s_send_queued);
939 1060
940 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) 1061 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
941 rds_send_worker(&conn->c_send_w.work); 1062 rds_send_xmit(conn);
942 1063
943 rds_message_put(rm); 1064 rds_message_put(rm);
944 return payload_len; 1065 return payload_len;
@@ -966,20 +1087,15 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
966 int ret = 0; 1087 int ret = 0;
967 1088
968 rm = rds_message_alloc(0, GFP_ATOMIC); 1089 rm = rds_message_alloc(0, GFP_ATOMIC);
969 if (rm == NULL) { 1090 if (!rm) {
970 ret = -ENOMEM; 1091 ret = -ENOMEM;
971 goto out; 1092 goto out;
972 } 1093 }
973 1094
974 rm->m_daddr = conn->c_faddr; 1095 rm->m_daddr = conn->c_faddr;
1096 rm->data.op_active = 1;
975 1097
976 /* If the connection is down, trigger a connect. We may 1098 rds_conn_connect_if_down(conn);
977 * have scheduled a delayed reconnect however - in this case
978 * we should not interfere.
979 */
980 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
981 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
982 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
983 1099
984 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); 1100 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
985 if (ret) 1101 if (ret)
@@ -999,7 +1115,9 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
999 rds_stats_inc(s_send_queued); 1115 rds_stats_inc(s_send_queued);
1000 rds_stats_inc(s_send_pong); 1116 rds_stats_inc(s_send_pong);
1001 1117
1002 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 1118 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1119 rds_send_xmit(conn);
1120
1003 rds_message_put(rm); 1121 rds_message_put(rm);
1004 return 0; 1122 return 0;
1005 1123
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 7598eb07cfb1..10c759ccac0c 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -57,8 +57,8 @@ static const char *const rds_stat_names[] = {
57 "recv_ping", 57 "recv_ping",
58 "send_queue_empty", 58 "send_queue_empty",
59 "send_queue_full", 59 "send_queue_full",
60 "send_sem_contention", 60 "send_lock_contention",
61 "send_sem_queue_raced", 61 "send_lock_queue_raced",
62 "send_immediate_retry", 62 "send_immediate_retry",
63 "send_delayed_retry", 63 "send_delayed_retry",
64 "send_drop_acked", 64 "send_drop_acked",
@@ -143,7 +143,7 @@ void rds_stats_exit(void)
143 rds_info_deregister_func(RDS_INFO_COUNTERS, rds_stats_info); 143 rds_info_deregister_func(RDS_INFO_COUNTERS, rds_stats_info);
144} 144}
145 145
146int __init rds_stats_init(void) 146int rds_stats_init(void)
147{ 147{
148 rds_info_register_func(RDS_INFO_COUNTERS, rds_stats_info); 148 rds_info_register_func(RDS_INFO_COUNTERS, rds_stats_info);
149 return 0; 149 return 0;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 7829a20325d3..25ad0c77a26c 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -105,13 +105,13 @@ void rds_sysctl_exit(void)
105 unregister_sysctl_table(rds_sysctl_reg_table); 105 unregister_sysctl_table(rds_sysctl_reg_table);
106} 106}
107 107
108int __init rds_sysctl_init(void) 108int rds_sysctl_init(void)
109{ 109{
110 rds_sysctl_reconnect_min = msecs_to_jiffies(1); 110 rds_sysctl_reconnect_min = msecs_to_jiffies(1);
111 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; 111 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
112 112
113 rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table); 113 rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table);
114 if (rds_sysctl_reg_table == NULL) 114 if (!rds_sysctl_reg_table)
115 return -ENOMEM; 115 return -ENOMEM;
116 return 0; 116 return 0;
117} 117}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index babf4577ff7d..eeb08e6ab96b 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -200,7 +200,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
200 struct rds_tcp_connection *tc; 200 struct rds_tcp_connection *tc;
201 201
202 tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); 202 tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
203 if (tc == NULL) 203 if (!tc)
204 return -ENOMEM; 204 return -ENOMEM;
205 205
206 tc->t_sock = NULL; 206 tc->t_sock = NULL;
@@ -258,7 +258,6 @@ struct rds_transport rds_tcp_transport = {
258 .laddr_check = rds_tcp_laddr_check, 258 .laddr_check = rds_tcp_laddr_check,
259 .xmit_prepare = rds_tcp_xmit_prepare, 259 .xmit_prepare = rds_tcp_xmit_prepare,
260 .xmit_complete = rds_tcp_xmit_complete, 260 .xmit_complete = rds_tcp_xmit_complete,
261 .xmit_cong_map = rds_tcp_xmit_cong_map,
262 .xmit = rds_tcp_xmit, 261 .xmit = rds_tcp_xmit,
263 .recv = rds_tcp_recv, 262 .recv = rds_tcp_recv,
264 .conn_alloc = rds_tcp_conn_alloc, 263 .conn_alloc = rds_tcp_conn_alloc,
@@ -266,7 +265,6 @@ struct rds_transport rds_tcp_transport = {
266 .conn_connect = rds_tcp_conn_connect, 265 .conn_connect = rds_tcp_conn_connect,
267 .conn_shutdown = rds_tcp_conn_shutdown, 266 .conn_shutdown = rds_tcp_conn_shutdown,
268 .inc_copy_to_user = rds_tcp_inc_copy_to_user, 267 .inc_copy_to_user = rds_tcp_inc_copy_to_user,
269 .inc_purge = rds_tcp_inc_purge,
270 .inc_free = rds_tcp_inc_free, 268 .inc_free = rds_tcp_inc_free,
271 .stats_info_copy = rds_tcp_stats_info_copy, 269 .stats_info_copy = rds_tcp_stats_info_copy,
272 .exit = rds_tcp_exit, 270 .exit = rds_tcp_exit,
@@ -276,14 +274,14 @@ struct rds_transport rds_tcp_transport = {
276 .t_prefer_loopback = 1, 274 .t_prefer_loopback = 1,
277}; 275};
278 276
279int __init rds_tcp_init(void) 277int rds_tcp_init(void)
280{ 278{
281 int ret; 279 int ret;
282 280
283 rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection", 281 rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
284 sizeof(struct rds_tcp_connection), 282 sizeof(struct rds_tcp_connection),
285 0, 0, NULL); 283 0, 0, NULL);
286 if (rds_tcp_conn_slab == NULL) { 284 if (!rds_tcp_conn_slab) {
287 ret = -ENOMEM; 285 ret = -ENOMEM;
288 goto out; 286 goto out;
289 } 287 }
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 844fa6b9cf5a..f5e6f7bebb50 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -43,7 +43,7 @@ struct rds_tcp_statistics {
43}; 43};
44 44
45/* tcp.c */ 45/* tcp.c */
46int __init rds_tcp_init(void); 46int rds_tcp_init(void);
47void rds_tcp_exit(void); 47void rds_tcp_exit(void);
48void rds_tcp_tune(struct socket *sock); 48void rds_tcp_tune(struct socket *sock);
49void rds_tcp_nonagle(struct socket *sock); 49void rds_tcp_nonagle(struct socket *sock);
@@ -61,16 +61,15 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn);
61void rds_tcp_state_change(struct sock *sk); 61void rds_tcp_state_change(struct sock *sk);
62 62
63/* tcp_listen.c */ 63/* tcp_listen.c */
64int __init rds_tcp_listen_init(void); 64int rds_tcp_listen_init(void);
65void rds_tcp_listen_stop(void); 65void rds_tcp_listen_stop(void);
66void rds_tcp_listen_data_ready(struct sock *sk, int bytes); 66void rds_tcp_listen_data_ready(struct sock *sk, int bytes);
67 67
68/* tcp_recv.c */ 68/* tcp_recv.c */
69int __init rds_tcp_recv_init(void); 69int rds_tcp_recv_init(void);
70void rds_tcp_recv_exit(void); 70void rds_tcp_recv_exit(void);
71void rds_tcp_data_ready(struct sock *sk, int bytes); 71void rds_tcp_data_ready(struct sock *sk, int bytes);
72int rds_tcp_recv(struct rds_connection *conn); 72int rds_tcp_recv(struct rds_connection *conn);
73void rds_tcp_inc_purge(struct rds_incoming *inc);
74void rds_tcp_inc_free(struct rds_incoming *inc); 73void rds_tcp_inc_free(struct rds_incoming *inc);
75int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 74int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
76 size_t size); 75 size_t size);
@@ -81,8 +80,6 @@ void rds_tcp_xmit_complete(struct rds_connection *conn);
81int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, 80int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
82 unsigned int hdr_off, unsigned int sg, unsigned int off); 81 unsigned int hdr_off, unsigned int sg, unsigned int off);
83void rds_tcp_write_space(struct sock *sk); 82void rds_tcp_write_space(struct sock *sk);
84int rds_tcp_xmit_cong_map(struct rds_connection *conn,
85 struct rds_cong_map *map, unsigned long offset);
86 83
87/* tcp_stats.c */ 84/* tcp_stats.c */
88DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats); 85DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c397524c039c..a65ee78db0c5 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -45,7 +45,7 @@ void rds_tcp_state_change(struct sock *sk)
45 45
46 read_lock(&sk->sk_callback_lock); 46 read_lock(&sk->sk_callback_lock);
47 conn = sk->sk_user_data; 47 conn = sk->sk_user_data;
48 if (conn == NULL) { 48 if (!conn) {
49 state_change = sk->sk_state_change; 49 state_change = sk->sk_state_change;
50 goto out; 50 goto out;
51 } 51 }
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 975183fe6950..ae27869dfc21 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -116,7 +116,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
116 116
117 read_lock(&sk->sk_callback_lock); 117 read_lock(&sk->sk_callback_lock);
118 ready = sk->sk_user_data; 118 ready = sk->sk_user_data;
119 if (ready == NULL) { /* check for teardown race */ 119 if (!ready) { /* check for teardown race */
120 ready = sk->sk_data_ready; 120 ready = sk->sk_data_ready;
121 goto out; 121 goto out;
122 } 122 }
@@ -135,7 +135,7 @@ out:
135 ready(sk, bytes); 135 ready(sk, bytes);
136} 136}
137 137
138int __init rds_tcp_listen_init(void) 138int rds_tcp_listen_init(void)
139{ 139{
140 struct sockaddr_in sin; 140 struct sockaddr_in sin;
141 struct socket *sock = NULL; 141 struct socket *sock = NULL;
@@ -178,7 +178,7 @@ void rds_tcp_listen_stop(void)
178 struct socket *sock = rds_tcp_listen_sock; 178 struct socket *sock = rds_tcp_listen_sock;
179 struct sock *sk; 179 struct sock *sk;
180 180
181 if (sock == NULL) 181 if (!sock)
182 return; 182 return;
183 183
184 sk = sock->sk; 184 sk = sock->sk;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 1aba6878fa5d..7017f3af80b6 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -39,7 +39,7 @@
39 39
40static struct kmem_cache *rds_tcp_incoming_slab; 40static struct kmem_cache *rds_tcp_incoming_slab;
41 41
42void rds_tcp_inc_purge(struct rds_incoming *inc) 42static void rds_tcp_inc_purge(struct rds_incoming *inc)
43{ 43{
44 struct rds_tcp_incoming *tinc; 44 struct rds_tcp_incoming *tinc;
45 tinc = container_of(inc, struct rds_tcp_incoming, ti_inc); 45 tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
@@ -190,10 +190,10 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
190 * processing. 190 * processing.
191 */ 191 */
192 while (left) { 192 while (left) {
193 if (tinc == NULL) { 193 if (!tinc) {
194 tinc = kmem_cache_alloc(rds_tcp_incoming_slab, 194 tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
195 arg->gfp); 195 arg->gfp);
196 if (tinc == NULL) { 196 if (!tinc) {
197 desc->error = -ENOMEM; 197 desc->error = -ENOMEM;
198 goto out; 198 goto out;
199 } 199 }
@@ -229,7 +229,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
229 229
230 if (left && tc->t_tinc_data_rem) { 230 if (left && tc->t_tinc_data_rem) {
231 clone = skb_clone(skb, arg->gfp); 231 clone = skb_clone(skb, arg->gfp);
232 if (clone == NULL) { 232 if (!clone) {
233 desc->error = -ENOMEM; 233 desc->error = -ENOMEM;
234 goto out; 234 goto out;
235 } 235 }
@@ -326,7 +326,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
326 326
327 read_lock(&sk->sk_callback_lock); 327 read_lock(&sk->sk_callback_lock);
328 conn = sk->sk_user_data; 328 conn = sk->sk_user_data;
329 if (conn == NULL) { /* check for teardown race */ 329 if (!conn) { /* check for teardown race */
330 ready = sk->sk_data_ready; 330 ready = sk->sk_data_ready;
331 goto out; 331 goto out;
332 } 332 }
@@ -342,12 +342,12 @@ out:
342 ready(sk, bytes); 342 ready(sk, bytes);
343} 343}
344 344
345int __init rds_tcp_recv_init(void) 345int rds_tcp_recv_init(void)
346{ 346{
347 rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming", 347 rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
348 sizeof(struct rds_tcp_incoming), 348 sizeof(struct rds_tcp_incoming),
349 0, 0, NULL); 349 0, 0, NULL);
350 if (rds_tcp_incoming_slab == NULL) 350 if (!rds_tcp_incoming_slab)
351 return -ENOMEM; 351 return -ENOMEM;
352 return 0; 352 return 0;
353} 353}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index a28b895ff0d1..2979fb4a4b9a 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -77,56 +77,6 @@ int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
77} 77}
78 78
79/* the core send_sem serializes this with other xmit and shutdown */ 79/* the core send_sem serializes this with other xmit and shutdown */
80int rds_tcp_xmit_cong_map(struct rds_connection *conn,
81 struct rds_cong_map *map, unsigned long offset)
82{
83 static struct rds_header rds_tcp_map_header = {
84 .h_flags = RDS_FLAG_CONG_BITMAP,
85 };
86 struct rds_tcp_connection *tc = conn->c_transport_data;
87 unsigned long i;
88 int ret;
89 int copied = 0;
90
91 /* Some problem claims cpu_to_be32(constant) isn't a constant. */
92 rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);
93
94 if (offset < sizeof(struct rds_header)) {
95 ret = rds_tcp_sendmsg(tc->t_sock,
96 (void *)&rds_tcp_map_header + offset,
97 sizeof(struct rds_header) - offset);
98 if (ret <= 0)
99 return ret;
100 offset += ret;
101 copied = ret;
102 if (offset < sizeof(struct rds_header))
103 return ret;
104 }
105
106 offset -= sizeof(struct rds_header);
107 i = offset / PAGE_SIZE;
108 offset = offset % PAGE_SIZE;
109 BUG_ON(i >= RDS_CONG_MAP_PAGES);
110
111 do {
112 ret = tc->t_sock->ops->sendpage(tc->t_sock,
113 virt_to_page(map->m_page_addrs[i]),
114 offset, PAGE_SIZE - offset,
115 MSG_DONTWAIT);
116 if (ret <= 0)
117 break;
118 copied += ret;
119 offset += ret;
120 if (offset == PAGE_SIZE) {
121 offset = 0;
122 i++;
123 }
124 } while (i < RDS_CONG_MAP_PAGES);
125
126 return copied ? copied : ret;
127}
128
129/* the core send_sem serializes this with other xmit and shutdown */
130int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, 80int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
131 unsigned int hdr_off, unsigned int sg, unsigned int off) 81 unsigned int hdr_off, unsigned int sg, unsigned int off)
132{ 82{
@@ -166,21 +116,21 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
166 goto out; 116 goto out;
167 } 117 }
168 118
169 while (sg < rm->m_nents) { 119 while (sg < rm->data.op_nents) {
170 ret = tc->t_sock->ops->sendpage(tc->t_sock, 120 ret = tc->t_sock->ops->sendpage(tc->t_sock,
171 sg_page(&rm->m_sg[sg]), 121 sg_page(&rm->data.op_sg[sg]),
172 rm->m_sg[sg].offset + off, 122 rm->data.op_sg[sg].offset + off,
173 rm->m_sg[sg].length - off, 123 rm->data.op_sg[sg].length - off,
174 MSG_DONTWAIT|MSG_NOSIGNAL); 124 MSG_DONTWAIT|MSG_NOSIGNAL);
175 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]), 125 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
176 rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off, 126 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
177 ret); 127 ret);
178 if (ret <= 0) 128 if (ret <= 0)
179 break; 129 break;
180 130
181 off += ret; 131 off += ret;
182 done += ret; 132 done += ret;
183 if (off == rm->m_sg[sg].length) { 133 if (off == rm->data.op_sg[sg].length) {
184 off = 0; 134 off = 0;
185 sg++; 135 sg++;
186 } 136 }
@@ -226,7 +176,7 @@ void rds_tcp_write_space(struct sock *sk)
226 176
227 read_lock(&sk->sk_callback_lock); 177 read_lock(&sk->sk_callback_lock);
228 conn = sk->sk_user_data; 178 conn = sk->sk_user_data;
229 if (conn == NULL) { 179 if (!conn) {
230 write_space = sk->sk_write_space; 180 write_space = sk->sk_write_space;
231 goto out; 181 goto out;
232 } 182 }
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 786c20eaaf5e..0fd90f8c5f59 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -61,7 +61,7 @@
61 * 61 *
62 * Transition to state DISCONNECTING/DOWN: 62 * Transition to state DISCONNECTING/DOWN:
63 * - Inside the shutdown worker; synchronizes with xmit path 63 * - Inside the shutdown worker; synchronizes with xmit path
64 * through c_send_lock, and with connection management callbacks 64 * through RDS_IN_XMIT, and with connection management callbacks
65 * via c_cm_lock. 65 * via c_cm_lock.
66 * 66 *
67 * For receive callbacks, we rely on the underlying transport 67 * For receive callbacks, we rely on the underlying transport
@@ -110,7 +110,7 @@ EXPORT_SYMBOL_GPL(rds_connect_complete);
110 * We should *always* start with a random backoff; otherwise a broken connection 110 * We should *always* start with a random backoff; otherwise a broken connection
111 * will always take several iterations to be re-established. 111 * will always take several iterations to be re-established.
112 */ 112 */
113static void rds_queue_reconnect(struct rds_connection *conn) 113void rds_queue_reconnect(struct rds_connection *conn)
114{ 114{
115 unsigned long rand; 115 unsigned long rand;
116 116
@@ -156,58 +156,6 @@ void rds_connect_worker(struct work_struct *work)
156 } 156 }
157} 157}
158 158
159void rds_shutdown_worker(struct work_struct *work)
160{
161 struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
162
163 /* shut it down unless it's down already */
164 if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
165 /*
166 * Quiesce the connection mgmt handlers before we start tearing
167 * things down. We don't hold the mutex for the entire
168 * duration of the shutdown operation, else we may be
169 * deadlocking with the CM handler. Instead, the CM event
170 * handler is supposed to check for state DISCONNECTING
171 */
172 mutex_lock(&conn->c_cm_lock);
173 if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
174 !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
175 rds_conn_error(conn, "shutdown called in state %d\n",
176 atomic_read(&conn->c_state));
177 mutex_unlock(&conn->c_cm_lock);
178 return;
179 }
180 mutex_unlock(&conn->c_cm_lock);
181
182 mutex_lock(&conn->c_send_lock);
183 conn->c_trans->conn_shutdown(conn);
184 rds_conn_reset(conn);
185 mutex_unlock(&conn->c_send_lock);
186
187 if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
 188 /* This can happen - e.g. when we're in the middle of tearing
 189 * down the connection, and someone unloads the rds module.
 190 * Quite reproducible with loopback connections.
191 * Mostly harmless.
192 */
193 rds_conn_error(conn,
194 "%s: failed to transition to state DOWN, "
195 "current state is %d\n",
196 __func__,
197 atomic_read(&conn->c_state));
198 return;
199 }
200 }
201
202 /* Then reconnect if it's still live.
203 * The passive side of an IB loopback connection is never added
204 * to the conn hash, so we never trigger a reconnect on this
205 * conn - the reconnect is always triggered by the active peer. */
206 cancel_delayed_work(&conn->c_conn_w);
207 if (!hlist_unhashed(&conn->c_hash_node))
208 rds_queue_reconnect(conn);
209}
210
211void rds_send_worker(struct work_struct *work) 159void rds_send_worker(struct work_struct *work)
212{ 160{
213 struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work); 161 struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
@@ -252,15 +200,22 @@ void rds_recv_worker(struct work_struct *work)
252 } 200 }
253} 201}
254 202
203void rds_shutdown_worker(struct work_struct *work)
204{
205 struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
206
207 rds_conn_shutdown(conn);
208}
209
255void rds_threads_exit(void) 210void rds_threads_exit(void)
256{ 211{
257 destroy_workqueue(rds_wq); 212 destroy_workqueue(rds_wq);
258} 213}
259 214
260int __init rds_threads_init(void) 215int rds_threads_init(void)
261{ 216{
262 rds_wq = create_workqueue("krdsd"); 217 rds_wq = create_singlethread_workqueue("krdsd");
263 if (rds_wq == NULL) 218 if (!rds_wq)
264 return -ENOMEM; 219 return -ENOMEM;
265 220
266 return 0; 221 return 0;
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 7e1067901353..7f2ac4fec367 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -71,19 +71,28 @@ void rds_trans_unregister(struct rds_transport *trans)
71} 71}
72EXPORT_SYMBOL_GPL(rds_trans_unregister); 72EXPORT_SYMBOL_GPL(rds_trans_unregister);
73 73
74void rds_trans_put(struct rds_transport *trans)
75{
76 if (trans && trans->t_owner)
77 module_put(trans->t_owner);
78}
79
74struct rds_transport *rds_trans_get_preferred(__be32 addr) 80struct rds_transport *rds_trans_get_preferred(__be32 addr)
75{ 81{
76 struct rds_transport *ret = NULL; 82 struct rds_transport *ret = NULL;
77 int i; 83 struct rds_transport *trans;
84 unsigned int i;
78 85
79 if (IN_LOOPBACK(ntohl(addr))) 86 if (IN_LOOPBACK(ntohl(addr)))
80 return &rds_loop_transport; 87 return &rds_loop_transport;
81 88
82 down_read(&rds_trans_sem); 89 down_read(&rds_trans_sem);
83 for (i = 0; i < RDS_TRANS_COUNT; i++) 90 for (i = 0; i < RDS_TRANS_COUNT; i++) {
84 { 91 trans = transports[i];
85 if (transports[i] && (transports[i]->laddr_check(addr) == 0)) { 92
86 ret = transports[i]; 93 if (trans && (trans->laddr_check(addr) == 0) &&
94 (!trans->t_owner || try_module_get(trans->t_owner))) {
95 ret = trans;
87 break; 96 break;
88 } 97 }
89 } 98 }
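With rds_trans_put() added above, rds_trans_get_preferred() now returns a transport holding a module reference (taken via try_module_get()), which the caller must drop when done. A sketch of the intended pairing (the wrapper name is illustrative):

	static int example_use_transport(__be32 addr)
	{
		struct rds_transport *trans = rds_trans_get_preferred(addr);

		if (!trans)
			return -EADDRNOTAVAIL;
		/* ... use trans; its module cannot unload under us ... */
		rds_trans_put(trans);	/* drop the try_module_get() ref */
		return 0;
	}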
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
new file mode 100644
index 000000000000..e6b5190daddd
--- /dev/null
+++ b/net/rds/xlist.h
@@ -0,0 +1,80 @@
1#ifndef _LINUX_XLIST_H
2#define _LINUX_XLIST_H
3
4#include <linux/stddef.h>
5#include <linux/poison.h>
6#include <linux/prefetch.h>
7#include <asm/system.h>
8
9struct xlist_head {
10 struct xlist_head *next;
11};
12
13static inline void INIT_XLIST_HEAD(struct xlist_head *list)
14{
15 list->next = NULL;
16}
17
18static inline int xlist_empty(struct xlist_head *head)
19{
20 return head->next == NULL;
21}
22
23static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
24 struct xlist_head *head)
25{
26 struct xlist_head *cur;
27 struct xlist_head *check;
28
29 while (1) {
30 cur = head->next;
31 tail->next = cur;
32 check = cmpxchg(&head->next, cur, new);
33 if (check == cur)
34 break;
35 }
36}
37
38static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
39{
40 struct xlist_head *cur;
41 struct xlist_head *check;
42 struct xlist_head *next;
43
44 while (1) {
45 cur = head->next;
46 if (!cur)
47 goto out;
48
49 next = cur->next;
50 check = cmpxchg(&head->next, cur, next);
51 if (check == cur)
52 goto out;
53 }
54out:
55 return cur;
56}
57
58static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
59{
60 struct xlist_head *cur;
61
62 cur = head->next;
63 if (!cur)
64 return NULL;
65
66 head->next = cur->next;
67 return cur;
68}
69
70static inline void xlist_splice(struct xlist_head *list,
71 struct xlist_head *head)
72{
73 struct xlist_head *cur;
74
75 WARN_ON(head->next);
76 cur = xchg(&list->next, NULL);
77 head->next = cur;
78}
79
80#endif
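xlist is a cmpxchg-based lock-free LIFO: xlist_add() pushes a chain (new..tail) onto the head, and xlist_del_head() pops a single node. A minimal usage sketch against the header above (the node type and helper names are illustrative):

	#include <linux/kernel.h>	/* container_of() */

	struct example_node {
		struct xlist_head x;
		int value;
	};

	static void example_push(struct xlist_head *stack, struct example_node *n)
	{
		/* a single node is a chain whose head and tail coincide */
		xlist_add(&n->x, &n->x, stack);
	}

	static struct example_node *example_pop(struct xlist_head *stack)
	{
		struct xlist_head *x = xlist_del_head(stack);

		return x ? container_of(x, struct example_node, x) : NULL;
	}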
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2f691fb180d1..a36270a994d7 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -518,6 +518,16 @@ config NET_ACT_SKBEDIT
518 To compile this code as a module, choose M here: the 518 To compile this code as a module, choose M here: the
519 module will be called act_skbedit. 519 module will be called act_skbedit.
520 520
521config NET_ACT_CSUM
522 tristate "Checksum Updating"
523 depends on NET_CLS_ACT && INET
524 ---help---
 525 Say Y here to update some common checksums after direct
 526 packet alterations.
527
528 To compile this code as a module, choose M here: the
529 module will be called act_csum.
530
521config NET_CLS_IND 531config NET_CLS_IND
522 bool "Incoming device classification" 532 bool "Incoming device classification"
523 depends on NET_CLS_U32 || NET_CLS_FW 533 depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index f14e71bfa58f..960f5dba6304 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_ACT_NAT) += act_nat.o
15obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o 15obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
16obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o 16obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
17obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o 17obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
18obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
18obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o 19obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
19obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o 20obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
20obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o 21obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
new file mode 100644
index 000000000000..67dc7ce9b63a
--- /dev/null
+++ b/net/sched/act_csum.c
@@ -0,0 +1,595 @@
1/*
2 * Checksum updating actions
3 *
4 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/types.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18
19#include <linux/netlink.h>
20#include <net/netlink.h>
21#include <linux/rtnetlink.h>
22
23#include <linux/skbuff.h>
24
25#include <net/ip.h>
26#include <net/ipv6.h>
27#include <net/icmp.h>
28#include <linux/icmpv6.h>
29#include <linux/igmp.h>
30#include <net/tcp.h>
31#include <net/udp.h>
32#include <net/ip6_checksum.h>
33
34#include <net/act_api.h>
35
36#include <linux/tc_act/tc_csum.h>
37#include <net/tc_act/tc_csum.h>
38
39#define CSUM_TAB_MASK 15
40static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
41static u32 csum_idx_gen;
42static DEFINE_RWLOCK(csum_lock);
43
44static struct tcf_hashinfo csum_hash_info = {
45 .htab = tcf_csum_ht,
46 .hmask = CSUM_TAB_MASK,
47 .lock = &csum_lock,
48};
49
50static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
51 [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
52};
53
54static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
55 struct tc_action *a, int ovr, int bind)
56{
57 struct nlattr *tb[TCA_CSUM_MAX + 1];
58 struct tc_csum *parm;
59 struct tcf_common *pc;
60 struct tcf_csum *p;
61 int ret = 0, err;
62
63 if (nla == NULL)
64 return -EINVAL;
65
 66 err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
67 if (err < 0)
68 return err;
69
70 if (tb[TCA_CSUM_PARMS] == NULL)
71 return -EINVAL;
72 parm = nla_data(tb[TCA_CSUM_PARMS]);
73
74 pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
75 if (!pc) {
76 pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
77 &csum_idx_gen, &csum_hash_info);
78 if (IS_ERR(pc))
79 return PTR_ERR(pc);
80 p = to_tcf_csum(pc);
81 ret = ACT_P_CREATED;
82 } else {
83 p = to_tcf_csum(pc);
84 if (!ovr) {
85 tcf_hash_release(pc, bind, &csum_hash_info);
86 return -EEXIST;
87 }
88 }
89
90 spin_lock_bh(&p->tcf_lock);
91 p->tcf_action = parm->action;
92 p->update_flags = parm->update_flags;
93 spin_unlock_bh(&p->tcf_lock);
94
95 if (ret == ACT_P_CREATED)
96 tcf_hash_insert(pc, &csum_hash_info);
97
98 return ret;
99}
100
101static int tcf_csum_cleanup(struct tc_action *a, int bind)
102{
103 struct tcf_csum *p = a->priv;
104 return tcf_hash_release(&p->common, bind, &csum_hash_info);
105}
106
107/**
108 * tcf_csum_skb_nextlayer - Get next layer pointer
109 * @skb: sk_buff to use
110 * @ihl: previous summed headers length
111 * @ipl: complete packet length
112 * @jhl: next header length
113 *
 114 * Check that the expected next layer is available in the specified sk_buff.
 115 * Return the next layer pointer if the checks pass, NULL otherwise.
116 */
117static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
118 unsigned int ihl, unsigned int ipl,
119 unsigned int jhl)
120{
121 int ntkoff = skb_network_offset(skb);
122 int hl = ihl + jhl;
123
124 if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
125 (skb_cloned(skb) &&
126 !skb_clone_writable(skb, hl + ntkoff) &&
127 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
128 return NULL;
129 else
130 return (void *)(skb_network_header(skb) + ihl);
131}
132
133static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
134 unsigned int ihl, unsigned int ipl)
135{
136 struct icmphdr *icmph;
137
138 icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
139 if (icmph == NULL)
140 return 0;
141
142 icmph->checksum = 0;
143 skb->csum = csum_partial(icmph, ipl - ihl, 0);
144 icmph->checksum = csum_fold(skb->csum);
145
146 skb->ip_summed = CHECKSUM_NONE;
147
148 return 1;
149}
150
151static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
152 unsigned int ihl, unsigned int ipl)
153{
154 struct igmphdr *igmph;
155
156 igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
157 if (igmph == NULL)
158 return 0;
159
160 igmph->csum = 0;
161 skb->csum = csum_partial(igmph, ipl - ihl, 0);
162 igmph->csum = csum_fold(skb->csum);
163
164 skb->ip_summed = CHECKSUM_NONE;
165
166 return 1;
167}
168
169static int tcf_csum_ipv6_icmp(struct sk_buff *skb, struct ipv6hdr *ip6h,
170 unsigned int ihl, unsigned int ipl)
171{
172 struct icmp6hdr *icmp6h;
173
174 icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
175 if (icmp6h == NULL)
176 return 0;
177
178 icmp6h->icmp6_cksum = 0;
179 skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
180 icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
181 ipl - ihl, IPPROTO_ICMPV6,
182 skb->csum);
183
184 skb->ip_summed = CHECKSUM_NONE;
185
186 return 1;
187}
188
189static int tcf_csum_ipv4_tcp(struct sk_buff *skb, struct iphdr *iph,
190 unsigned int ihl, unsigned int ipl)
191{
192 struct tcphdr *tcph;
193
194 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
195 if (tcph == NULL)
196 return 0;
197
198 tcph->check = 0;
199 skb->csum = csum_partial(tcph, ipl - ihl, 0);
200 tcph->check = tcp_v4_check(ipl - ihl,
201 iph->saddr, iph->daddr, skb->csum);
202
203 skb->ip_summed = CHECKSUM_NONE;
204
205 return 1;
206}
207
208static int tcf_csum_ipv6_tcp(struct sk_buff *skb, struct ipv6hdr *ip6h,
209 unsigned int ihl, unsigned int ipl)
210{
211 struct tcphdr *tcph;
212
213 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
214 if (tcph == NULL)
215 return 0;
216
217 tcph->check = 0;
218 skb->csum = csum_partial(tcph, ipl - ihl, 0);
219 tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
220 ipl - ihl, IPPROTO_TCP,
221 skb->csum);
222
223 skb->ip_summed = CHECKSUM_NONE;
224
225 return 1;
226}
227
228static int tcf_csum_ipv4_udp(struct sk_buff *skb, struct iphdr *iph,
229 unsigned int ihl, unsigned int ipl, int udplite)
230{
231 struct udphdr *udph;
232 u16 ul;
233
234 /*
 235 * Support both UDP and UDPLITE checksum algorithms. Don't use
 236 * udph->len to get the real length without a protocol check;
 237 * UDPLITE uses udph->len for something else.
 238 * Use iph->tot_len, or just ipl.
239 */
240
241 udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
242 if (udph == NULL)
243 return 0;
244
245 ul = ntohs(udph->len);
246
247 if (udplite || udph->check) {
248
249 udph->check = 0;
250
251 if (udplite) {
252 if (ul == 0)
253 skb->csum = csum_partial(udph, ipl - ihl, 0);
254 else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
255 skb->csum = csum_partial(udph, ul, 0);
256 else
257 goto ignore_obscure_skb;
258 } else {
259 if (ul != ipl - ihl)
260 goto ignore_obscure_skb;
261
262 skb->csum = csum_partial(udph, ul, 0);
263 }
264
265 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
266 ul, iph->protocol,
267 skb->csum);
268
269 if (!udph->check)
270 udph->check = CSUM_MANGLED_0;
271 }
272
273 skb->ip_summed = CHECKSUM_NONE;
274
275ignore_obscure_skb:
276 return 1;
277}
278
279static int tcf_csum_ipv6_udp(struct sk_buff *skb, struct ipv6hdr *ip6h,
280 unsigned int ihl, unsigned int ipl, int udplite)
281{
282 struct udphdr *udph;
283 u16 ul;
284
285 /*
 286 * Support both UDP and UDPLITE checksum algorithms. Don't use
 287 * udph->len to get the real length without a protocol check;
 288 * UDPLITE uses udph->len for something else.
 289 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
290 */
291
292 udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
293 if (udph == NULL)
294 return 0;
295
296 ul = ntohs(udph->len);
297
298 udph->check = 0;
299
300 if (udplite) {
301 if (ul == 0)
302 skb->csum = csum_partial(udph, ipl - ihl, 0);
303
304 else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
305 skb->csum = csum_partial(udph, ul, 0);
306
307 else
308 goto ignore_obscure_skb;
309 } else {
310 if (ul != ipl - ihl)
311 goto ignore_obscure_skb;
312
313 skb->csum = csum_partial(udph, ul, 0);
314 }
315
316 udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
317 udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
318 skb->csum);
319
320 if (!udph->check)
321 udph->check = CSUM_MANGLED_0;
322
323 skb->ip_summed = CHECKSUM_NONE;
324
325ignore_obscure_skb:
326 return 1;
327}
328
329static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
330{
331 struct iphdr *iph;
332 int ntkoff;
333
334 ntkoff = skb_network_offset(skb);
335
336 if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
337 goto fail;
338
339 iph = ip_hdr(skb);
340
341 switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
342 case IPPROTO_ICMP:
343 if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
344 if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
345 ntohs(iph->tot_len)))
346 goto fail;
347 break;
348 case IPPROTO_IGMP:
349 if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
350 if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
351 ntohs(iph->tot_len)))
352 goto fail;
353 break;
354 case IPPROTO_TCP:
355 if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
356 if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
357 ntohs(iph->tot_len)))
358 goto fail;
359 break;
360 case IPPROTO_UDP:
361 if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
362 if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
363 ntohs(iph->tot_len), 0))
364 goto fail;
365 break;
366 case IPPROTO_UDPLITE:
367 if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
368 if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
369 ntohs(iph->tot_len), 1))
370 goto fail;
371 break;
372 }
373
374 if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
375 if (skb_cloned(skb) &&
376 !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
377 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
378 goto fail;
379
380 ip_send_check(iph);
381 }
382
383 return 1;
384
385fail:
386 return 0;
387}
388
389static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
390 unsigned int ixhl, unsigned int *pl)
391{
392 int off, len, optlen;
393 unsigned char *xh = (void *)ip6xh;
394
395 off = sizeof(*ip6xh);
396 len = ixhl - off;
397
398 while (len > 1) {
399 switch (xh[off]) {
400 case IPV6_TLV_PAD0:
401 optlen = 1;
402 break;
403 case IPV6_TLV_JUMBO:
404 optlen = xh[off + 1] + 2;
405 if (optlen != 6 || len < 6 || (off & 3) != 2)
406 /* wrong jumbo option length/alignment */
407 return 0;
408 *pl = ntohl(*(__be32 *)(xh + off + 2));
409 goto done;
410 default:
411 optlen = xh[off + 1] + 2;
412 if (optlen > len)
413 /* ignore obscure options */
414 goto done;
415 break;
416 }
417 off += optlen;
418 len -= optlen;
419 }
420
421done:
422 return 1;
423}
424
425static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
426{
427 struct ipv6hdr *ip6h;
428 struct ipv6_opt_hdr *ip6xh;
429 unsigned int hl, ixhl;
430 unsigned int pl;
431 int ntkoff;
432 u8 nexthdr;
433
434 ntkoff = skb_network_offset(skb);
435
436 hl = sizeof(*ip6h);
437
438 if (!pskb_may_pull(skb, hl + ntkoff))
439 goto fail;
440
441 ip6h = ipv6_hdr(skb);
442
443 pl = ntohs(ip6h->payload_len);
444 nexthdr = ip6h->nexthdr;
445
446 do {
447 switch (nexthdr) {
448 case NEXTHDR_FRAGMENT:
449 goto ignore_skb;
450 case NEXTHDR_ROUTING:
451 case NEXTHDR_HOP:
452 case NEXTHDR_DEST:
453 if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
454 goto fail;
455 ip6xh = (void *)(skb_network_header(skb) + hl);
456 ixhl = ipv6_optlen(ip6xh);
457 if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
458 goto fail;
459 if ((nexthdr == NEXTHDR_HOP) &&
460 !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
461 goto fail;
462 nexthdr = ip6xh->nexthdr;
463 hl += ixhl;
464 break;
465 case IPPROTO_ICMPV6:
466 if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
467 if (!tcf_csum_ipv6_icmp(skb, ip6h,
468 hl, pl + sizeof(*ip6h)))
469 goto fail;
470 goto done;
471 case IPPROTO_TCP:
472 if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
473 if (!tcf_csum_ipv6_tcp(skb, ip6h,
474 hl, pl + sizeof(*ip6h)))
475 goto fail;
476 goto done;
477 case IPPROTO_UDP:
478 if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
479 if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
480 pl + sizeof(*ip6h), 0))
481 goto fail;
482 goto done;
483 case IPPROTO_UDPLITE:
484 if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
485 if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
486 pl + sizeof(*ip6h), 1))
487 goto fail;
488 goto done;
489 default:
490 goto ignore_skb;
491 }
492 } while (pskb_may_pull(skb, hl + 1 + ntkoff));
493
494done:
495ignore_skb:
496 return 1;
497
498fail:
499 return 0;
500}
501
502static int tcf_csum(struct sk_buff *skb,
503 struct tc_action *a, struct tcf_result *res)
504{
505 struct tcf_csum *p = a->priv;
506 int action;
507 u32 update_flags;
508
509 spin_lock(&p->tcf_lock);
510 p->tcf_tm.lastuse = jiffies;
511 p->tcf_bstats.bytes += qdisc_pkt_len(skb);
512 p->tcf_bstats.packets++;
513 action = p->tcf_action;
514 update_flags = p->update_flags;
515 spin_unlock(&p->tcf_lock);
516
517 if (unlikely(action == TC_ACT_SHOT))
518 goto drop;
519
520 switch (skb->protocol) {
521 case cpu_to_be16(ETH_P_IP):
522 if (!tcf_csum_ipv4(skb, update_flags))
523 goto drop;
524 break;
525 case cpu_to_be16(ETH_P_IPV6):
526 if (!tcf_csum_ipv6(skb, update_flags))
527 goto drop;
528 break;
529 }
530
531 return action;
532
533drop:
534 spin_lock(&p->tcf_lock);
535 p->tcf_qstats.drops++;
536 spin_unlock(&p->tcf_lock);
537 return TC_ACT_SHOT;
538}
539
540static int tcf_csum_dump(struct sk_buff *skb,
541 struct tc_action *a, int bind, int ref)
542{
543 unsigned char *b = skb_tail_pointer(skb);
544 struct tcf_csum *p = a->priv;
545 struct tc_csum opt = {
546 .update_flags = p->update_flags,
547 .index = p->tcf_index,
548 .action = p->tcf_action,
549 .refcnt = p->tcf_refcnt - ref,
550 .bindcnt = p->tcf_bindcnt - bind,
551 };
552 struct tcf_t t;
553
554 NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
555 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
556 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
557 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
558 NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
559
560 return skb->len;
561
562nla_put_failure:
563 nlmsg_trim(skb, b);
564 return -1;
565}
566
567static struct tc_action_ops act_csum_ops = {
568 .kind = "csum",
569 .hinfo = &csum_hash_info,
570 .type = TCA_ACT_CSUM,
571 .capab = TCA_CAP_NONE,
572 .owner = THIS_MODULE,
573 .act = tcf_csum,
574 .dump = tcf_csum_dump,
575 .cleanup = tcf_csum_cleanup,
576 .lookup = tcf_hash_search,
577 .init = tcf_csum_init,
578 .walk = tcf_generic_walker
579};
580
581MODULE_DESCRIPTION("Checksum updating actions");
582MODULE_LICENSE("GPL");
583
584static int __init csum_init_module(void)
585{
586 return tcf_register_action(&act_csum_ops);
587}
588
589static void __exit csum_cleanup_module(void)
590{
591 tcf_unregister_action(&act_csum_ops);
592}
593
594module_init(csum_init_module);
595module_exit(csum_cleanup_module);
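Every per-protocol helper in act_csum follows the same recompute pattern: zero the stored checksum field, run csum_partial() over the covered bytes, then fold the 32-bit partial sum down to 16 bits, either directly with csum_fold() (ICMP, IGMP) or through a pseudo-header helper such as csum_tcpudp_magic() or csum_ipv6_magic() (TCP, UDP). The bare fold pattern in isolation (a sketch, not part of the module):

	#include <net/checksum.h>

	/* Zero-field + csum_partial() + csum_fold(), as used for ICMP/IGMP. */
	static __sum16 example_recompute(const void *payload, int len)
	{
		return csum_fold(csum_partial(payload, len, 0));
	}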
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e17096e3913c..5b271a18bc3a 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -111,44 +111,41 @@ static u32 flow_get_proto(struct sk_buff *skb)
111 } 111 }
112} 112}
113 113
114static int has_ports(u8 protocol)
115{
116 switch (protocol) {
117 case IPPROTO_TCP:
118 case IPPROTO_UDP:
119 case IPPROTO_UDPLITE:
120 case IPPROTO_SCTP:
121 case IPPROTO_DCCP:
122 case IPPROTO_ESP:
123 return 1;
124 default:
125 return 0;
126 }
127}
128
129static u32 flow_get_proto_src(struct sk_buff *skb) 114static u32 flow_get_proto_src(struct sk_buff *skb)
130{ 115{
131 switch (skb->protocol) { 116 switch (skb->protocol) {
132 case htons(ETH_P_IP): { 117 case htons(ETH_P_IP): {
133 struct iphdr *iph; 118 struct iphdr *iph;
119 int poff;
134 120
135 if (!pskb_network_may_pull(skb, sizeof(*iph))) 121 if (!pskb_network_may_pull(skb, sizeof(*iph)))
136 break; 122 break;
137 iph = ip_hdr(skb); 123 iph = ip_hdr(skb);
138 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 124 if (iph->frag_off & htons(IP_MF|IP_OFFSET))
139 has_ports(iph->protocol) && 125 break;
140 pskb_network_may_pull(skb, iph->ihl * 4 + 2)) 126 poff = proto_ports_offset(iph->protocol);
141 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); 127 if (poff >= 0 &&
128 pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
129 iph = ip_hdr(skb);
130 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
131 poff));
132 }
142 break; 133 break;
143 } 134 }
144 case htons(ETH_P_IPV6): { 135 case htons(ETH_P_IPV6): {
145 struct ipv6hdr *iph; 136 struct ipv6hdr *iph;
137 int poff;
146 138
147 if (!pskb_network_may_pull(skb, sizeof(*iph) + 2)) 139 if (!pskb_network_may_pull(skb, sizeof(*iph)))
148 break; 140 break;
149 iph = ipv6_hdr(skb); 141 iph = ipv6_hdr(skb);
150 if (has_ports(iph->nexthdr)) 142 poff = proto_ports_offset(iph->nexthdr);
151 return ntohs(*(__be16 *)&iph[1]); 143 if (poff >= 0 &&
144 pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
145 iph = ipv6_hdr(skb);
146 return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
147 poff));
148 }
152 break; 149 break;
153 } 150 }
154 } 151 }
@@ -161,24 +158,36 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
161 switch (skb->protocol) { 158 switch (skb->protocol) {
162 case htons(ETH_P_IP): { 159 case htons(ETH_P_IP): {
163 struct iphdr *iph; 160 struct iphdr *iph;
161 int poff;
164 162
165 if (!pskb_network_may_pull(skb, sizeof(*iph))) 163 if (!pskb_network_may_pull(skb, sizeof(*iph)))
166 break; 164 break;
167 iph = ip_hdr(skb); 165 iph = ip_hdr(skb);
168 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 166 if (iph->frag_off & htons(IP_MF|IP_OFFSET))
169 has_ports(iph->protocol) && 167 break;
170 pskb_network_may_pull(skb, iph->ihl * 4 + 4)) 168 poff = proto_ports_offset(iph->protocol);
171 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); 169 if (poff >= 0 &&
170 pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
171 iph = ip_hdr(skb);
172 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
173 2 + poff));
174 }
172 break; 175 break;
173 } 176 }
174 case htons(ETH_P_IPV6): { 177 case htons(ETH_P_IPV6): {
175 struct ipv6hdr *iph; 178 struct ipv6hdr *iph;
179 int poff;
176 180
177 if (!pskb_network_may_pull(skb, sizeof(*iph) + 4)) 181 if (!pskb_network_may_pull(skb, sizeof(*iph)))
178 break; 182 break;
179 iph = ipv6_hdr(skb); 183 iph = ipv6_hdr(skb);
180 if (has_ports(iph->nexthdr)) 184 poff = proto_ports_offset(iph->nexthdr);
181 return ntohs(*(__be16 *)((void *)&iph[1] + 2)); 185 if (poff >= 0 &&
186 pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
187 iph = ipv6_hdr(skb);
188 return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
189 poff + 2));
190 }
182 break; 191 break;
183 } 192 }
184 } 193 }
@@ -297,6 +306,11 @@ static u32 flow_get_vlan_tag(const struct sk_buff *skb)
297 return tag & VLAN_VID_MASK; 306 return tag & VLAN_VID_MASK;
298} 307}
299 308
309static u32 flow_get_rxhash(struct sk_buff *skb)
310{
311 return skb_get_rxhash(skb);
312}
313
300static u32 flow_key_get(struct sk_buff *skb, int key) 314static u32 flow_key_get(struct sk_buff *skb, int key)
301{ 315{
302 switch (key) { 316 switch (key) {
@@ -334,6 +348,8 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
334 return flow_get_skgid(skb); 348 return flow_get_skgid(skb);
335 case FLOW_KEY_VLAN_TAG: 349 case FLOW_KEY_VLAN_TAG:
336 return flow_get_vlan_tag(skb); 350 return flow_get_vlan_tag(skb);
351 case FLOW_KEY_RXHASH:
352 return flow_get_rxhash(skb);
337 default: 353 default:
338 WARN_ON(1); 354 WARN_ON(1);
339 return 0; 355 return 0;
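The cls_flow hunks above (and the sch_sfq change below) replace the open-coded protocol whitelist with proto_ports_offset(). For reference, the helper's assumed shape, believed to match its definition in include/linux/in.h from the same series: it returns the byte offset of the port (or SPI) pair within the transport header, or -EINVAL for portless protocols:

	static inline int proto_ports_offset(int proto)
	{
		switch (proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_DCCP:
		case IPPROTO_ESP:	/* SPI */
		case IPPROTO_SCTP:
		case IPPROTO_UDPLITE:
			return 0;
		case IPPROTO_AH:	/* SPI */
			return 4;
		default:
			return -EINVAL;
		}
	}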
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 3bcac8aa333c..34da5e29ea1a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -223,6 +223,11 @@ META_COLLECTOR(int_maclen)
223 dst->value = skb->mac_len; 223 dst->value = skb->mac_len;
224} 224}
225 225
226META_COLLECTOR(int_rxhash)
227{
228 dst->value = skb_get_rxhash(skb);
229}
230
226/************************************************************************** 231/**************************************************************************
227 * Netfilter 232 * Netfilter
228 **************************************************************************/ 233 **************************************************************************/
@@ -541,6 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
541 [META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off), 546 [META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
542 [META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend), 547 [META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
543 [META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag), 548 [META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
549 [META_ID(RXHASH)] = META_FUNC(int_rxhash),
544 } 550 }
545}; 551};
546 552
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 408eea7086aa..6fb3d41c0e41 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -360,7 +360,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
360 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16); 360 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
361 } 361 }
362 362
363 if (!s || tsize != s->tsize || (!tab && tsize > 0)) 363 if (tsize != s->tsize || (!tab && tsize > 0))
364 return ERR_PTR(-EINVAL); 364 return ERR_PTR(-EINVAL);
365 365
366 spin_lock(&qdisc_stab_lock); 366 spin_lock(&qdisc_stab_lock);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 201cbac2b32c..3cf478d012dd 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,40 +123,39 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 	case htons(ETH_P_IP):
 	{
 		const struct iphdr *iph;
+		int poff;
 
 		if (!pskb_network_may_pull(skb, sizeof(*iph)))
 			goto err;
 		iph = ip_hdr(skb);
 		h = (__force u32)iph->daddr;
 		h2 = (__force u32)iph->saddr ^ iph->protocol;
-		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
-		    (iph->protocol == IPPROTO_TCP ||
-		     iph->protocol == IPPROTO_UDP ||
-		     iph->protocol == IPPROTO_UDPLITE ||
-		     iph->protocol == IPPROTO_SCTP ||
-		     iph->protocol == IPPROTO_DCCP ||
-		     iph->protocol == IPPROTO_ESP) &&
-		    pskb_network_may_pull(skb, iph->ihl * 4 + 4))
-			h2 ^= *(((u32*)iph) + iph->ihl);
+		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+			break;
+		poff = proto_ports_offset(iph->protocol);
+		if (poff >= 0 &&
+		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
+			iph = ip_hdr(skb);
+			h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+		}
 		break;
 	}
 	case htons(ETH_P_IPV6):
 	{
 		struct ipv6hdr *iph;
+		int poff;
 
 		if (!pskb_network_may_pull(skb, sizeof(*iph)))
 			goto err;
 		iph = ipv6_hdr(skb);
 		h = (__force u32)iph->daddr.s6_addr32[3];
 		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
-		if ((iph->nexthdr == IPPROTO_TCP ||
-		     iph->nexthdr == IPPROTO_UDP ||
-		     iph->nexthdr == IPPROTO_UDPLITE ||
-		     iph->nexthdr == IPPROTO_SCTP ||
-		     iph->nexthdr == IPPROTO_DCCP ||
-		     iph->nexthdr == IPPROTO_ESP) &&
-		    pskb_network_may_pull(skb, sizeof(*iph) + 4))
-			h2 ^= *(u32*)&iph[1];
+		poff = proto_ports_offset(iph->nexthdr);
+		if (poff >= 0 &&
+		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
+			iph = ipv6_hdr(skb);
+			h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+		}
 		break;
 	}
 	default:
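The sch_sfq change above replaces an open-coded six-protocol whitelist with a single proto_ports_offset() lookup that maps a transport protocol to the offset of its 32-bit port pair. A minimal user-space sketch of that idea follows; the table values mirror the kernel helper, while main() and the sample protocol list are invented scaffolding for illustration.

#include <stdio.h>
#include <netinet/in.h>

/* Offset of the 32-bit port/SPI word within the transport header,
 * or -1 if the protocol carries nothing usable for flow hashing.
 */
static int proto_ports_offset(int proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:	/* SPI occupies the first 4 bytes */
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		return 0;
	case IPPROTO_AH:	/* SPI sits 4 bytes into the AH header */
		return 4;
	default:
		return -1;
	}
}

int main(void)
{
	/* A flow key can fold in the port pair with one table lookup
	 * instead of a chain of protocol comparisons, as sfq_hash()
	 * now does.
	 */
	int protos[] = { IPPROTO_TCP, IPPROTO_ICMP, IPPROTO_AH };

	for (unsigned i = 0; i < sizeof(protos) / sizeof(protos[0]); i++)
		printf("proto %d -> port offset %d\n",
		       protos[i], proto_ports_offset(protos[i]));
	return 0;
}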
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0b85e5256434..5f1fb8bd862d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -48,6 +48,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/poll.h>
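This pr_fmt define, repeated across the SCTP files below, is what lets the later hunks drop every hand-written "SCTP: " prefix: pr_err() and friends paste pr_fmt() around the caller's format string at compile time. A user-space model of the mechanism, with fprintf standing in for printk and "sctp" playing the role of KBUILD_MODNAME:

#include <stdio.h>

#define pr_fmt(fmt) "sctp" ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "sctp: Failed association hash alloc" -- the prefix
	 * comes from the one #define at the top of the file.
	 */
	pr_err("Failed association hash alloc\n");
	return 0;
}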
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 476caaf100ed..6c8556459a75 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -37,6 +37,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/net.h>
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index ccb6dc48d15b..397296fb156f 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -43,6 +43,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 #include <linux/interrupt.h>
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 732689140fb8..95e0c8eda1a0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -47,6 +47,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -336,7 +338,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
 		memcpy(saddr, baddr, sizeof(union sctp_addr));
 		SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr);
 	} else {
-		printk(KERN_ERR "%s: asoc:%p Could not find a valid source "
+		pr_err("%s: asoc:%p Could not find a valid source "
 		       "address for the dest:%pI6\n",
 		       __func__, asoc, &daddr->v6.sin6_addr);
 	}
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index f73ec0ea93ba..8ef8e7d9eb61 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -38,6 +38,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <net/sctp/sctp.h>
 
@@ -134,8 +136,7 @@ void sctp_dbg_objcnt_init(void)
 	ent = proc_create("sctp_dbg_objcnt", 0,
 			  proc_net_sctp, &sctp_objcnt_ops);
 	if (!ent)
-		printk(KERN_WARNING
-			"sctp_dbg_objcnt: Unable to create /proc entry.\n");
+		pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
 }
 
 /* Cleanup the objcount entry in the proc filesystem. */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a646681f5acd..901764b17aee 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -41,6 +41,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c04b2eb59186..8c6d379b4bb6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -46,6 +46,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/list.h>   /* For struct list_head */
 #include <linux/socket.h>
@@ -1463,23 +1465,23 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				/* Display the end of the
 				 * current range.
 				 */
-				SCTP_DEBUG_PRINTK("-%08x",
+				SCTP_DEBUG_PRINTK_CONT("-%08x",
 						  dbg_last_ack_tsn);
 			}
 
 			/* Start a new range.  */
-			SCTP_DEBUG_PRINTK(",%08x", tsn);
+			SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
 			dbg_ack_tsn = tsn;
 			break;
 
 		case 1:	/* The last TSN was NOT ACKed. */
 			if (dbg_last_kept_tsn != dbg_kept_tsn) {
 				/* Display the end of current range. */
-				SCTP_DEBUG_PRINTK("-%08x",
+				SCTP_DEBUG_PRINTK_CONT("-%08x",
 						  dbg_last_kept_tsn);
 			}
 
-			SCTP_DEBUG_PRINTK("\n");
+			SCTP_DEBUG_PRINTK_CONT("\n");
 
 			/* FALL THROUGH... */
 		default:
@@ -1526,18 +1528,18 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				break;
 
 			if (dbg_last_kept_tsn != dbg_kept_tsn)
-				SCTP_DEBUG_PRINTK("-%08x",
+				SCTP_DEBUG_PRINTK_CONT("-%08x",
 						  dbg_last_kept_tsn);
 
-			SCTP_DEBUG_PRINTK(",%08x", tsn);
+			SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
 			dbg_kept_tsn = tsn;
 			break;
 
 		case 0:
 			if (dbg_last_ack_tsn != dbg_ack_tsn)
-				SCTP_DEBUG_PRINTK("-%08x",
+				SCTP_DEBUG_PRINTK_CONT("-%08x",
 						  dbg_last_ack_tsn);
-			SCTP_DEBUG_PRINTK("\n");
+			SCTP_DEBUG_PRINTK_CONT("\n");
 
 			/* FALL THROUGH... */
 		default:
@@ -1556,17 +1558,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 	switch (dbg_prt_state) {
 	case 0:
 		if (dbg_last_ack_tsn != dbg_ack_tsn) {
-			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn);
+			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
 		} else {
-			SCTP_DEBUG_PRINTK("\n");
+			SCTP_DEBUG_PRINTK_CONT("\n");
 		}
 		break;
 
 	case 1:
 		if (dbg_last_kept_tsn != dbg_kept_tsn) {
-			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn);
+			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
 		} else {
-			SCTP_DEBUG_PRINTK("\n");
+			SCTP_DEBUG_PRINTK_CONT("\n");
 		}
 	}
 #endif /* SCTP_DEBUG */
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index db3a42b8b349..2e63e9dc010e 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -22,6 +22,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <linux/socket.h>
@@ -192,7 +194,7 @@ static __init int sctpprobe_init(void)
 	if (ret)
 		goto remove_proc;
 
-	pr_info("SCTP probe registered (port=%d)\n", port);
+	pr_info("probe registered (port=%d)\n", port);
 
 	return 0;
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5027b83f1cc0..f774e657641a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -46,6 +46,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/netdevice.h>
@@ -707,8 +709,7 @@ static int sctp_ctl_sock_init(void)
 				   &init_net);
 
 	if (err < 0) {
-		printk(KERN_ERR
-		       "SCTP: Failed to create the SCTP control socket.\n");
+		pr_err("Failed to create the SCTP control socket\n");
 		return err;
 	}
 	return 0;
@@ -1206,7 +1207,7 @@ SCTP_STATIC __init int sctp_init(void)
 			__get_free_pages(GFP_ATOMIC, order);
 	} while (!sctp_assoc_hashtable && --order > 0);
 	if (!sctp_assoc_hashtable) {
-		printk(KERN_ERR "SCTP: Failed association hash alloc.\n");
+		pr_err("Failed association hash alloc\n");
 		status = -ENOMEM;
 		goto err_ahash_alloc;
 	}
@@ -1220,7 +1221,7 @@ SCTP_STATIC __init int sctp_init(void)
 	sctp_ep_hashtable = (struct sctp_hashbucket *)
 		kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
 	if (!sctp_ep_hashtable) {
-		printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n");
+		pr_err("Failed endpoint_hash alloc\n");
 		status = -ENOMEM;
 		goto err_ehash_alloc;
 	}
@@ -1239,7 +1240,7 @@ SCTP_STATIC __init int sctp_init(void)
 			__get_free_pages(GFP_ATOMIC, order);
 	} while (!sctp_port_hashtable && --order > 0);
 	if (!sctp_port_hashtable) {
-		printk(KERN_ERR "SCTP: Failed bind hash alloc.");
+		pr_err("Failed bind hash alloc\n");
 		status = -ENOMEM;
 		goto err_bhash_alloc;
 	}
@@ -1248,8 +1249,7 @@ SCTP_STATIC __init int sctp_init(void)
 		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
 	}
 
-	printk(KERN_INFO "SCTP: Hash tables configured "
-	       "(established %d bind %d)\n",
+	pr_info("Hash tables configured (established %d bind %d)\n",
 		sctp_assoc_hashsize, sctp_port_hashsize);
 
 	/* Disable ADDIP by default. */
@@ -1290,8 +1290,7 @@ SCTP_STATIC __init int sctp_init(void)
 
 	/* Initialize the control inode/socket for handling OOTB packets.  */
 	if ((status = sctp_ctl_sock_init())) {
-		printk (KERN_ERR
-			"SCTP: Failed to initialize the SCTP control sock.\n");
+		pr_err("Failed to initialize the SCTP control sock\n");
 		goto err_ctl_sock_init;
 	}
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 246f92924658..2cc46f0962ca 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -50,6 +50,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/ip.h>
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index f5e5e27cac5e..b21b218d564f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -47,6 +47,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/skbuff.h>
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -1146,26 +1148,23 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 
 	case SCTP_DISPOSITION_VIOLATION:
 		if (net_ratelimit())
-			printk(KERN_ERR "sctp protocol violation state %d "
-			       "chunkid %d\n", state, subtype.chunk);
+			pr_err("protocol violation state %d chunkid %d\n",
+			       state, subtype.chunk);
 		break;
 
 	case SCTP_DISPOSITION_NOT_IMPL:
-		printk(KERN_WARNING "sctp unimplemented feature in state %d, "
-		       "event_type %d, event_id %d\n",
-		       state, event_type, subtype.chunk);
+		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
+			state, event_type, subtype.chunk);
 		break;
 
 	case SCTP_DISPOSITION_BUG:
-		printk(KERN_ERR "sctp bug in state %d, "
-		       "event_type %d, event_id %d\n",
+		pr_err("bug in state %d, event_type %d, event_id %d\n",
 		       state, event_type, subtype.chunk);
 		BUG();
 		break;
 
 	default:
-		printk(KERN_ERR "sctp impossible disposition %d "
-		       "in state %d, event_type %d, event_id %d\n",
+		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
 		       status, state, event_type, subtype.chunk);
 		BUG();
 		break;
@@ -1679,8 +1678,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			sctp_cmd_send_asconf(asoc);
 			break;
 		default:
-			printk(KERN_WARNING "Impossible command: %u, %p\n",
+			pr_warn("Impossible command: %u, %p\n",
 				cmd->verb, cmd->obj.ptr);
 			break;
 		}
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d344dc481ccc..4b4eb7c96bbd 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -50,6 +50,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/ip.h>
@@ -1138,18 +1140,16 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 	if (unlikely(!link)) {
 		if (from_addr.sa.sa_family == AF_INET6) {
 			if (net_ratelimit())
-				printk(KERN_WARNING
-				    "%s association %p could not find address %pI6\n",
-				    __func__,
-				    asoc,
-				    &from_addr.v6.sin6_addr);
+				pr_warn("%s association %p could not find address %pI6\n",
+					__func__,
+					asoc,
+					&from_addr.v6.sin6_addr);
 		} else {
 			if (net_ratelimit())
-				printk(KERN_WARNING
-				    "%s association %p could not find address %pI4\n",
-				    __func__,
-				    asoc,
-				    &from_addr.v4.sin_addr.s_addr);
+				pr_warn("%s association %p could not find address %pI4\n",
+					__func__,
+					asoc,
+					&from_addr.v4.sin_addr.s_addr);
 		}
 		return SCTP_DISPOSITION_DISCARD;
 	}
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 6d9b3aafcc5d..546d4387fb3c 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -46,6 +46,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/skbuff.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
@@ -66,15 +68,19 @@ static const sctp_sm_table_entry_t bug = {
 	.name = "sctp_sf_bug"
 };
 
-#define DO_LOOKUP(_max, _type, _table) \
-	if ((event_subtype._type > (_max))) { \
-		printk(KERN_WARNING \
-		       "sctp table %p possible attack:" \
-		       " event %d exceeds max %d\n", \
-		       _table, event_subtype._type, _max); \
-		return &bug; \
-	} \
-	return &_table[event_subtype._type][(int)state];
+#define DO_LOOKUP(_max, _type, _table) \
+({ \
+	const sctp_sm_table_entry_t *rtn; \
+ \
+	if ((event_subtype._type > (_max))) { \
+		pr_warn("table %p possible attack: event %d exceeds max %d\n", \
+			_table, event_subtype._type, _max); \
+		rtn = &bug; \
+	} else \
+		rtn = &_table[event_subtype._type][(int)state]; \
+ \
+	rtn; \
+})
 
 const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 						  sctp_state_t state,
@@ -83,21 +89,15 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	switch (event_type) {
 	case SCTP_EVENT_T_CHUNK:
 		return sctp_chunk_event_lookup(event_subtype.chunk, state);
-		break;
 	case SCTP_EVENT_T_TIMEOUT:
-		DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
-			  timeout_event_table);
-		break;
-
+		return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
+				 timeout_event_table);
 	case SCTP_EVENT_T_OTHER:
-		DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other, other_event_table);
-		break;
-
+		return DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other,
+				 other_event_table);
 	case SCTP_EVENT_T_PRIMITIVE:
-		DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
-			  primitive_event_table);
-		break;
-
+		return DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
+				 primitive_event_table);
 	default:
 		/* Yikes!  We got an illegal event type.  */
 		return &bug;
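The rewritten DO_LOOKUP works because ({ ... }) is a GNU C statement expression (supported by gcc and clang) whose value is the last expression in the block; that is what lets the callers above say "return DO_LOOKUP(...)" instead of hiding a return inside the macro. A minimal sketch of the same trick, using a toy table rather than the SCTP state tables:

#include <stdio.h>

static const int bug = -1;

/* Bounds-checked table lookup usable as an expression. */
#define LOOKUP(_max, _idx, _table) \
({ \
	const int *rtn; \
 \
	if ((_idx) > (_max)) { \
		fprintf(stderr, "index %d exceeds max %d\n", \
			(_idx), (_max)); \
		rtn = &bug; \
	} else \
		rtn = &(_table)[(_idx)]; \
 \
	rtn; \
})

int main(void)
{
	static const int table[3] = { 10, 20, 30 };

	printf("%d\n", *LOOKUP(2, 1, table));	/* 20 */
	printf("%d\n", *LOOKUP(2, 7, table));	/* -1, after a warning */
	return 0;
}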
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ca44917872d2..6a691d84aef4 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -57,6 +57,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
@@ -2458,9 +2460,8 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
 		if (params.sack_delay == 0 && params.sack_freq == 0)
 			return 0;
 	} else if (optlen == sizeof(struct sctp_assoc_value)) {
-		printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
-		       "in delayed_ack socket option deprecated\n");
-		printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
+		pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
+		pr_warn("Use struct sctp_sack_info instead\n");
 		if (copy_from_user(&params, optval, optlen))
 			return -EFAULT;
 
@@ -2868,10 +2869,8 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
 	int val;
 
 	if (optlen == sizeof(int)) {
-		printk(KERN_WARNING
-		       "SCTP: Use of int in maxseg socket option deprecated\n");
-		printk(KERN_WARNING
-		       "SCTP: Use struct sctp_assoc_value instead\n");
+		pr_warn("Use of int in maxseg socket option deprecated\n");
+		pr_warn("Use struct sctp_assoc_value instead\n");
 		if (copy_from_user(&val, optval, optlen))
 			return -EFAULT;
 		params.assoc_id = 0;
@@ -3121,10 +3120,8 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
 	int assoc_id = 0;
 
 	if (optlen == sizeof(int)) {
-		printk(KERN_WARNING
-		       "SCTP: Use of int in max_burst socket option deprecated\n");
-		printk(KERN_WARNING
-		       "SCTP: Use struct sctp_assoc_value instead\n");
+		pr_warn("Use of int in max_burst socket option deprecated\n");
+		pr_warn("Use struct sctp_assoc_value instead\n");
 		if (copy_from_user(&val, optval, optlen))
 			return -EFAULT;
 	} else if (optlen == sizeof(struct sctp_assoc_value)) {
@@ -3595,7 +3592,40 @@ out:
 /* The SCTP ioctl handler. */
 SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
-	return -ENOIOCTLCMD;
+	int rc = -ENOTCONN;
+
+	sctp_lock_sock(sk);
+
+	/*
+	 * SEQPACKET-style sockets in LISTENING state are valid, for
+	 * SCTP, so only discard TCP-style sockets in LISTENING state.
+	 */
+	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+		goto out;
+
+	switch (cmd) {
+	case SIOCINQ: {
+		struct sk_buff *skb;
+		unsigned int amount = 0;
+
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb != NULL) {
+			/*
+			 * We will only return the amount of this packet since
+			 * that is all that will be read.
+			 */
+			amount = skb->len;
+		}
+		rc = put_user(amount, (int __user *)arg);
+	}
+	break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+out:
+	sctp_release_sock(sk);
+	return rc;
 }
 
 /* This is the function which gets called during socket creation to
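What the new sctp_ioctl() enables from user space: ioctl(SIOCINQ) now reports the byte count of the next queued message, mirroring skb_peek() in the hunk. The sketch below uses a UDP socket pair so it runs anywhere; on a kernel with SCTP support the same ioctl works on an SCTP socket.

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sockios.h>	/* SIOCINQ */
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr = { .sin_family = AF_INET };
	socklen_t alen = sizeof(addr);
	int rx = socket(AF_INET, SOCK_DGRAM, 0);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	int pending = 0;
	char tmp;

	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(rx, (struct sockaddr *)&addr, sizeof(addr));
	getsockname(rx, (struct sockaddr *)&addr, &alen);

	sendto(tx, "hello", 5, 0, (struct sockaddr *)&addr, sizeof(addr));
	recv(rx, &tmp, 1, MSG_PEEK);	/* wait until the datagram is queued */

	/* Like sctp_ioctl(): reports only the first queued packet. */
	ioctl(rx, SIOCINQ, &pending);
	printf("next message: %d bytes\n", pending);	/* 5 */

	close(tx);
	close(rx);
	return 0;
}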
@@ -4281,9 +4311,8 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
 		if (copy_from_user(&params, optval, len))
 			return -EFAULT;
 	} else if (len == sizeof(struct sctp_assoc_value)) {
-		printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
-		       "in delayed_ack socket option deprecated\n");
-		printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
+		pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
+		pr_warn("Use struct sctp_sack_info instead\n");
 		if (copy_from_user(&params, optval, len))
 			return -EFAULT;
 	} else
@@ -4929,10 +4958,8 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
 	struct sctp_association *asoc;
 
 	if (len == sizeof(int)) {
-		printk(KERN_WARNING
-		       "SCTP: Use of int in maxseg socket option deprecated\n");
-		printk(KERN_WARNING
-		       "SCTP: Use struct sctp_assoc_value instead\n");
+		pr_warn("Use of int in maxseg socket option deprecated\n");
+		pr_warn("Use struct sctp_assoc_value instead\n");
 		params.assoc_id = 0;
 	} else if (len >= sizeof(struct sctp_assoc_value)) {
 		len = sizeof(struct sctp_assoc_value);
@@ -5023,10 +5050,8 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 	struct sctp_association *asoc;
 
 	if (len == sizeof(int)) {
-		printk(KERN_WARNING
-		       "SCTP: Use of int in max_burst socket option deprecated\n");
-		printk(KERN_WARNING
-		       "SCTP: Use struct sctp_assoc_value instead\n");
+		pr_warn("Use of int in max_burst socket option deprecated\n");
+		pr_warn("Use struct sctp_assoc_value instead\n");
 		params.assoc_id = 0;
 	} else if (len >= sizeof(struct sctp_assoc_value)) {
 		len = sizeof(struct sctp_assoc_value);
@@ -5586,8 +5611,7 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
 		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
 		if (IS_ERR(tfm)) {
 			if (net_ratelimit()) {
-				printk(KERN_INFO
-				       "SCTP: failed to load transform for %s: %ld\n",
+				pr_info("failed to load transform for %s: %ld\n",
 					sctp_hmac_alg, PTR_ERR(tfm));
 			}
 			return -ENOSYS;
@@ -5716,13 +5740,12 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
 	/* Is it readable?  Reconsider this code with TCP-style support.  */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sk->sk_shutdown & RCV_SHUTDOWN))
+	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
 
 	/* The association is either gone or not ready.  */
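The sctp_poll() change matters to callers because a peer's read-side shutdown now raises POLLIN alongside POLLRDHUP, so a blocked poller wakes and reads EOF instead of waiting forever. A self-contained sketch of that pattern, using an AF_UNIX pair as the stand-in transport (the last hunk in this series applies the same fix to unix_poll); POLLRDHUP needs _GNU_SOURCE:

#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2];
	struct pollfd pfd;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	shutdown(sv[1], SHUT_WR);		/* peer half-closes */

	pfd.fd = sv[0];
	pfd.events = POLLIN | POLLRDHUP;
	poll(&pfd, 1, 1000);

	/* Both flags come back: the reader wakes up and sees EOF. */
	printf("revents: POLLIN=%d POLLRDHUP=%d\n",
	       !!(pfd.revents & POLLIN), !!(pfd.revents & POLLRDHUP));
	return 0;
}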
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 132046cb82fc..d3ae493d234a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -48,6 +48,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/random.h>
@@ -244,10 +246,9 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 	struct dst_entry *dst;
 
 	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
-		printk(KERN_WARNING "%s: Reported pmtu %d too low, "
-		       "using default minimum of %d\n",
-		       __func__, pmtu,
-		       SCTP_DEFAULT_MINSEGMENT);
+		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
+			__func__, pmtu,
+			SCTP_DEFAULT_MINSEGMENT);
 		/* Use default minimum segment size and disable
 		 * pmtu discovery on this transport.
 		 */
diff --git a/net/socket.c b/net/socket.c
index 2270b941bcc7..717a5f1c8792 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -535,14 +535,13 @@ void sock_release(struct socket *sock)
 }
 EXPORT_SYMBOL(sock_release);
 
-int sock_tx_timestamp(struct msghdr *msg, struct sock *sk,
-		      union skb_shared_tx *shtx)
+int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
 {
-	shtx->flags = 0;
+	*tx_flags = 0;
 	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
-		shtx->hardware = 1;
+		*tx_flags |= SKBTX_HW_TSTAMP;
 	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
-		shtx->software = 1;
+		*tx_flags |= SKBTX_SW_TSTAMP;
 	return 0;
 }
 EXPORT_SYMBOL(sock_tx_timestamp);
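The socket flags tested in sock_tx_timestamp() are set from user space through SO_TIMESTAMPING. A sketch of the requesting side follows; it enables software transmit timestamps, which is what makes the rewritten function set SKBTX_SW_TSTAMP on outgoing packets. Reading the timestamp back via MSG_ERRQUEUE is omitted to keep it short.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* */

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int flags = SOF_TIMESTAMPING_TX_SOFTWARE |	/* stamp on send */
		    SOF_TIMESTAMPING_SOFTWARE;		/* report path */

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");
	else
		printf("software TX timestamps enabled\n");
	return 0;
}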
@@ -1919,7 +1918,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
 	 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
 	 * checking falls down on this.
 	 */
-	if (copy_from_user(ctl_buf, (void __user *)msg_sys.msg_control,
+	if (copy_from_user(ctl_buf,
+			   (void __user __force *)msg_sys.msg_control,
 			   ctl_len))
 		goto out_freectl;
 	msg_sys.msg_control = ctl_buf;
@@ -3054,14 +3054,19 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
 			char *optval, int *optlen)
 {
 	mm_segment_t oldfs = get_fs();
+	char __user *uoptval;
+	int __user *uoptlen;
 	int err;
 
+	uoptval = (char __user __force *) optval;
+	uoptlen = (int __user __force *) optlen;
+
 	set_fs(KERNEL_DS);
 	if (level == SOL_SOCKET)
-		err = sock_getsockopt(sock, level, optname, optval, optlen);
+		err = sock_getsockopt(sock, level, optname, uoptval, uoptlen);
 	else
-		err = sock->ops->getsockopt(sock, level, optname, optval,
-					    optlen);
+		err = sock->ops->getsockopt(sock, level, optname, uoptval,
+					    uoptlen);
 	set_fs(oldfs);
 	return err;
 }
@@ -3071,13 +3076,16 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
 			char *optval, unsigned int optlen)
 {
 	mm_segment_t oldfs = get_fs();
+	char __user *uoptval;
 	int err;
 
+	uoptval = (char __user __force *) optval;
+
 	set_fs(KERNEL_DS);
 	if (level == SOL_SOCKET)
-		err = sock_setsockopt(sock, level, optname, optval, optlen);
+		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
 	else
-		err = sock->ops->setsockopt(sock, level, optname, optval,
-					    optlen);
+		err = sock->ops->setsockopt(sock, level, optname, uoptval,
+					    optlen);
 	set_fs(oldfs);
 	return err;
 }
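The __user/__force churn above exists purely for sparse, the kernel's static checker; both annotations compile away under gcc. This is the shape of the definitions in the kernel's compiler.h (trimmed): __user tags a pointer as belonging to the user address space, and __force tells sparse a cast between address spaces is intentional, exactly as in the kernel_setsockopt() hunk.

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

/* With the macros above, this mirrors the hunk's pattern: a kernel
 * pointer is deliberately retagged as a user pointer before being
 * handed to a function that expects one (valid only while the
 * caller has widened the address limit with set_fs(KERNEL_DS)).
 */
static inline const char __user *retag(const char *kptr)
{
	return (const char __user __force *)kptr;
}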
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a008c6689305..b11248c2d788 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -143,6 +143,19 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
 }
 
 
+static void bclink_set_last_sent(void)
+{
+	if (bcl->next_out)
+		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
+	else
+		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
+}
+
+u32 tipc_bclink_get_last_sent(void)
+{
+	return bcl->fsm_msg_cnt;
+}
+
 /**
  * bclink_set_gap - set gap according to contents of current deferred pkt queue
  *
@@ -237,8 +250,10 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 
 	/* Try resolving broadcast link congestion, if necessary */
 
-	if (unlikely(bcl->next_out))
+	if (unlikely(bcl->next_out)) {
 		tipc_link_push_queue(bcl);
+		bclink_set_last_sent();
+	}
 	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
 		tipc_link_wakeup_ports(bcl, 0);
 	spin_unlock_bh(&bc_lock);
@@ -395,7 +410,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
 	if (unlikely(res == -ELINKCONG))
 		buf_discard(buf);
 	else
-		bcl->stats.sent_info++;
+		bclink_set_last_sent();
 
 	if (bcl->out_queue_size > bcl->stats.max_queue_sz)
 		bcl->stats.max_queue_sz = bcl->out_queue_size;
@@ -529,15 +544,6 @@ receive:
 	tipc_node_unlock(node);
 }
 
-u32 tipc_bclink_get_last_sent(void)
-{
-	u32 last_sent = mod(bcl->next_out_no - 1);
-
-	if (bcl->next_out)
-		last_sent = mod(buf_seqno(bcl->next_out) - 1);
-	return last_sent;
-}
-
 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 {
 	return (n_ptr->bclink.supported &&
@@ -570,6 +576,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 		msg = buf_msg(buf);
 		msg_set_non_seq(msg, 1);
 		msg_set_mc_netid(msg, tipc_net_id);
+		bcl->stats.sent_info++;
 	}
 
 	/* Send buffer over bearers until all targets reached */
@@ -609,11 +616,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 		bcbearer->remains = bcbearer->remains_new;
 	}
 
-	/* Unable to reach all targets */
+	/*
+	 * Unable to reach all targets (indicate success, since currently
+	 * there isn't code in place to properly block & unblock the
+	 * pseudo-bearer used by the broadcast link)
+	 */
 
-	bcbearer->bearer.publ.blocked = 1;
-	bcl->stats.bearer_congs++;
-	return 1;
+	return TIPC_OK;
 }
 
 /**
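bclink_set_last_sent() caches the highest broadcast sequence number actually handed to the bearer: one before the first unsent buffer if a backlog exists, otherwise one before the next number to be assigned, with mod() wrapping the arithmetic in a 16-bit space. A toy model of that computation, with invented field values (the struct below is not the kernel's link structure):

#include <stdio.h>

#define mod(x) ((x) & 0xffffu)	/* TIPC seqnos wrap at 2^16 */

struct toy_link {
	unsigned int next_out_no;	/* next seqno to assign */
	int have_unsent;		/* stand-in for bcl->next_out */
	unsigned int first_unsent_seqno;
};

static unsigned int last_sent(const struct toy_link *l)
{
	if (l->have_unsent)
		return mod(l->first_unsent_seqno - 1);
	return mod(l->next_out_no - 1);
}

int main(void)
{
	struct toy_link all_sent = { .next_out_no = 0, .have_unsent = 0 };
	struct toy_link backlog = { .next_out_no = 105, .have_unsent = 1,
				    .first_unsent_seqno = 100 };

	printf("%u\n", last_sent(&all_sent));	/* 65535: wrapped */
	printf("%u\n", last_sent(&backlog));	/* 99 */
	return 0;
}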
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 696468117985..466b861dab91 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -169,6 +169,7 @@ void tipc_core_stop(void)
 	tipc_nametbl_stop();
 	tipc_ref_table_stop();
 	tipc_socket_stop();
+	tipc_log_resize(0);
 }
 
 /**
@@ -203,7 +204,9 @@ static int __init tipc_init(void)
 {
 	int res;
 
-	tipc_log_resize(CONFIG_TIPC_LOG);
+	if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
+		warn("Unable to create log buffer\n");
+
 	info("Activated (version " TIPC_MOD_VER
 	     " compiled " __DATE__ " " __TIME__ ")\n");
 
@@ -230,7 +233,6 @@ static void __exit tipc_exit(void)
 	tipc_core_stop_net();
 	tipc_core_stop();
 	info("Deactivated\n");
-	tipc_log_resize(0);
 }
 
 module_init(tipc_init);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index fc1fcf5e6b53..f28d1ae93125 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -203,6 +203,14 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
 		return;
 	}
 	spin_lock_bh(&n_ptr->lock);
+
+	/* Don't talk to neighbor during cleanup after last session */
+
+	if (n_ptr->cleanup_required) {
+		spin_unlock_bh(&n_ptr->lock);
+		return;
+	}
+
 	link = n_ptr->links[b_ptr->identity];
 	if (!link) {
 		dbg("creating link\n");
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 6230d16020c4..6e988ba485fd 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -72,17 +72,26 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 {
 	struct sk_buff *clone;
 	struct net_device *dev;
+	int delta;
 
 	clone = skb_clone(buf, GFP_ATOMIC);
-	if (clone) {
-		skb_reset_network_header(clone);
-		dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
-		clone->dev = dev;
-		dev_hard_header(clone, dev, ETH_P_TIPC,
-				&dest->dev_addr.eth_addr,
-				dev->dev_addr, clone->len);
-		dev_queue_xmit(clone);
+	if (!clone)
+		return 0;
+
+	dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+	delta = dev->hard_header_len - skb_headroom(buf);
+
+	if ((delta > 0) &&
+	    pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+		kfree_skb(clone);
+		return 0;
 	}
+
+	skb_reset_network_header(clone);
+	clone->dev = dev;
+	dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr,
+			dev->dev_addr, clone->len);
+	dev_queue_xmit(clone);
 	return 0;
 }
 
@@ -92,15 +101,12 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
  * Accept only packets explicitly sent to this node, or broadcast packets;
  * ignores packets sent using Ethernet multicast, and traffic sent to other
  * nodes (which can happen if interface is running in promiscuous mode).
- * Routine truncates any Ethernet padding/CRC appended to the message,
- * and ensures message size matches actual length
 */
 
static int recv_msg(struct sk_buff *buf, struct net_device *dev,
		    struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
-	u32 size;
 
 	if (!net_eq(dev_net(dev), &init_net)) {
 		kfree_skb(buf);
@@ -109,13 +115,9 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 
 	if (likely(eb_ptr->bearer)) {
 		if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
-			size = msg_size((struct tipc_msg *)buf->data);
-			skb_trim(buf, size);
-			if (likely(buf->len == size)) {
-				buf->next = NULL;
-				tipc_recv_msg(buf, eb_ptr->bearer);
-				return 0;
-			}
+			buf->next = NULL;
+			tipc_recv_msg(buf, eb_ptr->bearer);
+			return 0;
 		}
 	}
 	kfree_skb(buf);
@@ -133,6 +135,16 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
 	struct eth_bearer *eb_ptr = &eth_bearers[0];
 	struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
 	char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
+	int pending_dev = 0;
+
+	/* Find unused Ethernet bearer structure */
+
+	while (eb_ptr->dev) {
+		if (!eb_ptr->bearer)
+			pending_dev++;
+		if (++eb_ptr == stop)
+			return pending_dev ? -EAGAIN : -EDQUOT;
+	}
 
 	/* Find device with specified name */
 
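The send_msg() rework above boils down to a headroom check: before dev_hard_header() pushes the link-layer header, the buffer must have at least hard_header_len bytes of headroom, and it is grown only when the deficit (delta) is positive. A sketch of that arithmetic with plain integers standing in for sk_buff geometry; the 64-byte alignment is an assumption mirroring SKB_DATA_ALIGN's cache-line rounding:

#include <stdio.h>

/* Round up as SKB_DATA_ALIGN does (cache-line size assumed 64). */
#define DATA_ALIGN(x) (((x) + 63) & ~63)

static int headroom_deficit(int hard_header_len, int headroom)
{
	int delta = hard_header_len - headroom;

	return delta > 0 ? DATA_ALIGN(delta) : 0;
}

int main(void)
{
	/* 14-byte Ethernet header, clone with only 2 bytes of room:
	 * expand by an aligned amount before dev_hard_header().
	 */
	printf("grow by %d bytes\n", headroom_deficit(14, 2));	/* 64 */
	printf("grow by %d bytes\n", headroom_deficit(14, 16));	/* 0 */
	return 0;
}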
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a3616b99529b..a6a3102bb4d6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1802,6 +1802,15 @@ static int link_recv_buf_validate(struct sk_buff *buf)
 	return pskb_may_pull(buf, hdr_size);
 }
 
+/**
+ * tipc_recv_msg - process TIPC messages arriving from off-node
+ * @head: pointer to message buffer chain
+ * @tb_ptr: pointer to bearer message arrived on
+ *
+ * Invoked with no locks held.  Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+
 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 {
 	read_lock_bh(&tipc_net_lock);
@@ -1819,6 +1828,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 
 		head = head->next;
 
+		/* Ensure bearer is still enabled */
+
+		if (unlikely(!b_ptr->active))
+			goto cont;
+
 		/* Ensure message is well-formed */
 
 		if (unlikely(!link_recv_buf_validate(buf)))
@@ -1855,13 +1869,22 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 			goto cont;
 		}
 
-		/* Locate unicast link endpoint that should handle message */
+		/* Locate neighboring node that sent message */
 
 		n_ptr = tipc_node_find(msg_prevnode(msg));
 		if (unlikely(!n_ptr))
 			goto cont;
 		tipc_node_lock(n_ptr);
 
+		/* Don't talk to neighbor during cleanup after last session */
+
+		if (n_ptr->cleanup_required) {
+			tipc_node_unlock(n_ptr);
+			goto cont;
+		}
+
+		/* Locate unicast link endpoint that should handle message */
+
 		l_ptr = n_ptr->links[b_ptr->identity];
 		if (unlikely(!l_ptr)) {
 			tipc_node_unlock(n_ptr);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 8ba79620db3f..c13c2c7c4b57 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -613,8 +613,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
 }
 
 /*
- * tipc_nametbl_translate(): Translate tipc_name -> tipc_portid.
- *                           Very time-critical.
+ * tipc_nametbl_translate - translate name to port id
 *
 * Note: on entry 'destnode' is the search domain used during translation;
 * on exit it passes back the node address of the matching port (if any)
@@ -685,7 +684,6 @@ found:
 	}
 	spin_unlock_bh(&seq->lock);
not_found:
-	*destnode = 0;
 	read_unlock_bh(&tipc_nametbl_lock);
 	return 0;
 }
@@ -877,7 +875,7 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
			u32 index)
 {
 	char portIdStr[27];
-	char *scopeStr;
+	const char *scope_str[] = {"", " zone", " cluster", " node"};
 	struct publication *publ = sseq->zone_list;
 
 	tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
@@ -893,15 +891,8 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
 			  tipc_node(publ->node), publ->ref);
 		tipc_printf(buf, "%-26s ", portIdStr);
 		if (depth > 3) {
-			if (publ->node != tipc_own_addr)
-				scopeStr = "";
-			else if (publ->scope == TIPC_NODE_SCOPE)
-				scopeStr = "node";
-			else if (publ->scope == TIPC_CLUSTER_SCOPE)
-				scopeStr = "cluster";
-			else
-				scopeStr = "zone";
-			tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
+			tipc_printf(buf, "%-10u %s", publ->key,
+				    scope_str[publ->scope]);
 		}
 
 		publ = publ->zone_list_next;
@@ -951,24 +942,19 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
 
 static void nametbl_header(struct print_buf *buf, u32 depth)
 {
-	tipc_printf(buf, "Type       ");
-
-	if (depth > 1)
-		tipc_printf(buf, "Lower      Upper      ");
-	if (depth > 2)
-		tipc_printf(buf, "Port Identity              ");
-	if (depth > 3)
-		tipc_printf(buf, "Publication");
-
-	tipc_printf(buf, "\n-----------");
-
-	if (depth > 1)
-		tipc_printf(buf, "--------------------- ");
-	if (depth > 2)
-		tipc_printf(buf, "-------------------------- ");
-	if (depth > 3)
-		tipc_printf(buf, "------------------");
-
+	const char *header[] = {
+		"Type       ",
+		"Lower      Upper      ",
+		"Port Identity              ",
+		"Publication Scope"
+	};
+
+	int i;
+
+	if (depth > 4)
+		depth = 4;
+	for (i = 0; i < depth; i++)
+		tipc_printf(buf, header[i]);
 	tipc_printf(buf, "\n");
 }
 
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f61b7694138b..7e05af47a196 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -248,6 +248,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
 
 	/* Handle message for another node */
 	msg_dbg(msg, "NET>SEND>: ");
+	skb_trim(buf, msg_size(msg));
 	tipc_link_send(buf, dnode, msg_link_selector(msg));
 }
 
diff --git a/net/tipc/node.c b/net/tipc/node.c
index b634942caba5..b702c7bf580f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -237,8 +237,7 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
 
 int tipc_node_has_active_links(struct tipc_node *n_ptr)
 {
-	return (n_ptr &&
-		((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
+	return n_ptr->active_links[0] != NULL;
 }
 
 int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
@@ -384,6 +383,20 @@ static void node_established_contact(struct tipc_node *n_ptr)
 				  tipc_highest_allowed_slave);
 }
 
+static void node_cleanup_finished(unsigned long node_addr)
+{
+	struct tipc_node *n_ptr;
+
+	read_lock_bh(&tipc_net_lock);
+	n_ptr = tipc_node_find(node_addr);
+	if (n_ptr) {
+		tipc_node_lock(n_ptr);
+		n_ptr->cleanup_required = 0;
+		tipc_node_unlock(n_ptr);
+	}
+	read_unlock_bh(&tipc_net_lock);
+}
+
 static void node_lost_contact(struct tipc_node *n_ptr)
 {
 	struct cluster *c_ptr;
@@ -458,6 +471,11 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 		tipc_k_signal((Handler)ns->handle_node_down,
 			      (unsigned long)ns->usr_handle);
 	}
+
+	/* Prevent re-contact with node until all cleanup is done */
+
+	n_ptr->cleanup_required = 1;
+	tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
 }
 
 /**
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 6f990da5d143..45f3db3a595d 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -52,6 +52,7 @@
  * @active_links: pointers to active links to node
 * @links: pointers to all links to node
 * @working_links: number of working links to node (both active and standby)
+ * @cleanup_required: non-zero if cleaning up after a prior loss of contact
 * @link_cnt: number of links to node
 * @permit_changeover: non-zero if node has redundant links to this system
 * @routers: bitmap (used for multicluster communication)
@@ -78,6 +79,7 @@ struct tipc_node {
 	struct link *links[MAX_BEARERS];
 	int link_cnt;
 	int working_links;
+	int cleanup_required;
 	int permit_changeover;
 	u32 routers[512/32];
 	int last_router;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 0737680e9266..d760336f2ca8 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -588,19 +588,10 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
 	if (!p_ptr) {
 		err = TIPC_ERR_NO_PORT;
 	} else if (p_ptr->publ.connected) {
-		if (port_peernode(p_ptr) != msg_orignode(msg))
+		if ((port_peernode(p_ptr) != msg_orignode(msg)) ||
+		    (port_peerport(p_ptr) != msg_origport(msg))) {
 			err = TIPC_ERR_NO_PORT;
-		if (port_peerport(p_ptr) != msg_origport(msg))
-			err = TIPC_ERR_NO_PORT;
-		if (!err && msg_routed(msg)) {
-			u32 seqno = msg_transp_seqno(msg);
-			u32 myno = ++p_ptr->last_in_seqno;
-			if (seqno != myno) {
-				err = TIPC_ERR_NO_PORT;
-				abort_buf = port_build_self_abort_msg(p_ptr, err);
-			}
-		}
-		if (msg_type(msg) == CONN_ACK) {
+		} else if (msg_type(msg) == CONN_ACK) {
 			int wakeup = tipc_port_congested(p_ptr) &&
 				     p_ptr->publ.congested &&
 				     p_ptr->wakeup;
@@ -1473,7 +1464,7 @@ int tipc_forward2name(u32 ref,
 	msg_set_destnode(msg, destnode);
 	msg_set_destport(msg, destport);
 
-	if (likely(destport || destnode)) {
+	if (likely(destport)) {
 		p_ptr->sent++;
 		if (likely(destnode == tipc_own_addr))
 			return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
@@ -1551,7 +1542,7 @@ int tipc_forward_buf2name(u32 ref,
 	skb_push(buf, LONG_H_SIZE);
 	skb_copy_to_linear_data(buf, msg, LONG_H_SIZE);
 	msg_dbg(buf_msg(buf),"PREP:");
-	if (likely(destport || destnode)) {
+	if (likely(destport)) {
 		p_ptr->sent++;
 		if (destnode == tipc_own_addr)
 			return tipc_port_recv_msg(buf);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 66e889ba48fd..f7ac94de24fe 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -64,6 +64,7 @@ struct tipc_sock {
 	struct sock sk;
 	struct tipc_port *p;
 	struct tipc_portid peer_name;
+	long conn_timeout;
 };
 
 #define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -240,9 +241,9 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
 	sock->state = state;
 
 	sock_init_data(sock, sk);
-	sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
 	sk->sk_backlog_rcv = backlog_rcv;
 	tipc_sk(sk)->p = tp_ptr;
+	tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
 
 	spin_unlock_bh(tp_ptr->lock);
 
@@ -429,36 +430,55 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * TIPC sets the returned events as follows:
- * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
- *    or if a connection-oriented socket is does not have an active connection
- *    (i.e. a read operation will not block).
- * b) POLLOUT is set except when a socket's connection has been terminated
- *    (i.e. a write operation will not block).
- * c) POLLHUP is set when a socket's connection has been terminated.
- *
- * IMPORTANT: The fact that a read or write operation will not block does NOT
- * imply that the operation will succeed!
+ *
+ * socket state		flags set
+ * ------------		---------
+ * unconnected		no read flags
+ *			no write flags
+ *
+ * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
+ *			no write flags
+ *
+ * connected		POLLIN/POLLRDNORM if data in rx queue
+ *			POLLOUT if port is not congested
+ *
+ * disconnecting	POLLIN/POLLRDNORM/POLLHUP
+ *			no write flags
+ *
+ * listening		POLLIN if SYN in rx queue
+ *			no write flags
+ *
+ * ready		POLLIN/POLLRDNORM if data in rx queue
+ * [connectionless]	POLLOUT (since port cannot be congested)
+ *
+ * IMPORTANT: The fact that a read or write operation is indicated does NOT
+ * imply that the operation will succeed, merely that it should be performed
+ * and will not block.
 */
 
static unsigned int poll(struct file *file, struct socket *sock,
			 poll_table *wait)
 {
 	struct sock *sk = sock->sk;
-	u32 mask;
+	u32 mask = 0;
 
 	poll_wait(file, sk_sleep(sk), wait);
 
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sock->state == SS_UNCONNECTED) ||
-	    (sock->state == SS_DISCONNECTING))
-		mask = (POLLRDNORM | POLLIN);
-	else
-		mask = 0;
-
-	if (sock->state == SS_DISCONNECTING)
-		mask |= POLLHUP;
-	else
-		mask |= POLLOUT;
+	switch ((int)sock->state) {
+	case SS_READY:
+	case SS_CONNECTED:
+		if (!tipc_sk_port(sk)->congested)
+			mask |= POLLOUT;
+		/* fall thru' */
+	case SS_CONNECTING:
+	case SS_LISTENING:
+		if (!skb_queue_empty(&sk->sk_receive_queue))
+			mask |= (POLLIN | POLLRDNORM);
+		break;
+	case SS_DISCONNECTING:
+		mask = (POLLIN | POLLRDNORM | POLLHUP);
+		break;
+	}
 
 	return mask;
 }
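As the new comment table stresses, the readiness bits poll() reports are hints: POLLOUT means "port not congested", not "any write will succeed". A sketch of how a robust caller treats them, with an AF_UNIX pair standing in for a TIPC socket so it is self-contained:

#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2];
	char byte = 'x';
	struct pollfd pfd;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	write(sv[1], &byte, 1);

	pfd.fd = sv[0];
	pfd.events = POLLIN | POLLOUT;
	poll(&pfd, 1, 1000);

	if (pfd.revents & POLLIN)	/* data queued: read won't block */
		read(sv[0], &byte, 1);
	if (pfd.revents & POLLOUT)	/* writable hint: a write may still
					 * transfer less than requested */
		write(sv[0], &byte, 1);

	close(sv[0]);
	close(sv[1]);
	return 0;
}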
@@ -1026,9 +1046,8 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1026 struct sk_buff *buf; 1046 struct sk_buff *buf;
1027 struct tipc_msg *msg; 1047 struct tipc_msg *msg;
1028 unsigned int sz; 1048 unsigned int sz;
1029 int sz_to_copy; 1049 int sz_to_copy, target, needed;
1030 int sz_copied = 0; 1050 int sz_copied = 0;
1031 int needed;
1032 char __user *crs = m->msg_iov->iov_base; 1051 char __user *crs = m->msg_iov->iov_base;
1033 unsigned char *buf_crs; 1052 unsigned char *buf_crs;
1034 u32 err; 1053 u32 err;
@@ -1050,6 +1069,8 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1050 goto exit; 1069 goto exit;
1051 } 1070 }
1052 1071
1072 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1073
1053restart: 1074restart:
1054 1075
1055 /* Look for a message in receive queue; wait if necessary */ 1076 /* Look for a message in receive queue; wait if necessary */
@@ -1138,7 +1159,7 @@ restart:
1138 1159
1139 if ((sz_copied < buf_len) && /* didn't get all requested data */ 1160 if ((sz_copied < buf_len) && /* didn't get all requested data */
1140 (!skb_queue_empty(&sk->sk_receive_queue) || 1161 (!skb_queue_empty(&sk->sk_receive_queue) ||
1141 (flags & MSG_WAITALL)) && /* and more is ready or required */ 1162 (sz_copied < target)) && /* and more is ready or required */
1142 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */ 1163 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
1143 (!err)) /* and haven't reached a FIN */ 1164 (!err)) /* and haven't reached a FIN */
1144 goto restart; 1165 goto restart;
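
With the hunk above, recv_stream() now loops until sock_rcvlowat() is satisfied: 'target' equals the full buffer length under MSG_WAITALL, and min(SO_RCVLOWAT, buf_len) otherwise, rather than returning after the first queued message. A hedged userspace sketch (recv_at_least is a hypothetical helper; 'sd' is assumed connected):

	#include <sys/types.h>
	#include <sys/socket.h>

	/* Sketch: block until at least 'lowat' bytes are queued before
	 * recv() returns. */
	static ssize_t recv_at_least(int sd, void *buf, size_t len, int lowat)
	{
		if (setsockopt(sd, SOL_SOCKET, SO_RCVLOWAT,
			       &lowat, sizeof(lowat)) < 0)
			return -1;
		return recv(sd, buf, len, 0);	/* kernel loops until >= lowat */
	}
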
@@ -1365,6 +1386,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1365 struct msghdr m = {NULL,}; 1386 struct msghdr m = {NULL,};
1366 struct sk_buff *buf; 1387 struct sk_buff *buf;
1367 struct tipc_msg *msg; 1388 struct tipc_msg *msg;
1389 long timeout;
1368 int res; 1390 int res;
1369 1391
1370 lock_sock(sk); 1392 lock_sock(sk);
@@ -1379,7 +1401,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1379 /* For now, TIPC does not support the non-blocking form of connect() */ 1401 /* For now, TIPC does not support the non-blocking form of connect() */
1380 1402
1381 if (flags & O_NONBLOCK) { 1403 if (flags & O_NONBLOCK) {
1382 res = -EWOULDBLOCK; 1404 res = -EOPNOTSUPP;
1383 goto exit; 1405 goto exit;
1384 } 1406 }
1385 1407
@@ -1425,11 +1447,12 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1425 1447
1426 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 1448 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1427 1449
1450 timeout = tipc_sk(sk)->conn_timeout;
1428 release_sock(sk); 1451 release_sock(sk);
1429 res = wait_event_interruptible_timeout(*sk_sleep(sk), 1452 res = wait_event_interruptible_timeout(*sk_sleep(sk),
1430 (!skb_queue_empty(&sk->sk_receive_queue) || 1453 (!skb_queue_empty(&sk->sk_receive_queue) ||
1431 (sock->state != SS_CONNECTING)), 1454 (sock->state != SS_CONNECTING)),
1432 sk->sk_rcvtimeo); 1455 timeout ? timeout : MAX_SCHEDULE_TIMEOUT);
1433 lock_sock(sk); 1456 lock_sock(sk);
1434 1457
1435 if (res > 0) { 1458 if (res > 0) {
@@ -1692,7 +1715,7 @@ static int setsockopt(struct socket *sock,
1692 res = tipc_set_portunreturnable(tport->ref, value); 1715 res = tipc_set_portunreturnable(tport->ref, value);
1693 break; 1716 break;
1694 case TIPC_CONN_TIMEOUT: 1717 case TIPC_CONN_TIMEOUT:
1695 sk->sk_rcvtimeo = msecs_to_jiffies(value); 1718 tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value);
1696 /* no need to set "res", since already 0 at this point */ 1719 /* no need to set "res", since already 0 at this point */
1697 break; 1720 break;
1698 default: 1721 default:
@@ -1747,7 +1770,7 @@ static int getsockopt(struct socket *sock,
1747 res = tipc_portunreturnable(tport->ref, &value); 1770 res = tipc_portunreturnable(tport->ref, &value);
1748 break; 1771 break;
1749 case TIPC_CONN_TIMEOUT: 1772 case TIPC_CONN_TIMEOUT:
1750 value = jiffies_to_msecs(sk->sk_rcvtimeo); 1773 value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout);
1751 /* no need to set "res", since already 0 at this point */ 1774 /* no need to set "res", since already 0 at this point */
1752 break; 1775 break;
1753 case TIPC_NODE_RECVQ_DEPTH: 1776 case TIPC_NODE_RECVQ_DEPTH:
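
The connect timeout now lives in tipc_sock->conn_timeout rather than overloading sk->sk_rcvtimeo, so TIPC_CONN_TIMEOUT no longer clobbers receive timeouts, and a zero value falls back to MAX_SCHEDULE_TIMEOUT (wait indefinitely). A sketch of setting it from userspace (set_conn_timeout is hypothetical; SOL_TIPC is defined locally in case the libc headers lack it):

	#include <sys/socket.h>
	#include <linux/tipc.h>		/* TIPC_CONN_TIMEOUT */

	#ifndef SOL_TIPC
	#define SOL_TIPC 271		/* from linux/socket.h */
	#endif

	/* Sketch: set the per-socket connect timeout in milliseconds;
	 * 0 means wait indefinitely. */
	static int set_conn_timeout(int sd, unsigned int ms)
	{
		return setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
				  &ms, sizeof(ms));
	}
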
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0b39b2451ea5..c586da3f4f18 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2033,11 +2033,10 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
2033 if (sk->sk_shutdown == SHUTDOWN_MASK) 2033 if (sk->sk_shutdown == SHUTDOWN_MASK)
2034 mask |= POLLHUP; 2034 mask |= POLLHUP;
2035 if (sk->sk_shutdown & RCV_SHUTDOWN) 2035 if (sk->sk_shutdown & RCV_SHUTDOWN)
2036 mask |= POLLRDHUP; 2036 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2037 2037
2038 /* readable? */ 2038 /* readable? */
2039 if (!skb_queue_empty(&sk->sk_receive_queue) || 2039 if (!skb_queue_empty(&sk->sk_receive_queue))
2040 (sk->sk_shutdown & RCV_SHUTDOWN))
2041 mask |= POLLIN | POLLRDNORM; 2040 mask |= POLLIN | POLLRDNORM;
2042 2041
2043 /* Connection-based need to check for termination and startup */ 2042 /* Connection-based need to check for termination and startup */
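
After the af_unix change above, a half-closed peer raises POLLIN | POLLRDNORM together with POLLRDHUP, so even a reader polling only for POLLIN wakes up to consume the EOF. A small sketch (peer_done_writing is a hypothetical helper; 'sd' is a connected AF_UNIX socket):

	#define _GNU_SOURCE		/* for POLLRDHUP */
	#include <poll.h>

	/* Sketch: detect that the peer shut down its writing side. */
	static int peer_done_writing(int sd)
	{
		struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLRDHUP };

		if (poll(&pfd, 1, -1) < 0)
			return -1;
		return !!(pfd.revents & POLLRDHUP);
	}
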
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d6d046b9f6f2..d587ad284b3d 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -253,11 +253,16 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
253 WARN_ON(err); 253 WARN_ON(err);
254 wdev->netdev->features |= NETIF_F_NETNS_LOCAL; 254 wdev->netdev->features |= NETIF_F_NETNS_LOCAL;
255 } 255 }
256
257 return err;
256 } 258 }
257 259
258 wiphy_net_set(&rdev->wiphy, net); 260 wiphy_net_set(&rdev->wiphy, net);
259 261
260 return err; 262 err = device_rename(&rdev->wiphy.dev, dev_name(&rdev->wiphy.dev));
263 WARN_ON(err);
264
265 return 0;
261} 266}
262 267
263static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) 268static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
@@ -428,7 +433,7 @@ int wiphy_register(struct wiphy *wiphy)
428 433
429 /* sanity check ifmodes */ 434 /* sanity check ifmodes */
430 WARN_ON(!ifmodes); 435 WARN_ON(!ifmodes);
431 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; 436 ifmodes &= ((1 << NUM_NL80211_IFTYPES) - 1) & ~1;
432 if (WARN_ON(ifmodes != wiphy->interface_modes)) 437 if (WARN_ON(ifmodes != wiphy->interface_modes))
433 wiphy->interface_modes = ifmodes; 438 wiphy->interface_modes = ifmodes;
434 439
@@ -683,8 +688,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
683 INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work); 688 INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
684 INIT_LIST_HEAD(&wdev->event_list); 689 INIT_LIST_HEAD(&wdev->event_list);
685 spin_lock_init(&wdev->event_lock); 690 spin_lock_init(&wdev->event_lock);
686 INIT_LIST_HEAD(&wdev->action_registrations); 691 INIT_LIST_HEAD(&wdev->mgmt_registrations);
687 spin_lock_init(&wdev->action_registrations_lock); 692 spin_lock_init(&wdev->mgmt_registrations_lock);
688 693
689 mutex_lock(&rdev->devlist_mtx); 694 mutex_lock(&rdev->devlist_mtx);
690 list_add_rcu(&wdev->list, &rdev->netdev_list); 695 list_add_rcu(&wdev->list, &rdev->netdev_list);
@@ -804,7 +809,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
804 sysfs_remove_link(&dev->dev.kobj, "phy80211"); 809 sysfs_remove_link(&dev->dev.kobj, "phy80211");
805 list_del_rcu(&wdev->list); 810 list_del_rcu(&wdev->list);
806 rdev->devlist_generation++; 811 rdev->devlist_generation++;
807 cfg80211_mlme_purge_actions(wdev); 812 cfg80211_mlme_purge_registrations(wdev);
808#ifdef CONFIG_CFG80211_WEXT 813#ifdef CONFIG_CFG80211_WEXT
809 kfree(wdev->wext.keys); 814 kfree(wdev->wext.keys);
810#endif 815#endif
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 63d57ae399c3..58ab2c791d28 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -331,16 +331,17 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
331 const u8 *resp_ie, size_t resp_ie_len, 331 const u8 *resp_ie, size_t resp_ie_len,
332 u16 status, bool wextev, 332 u16 status, bool wextev,
333 struct cfg80211_bss *bss); 333 struct cfg80211_bss *bss);
334int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid, 334int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
335 const u8 *match_data, int match_len); 335 u16 frame_type, const u8 *match_data,
336void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid); 336 int match_len);
337void cfg80211_mlme_purge_actions(struct wireless_dev *wdev); 337void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
338int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, 338void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
339 struct net_device *dev, 339int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
340 struct ieee80211_channel *chan, 340 struct net_device *dev,
341 enum nl80211_channel_type channel_type, 341 struct ieee80211_channel *chan,
342 bool channel_type_valid, 342 enum nl80211_channel_type channel_type,
343 const u8 *buf, size_t len, u64 *cookie); 343 bool channel_type_valid,
344 const u8 *buf, size_t len, u64 *cookie);
344 345
345/* SME */ 346/* SME */
346int __cfg80211_connect(struct cfg80211_registered_device *rdev, 347int __cfg80211_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index d1a3fb99fdf2..8515b1e5c578 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -149,7 +149,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
149 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 149 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
150 const u8 *bssid = mgmt->bssid; 150 const u8 *bssid = mgmt->bssid;
151 int i; 151 int i;
152 bool found = false; 152 bool found = false, was_current = false;
153 153
154 ASSERT_WDEV_LOCK(wdev); 154 ASSERT_WDEV_LOCK(wdev);
155 155
@@ -159,6 +159,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
159 cfg80211_put_bss(&wdev->current_bss->pub); 159 cfg80211_put_bss(&wdev->current_bss->pub);
160 wdev->current_bss = NULL; 160 wdev->current_bss = NULL;
161 found = true; 161 found = true;
162 was_current = true;
162 } else for (i = 0; i < MAX_AUTH_BSSES; i++) { 163 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
163 if (wdev->auth_bsses[i] && 164 if (wdev->auth_bsses[i] &&
164 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { 165 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
@@ -183,7 +184,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
183 184
184 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL); 185 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
185 186
186 if (wdev->sme_state == CFG80211_SME_CONNECTED) { 187 if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) {
187 u16 reason_code; 188 u16 reason_code;
188 bool from_ap; 189 bool from_ap;
189 190
@@ -747,31 +748,51 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
747} 748}
748EXPORT_SYMBOL(cfg80211_new_sta); 749EXPORT_SYMBOL(cfg80211_new_sta);
749 750
750struct cfg80211_action_registration { 751struct cfg80211_mgmt_registration {
751 struct list_head list; 752 struct list_head list;
752 753
753 u32 nlpid; 754 u32 nlpid;
754 755
755 int match_len; 756 int match_len;
756 757
758 __le16 frame_type;
759
757 u8 match[]; 760 u8 match[];
758}; 761};
759 762
760int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid, 763int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
761 const u8 *match_data, int match_len) 764 u16 frame_type, const u8 *match_data,
765 int match_len)
762{ 766{
763 struct cfg80211_action_registration *reg, *nreg; 767 struct cfg80211_mgmt_registration *reg, *nreg;
764 int err = 0; 768 int err = 0;
769 u16 mgmt_type;
770
771 if (!wdev->wiphy->mgmt_stypes)
772 return -EOPNOTSUPP;
773
774 if ((frame_type & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT)
775 return -EINVAL;
776
777 if (frame_type & ~(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE))
778 return -EINVAL;
779
780 mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
781 if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].rx & BIT(mgmt_type)))
782 return -EINVAL;
765 783
766 nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL); 784 nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL);
767 if (!nreg) 785 if (!nreg)
768 return -ENOMEM; 786 return -ENOMEM;
769 787
770 spin_lock_bh(&wdev->action_registrations_lock); 788 spin_lock_bh(&wdev->mgmt_registrations_lock);
771 789
772 list_for_each_entry(reg, &wdev->action_registrations, list) { 790 list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
773 int mlen = min(match_len, reg->match_len); 791 int mlen = min(match_len, reg->match_len);
774 792
793 if (frame_type != le16_to_cpu(reg->frame_type))
794 continue;
795
775 if (memcmp(reg->match, match_data, mlen) == 0) { 796 if (memcmp(reg->match, match_data, mlen) == 0) {
776 err = -EALREADY; 797 err = -EALREADY;
777 break; 798 break;
@@ -786,62 +807,75 @@ int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
786 memcpy(nreg->match, match_data, match_len); 807 memcpy(nreg->match, match_data, match_len);
787 nreg->match_len = match_len; 808 nreg->match_len = match_len;
788 nreg->nlpid = snd_pid; 809 nreg->nlpid = snd_pid;
789 list_add(&nreg->list, &wdev->action_registrations); 810 nreg->frame_type = cpu_to_le16(frame_type);
811 list_add(&nreg->list, &wdev->mgmt_registrations);
790 812
791 out: 813 out:
792 spin_unlock_bh(&wdev->action_registrations_lock); 814 spin_unlock_bh(&wdev->mgmt_registrations_lock);
793 return err; 815 return err;
794} 816}
795 817
796void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid) 818void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
797{ 819{
798 struct cfg80211_action_registration *reg, *tmp; 820 struct cfg80211_mgmt_registration *reg, *tmp;
799 821
800 spin_lock_bh(&wdev->action_registrations_lock); 822 spin_lock_bh(&wdev->mgmt_registrations_lock);
801 823
802 list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) { 824 list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
803 if (reg->nlpid == nlpid) { 825 if (reg->nlpid == nlpid) {
804 list_del(&reg->list); 826 list_del(&reg->list);
805 kfree(reg); 827 kfree(reg);
806 } 828 }
807 } 829 }
808 830
809 spin_unlock_bh(&wdev->action_registrations_lock); 831 spin_unlock_bh(&wdev->mgmt_registrations_lock);
810} 832}
811 833
812void cfg80211_mlme_purge_actions(struct wireless_dev *wdev) 834void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
813{ 835{
814 struct cfg80211_action_registration *reg, *tmp; 836 struct cfg80211_mgmt_registration *reg, *tmp;
815 837
816 spin_lock_bh(&wdev->action_registrations_lock); 838 spin_lock_bh(&wdev->mgmt_registrations_lock);
817 839
818 list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) { 840 list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
819 list_del(&reg->list); 841 list_del(&reg->list);
820 kfree(reg); 842 kfree(reg);
821 } 843 }
822 844
823 spin_unlock_bh(&wdev->action_registrations_lock); 845 spin_unlock_bh(&wdev->mgmt_registrations_lock);
824} 846}
825 847
826int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, 848int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
827 struct net_device *dev, 849 struct net_device *dev,
828 struct ieee80211_channel *chan, 850 struct ieee80211_channel *chan,
829 enum nl80211_channel_type channel_type, 851 enum nl80211_channel_type channel_type,
830 bool channel_type_valid, 852 bool channel_type_valid,
831 const u8 *buf, size_t len, u64 *cookie) 853 const u8 *buf, size_t len, u64 *cookie)
832{ 854{
833 struct wireless_dev *wdev = dev->ieee80211_ptr; 855 struct wireless_dev *wdev = dev->ieee80211_ptr;
834 const struct ieee80211_mgmt *mgmt; 856 const struct ieee80211_mgmt *mgmt;
857 u16 stype;
858
859 if (!wdev->wiphy->mgmt_stypes)
860 return -EOPNOTSUPP;
835 861
836 if (rdev->ops->action == NULL) 862 if (!rdev->ops->mgmt_tx)
837 return -EOPNOTSUPP; 863 return -EOPNOTSUPP;
864
838 if (len < 24 + 1) 865 if (len < 24 + 1)
839 return -EINVAL; 866 return -EINVAL;
840 867
841 mgmt = (const struct ieee80211_mgmt *) buf; 868 mgmt = (const struct ieee80211_mgmt *) buf;
842 if (!ieee80211_is_action(mgmt->frame_control)) 869
870 if (!ieee80211_is_mgmt(mgmt->frame_control))
843 return -EINVAL; 871 return -EINVAL;
844 if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { 872
873 stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
874 if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].tx & BIT(stype >> 4)))
875 return -EINVAL;
876
877 if (ieee80211_is_action(mgmt->frame_control) &&
878 mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
845 /* Verify that we are associated with the destination AP */ 879 /* Verify that we are associated with the destination AP */
846 wdev_lock(wdev); 880 wdev_lock(wdev);
847 881
@@ -862,64 +896,75 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
862 return -EINVAL; 896 return -EINVAL;
863 897
864 /* Transmit the Action frame as requested by user space */ 898 /* Transmit the Action frame as requested by user space */
865 return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type, 899 return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, channel_type,
866 channel_type_valid, buf, len, cookie); 900 channel_type_valid, buf, len, cookie);
867} 901}
868 902
869bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, 903bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
870 size_t len, gfp_t gfp) 904 size_t len, gfp_t gfp)
871{ 905{
872 struct wireless_dev *wdev = dev->ieee80211_ptr; 906 struct wireless_dev *wdev = dev->ieee80211_ptr;
873 struct wiphy *wiphy = wdev->wiphy; 907 struct wiphy *wiphy = wdev->wiphy;
874 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 908 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
875 struct cfg80211_action_registration *reg; 909 struct cfg80211_mgmt_registration *reg;
876 const u8 *action_data; 910 const struct ieee80211_txrx_stypes *stypes =
877 int action_data_len; 911 &wiphy->mgmt_stypes[wdev->iftype];
912 struct ieee80211_mgmt *mgmt = (void *)buf;
913 const u8 *data;
914 int data_len;
878 bool result = false; 915 bool result = false;
916 __le16 ftype = mgmt->frame_control &
917 cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
918 u16 stype;
879 919
880 /* frame length - min size excluding category */ 920 stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
881 action_data_len = len - (IEEE80211_MIN_ACTION_SIZE - 1);
882 921
883 /* action data starts with category */ 922 if (!(stypes->rx & BIT(stype)))
884 action_data = buf + IEEE80211_MIN_ACTION_SIZE - 1; 923 return false;
885 924
886 spin_lock_bh(&wdev->action_registrations_lock); 925 data = buf + ieee80211_hdrlen(mgmt->frame_control);
926 data_len = len - ieee80211_hdrlen(mgmt->frame_control);
927
928 spin_lock_bh(&wdev->mgmt_registrations_lock);
929
930 list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
931 if (reg->frame_type != ftype)
932 continue;
887 933
888 list_for_each_entry(reg, &wdev->action_registrations, list) { 934 if (reg->match_len > data_len)
889 if (reg->match_len > action_data_len)
890 continue; 935 continue;
891 936
892 if (memcmp(reg->match, action_data, reg->match_len)) 937 if (memcmp(reg->match, data, reg->match_len))
893 continue; 938 continue;
894 939
895 /* found match! */ 940 /* found match! */
896 941
897 /* Indicate the received Action frame to user space */ 942 /* Indicate the received Action frame to user space */
898 if (nl80211_send_action(rdev, dev, reg->nlpid, freq, 943 if (nl80211_send_mgmt(rdev, dev, reg->nlpid, freq,
899 buf, len, gfp)) 944 buf, len, gfp))
900 continue; 945 continue;
901 946
902 result = true; 947 result = true;
903 break; 948 break;
904 } 949 }
905 950
906 spin_unlock_bh(&wdev->action_registrations_lock); 951 spin_unlock_bh(&wdev->mgmt_registrations_lock);
907 952
908 return result; 953 return result;
909} 954}
910EXPORT_SYMBOL(cfg80211_rx_action); 955EXPORT_SYMBOL(cfg80211_rx_mgmt);
911 956
912void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, 957void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
913 const u8 *buf, size_t len, bool ack, gfp_t gfp) 958 const u8 *buf, size_t len, bool ack, gfp_t gfp)
914{ 959{
915 struct wireless_dev *wdev = dev->ieee80211_ptr; 960 struct wireless_dev *wdev = dev->ieee80211_ptr;
916 struct wiphy *wiphy = wdev->wiphy; 961 struct wiphy *wiphy = wdev->wiphy;
917 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 962 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
918 963
919 /* Indicate TX status of the Action frame to user space */ 964 /* Indicate TX status of the Action frame to user space */
920 nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp); 965 nl80211_send_mgmt_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
921} 966}
922EXPORT_SYMBOL(cfg80211_action_tx_status); 967EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
923 968
924void cfg80211_cqm_rssi_notify(struct net_device *dev, 969void cfg80211_cqm_rssi_notify(struct net_device *dev,
925 enum nl80211_cqm_rssi_threshold_event rssi_event, 970 enum nl80211_cqm_rssi_threshold_event rssi_event,
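
Both the registration and TX/RX paths above reduce a 16-bit frame_control value to a subtype bit with (fc & IEEE80211_FCTL_STYPE) >> 4 and test it against the per-iftype mgmt_stypes bitmap. A standalone illustration of that mapping (constants copied from ieee80211.h; the bitmap contents are made up for the example):

	#include <stdio.h>

	#define IEEE80211_FCTL_STYPE		0x00f0
	#define IEEE80211_FTYPE_MGMT		0x0000
	#define IEEE80211_STYPE_ACTION		0x00d0
	#define IEEE80211_STYPE_PROBE_REQ	0x0040
	#define BIT(n)				(1U << (n))

	int main(void)
	{
		/* Made-up capability: a driver accepting RX of Action and
		 * Probe Request frames for one interface type. */
		unsigned int rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
				  BIT(IEEE80211_STYPE_PROBE_REQ >> 4);

		unsigned int frame_type = IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION;
		unsigned int mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;

		/* Mirrors the BIT(mgmt_type) test in
		 * cfg80211_mlme_register_mgmt() above. */
		printf("action RX allowed: %d\n", !!(rx & BIT(mgmt_type)));
		return 0;
	}
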
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 37902a54e9c1..85a23de7bff3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -136,6 +136,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
136 .len = sizeof(struct nl80211_sta_flag_update), 136 .len = sizeof(struct nl80211_sta_flag_update),
137 }, 137 },
138 [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG }, 138 [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG },
139 [NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 },
140 [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
139 [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, 141 [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
140 [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, 142 [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
141 [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, 143 [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
@@ -156,6 +158,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
156 158
157 [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 }, 159 [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 },
158 [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 }, 160 [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 },
161 [NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 },
159}; 162};
160 163
161/* policy for the attributes */ 164/* policy for the attributes */
@@ -437,6 +440,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
437 struct ieee80211_rate *rate; 440 struct ieee80211_rate *rate;
438 int i; 441 int i;
439 u16 ifmodes = dev->wiphy.interface_modes; 442 u16 ifmodes = dev->wiphy.interface_modes;
443 const struct ieee80211_txrx_stypes *mgmt_stypes =
444 dev->wiphy.mgmt_stypes;
440 445
441 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); 446 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
442 if (!hdr) 447 if (!hdr)
@@ -471,6 +476,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
471 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, 476 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
472 dev->wiphy.max_num_pmkids); 477 dev->wiphy.max_num_pmkids);
473 478
479 if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
480 NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
481
474 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); 482 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
475 if (!nl_modes) 483 if (!nl_modes)
476 goto nla_put_failure; 484 goto nla_put_failure;
@@ -587,7 +595,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
587 CMD(flush_pmksa, FLUSH_PMKSA); 595 CMD(flush_pmksa, FLUSH_PMKSA);
588 CMD(remain_on_channel, REMAIN_ON_CHANNEL); 596 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
589 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); 597 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
590 CMD(action, ACTION); 598 CMD(mgmt_tx, FRAME);
591 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { 599 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
592 i++; 600 i++;
593 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); 601 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -608,6 +616,55 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
608 616
609 nla_nest_end(msg, nl_cmds); 617 nla_nest_end(msg, nl_cmds);
610 618
619 if (mgmt_stypes) {
620 u16 stypes;
621 struct nlattr *nl_ftypes, *nl_ifs;
622 enum nl80211_iftype ift;
623
624 nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES);
625 if (!nl_ifs)
626 goto nla_put_failure;
627
628 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
629 nl_ftypes = nla_nest_start(msg, ift);
630 if (!nl_ftypes)
631 goto nla_put_failure;
632 i = 0;
633 stypes = mgmt_stypes[ift].tx;
634 while (stypes) {
635 if (stypes & 1)
636 NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
637 (i << 4) | IEEE80211_FTYPE_MGMT);
638 stypes >>= 1;
639 i++;
640 }
641 nla_nest_end(msg, nl_ftypes);
642 }
643
644 nla_nest_end(msg, nl_ifs);
645
646 nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES);
647 if (!nl_ifs)
648 goto nla_put_failure;
649
650 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
651 nl_ftypes = nla_nest_start(msg, ift);
652 if (!nl_ftypes)
653 goto nla_put_failure;
654 i = 0;
655 stypes = mgmt_stypes[ift].rx;
656 while (stypes) {
657 if (stypes & 1)
658 NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
659 (i << 4) | IEEE80211_FTYPE_MGMT);
660 stypes >>= 1;
661 i++;
662 }
663 nla_nest_end(msg, nl_ftypes);
664 }
665 nla_nest_end(msg, nl_ifs);
666 }
667
611 return genlmsg_end(msg, hdr); 668 return genlmsg_end(msg, hdr);
612 669
613 nla_put_failure: 670 nla_put_failure:
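
The nested NL80211_ATTR_TX_FRAME_TYPES / NL80211_ATTR_RX_FRAME_TYPES layout built above is one nest per interface type, each holding repeated NL80211_ATTR_FRAME_TYPE u16 values. A hedged libnl-style fragment for walking one such nest (dump_frame_types is hypothetical; assumes libnl's nla_for_each_nested()):

	#include <stdio.h>
	#include <netlink/attr.h>

	/* Sketch: print every frame type inside one per-iftype nest;
	 * 'iftype_nest' is a single child of NL80211_ATTR_RX_FRAME_TYPES. */
	static void dump_frame_types(struct nlattr *iftype_nest)
	{
		struct nlattr *ft;
		int rem;

		nla_for_each_nested(ft, iftype_nest, rem)
			printf("frame type 0x%04x\n", nla_get_u16(ft));
	}
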
@@ -3572,6 +3629,21 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
3572 if (err) 3629 if (err)
3573 goto unlock_rtnl; 3630 goto unlock_rtnl;
3574 3631
3632 if (key.idx >= 0) {
3633 int i;
3634 bool ok = false;
3635 for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) {
3636 if (key.p.cipher == rdev->wiphy.cipher_suites[i]) {
3637 ok = true;
3638 break;
3639 }
3640 }
3641 if (!ok) {
3642 err = -EINVAL;
3643 goto out;
3644 }
3645 }
3646
3575 if (!rdev->ops->auth) { 3647 if (!rdev->ops->auth) {
3576 err = -EOPNOTSUPP; 3648 err = -EOPNOTSUPP;
3577 goto out; 3649 goto out;
@@ -3624,7 +3696,8 @@ unlock_rtnl:
3624 return err; 3696 return err;
3625} 3697}
3626 3698
3627static int nl80211_crypto_settings(struct genl_info *info, 3699static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
3700 struct genl_info *info,
3628 struct cfg80211_crypto_settings *settings, 3701 struct cfg80211_crypto_settings *settings,
3629 int cipher_limit) 3702 int cipher_limit)
3630{ 3703{
@@ -3632,6 +3705,19 @@ static int nl80211_crypto_settings(struct genl_info *info,
3632 3705
3633 settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT]; 3706 settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];
3634 3707
3708 if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) {
3709 u16 proto;
3710 proto = nla_get_u16(
3711 info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]);
3712 settings->control_port_ethertype = cpu_to_be16(proto);
3713 if (!(rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
3714 proto != ETH_P_PAE)
3715 return -EINVAL;
3716 if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT])
3717 settings->control_port_no_encrypt = true;
3718 } else
3719 settings->control_port_ethertype = cpu_to_be16(ETH_P_PAE);
3720
3635 if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) { 3721 if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
3636 void *data; 3722 void *data;
3637 int len, i; 3723 int len, i;
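
Userspace can exercise the new attributes by attaching them to a connect or associate request; anything other than EAPOL (ETH_P_PAE, 0x888e) is rejected unless the wiphy advertises WIPHY_FLAG_CONTROL_PORT_PROTOCOL. A hedged libnl fragment (set_control_port_proto is hypothetical; 0x88c7 is the 802.11 pre-authentication ethertype):

	#include <netlink/attr.h>
	#include <linux/nl80211.h>

	/* Sketch: request pre-auth (0x88c7) as the control-port protocol
	 * on a prepared NL80211_CMD_CONNECT message. */
	static int set_control_port_proto(struct nl_msg *msg)
	{
		if (nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, 0x88c7))
			return -1;
		return nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT);
	}
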
@@ -3759,7 +3845,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3759 if (info->attrs[NL80211_ATTR_PREV_BSSID]) 3845 if (info->attrs[NL80211_ATTR_PREV_BSSID])
3760 prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); 3846 prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
3761 3847
3762 err = nl80211_crypto_settings(info, &crypto, 1); 3848 err = nl80211_crypto_settings(rdev, info, &crypto, 1);
3763 if (!err) 3849 if (!err)
3764 err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, 3850 err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
3765 ssid, ssid_len, ie, ie_len, use_mfp, 3851 ssid, ssid_len, ie, ie_len, use_mfp,
@@ -4236,7 +4322,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
4236 4322
4237 connect.privacy = info->attrs[NL80211_ATTR_PRIVACY]; 4323 connect.privacy = info->attrs[NL80211_ATTR_PRIVACY];
4238 4324
4239 err = nl80211_crypto_settings(info, &connect.crypto, 4325 err = nl80211_crypto_settings(rdev, info, &connect.crypto,
4240 NL80211_MAX_NR_CIPHER_SUITES); 4326 NL80211_MAX_NR_CIPHER_SUITES);
4241 if (err) 4327 if (err)
4242 return err; 4328 return err;
@@ -4717,17 +4803,18 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
4717 return err; 4803 return err;
4718} 4804}
4719 4805
4720static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info) 4806static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
4721{ 4807{
4722 struct cfg80211_registered_device *rdev; 4808 struct cfg80211_registered_device *rdev;
4723 struct net_device *dev; 4809 struct net_device *dev;
4810 u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION;
4724 int err; 4811 int err;
4725 4812
4726 if (!info->attrs[NL80211_ATTR_FRAME_MATCH]) 4813 if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
4727 return -EINVAL; 4814 return -EINVAL;
4728 4815
4729 if (nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]) < 1) 4816 if (info->attrs[NL80211_ATTR_FRAME_TYPE])
4730 return -EINVAL; 4817 frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]);
4731 4818
4732 rtnl_lock(); 4819 rtnl_lock();
4733 4820
@@ -4742,12 +4829,13 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
4742 } 4829 }
4743 4830
4744 /* not much point in registering if we can't reply */ 4831 /* not much point in registering if we can't reply */
4745 if (!rdev->ops->action) { 4832 if (!rdev->ops->mgmt_tx) {
4746 err = -EOPNOTSUPP; 4833 err = -EOPNOTSUPP;
4747 goto out; 4834 goto out;
4748 } 4835 }
4749 4836
4750 err = cfg80211_mlme_register_action(dev->ieee80211_ptr, info->snd_pid, 4837 err = cfg80211_mlme_register_mgmt(dev->ieee80211_ptr, info->snd_pid,
4838 frame_type,
4751 nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]), 4839 nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
4752 nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH])); 4840 nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
4753 out: 4841 out:
@@ -4758,7 +4846,7 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
4758 return err; 4846 return err;
4759} 4847}
4760 4848
4761static int nl80211_action(struct sk_buff *skb, struct genl_info *info) 4849static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
4762{ 4850{
4763 struct cfg80211_registered_device *rdev; 4851 struct cfg80211_registered_device *rdev;
4764 struct net_device *dev; 4852 struct net_device *dev;
@@ -4781,7 +4869,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
4781 if (err) 4869 if (err)
4782 goto unlock_rtnl; 4870 goto unlock_rtnl;
4783 4871
4784 if (!rdev->ops->action) { 4872 if (!rdev->ops->mgmt_tx) {
4785 err = -EOPNOTSUPP; 4873 err = -EOPNOTSUPP;
4786 goto out; 4874 goto out;
4787 } 4875 }
@@ -4824,17 +4912,17 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
4824 } 4912 }
4825 4913
4826 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 4914 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
4827 NL80211_CMD_ACTION); 4915 NL80211_CMD_FRAME);
4828 4916
4829 if (IS_ERR(hdr)) { 4917 if (IS_ERR(hdr)) {
4830 err = PTR_ERR(hdr); 4918 err = PTR_ERR(hdr);
4831 goto free_msg; 4919 goto free_msg;
4832 } 4920 }
4833 err = cfg80211_mlme_action(rdev, dev, chan, channel_type, 4921 err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, channel_type,
4834 channel_type_valid, 4922 channel_type_valid,
4835 nla_data(info->attrs[NL80211_ATTR_FRAME]), 4923 nla_data(info->attrs[NL80211_ATTR_FRAME]),
4836 nla_len(info->attrs[NL80211_ATTR_FRAME]), 4924 nla_len(info->attrs[NL80211_ATTR_FRAME]),
4837 &cookie); 4925 &cookie);
4838 if (err) 4926 if (err)
4839 goto free_msg; 4927 goto free_msg;
4840 4928
@@ -5333,14 +5421,14 @@ static struct genl_ops nl80211_ops[] = {
5333 .flags = GENL_ADMIN_PERM, 5421 .flags = GENL_ADMIN_PERM,
5334 }, 5422 },
5335 { 5423 {
5336 .cmd = NL80211_CMD_REGISTER_ACTION, 5424 .cmd = NL80211_CMD_REGISTER_FRAME,
5337 .doit = nl80211_register_action, 5425 .doit = nl80211_register_mgmt,
5338 .policy = nl80211_policy, 5426 .policy = nl80211_policy,
5339 .flags = GENL_ADMIN_PERM, 5427 .flags = GENL_ADMIN_PERM,
5340 }, 5428 },
5341 { 5429 {
5342 .cmd = NL80211_CMD_ACTION, 5430 .cmd = NL80211_CMD_FRAME,
5343 .doit = nl80211_action, 5431 .doit = nl80211_tx_mgmt,
5344 .policy = nl80211_policy, 5432 .policy = nl80211_policy,
5345 .flags = GENL_ADMIN_PERM, 5433 .flags = GENL_ADMIN_PERM,
5346 }, 5434 },
@@ -6040,9 +6128,9 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
6040 nl80211_mlme_mcgrp.id, gfp); 6128 nl80211_mlme_mcgrp.id, gfp);
6041} 6129}
6042 6130
6043int nl80211_send_action(struct cfg80211_registered_device *rdev, 6131int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
6044 struct net_device *netdev, u32 nlpid, 6132 struct net_device *netdev, u32 nlpid,
6045 int freq, const u8 *buf, size_t len, gfp_t gfp) 6133 int freq, const u8 *buf, size_t len, gfp_t gfp)
6046{ 6134{
6047 struct sk_buff *msg; 6135 struct sk_buff *msg;
6048 void *hdr; 6136 void *hdr;
@@ -6052,7 +6140,7 @@ int nl80211_send_action(struct cfg80211_registered_device *rdev,
6052 if (!msg) 6140 if (!msg)
6053 return -ENOMEM; 6141 return -ENOMEM;
6054 6142
6055 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION); 6143 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
6056 if (!hdr) { 6144 if (!hdr) {
6057 nlmsg_free(msg); 6145 nlmsg_free(msg);
6058 return -ENOMEM; 6146 return -ENOMEM;
@@ -6080,10 +6168,10 @@ int nl80211_send_action(struct cfg80211_registered_device *rdev,
6080 return -ENOBUFS; 6168 return -ENOBUFS;
6081} 6169}
6082 6170
6083void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, 6171void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
6084 struct net_device *netdev, u64 cookie, 6172 struct net_device *netdev, u64 cookie,
6085 const u8 *buf, size_t len, bool ack, 6173 const u8 *buf, size_t len, bool ack,
6086 gfp_t gfp) 6174 gfp_t gfp)
6087{ 6175{
6088 struct sk_buff *msg; 6176 struct sk_buff *msg;
6089 void *hdr; 6177 void *hdr;
@@ -6092,7 +6180,7 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
6092 if (!msg) 6180 if (!msg)
6093 return; 6181 return;
6094 6182
6095 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION_TX_STATUS); 6183 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME_TX_STATUS);
6096 if (!hdr) { 6184 if (!hdr) {
6097 nlmsg_free(msg); 6185 nlmsg_free(msg);
6098 return; 6186 return;
@@ -6179,7 +6267,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
6179 6267
6180 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) 6268 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
6181 list_for_each_entry_rcu(wdev, &rdev->netdev_list, list) 6269 list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
6182 cfg80211_mlme_unregister_actions(wdev, notify->pid); 6270 cfg80211_mlme_unregister_socket(wdev, notify->pid);
6183 6271
6184 rcu_read_unlock(); 6272 rcu_read_unlock();
6185 6273
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 2ad7fbc7d9f1..30d2f939150d 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -74,13 +74,13 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
74 struct net_device *dev, const u8 *mac_addr, 74 struct net_device *dev, const u8 *mac_addr,
75 struct station_info *sinfo, gfp_t gfp); 75 struct station_info *sinfo, gfp_t gfp);
76 76
77int nl80211_send_action(struct cfg80211_registered_device *rdev, 77int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
78 struct net_device *netdev, u32 nlpid, int freq, 78 struct net_device *netdev, u32 nlpid, int freq,
79 const u8 *buf, size_t len, gfp_t gfp); 79 const u8 *buf, size_t len, gfp_t gfp);
80void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, 80void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
81 struct net_device *netdev, u64 cookie, 81 struct net_device *netdev, u64 cookie,
82 const u8 *buf, size_t len, bool ack, 82 const u8 *buf, size_t len, bool ack,
83 gfp_t gfp); 83 gfp_t gfp);
84 84
85void 85void
86nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, 86nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f180db0de66c..b0d9a08447c9 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -36,6 +36,7 @@
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/list.h> 37#include <linux/list.h>
38#include <linux/random.h> 38#include <linux/random.h>
39#include <linux/ctype.h>
39#include <linux/nl80211.h> 40#include <linux/nl80211.h>
40#include <linux/platform_device.h> 41#include <linux/platform_device.h>
41#include <net/cfg80211.h> 42#include <net/cfg80211.h>
@@ -181,14 +182,6 @@ static bool is_alpha2_set(const char *alpha2)
181 return false; 182 return false;
182} 183}
183 184
184static bool is_alpha_upper(char letter)
185{
186 /* ASCII A - Z */
187 if (letter >= 65 && letter <= 90)
188 return true;
189 return false;
190}
191
192static bool is_unknown_alpha2(const char *alpha2) 185static bool is_unknown_alpha2(const char *alpha2)
193{ 186{
194 if (!alpha2) 187 if (!alpha2)
@@ -220,7 +213,7 @@ static bool is_an_alpha2(const char *alpha2)
220{ 213{
221 if (!alpha2) 214 if (!alpha2)
222 return false; 215 return false;
223 if (is_alpha_upper(alpha2[0]) && is_alpha_upper(alpha2[1])) 216 if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
224 return true; 217 return true;
225 return false; 218 return false;
226} 219}
@@ -1399,6 +1392,11 @@ static DECLARE_WORK(reg_work, reg_todo);
1399 1392
1400static void queue_regulatory_request(struct regulatory_request *request) 1393static void queue_regulatory_request(struct regulatory_request *request)
1401{ 1394{
1395 if (isalpha(request->alpha2[0]))
1396 request->alpha2[0] = toupper(request->alpha2[0]);
1397 if (isalpha(request->alpha2[1]))
1398 request->alpha2[1] = toupper(request->alpha2[1]);
1399
1402 spin_lock(&reg_requests_lock); 1400 spin_lock(&reg_requests_lock);
1403 list_add_tail(&request->list, &reg_requests_list); 1401 list_add_tail(&request->list, &reg_requests_list);
1404 spin_unlock(&reg_requests_lock); 1402 spin_unlock(&reg_requests_lock);
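
With is_alpha_upper() gone, lowercase alpha2 codes now pass is_an_alpha2() and are normalized at queue time, so "us" and "US" behave identically. The same normalization in miniature (normalize_alpha2 is a hypothetical standalone helper):

	#include <ctype.h>

	/* Sketch of the upper-casing done in queue_regulatory_request(). */
	static void normalize_alpha2(char alpha2[2])
	{
		if (isalpha((unsigned char)alpha2[0]))
			alpha2[0] = toupper((unsigned char)alpha2[0]);
		if (isalpha((unsigned char)alpha2[1]))
			alpha2[1] = toupper((unsigned char)alpha2[1]);
	}
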
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9f2cef3e0ca0..74a9e3cce452 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -110,6 +110,13 @@ static int wiphy_resume(struct device *dev)
110 return ret; 110 return ret;
111} 111}
112 112
113static const void *wiphy_namespace(struct device *d)
114{
115 struct wiphy *wiphy = container_of(d, struct wiphy, dev);
116
117 return wiphy_net(wiphy);
118}
119
113struct class ieee80211_class = { 120struct class ieee80211_class = {
114 .name = "ieee80211", 121 .name = "ieee80211",
115 .owner = THIS_MODULE, 122 .owner = THIS_MODULE,
@@ -120,6 +127,8 @@ struct class ieee80211_class = {
120#endif 127#endif
121 .suspend = wiphy_suspend, 128 .suspend = wiphy_suspend,
122 .resume = wiphy_resume, 129 .resume = wiphy_resume,
130 .ns_type = &net_ns_type_operations,
131 .namespace = wiphy_namespace,
123}; 132};
124 133
125int wiphy_sysfs_init(void) 134int wiphy_sysfs_init(void)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 0c8a1e8b7690..bca32eb8f446 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -183,7 +183,14 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
183 return -EINVAL; 183 return -EINVAL;
184 break; 184 break;
185 default: 185 default:
186 return -EINVAL; 186 /*
187 * We don't know anything about this algorithm,
188 * allow using it -- but the driver must check
189 * all parameters! We still check below whether
190 * or not the driver supports this algorithm,
191 * of course.
192 */
193 break;
187 } 194 }
188 195
189 if (params->seq) { 196 if (params->seq) {
@@ -221,7 +228,7 @@ const unsigned char bridge_tunnel_header[] __aligned(2) =
221 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; 228 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
222EXPORT_SYMBOL(bridge_tunnel_header); 229EXPORT_SYMBOL(bridge_tunnel_header);
223 230
224unsigned int ieee80211_hdrlen(__le16 fc) 231unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
225{ 232{
226 unsigned int hdrlen = 24; 233 unsigned int hdrlen = 24;
227 234
@@ -823,7 +830,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
823 /* monitor can't bridge anyway */ 830 /* monitor can't bridge anyway */
824 break; 831 break;
825 case NL80211_IFTYPE_UNSPECIFIED: 832 case NL80211_IFTYPE_UNSPECIFIED:
826 case __NL80211_IFTYPE_AFTER_LAST: 833 case NUM_NL80211_IFTYPES:
827 /* not happening */ 834 /* not happening */
828 break; 835 break;
829 } 836 }
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 8f5116f5af19..dc675a3daa3d 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -611,7 +611,7 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
611#endif 611#endif
612 612
613#ifdef CONFIG_CFG80211_WEXT 613#ifdef CONFIG_CFG80211_WEXT
614 if (dev->ieee80211_ptr && dev->ieee80211_ptr && 614 if (dev->ieee80211_ptr &&
615 dev->ieee80211_ptr->wiphy && 615 dev->ieee80211_ptr->wiphy &&
616 dev->ieee80211_ptr->wiphy->wext && 616 dev->ieee80211_ptr->wiphy->wext &&
617 dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) 617 dev->ieee80211_ptr->wiphy->wext->get_wireless_stats)
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 9818198add8a..6fffe62d7c25 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -197,6 +197,8 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
197 wdev->wext.connect.ssid_len = len; 197 wdev->wext.connect.ssid_len = len;
198 198
199 wdev->wext.connect.crypto.control_port = false; 199 wdev->wext.connect.crypto.control_port = false;
200 wdev->wext.connect.crypto.control_port_ethertype =
201 cpu_to_be16(ETH_P_PAE);
200 202
201 err = cfg80211_mgd_wext_connect(rdev, wdev); 203 err = cfg80211_mgd_wext_connect(rdev, wdev);
202 out: 204 out: