Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 92
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/a2065.c | 10
-rw-r--r--  drivers/net/appletalk/Kconfig | 1
-rw-r--r--  drivers/net/ariadne.c | 15
-rw-r--r--  drivers/net/arm/ks8695net.c | 2
-rw-r--r--  drivers/net/atl1c/atl1c.h | 4
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c | 15
-rw-r--r--  drivers/net/atl1c/atl1c_hw.h | 43
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 6
-rw-r--r--  drivers/net/atl1e/atl1e_ethtool.c | 12
-rw-r--r--  drivers/net/atl1e/atl1e_hw.c | 34
-rw-r--r--  drivers/net/atl1e/atl1e_hw.h | 111
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 10
-rw-r--r--  drivers/net/atlx/atl1.c | 77
-rw-r--r--  drivers/net/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ax88796.c | 810
-rw-r--r--  drivers/net/benet/be.h | 55
-rw-r--r--  drivers/net/benet/be_cmds.c | 207
-rw-r--r--  drivers/net/benet/be_cmds.h | 96
-rw-r--r--  drivers/net/benet/be_ethtool.c | 87
-rw-r--r--  drivers/net/benet/be_hw.h | 110
-rw-r--r--  drivers/net/benet/be_main.c | 624
-rw-r--r--  drivers/net/bfin_mac.c | 13
-rw-r--r--  drivers/net/bna/bnad.c | 108
-rw-r--r--  drivers/net/bna/bnad.h | 2
-rw-r--r--  drivers/net/bnx2.c | 37
-rw-r--r--  drivers/net/bnx2.h | 7
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 64
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 157
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 35
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c | 137
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.h | 5
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 97
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h | 118
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 2727
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h | 34
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 703
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h | 5
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 4
-rw-r--r--  drivers/net/bonding/Makefile | 3
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 38
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 3
-rw-r--r--  drivers/net/bonding/bond_alb.c | 6
-rw-r--r--  drivers/net/bonding/bond_main.c | 648
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 275
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 23
-rw-r--r--  drivers/net/bonding/bonding.h | 112
-rw-r--r--  drivers/net/caif/Makefile | 4
-rw-r--r--  drivers/net/can/Kconfig | 6
-rw-r--r--  drivers/net/can/Makefile | 2
-rw-r--r--  drivers/net/can/at91_can.c | 138
-rw-r--r--  drivers/net/can/c_can/Kconfig | 15
-rw-r--r--  drivers/net/can/c_can/Makefile | 8
-rw-r--r--  drivers/net/can/c_can/c_can.c | 1152
-rw-r--r--  drivers/net/can/c_can/c_can.h | 86
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 216
-rw-r--r--  drivers/net/can/janz-ican3.c | 5
-rw-r--r--  drivers/net/can/mcp251x.c | 2
-rw-r--r--  drivers/net/can/mscan/Kconfig | 2
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 15
-rw-r--r--  drivers/net/can/pch_can.c | 5
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c | 9
-rw-r--r--  drivers/net/can/softing/Kconfig | 30
-rw-r--r--  drivers/net/can/softing/Makefile | 6
-rw-r--r--  drivers/net/can/softing/softing.h | 167
-rw-r--r--  drivers/net/can/softing/softing_cs.c | 360
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 691
-rw-r--r--  drivers/net/can/softing/softing_main.c | 894
-rw-r--r--  drivers/net/can/softing/softing_platform.h | 40
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 6
-rw-r--r--  drivers/net/cnic.c | 254
-rw-r--r--  drivers/net/cnic.h | 2
-rw-r--r--  drivers/net/cnic_if.h | 8
-rw-r--r--  drivers/net/cs89x0.c | 19
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 14
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 5
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 4
-rw-r--r--  drivers/net/cxgb4/t4_msg.h | 1
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 80
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 2
-rw-r--r--  drivers/net/davinci_cpdma.c | 11
-rw-r--r--  drivers/net/davinci_cpdma.h | 1
-rw-r--r--  drivers/net/davinci_emac.c | 9
-rw-r--r--  drivers/net/depca.c | 6
-rw-r--r--  drivers/net/dl2k.c | 4
-rw-r--r--  drivers/net/dm9000.c | 24
-rw-r--r--  drivers/net/dnet.c | 3
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 4
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 1
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 3
-rw-r--r--  drivers/net/e1000e/defines.h | 1
-rw-r--r--  drivers/net/e1000e/e1000.h | 5
-rw-r--r--  drivers/net/e1000e/ethtool.c | 92
-rw-r--r--  drivers/net/e1000e/hw.h | 5
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 48
-rw-r--r--  drivers/net/e1000e/lib.c | 4
-rw-r--r--  drivers/net/e1000e/netdev.c | 193
-rw-r--r--  drivers/net/e1000e/phy.c | 8
-rw-r--r--  drivers/net/enc28j60.c | 2
-rw-r--r--  drivers/net/enic/Makefile | 2
-rw-r--r--  drivers/net/enic/enic.h | 11
-rw-r--r--  drivers/net/enic/enic_dev.c | 221
-rw-r--r--  drivers/net/enic/enic_dev.h | 41
-rw-r--r--  drivers/net/enic/enic_main.c | 326
-rw-r--r--  drivers/net/enic/vnic_dev.c | 26
-rw-r--r--  drivers/net/enic/vnic_dev.h | 8
-rw-r--r--  drivers/net/enic/vnic_devcmd.h | 38
-rw-r--r--  drivers/net/enic/vnic_rq.h | 5
-rw-r--r--  drivers/net/eql.c | 10
-rw-r--r--  drivers/net/ethoc.c | 8
-rw-r--r--  drivers/net/fec.c | 653
-rw-r--r--  drivers/net/fec_mpc52xx.c | 13
-rw-r--r--  drivers/net/fec_mpc52xx.h | 2
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 5
-rw-r--r--  drivers/net/forcedeth.c | 10
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 16
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 9
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 15
-rw-r--r--  drivers/net/fsl_pq_mdio.c | 9
-rw-r--r--  drivers/net/ftmac100.c | 1198
-rw-r--r--  drivers/net/ftmac100.h | 180
-rw-r--r--  drivers/net/gianfar.c | 30
-rw-r--r--  drivers/net/gianfar.h | 1
-rw-r--r--  drivers/net/greth.c | 8
-rw-r--r--  drivers/net/hamradio/bpqether.c | 5
-rw-r--r--  drivers/net/ibm_newemac/core.c | 9
-rw-r--r--  drivers/net/ibm_newemac/mal.c | 9
-rw-r--r--  drivers/net/ibm_newemac/rgmii.c | 9
-rw-r--r--  drivers/net/ibm_newemac/tah.c | 9
-rw-r--r--  drivers/net/ibm_newemac/zmii.c | 9
-rw-r--r--  drivers/net/igb/e1000_82575.c | 296
-rw-r--r--  drivers/net/igb/e1000_82575.h | 1
-rw-r--r--  drivers/net/igb/e1000_defines.h | 52
-rw-r--r--  drivers/net/igb/e1000_hw.h | 9
-rw-r--r--  drivers/net/igb/e1000_mbx.c | 38
-rw-r--r--  drivers/net/igb/e1000_nvm.c | 64
-rw-r--r--  drivers/net/igb/e1000_nvm.h | 1
-rw-r--r--  drivers/net/igb/e1000_regs.h | 27
-rw-r--r--  drivers/net/igb/igb.h | 8
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 30
-rw-r--r--  drivers/net/igb/igb_main.c | 232
-rw-r--r--  drivers/net/igbvf/ethtool.c | 6
-rw-r--r--  drivers/net/igbvf/igbvf.h | 3
-rw-r--r--  drivers/net/igbvf/netdev.c | 63
-rw-r--r--  drivers/net/igbvf/vf.c | 2
-rw-r--r--  drivers/net/ipg.c | 4
-rw-r--r--  drivers/net/irda/irtty-sir.c | 2
-rw-r--r--  drivers/net/irda/sh_irda.c | 14
-rw-r--r--  drivers/net/ixgb/ixgb.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 39
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 54
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 16
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c | 102
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c | 228
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 950
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h | 8
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c | 160
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h | 12
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c | 138
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.h | 25
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c | 176
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.h | 29
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c | 429
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 57
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 156
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h | 8
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 503
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.c | 37
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.h | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c | 594
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h | 7
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c | 116
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.h | 3
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 65
-rw-r--r--  drivers/net/ixgbe/ixgbe_x540.c | 49
-rw-r--r--  drivers/net/ixgbevf/defines.h | 2
-rw-r--r--  drivers/net/ixgbevf/ethtool.c | 4
-rw-r--r--  drivers/net/ixgbevf/ixgbevf.h | 1
-rw-r--r--  drivers/net/ixgbevf/ixgbevf_main.c | 98
-rw-r--r--  drivers/net/ixgbevf/regs.h | 2
-rw-r--r--  drivers/net/jme.c | 336
-rw-r--r--  drivers/net/jme.h | 87
-rw-r--r--  drivers/net/ks8842.c | 3
-rw-r--r--  drivers/net/ksz884x.c | 2
-rw-r--r--  drivers/net/ll_temac_main.c | 9
-rw-r--r--  drivers/net/loopback.c | 9
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/macvlan.c | 32
-rw-r--r--  drivers/net/macvtap.c | 21
-rw-r--r--  drivers/net/mii.c | 14
-rw-r--r--  drivers/net/mlx4/alloc.c | 13
-rw-r--r--  drivers/net/mlx4/cq.c | 2
-rw-r--r--  drivers/net/mlx4/en_cq.c | 38
-rw-r--r--  drivers/net/mlx4/en_ethtool.c | 66
-rw-r--r--  drivers/net/mlx4/en_main.c | 22
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 202
-rw-r--r--  drivers/net/mlx4/en_port.c | 13
-rw-r--r--  drivers/net/mlx4/en_port.h | 19
-rw-r--r--  drivers/net/mlx4/en_rx.c | 11
-rw-r--r--  drivers/net/mlx4/en_tx.c | 72
-rw-r--r--  drivers/net/mlx4/eq.c | 107
-rw-r--r--  drivers/net/mlx4/fw.c | 25
-rw-r--r--  drivers/net/mlx4/fw.h | 3
-rw-r--r--  drivers/net/mlx4/main.c | 137
-rw-r--r--  drivers/net/mlx4/mcg.c | 646
-rw-r--r--  drivers/net/mlx4/mlx4.h | 50
-rw-r--r--  drivers/net/mlx4/mlx4_en.h | 27
-rw-r--r--  drivers/net/mlx4/pd.c | 102
-rw-r--r--  drivers/net/mlx4/port.c | 165
-rw-r--r--  drivers/net/mlx4/profile.c | 4
-rw-r--r--  drivers/net/mv643xx_eth.c | 74
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 42
-rw-r--r--  drivers/net/myri_sbus.c | 8
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 6
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c | 15
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 60
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 3
-rw-r--r--  drivers/net/niu.c | 74
-rw-r--r--  drivers/net/ns83820.c | 5
-rw-r--r--  drivers/net/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 126
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 6
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 1
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 9
-rw-r--r--  drivers/net/phy/micrel.c | 24
-rw-r--r--  drivers/net/phy/phy.c | 8
-rw-r--r--  drivers/net/ppp_deflate.c | 2
-rw-r--r--  drivers/net/ppp_generic.c | 148
-rw-r--r--  drivers/net/pptp.c | 45
-rw-r--r--  drivers/net/qla3xxx.c | 12
-rw-r--r--  drivers/net/qlcnic/qlcnic.h | 5
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c | 2
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 15
-rw-r--r--  drivers/net/r6040.c | 115
-rw-r--r--  drivers/net/r8169.c | 363
-rw-r--r--  drivers/net/rionet.c | 6
-rw-r--r--  drivers/net/s2io.c | 4
-rw-r--r--  drivers/net/sfc/efx.c | 100
-rw-r--r--  drivers/net/sfc/efx.h | 19
-rw-r--r--  drivers/net/sfc/ethtool.c | 59
-rw-r--r--  drivers/net/sfc/falcon.c | 22
-rw-r--r--  drivers/net/sfc/falcon_boards.c | 2
-rw-r--r--  drivers/net/sfc/falcon_xmac.c | 2
-rw-r--r--  drivers/net/sfc/filter.c | 117
-rw-r--r--  drivers/net/sfc/io.h | 15
-rw-r--r--  drivers/net/sfc/mcdi.c | 32
-rw-r--r--  drivers/net/sfc/mcdi.h | 4
-rw-r--r--  drivers/net/sfc/mcdi_mac.c | 2
-rw-r--r--  drivers/net/sfc/mcdi_pcol.h | 2
-rw-r--r--  drivers/net/sfc/mcdi_phy.c | 2
-rw-r--r--  drivers/net/sfc/mdio_10g.c | 34
-rw-r--r--  drivers/net/sfc/mdio_10g.h | 5
-rw-r--r--  drivers/net/sfc/mtd.c | 2
-rw-r--r--  drivers/net/sfc/net_driver.h | 83
-rw-r--r--  drivers/net/sfc/nic.c | 73
-rw-r--r--  drivers/net/sfc/nic.h | 9
-rw-r--r--  drivers/net/sfc/phy.h | 2
-rw-r--r--  drivers/net/sfc/qt202x_phy.c | 2
-rw-r--r--  drivers/net/sfc/regs.h | 8
-rw-r--r--  drivers/net/sfc/rx.c | 144
-rw-r--r--  drivers/net/sfc/selftest.c | 4
-rw-r--r--  drivers/net/sfc/selftest.h | 2
-rw-r--r--  drivers/net/sfc/siena.c | 24
-rw-r--r--  drivers/net/sfc/spi.h | 2
-rw-r--r--  drivers/net/sfc/tenxpress.c | 4
-rw-r--r--  drivers/net/sfc/tx.c | 92
-rw-r--r--  drivers/net/sfc/txc43128_phy.c | 4
-rw-r--r--  drivers/net/sfc/workarounds.h | 4
-rw-r--r--  drivers/net/sh_eth.c | 737
-rw-r--r--  drivers/net/sh_eth.h | 654
-rw-r--r--  drivers/net/sis900.c | 5
-rw-r--r--  drivers/net/skfp/Makefile | 2
-rw-r--r--  drivers/net/skge.c | 3
-rw-r--r--  drivers/net/sky2.c | 2
-rw-r--r--  drivers/net/smc91x.c | 13
-rw-r--r--  drivers/net/smc91x.h | 62
-rw-r--r--  drivers/net/smsc911x.c | 9
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/sunbmac.c | 9
-rw-r--r--  drivers/net/sungem.c | 58
-rw-r--r--  drivers/net/sungem.h | 3
-rw-r--r--  drivers/net/sunhme.c | 14
-rw-r--r--  drivers/net/sunlance.c | 8
-rw-r--r--  drivers/net/sunqe.c | 8
-rw-r--r--  drivers/net/tg3.c | 444
-rw-r--r--  drivers/net/tg3.h | 16
-rw-r--r--  drivers/net/tile/tilepro.c | 965
-rw-r--r--  drivers/net/tlan.c | 3840
-rw-r--r--  drivers/net/tlan.h | 192
-rw-r--r--  drivers/net/tun.c | 85
-rw-r--r--  drivers/net/typhoon.c | 3
-rw-r--r--  drivers/net/ucc_geth.c | 8
-rw-r--r--  drivers/net/usb/cdc-phonet.c | 10
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 244
-rw-r--r--  drivers/net/usb/dm9601.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 22
-rw-r--r--  drivers/net/usb/kaweth.c | 1
-rw-r--r--  drivers/net/usb/smsc95xx.c | 17
-rw-r--r--  drivers/net/usb/usbnet.c | 4
-rw-r--r--  drivers/net/veth.c | 14
-rw-r--r--  drivers/net/via-velocity.c | 9
-rw-r--r--  drivers/net/via-velocity.h | 8
-rw-r--r--  drivers/net/virtio_net.c | 27
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 93
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 278
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 7
-rw-r--r--  drivers/net/vxge/vxge-config.c | 34
-rw-r--r--  drivers/net/vxge/vxge-config.h | 10
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c | 4
-rw-r--r--  drivers/net/vxge/vxge-main.c | 234
-rw-r--r--  drivers/net/vxge/vxge-main.h | 23
-rw-r--r--  drivers/net/vxge/vxge-traffic.c | 116
-rw-r--r--  drivers/net/vxge/vxge-traffic.h | 14
-rw-r--r--  drivers/net/vxge/vxge-version.h | 4
-rw-r--r--  drivers/net/wan/lmc/Makefile | 2
-rw-r--r--  drivers/net/wan/pc300_tty.c | 9
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/Makefile | 2
-rw-r--r--  drivers/net/wireless/ath/ath.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 45
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c | 43
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c | 90
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9485_initvals.h | 925
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 97
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 94
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_9287.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/gpio.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_beacon.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 33
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 181
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 61
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 53
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 116
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/phy.h | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 33
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/ath/key.c | 32
-rw-r--r--  drivers/net/wireless/hostap/hostap_config.h | 4
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 70
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.h | 1
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 196
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.h | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 11
-rw-r--r--  drivers/net/wireless/libertas/if_spi.c | 65
-rw-r--r--  drivers/net/wireless/mwifiex/11n.c | 922
-rw-r--r--  drivers/net/wireless/mwifiex/11n.h | 178
-rw-r--r--  drivers/net/wireless/mwifiex/11n_aggr.c | 423
-rw-r--r--  drivers/net/wireless/mwifiex/11n_aggr.h | 32
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.c | 637
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.h | 67
-rw-r--r--  drivers/net/wireless/mwifiex/Kconfig | 21
-rw-r--r--  drivers/net/wireless/mwifiex/Makefile | 41
-rw-r--r--  drivers/net/wireless/mwifiex/README | 204
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 1517
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.h | 31
-rw-r--r--  drivers/net/wireless/mwifiex/cfp.c | 368
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 1463
-rw-r--r--  drivers/net/wireless/mwifiex/debugfs.c | 773
-rw-r--r--  drivers/net/wireless/mwifiex/decl.h | 177
-rw-r--r--  drivers/net/wireless/mwifiex/fw.h | 1376
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 665
-rw-r--r--  drivers/net/wireless/mwifiex/ioctl.h | 433
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 1464
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 1102
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 1081
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 3098
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 1770
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.h | 305
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmd.c | 1226
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmdresp.c | 986
-rw-r--r--  drivers/net/wireless/mwifiex/sta_event.c | 405
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 2478
-rw-r--r--  drivers/net/wireless/mwifiex/sta_rx.c | 182
-rw-r--r--  drivers/net/wireless/mwifiex/sta_tx.c | 202
-rw-r--r--  drivers/net/wireless/mwifiex/txrx.c | 202
-rw-r--r--  drivers/net/wireless/mwifiex/util.c | 252
-rw-r--r--  drivers/net/wireless/mwifiex/util.h | 32
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.c | 1237
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.h | 112
-rw-r--r--  drivers/net/wireless/mwl8k.c | 740
-rw-r--r--  drivers/net/wireless/orinoco/cfg.c | 3
-rw-r--r--  drivers/net/wireless/orinoco/main.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 14
-rw-r--r--  drivers/net/wireless/p54/p54spi.c | 3
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 1
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 31
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 1
-rw-r--r--  drivers/net/wireless/wl1251/sdio.c | 2
-rw-r--r--  drivers/net/wireless/wl1251/spi.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/Makefile | 4
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1
-rw-r--r--  drivers/net/xen-netback/Makefile | 3
-rw-r--r--  drivers/net/xen-netback/common.h | 161
-rw-r--r--  drivers/net/xen-netback/interface.c | 424
-rw-r--r--  drivers/net/xen-netback/netback.c | 1745
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 490
-rw-r--r--  drivers/net/xen-netfront.c | 116
-rw-r--r--  drivers/net/xilinx_emaclite.c | 9
417 files changed, 51889 insertions(+), 12506 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 16fe4f9b719..dc280bc8eba 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
 config AX88796
 	tristate "ASIX AX88796 NE2000 clone support"
 	depends on ARM || MIPS || SUPERH
-	select CRC32
-	select MII
+	select PHYLIB
+	select MDIO_BITBANG
 	help
 	  AX88796 driver, using platform bus to provide
 	  chip detection and resources
@@ -1498,7 +1498,7 @@ config FORCEDETH
 config CS89x0
 	tristate "CS89x0 support"
 	depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
-		|| ARCH_IXDP2X01 || MACH_MX31ADS)
+		|| ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
 	---help---
 	  Support for CS89x0 chipset based Ethernet cards. If you have a
 	  network (Ethernet) card of this type, say Y and read the
@@ -1512,7 +1512,7 @@ config CS89x0
 config CS89x0_NONISA_IRQ
 	def_bool y
 	depends on CS89x0 != n
-	depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS
+	depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
 
 config TC35815
 	tristate "TOSHIBA TC35815 Ethernet support"
@@ -1944,7 +1944,8 @@ config 68360_ENET
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+		IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+	default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -2007,6 +2008,15 @@ config BCM63XX_ENET
 	  This driver supports the ethernet MACs in the Broadcom 63xx
 	  MIPS chipset family (BCM63XX).
 
+config FTMAC100
+	tristate "Faraday FTMAC100 10/100 Ethernet support"
+	depends on ARM
+	select MII
+	help
+	  This driver supports the FTMAC100 10/100 Ethernet controller
+	  from Faraday. It is used on Faraday A320, Andes AG101 and some
+	  other ARM/NDS32 SoC's.
+
 source "drivers/net/fs_enet/Kconfig"
 
 source "drivers/net/octeon/Kconfig"
@@ -2098,7 +2108,9 @@ config E1000
 
 config E1000E
 	tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
+	select CRC32
 	depends on PCI && (!SPARC32 || BROKEN)
+	select CRC32
 	---help---
 	  This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
 	  ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -2235,15 +2247,6 @@ config R8169
 	  To compile this driver as a module, choose M here: the module
 	  will be called r8169. This is recommended.
 
-config R8169_VLAN
-	bool "VLAN support"
-	depends on R8169 && VLAN_8021Q
-	---help---
-	  Say Y here for the r8169 driver to support the functions required
-	  by the kernel 802.1Q code.
-
-	  If in doubt, say Y.
-
 config SB1250_MAC
 	tristate "SB1250 Gigabit Ethernet support"
 	depends on SIBYTE_SB1xxx_SOC
@@ -2594,14 +2597,9 @@ config CHELSIO_T1_1G
 	  Enables support for Chelsio's gigabit Ethernet PCI cards. If you
 	  are using only 10G cards say 'N' here.
 
-config CHELSIO_T3_DEPENDS
-	tristate
-	depends on PCI && INET
-	default y
-
 config CHELSIO_T3
 	tristate "Chelsio Communications T3 10Gb Ethernet support"
-	depends on CHELSIO_T3_DEPENDS
+	depends on PCI && INET
 	select FW_LOADER
 	select MDIO
 	help
@@ -2619,14 +2617,9 @@ config CHELSIO_T3
 	  To compile this driver as a module, choose M here: the module
 	  will be called cxgb3.
 
-config CHELSIO_T4_DEPENDS
-	tristate
-	depends on PCI && INET
-	default y
-
 config CHELSIO_T4
 	tristate "Chelsio Communications T4 Ethernet support"
-	depends on CHELSIO_T4_DEPENDS
+	depends on PCI
 	select FW_LOADER
 	select MDIO
 	help
@@ -2644,14 +2637,9 @@ config CHELSIO_T4
 	  To compile this driver as a module choose M here; the module
 	  will be called cxgb4.
 
-config CHELSIO_T4VF_DEPENDS
-	tristate
-	depends on PCI && INET
-	default y
-
 config CHELSIO_T4VF
 	tristate "Chelsio Communications T4 Virtual Function Ethernet support"
-	depends on CHELSIO_T4VF_DEPENDS
+	depends on PCI
 	help
 	  This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
 	  adapters with PCI-E SR-IOV Virtual Functions.
@@ -2864,7 +2852,7 @@ config MLX4_CORE
 	default n
 
 config MLX4_DEBUG
-	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
 	depends on MLX4_CORE
 	default y
 	---help---
@@ -2966,12 +2954,38 @@ config XEN_NETDEV_FRONTEND
 	select XEN_XENBUS_FRONTEND
 	default y
 	help
-	  The network device frontend driver allows the kernel to
-	  access network devices exported exported by a virtual
-	  machine containing a physical network device driver. The
-	  frontend driver is intended for unprivileged guest domains;
-	  if you are compiling a kernel for a Xen guest, you almost
-	  certainly want to enable this.
+	  This driver provides support for Xen paravirtual network
+	  devices exported by a Xen network driver domain (often
+	  domain 0).
+
+	  The corresponding Linux backend driver is enabled by the
+	  CONFIG_XEN_NETDEV_BACKEND option.
+
+	  If you are compiling a kernel for use as Xen guest, you
+	  should say Y here. To compile this driver as a module, chose
+	  M here: the module will be called xen-netfront.
+
+config XEN_NETDEV_BACKEND
+	tristate "Xen backend network device"
+	depends on XEN_BACKEND
+	help
+	  This driver allows the kernel to act as a Xen network driver
+	  domain which exports paravirtual network devices to other
+	  Xen domains. These devices can be accessed by any operating
+	  system that implements a compatible front end.
+
+	  The corresponding Linux frontend driver is enabled by the
+	  CONFIG_XEN_NETDEV_FRONTEND configuration option.
+
+	  The backend driver presents a standard network device
+	  endpoint for each paravirtual network device to the driver
+	  domain network stack. These can then be bridged or routed
+	  etc in order to provide full network connectivity.
+
+	  If you are compiling a kernel to run in a Xen network driver
+	  domain (often this is domain 0) you should say Y here. To
+	  compile this driver as a module, chose M here: the module
+	  will be called xen-netback.
 
 config ISERIES_VETH
 	tristate "iSeries Virtual Ethernet driver support"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90738d1399..01b604ad155 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_FTMAC100) += ftmac100.o
 
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_SLIP) += slip.o
 obj-$(CONFIG_SLHC) += slhc.o
 
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
 
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index f142cc21e45..deaa8bc16cf 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -711,14 +711,14 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
 		return -EBUSY;
 	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
 	if (!r2) {
-		release_resource(r1);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
 		return -EBUSY;
 	}
 
 	dev = alloc_etherdev(sizeof(struct lance_private));
 	if (dev == NULL) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
+		release_mem_region(mem_start, A2065_RAM_SIZE);
 		return -ENOMEM;
 	}
 
@@ -764,8 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
 
 	err = register_netdev(dev);
 	if (err) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
+		release_mem_region(mem_start, A2065_RAM_SIZE);
 		free_netdev(dev);
 		return err;
 	}
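
The a2065 hunks above (and the ariadne hunks below) replace release_resource(r1) with release_mem_region(): the ranges were claimed with request_mem_region(), and the matching teardown is release_mem_region() on the same start/length pair, not release_resource() on the returned pointer. A minimal C sketch of the claim/unwind pairing these fixes establish — the example_* names and parameters are illustrative, not taken from the drivers:

#include <linux/ioport.h>

/* Sketch: claim two MMIO ranges and unwind symmetrically on failure. */
static int example_claim_regions(resource_size_t regs, resource_size_t regs_len,
				 resource_size_t ram, resource_size_t ram_len)
{
	if (!request_mem_region(regs, regs_len, "regs"))
		return -EBUSY;

	if (!request_mem_region(ram, ram_len, "RAM")) {
		/* undo with the same (start, len) pair that was requested */
		release_mem_region(regs, regs_len);
		return -EBUSY;
	}
	return 0;
}
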
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 0b376a99097..f5a89164e77 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -3,7 +3,6 @@
 #
 config ATALK
 	tristate "Appletalk protocol support"
-	depends on BKL # waiting to be removed from net/appletalk/ddp.c
 	select LLC
 	---help---
 	  AppleTalk is the protocol that Apple computers can use to communicate
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 39214e51245..b7f45cd756a 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -182,14 +182,14 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
 		return -EBUSY;
 	r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
 	if (!r2) {
-		release_resource(r1);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
 		return -EBUSY;
 	}
 
 	dev = alloc_etherdev(sizeof(struct ariadne_private));
 	if (dev == NULL) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
+		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
 		return -ENOMEM;
 	}
 
@@ -213,8 +213,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
 
 	err = register_netdev(dev);
 	if (err) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
+		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
 		free_netdev(dev);
 		return err;
 	}
@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data)
 	int csr0, boguscnt;
 	int handled = 0;
 
-	if (dev == NULL) {
-		printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
-		return IRQ_NONE;
-	}
-
 	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
 
 	if (!(lance->RDP & INTR))	/* Check if any interrupt has been */
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 62d6f88cbab..aa07657744c 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1644,7 +1644,7 @@ ks8695_cleanup(void)
 module_init(ks8695_init);
 module_exit(ks8695_cleanup);
 
-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" MODULENAME);
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 9ab58097fa2..7cb375e0e29 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -265,7 +265,7 @@ struct atl1c_recv_ret_status {
 	__le32 word3;
 };
 
-/* RFD desciptor */
+/* RFD descriptor */
 struct atl1c_rx_free_desc {
 	__le64 buffer_addr;
 };
@@ -531,7 +531,7 @@ struct atl1c_rfd_ring {
 	struct atl1c_buffer *buffer_info;
 };
 
-/* receive return desciptor (rrd) ring */
+/* receive return descriptor (rrd) ring */
 struct atl1c_rrd_ring {
 	void *desc;		/* descriptor ring virtual address */
 	dma_addr_t dma;		/* descriptor ring physical address */
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf67200994..23f2ab0f2fa 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
  */
 static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 {
-	u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+	u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
 	u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
 				~GIGA_CR_1000T_SPEED_MASK;
 
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 	}
 
 	if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
-	    atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+	    atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
 		return -1;
 	return 0;
 }
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
517 "Error Setting up Auto-Negotiation\n"); 517 "Error Setting up Auto-Negotiation\n");
518 return ret_val; 518 return ret_val;
519 } 519 }
520 mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; 520 mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
521 break; 521 break;
522 case MEDIA_TYPE_100M_FULL: 522 case MEDIA_TYPE_100M_FULL:
523 mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX; 523 mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
524 break; 524 break;
525 case MEDIA_TYPE_100M_HALF: 525 case MEDIA_TYPE_100M_HALF:
526 mii_bmcr_data |= BMCR_SPEED_100; 526 mii_bmcr_data |= BMCR_SPEED100;
527 break; 527 break;
528 case MEDIA_TYPE_10M_FULL: 528 case MEDIA_TYPE_10M_FULL:
529 mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX; 529 mii_bmcr_data |= BMCR_FULLDPLX;
530 break; 530 break;
531 case MEDIA_TYPE_10M_HALF: 531 case MEDIA_TYPE_10M_HALF:
532 mii_bmcr_data |= BMCR_SPEED_10;
533 break; 532 break;
534 default: 533 default:
535 if (netif_msg_link(adapter)) 534 if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
 	err = atl1c_phy_setup_adv(hw);
 	if (err)
 		return err;
-	mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+	mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 
 	return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
 }
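
The atl1c changes above (and the atl1e ones further down) drop driver-private PHY register names in favour of the generic IEEE 802.3 definitions in <linux/mii.h>: MII_CTRL1000 replaces MII_GIGA_CR, and BMCR_ANENABLE, BMCR_ANRESTART, BMCR_SPEED100 and BMCR_FULLDPLX replace the private BMCR_* duplicates being deleted from atl1c_hw.h below. A minimal sketch of the resulting idiom, reusing the driver's own atl1c_write_phy_reg() accessor; note that for forced 10M full duplex BMCR_FULLDPLX alone suffices, because BMCR speed bits cleared to zero already mean 10 Mb/s:

#include <linux/mii.h>

/* Sketch: restart autonegotiation with the generic mii.h constants. */
static int example_restart_aneg(struct atl1c_hw *hw)
{
	return atl1c_write_phy_reg(hw, MII_BMCR,
				   BMCR_ANENABLE | BMCR_ANRESTART);
}
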
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd675979aa..655fc6c4a8a 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
 #define REG_DEBUG_DATA0			0x1900
 #define REG_DEBUG_DATA1			0x1904
 
-/* PHY Control Register */
-#define MII_BMCR			0x00
-#define BMCR_SPEED_SELECT_MSB		0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE		0x0080  /* Collision test enable */
-#define BMCR_FULL_DUPLEX		0x0100  /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG		0x0200  /* Restart auto negotiation */
-#define BMCR_ISOLATE			0x0400  /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN			0x0800  /* Power down */
-#define BMCR_AUTO_NEG_EN		0x1000  /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB		0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK			0x4000  /* 0 = normal, 1 = loopback */
-#define BMCR_RESET			0x8000  /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK			0x2040
-#define BMCR_SPEED_1000			0x0040
-#define BMCR_SPEED_100			0x2000
-#define BMCR_SPEED_10			0x0000
-
-/* PHY Status Register */
-#define MII_BMSR			0x01
-#define BMMSR_EXTENDED_CAPS		0x0001  /* Extended register capabilities */
-#define BMSR_JABBER_DETECT		0x0002  /* Jabber Detected */
-#define BMSR_LINK_STATUS		0x0004  /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS		0x0008  /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT		0x0010  /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE		0x0020  /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS		0x0040  /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS		0x0100  /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS		0x0200  /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS		0x0400  /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS		0x0800  /* 10T Half Duplex Capable */
-#define BMSR_10T_FD_CAPS		0x1000  /* 10T Full Duplex Capable */
-#define BMSR_100X_HD_CAPS		0x2000  /* 100X Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS		0x4000  /* 100X Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS		0x8000  /* 100T4 Capable */
-
-#define MII_PHYSID1			0x02
-#define MII_PHYSID2			0x03
 #define L1D_MPW_PHYID1			0xD01C /* V7 */
 #define L1D_MPW_PHYID2			0xD01D /* V1-V6 */
 #define L1D_MPW_PHYID3			0xD01E /* V8 */
 
 
 /* Autoneg Advertisement Register */
-#define MII_ADVERTISE			0x04
-#define ADVERTISE_SPEED_MASK		0x01E0
-#define ADVERTISE_DEFAULT_CAP		0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+	(ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
 
 /* 1000BASE-T Control Register */
-#define MII_GIGA_CR			0x09
 #define GIGA_CR_1000T_REPEATER_DTE	0x0400  /* 1=Repeater/switch device port 0=DTE device */
 
 #define GIGA_CR_1000T_MS_VALUE		0x0800  /* 1=Configure PHY as Master 0=Configure PHY as Slave */
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf20eb..7d9d5067a65 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
 	/* required last entry */
 	{ 0 }
 };
@@ -1101,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
 	AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
 	max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
 			DEVICE_CTRL_MAX_PAYLOAD_MASK;
-	hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
 	max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
 			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
-	hw->dmar_block = min(max_pay_load, hw->dmar_block);
+	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
 
 	txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
 			TXQ_NUM_TPD_BURST_SHIFT;
@@ -2717,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
 		goto err_reset;
 	}
 
-	device_init_wakeup(&pdev->dev, 1);
 	/* reset the controller to
 	 * put the device in a known good starting state */
 	err = atl1c_phy_init(&adapter->hw);
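
The min() → min_t(u32, ...) conversions in atl1c_main.c (and in atl1e_main.c further down) exist because the kernel's min() macro contains a strict type check that complains when its two operands have different types; min_t() casts both operands to the named type before comparing. A compilable sketch of the distinction, under the assumption that both values fit in u32:

#include <linux/kernel.h>	/* min(), min_t() */

static u32 example_clamp(u32 block, unsigned long max_pay_load)
{
	/* min(block, max_pay_load) would trip min()'s type check
	 * (u32 vs unsigned long); min_t() makes the comparison type
	 * explicit instead. */
	return min_t(u32, max_pay_load, block);
}
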
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c3b94..1209297433b 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
 		ecmd->advertising = hw->autoneg_advertised |
 				    ADVERTISED_TP | ADVERTISED_Autoneg;
 
-		adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+		adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
 		adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
 		if (hw->autoneg_advertised & ADVERTISE_10_HALF)
-			adv4 |= MII_AR_10T_HD_CAPS;
+			adv4 |= ADVERTISE_10HALF;
 		if (hw->autoneg_advertised & ADVERTISE_10_FULL)
-			adv4 |= MII_AR_10T_FD_CAPS;
+			adv4 |= ADVERTISE_10FULL;
 		if (hw->autoneg_advertised & ADVERTISE_100_HALF)
-			adv4 |= MII_AR_100TX_HD_CAPS;
+			adv4 |= ADVERTISE_100HALF;
 		if (hw->autoneg_advertised & ADVERTISE_100_FULL)
-			adv4 |= MII_AR_100TX_FD_CAPS;
+			adv4 |= ADVERTISE_100FULL;
 		if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
-			adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+			adv9 |= ADVERTISE_1000FULL;
 
 		if (adv4 != hw->mii_autoneg_adv_reg ||
 		    adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043def8..923063d2e5b 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
 	 * the 1000Base-T control Register (Address 9).
 	 */
-	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+	mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
 	mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
 
 	/*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 	 */
 	switch (hw->media_type) {
 	case MEDIA_TYPE_AUTO_SENSOR:
-		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
-					MII_AR_10T_FD_CAPS |
-					MII_AR_100TX_HD_CAPS |
-					MII_AR_100TX_FD_CAPS);
-		hw->autoneg_advertised = ADVERTISE_10_HALF |
-					 ADVERTISE_10_FULL |
-					 ADVERTISE_100_HALF |
-					 ADVERTISE_100_FULL;
+		mii_autoneg_adv_reg |= ADVERTISE_ALL;
+		hw->autoneg_advertised = ADVERTISE_ALL;
 		if (hw->nic_type == athr_l1e) {
-			mii_1000t_ctrl_reg |=
-				MII_AT001_CR_1000T_FD_CAPS;
+			mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
 			hw->autoneg_advertised |= ADVERTISE_1000_FULL;
 		}
 		break;
 
 	case MEDIA_TYPE_100M_FULL:
-		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_100FULL;
 		hw->autoneg_advertised = ADVERTISE_100_FULL;
 		break;
 
 	case MEDIA_TYPE_100M_HALF:
-		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
 		hw->autoneg_advertised = ADVERTISE_100_HALF;
 		break;
 
 	case MEDIA_TYPE_10M_FULL:
-		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
 		hw->autoneg_advertised = ADVERTISE_10_FULL;
 		break;
 
 	default:
-		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
 		hw->autoneg_advertised = ADVERTISE_10_HALF;
 		break;
 	}
 
 	/* flow control fixed to enable all */
-	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+	mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
 
 	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
 	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 		return ret_val;
 
 	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-		ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+		ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
 					      mii_1000t_ctrl_reg);
 		if (ret_val)
 			return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
 	int ret_val;
 	u16 phy_data;
 
-	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+	phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
 
 	ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
 	if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
 		return err;
 
 	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-		err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+		err = atl1e_write_phy_reg(hw, MII_CTRL1000,
 					  hw->mii_1000t_ctrl_reg);
 		if (err)
 			return err;
 	}
 
 	err = atl1e_write_phy_reg(hw, MII_BMCR,
-				  MII_CR_RESET | MII_CR_AUTO_NEG_EN |
-				  MII_CR_RESTART_AUTO_NEG);
+				  BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
 	return err;
 }
 
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d86cf..74df16aef79 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
 
 /***************************** MII definition ***************************************/
 /* PHY Common Register */
-#define MII_BMCR			0x00
-#define MII_BMSR			0x01
-#define MII_PHYSID1			0x02
-#define MII_PHYSID2			0x03
-#define MII_ADVERTISE			0x04
-#define MII_LPA				0x05
-#define MII_EXPANSION			0x06
-#define MII_AT001_CR			0x09
-#define MII_AT001_SR			0x0A
-#define MII_AT001_ESR			0x0F
 #define MII_AT001_PSCR			0x10
 #define MII_AT001_PSSR			0x11
 #define MII_INT_CTRL			0x12
 #define MII_INT_STATUS			0x13
 #define MII_SMARTSPEED			0x14
-#define MII_RERRCOUNTER			0x15
-#define MII_SREVISION			0x16
-#define MII_RESV1			0x17
 #define MII_LBRERROR			0x18
-#define MII_PHYADDR			0x19
 #define MII_RESV2			0x1a
-#define MII_TPISTATUS			0x1b
-#define MII_NCONFIG			0x1c
 
 #define MII_DBG_ADDR			0x1D
 #define MII_DBG_DATA			0x1E
 
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB		0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE		0x0080  /* Collision test enable */
-#define MII_CR_FULL_DUPLEX		0x0100  /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG		0x0200  /* Restart auto negotiation */
-#define MII_CR_ISOLATE			0x0400  /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN		0x0800  /* Power down */
-#define MII_CR_AUTO_NEG_EN		0x1000  /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB		0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK			0x4000  /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET			0x8000  /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK		0x2040
-#define MII_CR_SPEED_1000		0x0040
-#define MII_CR_SPEED_100		0x2000
-#define MII_CR_SPEED_10			0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS		0x0001  /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT		0x0002  /* Jabber Detected */
-#define MII_SR_LINK_STATUS		0x0004  /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS		0x0008  /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT		0x0010  /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE		0x0020  /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS	0x0040  /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS		0x0100  /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS		0x0200  /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS		0x0400  /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS		0x0800  /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS		0x1000  /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS		0x2000  /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS		0x4000  /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS		0x8000  /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT			0x001f  /* Same as advertise selector */
-#define MII_LPA_10HALF			0x0020  /* Can do 10mbps half-duplex */
-#define MII_LPA_10FULL			0x0040  /* Can do 10mbps full-duplex */
-#define MII_LPA_100HALF			0x0080  /* Can do 100mbps half-duplex */
-#define MII_LPA_100FULL			0x0100  /* Can do 100mbps full-duplex */
-#define MII_LPA_100BASE4		0x0200  /* 100BASE-T4 */
-#define MII_LPA_PAUSE			0x0400  /* PAUSE */
-#define MII_LPA_ASYPAUSE		0x0800  /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT			0x2000  /* Link partner faulted */
-#define MII_LPA_LPACK			0x4000  /* Link partner acked us */
-#define MII_LPA_NPAGE			0x8000  /* Next page bit */
-
 /* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD		0x0001  /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS		0x0020  /* 10T Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS		0x0040  /* 10T Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS		0x0080  /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS		0x0100  /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS		0x0200  /* 100T4 Capable */
-#define MII_AR_PAUSE			0x0400  /* Pause operation desired */
-#define MII_AR_ASM_DIR			0x0800  /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT		0x2000  /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE		0x8000  /* Next Page ability supported */
-#define MII_AR_SPEED_MASK		0x01E0
-#define MII_AR_DEFAULT_CAP_MASK		0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK		0
 
 /* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS	0x0100  /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS	0x0200  /* Advertise 1000T FD capability */
-#define MII_AT001_CR_1000T_REPEATER_DTE	0x0400  /* 1=Repeater/switch device port */
-						/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE	0x0800  /* 1=Configure PHY as Master */
-						/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE	0x1000  /* 1=Master/Slave manual config value */
-						/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL	0x0000  /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1	0x2000  /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2	0x4000  /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3	0x6000  /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4	0x8000  /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK	0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK	0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS	0x0400  /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS	0x0800  /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS	0x1000  /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS	0x2000  /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES	0x4000  /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT	0x8000  /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT	12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT	13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS	0x1000  /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS	0x2000  /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS	0x4000  /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS	0x8000  /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+	(ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK	MII_AT001_CR_1000T_SPEED_MASK
 
 /* AT001 PHY Specific Control Register */
 #define MII_AT001_PSCR_JABBER_DISABLE	0x0001  /* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index e28f8baf394..1ff001a8270 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
 	hw->device_id = pdev->device;
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_id = pdev->subsystem_device;
+	hw->revision_id = pdev->revision;
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
 	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
@@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
932 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & 932 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
933 DEVICE_CTRL_MAX_PAYLOAD_MASK; 933 DEVICE_CTRL_MAX_PAYLOAD_MASK;
934 934
935 hw->dmaw_block = min(max_pay_load, hw->dmaw_block); 935 hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
936 936
937 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & 937 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
938 DEVICE_CTRL_MAX_RREQ_SZ_MASK; 938 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
939 hw->dmar_block = min(max_pay_load, hw->dmar_block); 939 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
940 940
941 if (hw->nic_type != athr_l2e_revB) 941 if (hw->nic_type != athr_l2e_revB)
942 AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, 942 AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
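The min() to min_t() change in the hunk above is a type fix rather than a behavioural one: the kernel's min() macro refuses to compare operands of different types at build time, while min_t() casts both sides to the named type before comparing. A hedged sketch of the distinction (the u8 field width is illustrative):

    #include <linux/kernel.h>   /* min(), min_t() */

    static u32 clamp_block(u32 max_pay_load, u8 dmaw_block)
    {
        /*
         * min(max_pay_load, dmaw_block) would trip the kernel's
         * build-time "comparison of distinct pointer types" check
         * because the operand types differ; min_t() compares both
         * operands as u32:
         */
        return min_t(u32, max_pay_load, dmaw_block);
    }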
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2051 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2051 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
2052 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2052 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
2053 2053
2054 mii_advertise_data = MII_AR_10T_HD_CAPS; 2054 mii_advertise_data = ADVERTISE_10HALF;
2055 2055
2056 if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) || 2056 if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
2057 (atl1e_write_phy_reg(hw, 2057 (atl1e_write_phy_reg(hw,
2058 MII_ADVERTISE, mii_advertise_data) != 0) || 2058 MII_ADVERTISE, mii_advertise_data) != 0) ||
2059 (atl1e_phy_commit(hw)) != 0) { 2059 (atl1e_phy_commit(hw)) != 0) {
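The suspend hunk finishes the same migration on the .c side: the private register name MII_AT001_CR becomes the generic MII_CTRL1000 (both denote PHY register 0x09, the 1000BASE-T control register) and MII_AR_10T_HD_CAPS becomes ADVERTISE_10HALF (0x0020). A sketch of the resulting advertisement setup in <linux/mii.h> terms (hw and atl1e_write_phy_reg() as in the driver above):

    /* stop advertising 1000BASE-T entirely, offer 10BASE-T half duplex only */
    atl1e_write_phy_reg(hw, MII_CTRL1000, 0);
    atl1e_write_phy_reg(hw, MII_ADVERTISE, ADVERTISE_10HALF);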
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3b527687c28..67f40b9c16e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
950 hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 950 hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
951 951
952 adapter->wol = 0; 952 adapter->wol = 0;
953 device_set_wakeup_enable(&adapter->pdev->dev, false);
953 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; 954 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
954 adapter->ict = 50000; /* 100ms */ 955 adapter->ict = 50000; /* 100ms */
955 adapter->link_speed = SPEED_0; /* hardware init */ 956 adapter->link_speed = SPEED_0; /* hardware init */
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
2735} 2736}
2736 2737
2737#ifdef CONFIG_PM 2738#ifdef CONFIG_PM
2738static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) 2739static int atl1_suspend(struct device *dev)
2739{ 2740{
2741 struct pci_dev *pdev = to_pci_dev(dev);
2740 struct net_device *netdev = pci_get_drvdata(pdev); 2742 struct net_device *netdev = pci_get_drvdata(pdev);
2741 struct atl1_adapter *adapter = netdev_priv(netdev); 2743 struct atl1_adapter *adapter = netdev_priv(netdev);
2742 struct atl1_hw *hw = &adapter->hw; 2744 struct atl1_hw *hw = &adapter->hw;
2743 u32 ctrl = 0; 2745 u32 ctrl = 0;
2744 u32 wufc = adapter->wol; 2746 u32 wufc = adapter->wol;
2745 u32 val; 2747 u32 val;
2746 int retval;
2747 u16 speed; 2748 u16 speed;
2748 u16 duplex; 2749 u16 duplex;
2749 2750
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2751 if (netif_running(netdev)) 2752 if (netif_running(netdev))
2752 atl1_down(adapter); 2753 atl1_down(adapter);
2753 2754
2754 retval = pci_save_state(pdev);
2755 if (retval)
2756 return retval;
2757
2758 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2755 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2759 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2756 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2760 val = ctrl & BMSR_LSTATUS; 2757 val = ctrl & BMSR_LSTATUS;
2761 if (val) 2758 if (val)
2762 wufc &= ~ATLX_WUFC_LNKC; 2759 wufc &= ~ATLX_WUFC_LNKC;
2760 if (!wufc)
2761 goto disable_wol;
2763 2762
2764 if (val && wufc) { 2763 if (val) {
2765 val = atl1_get_speed_and_duplex(hw, &speed, &duplex); 2764 val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
2766 if (val) { 2765 if (val) {
2767 if (netif_msg_ifdown(adapter)) 2766 if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2798 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; 2797 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2799 iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); 2798 iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
2800 ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2799 ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
2801 2800 } else {
2802 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
2803 goto exit;
2804 }
2805
2806 if (!val && wufc) {
2807 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); 2801 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
2808 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); 2802 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
2809 ioread32(hw->hw_addr + REG_WOL_CTRL); 2803 ioread32(hw->hw_addr + REG_WOL_CTRL);
2810 iowrite32(0, hw->hw_addr + REG_MAC_CTRL); 2804 iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
2811 ioread32(hw->hw_addr + REG_MAC_CTRL); 2805 ioread32(hw->hw_addr + REG_MAC_CTRL);
2812 hw->phy_configured = false; 2806 hw->phy_configured = false;
2813 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
2814 goto exit;
2815 } 2807 }
2816 2808
2817disable_wol: 2809 return 0;
2810
2811 disable_wol:
2818 iowrite32(0, hw->hw_addr + REG_WOL_CTRL); 2812 iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
2819 ioread32(hw->hw_addr + REG_WOL_CTRL); 2813 ioread32(hw->hw_addr + REG_WOL_CTRL);
2820 ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2814 ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
2822 iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); 2816 iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
2823 ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2817 ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
2824 hw->phy_configured = false; 2818 hw->phy_configured = false;
2825 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2826exit:
2827 if (netif_running(netdev))
2828 pci_disable_msi(adapter->pdev);
2829 pci_disable_device(pdev);
2830 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2831 2819
2832 return 0; 2820 return 0;
2833} 2821}
2834 2822
2835static int atl1_resume(struct pci_dev *pdev) 2823static int atl1_resume(struct device *dev)
2836{ 2824{
2825 struct pci_dev *pdev = to_pci_dev(dev);
2837 struct net_device *netdev = pci_get_drvdata(pdev); 2826 struct net_device *netdev = pci_get_drvdata(pdev);
2838 struct atl1_adapter *adapter = netdev_priv(netdev); 2827 struct atl1_adapter *adapter = netdev_priv(netdev);
2839 u32 err;
2840 2828
2841 pci_set_power_state(pdev, PCI_D0);
2842 pci_restore_state(pdev);
2843
2844 err = pci_enable_device(pdev);
2845 if (err) {
2846 if (netif_msg_ifup(adapter))
2847 dev_printk(KERN_DEBUG, &pdev->dev,
2848 "error enabling pci device\n");
2849 return err;
2850 }
2851
2852 pci_set_master(pdev);
2853 iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); 2829 iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
2854 pci_enable_wake(pdev, PCI_D3hot, 0);
2855 pci_enable_wake(pdev, PCI_D3cold, 0);
2856 2830
2857 atl1_reset_hw(&adapter->hw); 2831 atl1_reset_hw(&adapter->hw);
2858 2832
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
2864 2838
2865 return 0; 2839 return 0;
2866} 2840}
2841
2842static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
2843#define ATL1_PM_OPS (&atl1_pm_ops)
2844
2867#else 2845#else
2868#define atl1_suspend NULL 2846
2869#define atl1_resume NULL 2847static int atl1_suspend(struct device *dev) { return 0; }
2848
2849#define ATL1_PM_OPS NULL
2870#endif 2850#endif
2871 2851
2872static void atl1_shutdown(struct pci_dev *pdev) 2852static void atl1_shutdown(struct pci_dev *pdev)
2873{ 2853{
2874#ifdef CONFIG_PM 2854 struct net_device *netdev = pci_get_drvdata(pdev);
2875 atl1_suspend(pdev, PMSG_SUSPEND); 2855 struct atl1_adapter *adapter = netdev_priv(netdev);
2876#endif 2856
2857 atl1_suspend(&pdev->dev);
2858 pci_wake_from_d3(pdev, adapter->wol);
2859 pci_set_power_state(pdev, PCI_D3hot);
2877} 2860}
2878 2861
2879#ifdef CONFIG_NET_POLL_CONTROLLER 2862#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
3117 .id_table = atl1_pci_tbl, 3100 .id_table = atl1_pci_tbl,
3118 .probe = atl1_probe, 3101 .probe = atl1_probe,
3119 .remove = __devexit_p(atl1_remove), 3102 .remove = __devexit_p(atl1_remove),
3120 .suspend = atl1_suspend, 3103 .shutdown = atl1_shutdown,
3121 .resume = atl1_resume, 3104 .driver.pm = ATL1_PM_OPS,
3122 .shutdown = atl1_shutdown
3123}; 3105};
3124 3106
3125/* 3107/*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
3409 adapter->wol = 0; 3391 adapter->wol = 0;
3410 if (wol->wolopts & WAKE_MAGIC) 3392 if (wol->wolopts & WAKE_MAGIC)
3411 adapter->wol |= ATLX_WUFC_MAG; 3393 adapter->wol |= ATLX_WUFC_MAG;
3394
3395 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
3396
3412 return 0; 3397 return 0;
3413} 3398}
3414 3399
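Taken together, the atl1 changes convert the driver from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops. With driver.pm in place the PCI core saves config space and performs the D-state transition itself, which is why the explicit pci_save_state(), pci_enable_wake() and pci_set_power_state() calls drop out of the driver. A minimal sketch of the pattern, with foo_* names as placeholders:

    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/pm.h>

    static int foo_suspend(struct device *dev)
    {
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));

        /* quiesce the device; the PCI core then saves config space
         * and chooses the sleep state on its own */
        netif_device_detach(netdev);
        return 0;
    }

    static int foo_resume(struct device *dev)
    {
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));

        /* config space has already been restored by the core here */
        netif_device_attach(netdev);
        return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    /* hooked up as in the atl1 hunk above:
     *     .driver.pm = &foo_pm_ops,
     * in the struct pci_driver initializer */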
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 4e6f4e95a5a..e637e9f28fd 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
93 hw->device_id = pdev->device; 93 hw->device_id = pdev->device;
94 hw->subsystem_vendor_id = pdev->subsystem_vendor; 94 hw->subsystem_vendor_id = pdev->subsystem_vendor;
95 hw->subsystem_id = pdev->subsystem_device; 95 hw->subsystem_id = pdev->subsystem_device;
96 hw->revision_id = pdev->revision;
96 97
97 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
98 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 98 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
99 99
100 adapter->wol = 0; 100 adapter->wol = 0;
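atl2 picks up the same one-liner as atl1e earlier in this series: the PCI core reads PCI_CLASS_REVISION once at enumeration and caches the low byte in struct pci_dev, so the per-driver config-space read is redundant. The pattern side by side (the hw pointer is illustrative):

    /* before: an extra config-space read on every probe */
    pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

    /* after: reuse the value the PCI core cached at enumeration */
    hw->revision_id = pdev->revision;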
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4bebff3faea..e7cb8c8b977 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -9,7 +9,7 @@
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12*/ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -17,46 +17,45 @@
17#include <linux/isapnp.h> 17#include <linux/isapnp.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/io.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
24#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
25#include <linux/ethtool.h> 26#include <linux/ethtool.h>
26#include <linux/mii.h> 27#include <linux/mdio-bitbang.h>
28#include <linux/phy.h>
27#include <linux/eeprom_93cx6.h> 29#include <linux/eeprom_93cx6.h>
28#include <linux/slab.h> 30#include <linux/slab.h>
29 31
30#include <net/ax88796.h> 32#include <net/ax88796.h>
31 33
32#include <asm/system.h> 34#include <asm/system.h>
33#include <asm/io.h>
34
35static int phy_debug = 0;
36 35
37/* Rename the lib8390.c functions to show that they are in this driver */ 36/* Rename the lib8390.c functions to show that they are in this driver */
38#define __ei_open ax_ei_open 37#define __ei_open ax_ei_open
39#define __ei_close ax_ei_close 38#define __ei_close ax_ei_close
40#define __ei_poll ax_ei_poll 39#define __ei_poll ax_ei_poll
41#define __ei_start_xmit ax_ei_start_xmit 40#define __ei_start_xmit ax_ei_start_xmit
42#define __ei_tx_timeout ax_ei_tx_timeout 41#define __ei_tx_timeout ax_ei_tx_timeout
43#define __ei_get_stats ax_ei_get_stats 42#define __ei_get_stats ax_ei_get_stats
44#define __ei_set_multicast_list ax_ei_set_multicast_list 43#define __ei_set_multicast_list ax_ei_set_multicast_list
45#define __ei_interrupt ax_ei_interrupt 44#define __ei_interrupt ax_ei_interrupt
46#define ____alloc_ei_netdev ax__alloc_ei_netdev 45#define ____alloc_ei_netdev ax__alloc_ei_netdev
47#define __NS8390_init ax_NS8390_init 46#define __NS8390_init ax_NS8390_init
48 47
49/* force unsigned long back to 'void __iomem *' */ 48/* force unsigned long back to 'void __iomem *' */
50#define ax_convert_addr(_a) ((void __force __iomem *)(_a)) 49#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
51 50
52#define ei_inb(_a) readb(ax_convert_addr(_a)) 51#define ei_inb(_a) readb(ax_convert_addr(_a))
53#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a)) 52#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
54 53
55#define ei_inb_p(_a) ei_inb(_a) 54#define ei_inb_p(_a) ei_inb(_a)
56#define ei_outb_p(_v, _a) ei_outb(_v, _a) 55#define ei_outb_p(_v, _a) ei_outb(_v, _a)
57 56
58/* define EI_SHIFT() to take into account our register offsets */ 57/* define EI_SHIFT() to take into account our register offsets */
59#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) 58#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
60 59
61/* Ensure we have our RCR base value */ 60/* Ensure we have our RCR base value */
62#define AX88796_PLATFORM 61#define AX88796_PLATFORM
@@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
74#define NE_DATAPORT EI_SHIFT(0x10) 73#define NE_DATAPORT EI_SHIFT(0x10)
75 74
76#define NE1SM_START_PG 0x20 /* First page of TX buffer */ 75#define NE1SM_START_PG 0x20 /* First page of TX buffer */
77#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ 76#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
78#define NESM_START_PG 0x40 /* First page of TX buffer */ 77#define NESM_START_PG 0x40 /* First page of TX buffer */
79#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ 78#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
80 79
80#define AX_GPOC_PPDSET BIT(6)
81
81/* device private data */ 82/* device private data */
82 83
83struct ax_device { 84struct ax_device {
84 struct timer_list mii_timer; 85 struct mii_bus *mii_bus;
85 spinlock_t mii_lock; 86 struct mdiobb_ctrl bb_ctrl;
86 struct mii_if_info mii; 87 struct phy_device *phy_dev;
87 88 void __iomem *addr_memr;
88 u32 msg_enable; 89 u8 reg_memr;
89 void __iomem *map2; 90 int link;
90 struct platform_device *dev; 91 int speed;
91 struct resource *mem; 92 int duplex;
92 struct resource *mem2; 93
93 struct ax_plat_data *plat; 94 void __iomem *map2;
94 95 const struct ax_plat_data *plat;
95 unsigned char running; 96
96 unsigned char resume_open; 97 unsigned char running;
97 unsigned int irqflags; 98 unsigned char resume_open;
98 99 unsigned int irqflags;
99 u32 reg_offsets[0x20]; 100
101 u32 reg_offsets[0x20];
100}; 102};
101 103
102static inline struct ax_device *to_ax_dev(struct net_device *dev) 104static inline struct ax_device *to_ax_dev(struct net_device *dev)
103{ 105{
104 struct ei_device *ei_local = netdev_priv(dev); 106 struct ei_device *ei_local = netdev_priv(dev);
105 return (struct ax_device *)(ei_local+1); 107 return (struct ax_device *)(ei_local + 1);
106} 108}
107 109
108/* ax_initial_check 110/*
111 * ax_initial_check
109 * 112 *
110 * do an initial probe for the card to check whether it exists 113 * do an initial probe for the card to check whether it exists
111 * and is functional 114 * and is functional
112 */ 115 */
113
114static int ax_initial_check(struct net_device *dev) 116static int ax_initial_check(struct net_device *dev)
115{ 117{
116 struct ei_device *ei_local = netdev_priv(dev); 118 struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev)
122 if (reg0 == 0xFF) 124 if (reg0 == 0xFF)
123 return -ENODEV; 125 return -ENODEV;
124 126
125 ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); 127 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
126 regd = ei_inb(ioaddr + 0x0d); 128 regd = ei_inb(ioaddr + 0x0d);
127 ei_outb(0xff, ioaddr + 0x0d); 129 ei_outb(0xff, ioaddr + 0x0d);
128 ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); 130 ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
129 ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ 131 ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
130 if (ei_inb(ioaddr + EN0_COUNTER0) != 0) { 132 if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
131 ei_outb(reg0, ioaddr); 133 ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev)
136 return 0; 138 return 0;
137} 139}
138 140
139/* Hard reset the card. This used to pause for the same period that a 141/*
140 8390 reset command required, but that shouldn't be necessary. */ 142 * Hard reset the card. This used to pause for the same period that a
141 143 * 8390 reset command required, but that shouldn't be necessary.
144 */
142static void ax_reset_8390(struct net_device *dev) 145static void ax_reset_8390(struct net_device *dev)
143{ 146{
144 struct ei_device *ei_local = netdev_priv(dev); 147 struct ei_device *ei_local = netdev_priv(dev);
145 struct ax_device *ax = to_ax_dev(dev);
146 unsigned long reset_start_time = jiffies; 148 unsigned long reset_start_time = jiffies;
147 void __iomem *addr = (void __iomem *)dev->base_addr; 149 void __iomem *addr = (void __iomem *)dev->base_addr;
148 150
149 if (ei_debug > 1) 151 if (ei_debug > 1)
150 dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies); 152 netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
151 153
152 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); 154 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
153 155
154 ei_status.txing = 0; 156 ei_local->txing = 0;
155 ei_status.dmaing = 0; 157 ei_local->dmaing = 0;
156 158
157 /* This check _should_not_ be necessary, omit eventually. */ 159 /* This check _should_not_ be necessary, omit eventually. */
158 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 160 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
159 if (jiffies - reset_start_time > 2*HZ/100) { 161 if (jiffies - reset_start_time > 2 * HZ / 100) {
160 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", 162 netdev_warn(dev, "%s: did not complete.\n", __func__);
161 __func__, dev->name);
162 break; 163 break;
163 } 164 }
164 } 165 }
@@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
171 int ring_page) 172 int ring_page)
172{ 173{
173 struct ei_device *ei_local = netdev_priv(dev); 174 struct ei_device *ei_local = netdev_priv(dev);
174 struct ax_device *ax = to_ax_dev(dev);
175 void __iomem *nic_base = ei_local->mem; 175 void __iomem *nic_base = ei_local->mem;
176 176
177 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 177 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
178 if (ei_status.dmaing) { 178 if (ei_local->dmaing) {
179 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " 179 netdev_err(dev, "DMAing conflict in %s "
180 "[DMAstat:%d][irqlock:%d].\n", 180 "[DMAstat:%d][irqlock:%d].\n",
181 dev->name, __func__, 181 __func__,
182 ei_status.dmaing, ei_status.irqlock); 182 ei_local->dmaing, ei_local->irqlock);
183 return; 183 return;
184 } 184 }
185 185
186 ei_status.dmaing |= 0x01; 186 ei_local->dmaing |= 0x01;
187 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); 187 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
188 ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); 188 ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
189 ei_outb(0, nic_base + EN0_RCNTHI); 189 ei_outb(0, nic_base + EN0_RCNTHI);
190 ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ 190 ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */
191 ei_outb(ring_page, nic_base + EN0_RSARHI); 191 ei_outb(ring_page, nic_base + EN0_RSARHI);
192 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 192 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
193 193
194 if (ei_status.word16) 194 if (ei_local->word16)
195 readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); 195 readsw(nic_base + NE_DATAPORT, hdr,
196 sizeof(struct e8390_pkt_hdr) >> 1);
196 else 197 else
197 readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); 198 readsb(nic_base + NE_DATAPORT, hdr,
199 sizeof(struct e8390_pkt_hdr));
198 200
199 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 201 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
200 ei_status.dmaing &= ~0x01; 202 ei_local->dmaing &= ~0x01;
201 203
202 le16_to_cpus(&hdr->count); 204 le16_to_cpus(&hdr->count);
203} 205}
204 206
205 207
206/* Block input and output, similar to the Crynwr packet driver. If you 208/*
207 are porting to a new ethercard, look at the packet driver source for hints. 209 * Block input and output, similar to the Crynwr packet driver. If
208 The NEx000 doesn't share the on-board packet memory -- you have to put 210 * you are porting to a new ethercard, look at the packet driver
209 the packet out through the "remote DMA" dataport using ei_outb. */ 211 * source for hints. The NEx000 doesn't share the on-board packet
210 212 * memory -- you have to put the packet out through the "remote DMA"
213 * dataport using ei_outb.
214 */
211static void ax_block_input(struct net_device *dev, int count, 215static void ax_block_input(struct net_device *dev, int count,
212 struct sk_buff *skb, int ring_offset) 216 struct sk_buff *skb, int ring_offset)
213{ 217{
214 struct ei_device *ei_local = netdev_priv(dev); 218 struct ei_device *ei_local = netdev_priv(dev);
215 struct ax_device *ax = to_ax_dev(dev);
216 void __iomem *nic_base = ei_local->mem; 219 void __iomem *nic_base = ei_local->mem;
217 char *buf = skb->data; 220 char *buf = skb->data;
218 221
219 if (ei_status.dmaing) { 222 if (ei_local->dmaing) {
220 dev_err(&ax->dev->dev, 223 netdev_err(dev,
221 "%s: DMAing conflict in %s " 224 "DMAing conflict in %s "
222 "[DMAstat:%d][irqlock:%d].\n", 225 "[DMAstat:%d][irqlock:%d].\n",
223 dev->name, __func__, 226 __func__,
224 ei_status.dmaing, ei_status.irqlock); 227 ei_local->dmaing, ei_local->irqlock);
225 return; 228 return;
226 } 229 }
227 230
228 ei_status.dmaing |= 0x01; 231 ei_local->dmaing |= 0x01;
229 232
230 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); 233 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
231 ei_outb(count & 0xff, nic_base + EN0_RCNTLO); 234 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
232 ei_outb(count >> 8, nic_base + EN0_RCNTHI); 235 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
233 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); 236 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
234 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); 237 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
235 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 238 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
236 239
237 if (ei_status.word16) { 240 if (ei_local->word16) {
238 readsw(nic_base + NE_DATAPORT, buf, count >> 1); 241 readsw(nic_base + NE_DATAPORT, buf, count >> 1);
239 if (count & 0x01) 242 if (count & 0x01)
240 buf[count-1] = ei_inb(nic_base + NE_DATAPORT); 243 buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count,
243 readsb(nic_base + NE_DATAPORT, buf, count); 246 readsb(nic_base + NE_DATAPORT, buf, count);
244 } 247 }
245 248
246 ei_status.dmaing &= ~1; 249 ei_local->dmaing &= ~1;
247} 250}
248 251
249static void ax_block_output(struct net_device *dev, int count, 252static void ax_block_output(struct net_device *dev, int count,
250 const unsigned char *buf, const int start_page) 253 const unsigned char *buf, const int start_page)
251{ 254{
252 struct ei_device *ei_local = netdev_priv(dev); 255 struct ei_device *ei_local = netdev_priv(dev);
253 struct ax_device *ax = to_ax_dev(dev);
254 void __iomem *nic_base = ei_local->mem; 256 void __iomem *nic_base = ei_local->mem;
255 unsigned long dma_start; 257 unsigned long dma_start;
256 258
257 /* Round the count up for word writes. Do we need to do this? 259 /*
258 What effect will an odd byte count have on the 8390? 260 * Round the count up for word writes. Do we need to do this?
259 I should check someday. */ 261 * What effect will an odd byte count have on the 8390? I
260 262 * should check someday.
261 if (ei_status.word16 && (count & 0x01)) 263 */
264 if (ei_local->word16 && (count & 0x01))
262 count++; 265 count++;
263 266
264 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 267 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
265 if (ei_status.dmaing) { 268 if (ei_local->dmaing) {
266 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." 269 netdev_err(dev, "DMAing conflict in %s."
267 "[DMAstat:%d][irqlock:%d]\n", 270 "[DMAstat:%d][irqlock:%d]\n",
268 dev->name, __func__, 271 __func__,
269 ei_status.dmaing, ei_status.irqlock); 272 ei_local->dmaing, ei_local->irqlock);
270 return; 273 return;
271 } 274 }
272 275
273 ei_status.dmaing |= 0x01; 276 ei_local->dmaing |= 0x01;
274 /* We should already be in page 0, but to be safe... */ 277 /* We should already be in page 0, but to be safe... */
275 ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); 278 ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
276 279
@@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count,
278 281
279 /* Now the normal output. */ 282 /* Now the normal output. */
280 ei_outb(count & 0xff, nic_base + EN0_RCNTLO); 283 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
281 ei_outb(count >> 8, nic_base + EN0_RCNTHI); 284 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
282 ei_outb(0x00, nic_base + EN0_RSARLO); 285 ei_outb(0x00, nic_base + EN0_RSARLO);
283 ei_outb(start_page, nic_base + EN0_RSARHI); 286 ei_outb(start_page, nic_base + EN0_RSARHI);
284 287
285 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); 288 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
286 if (ei_status.word16) { 289 if (ei_local->word16)
287 writesw(nic_base + NE_DATAPORT, buf, count>>1); 290 writesw(nic_base + NE_DATAPORT, buf, count >> 1);
288 } else { 291 else
289 writesb(nic_base + NE_DATAPORT, buf, count); 292 writesb(nic_base + NE_DATAPORT, buf, count);
290 }
291 293
292 dma_start = jiffies; 294 dma_start = jiffies;
293 295
294 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { 296 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
295 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 297 if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
296 dev_warn(&ax->dev->dev, 298 netdev_warn(dev, "timeout waiting for Tx RDC.\n");
297 "%s: timeout waiting for Tx RDC.\n", dev->name);
298 ax_reset_8390(dev); 299 ax_reset_8390(dev);
299 ax_NS8390_init(dev,1); 300 ax_NS8390_init(dev, 1);
300 break; 301 break;
301 } 302 }
302 } 303 }
303 304
304 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 305 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
305 ei_status.dmaing &= ~0x01; 306 ei_local->dmaing &= ~0x01;
306} 307}
307 308
308/* definitions for accessing MII/EEPROM interface */ 309/* definitions for accessing MII/EEPROM interface */
309 310
310#define AX_MEMR EI_SHIFT(0x14) 311#define AX_MEMR EI_SHIFT(0x14)
311#define AX_MEMR_MDC (1<<0) 312#define AX_MEMR_MDC BIT(0)
312#define AX_MEMR_MDIR (1<<1) 313#define AX_MEMR_MDIR BIT(1)
313#define AX_MEMR_MDI (1<<2) 314#define AX_MEMR_MDI BIT(2)
314#define AX_MEMR_MDO (1<<3) 315#define AX_MEMR_MDO BIT(3)
315#define AX_MEMR_EECS (1<<4) 316#define AX_MEMR_EECS BIT(4)
316#define AX_MEMR_EEI (1<<5) 317#define AX_MEMR_EEI BIT(5)
317#define AX_MEMR_EEO (1<<6) 318#define AX_MEMR_EEO BIT(6)
318#define AX_MEMR_EECLK (1<<7) 319#define AX_MEMR_EECLK BIT(7)
319 320
320/* ax_mii_ei_outbits 321static void ax_handle_link_change(struct net_device *dev)
321 *
322 * write the specified set of bits to the phy
323*/
324
325static void
326ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
327{ 322{
328 struct ei_device *ei_local = netdev_priv(dev); 323 struct ax_device *ax = to_ax_dev(dev);
329 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; 324 struct phy_device *phy_dev = ax->phy_dev;
330 unsigned int memr; 325 int status_change = 0;
331
332 /* clock low, data to output mode */
333 memr = ei_inb(memr_addr);
334 memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
335 ei_outb(memr, memr_addr);
336
337 for (len--; len >= 0; len--) {
338 if (bits & (1 << len))
339 memr |= AX_MEMR_MDO;
340 else
341 memr &= ~AX_MEMR_MDO;
342
343 ei_outb(memr, memr_addr);
344
345 /* clock high */
346 326
347 ei_outb(memr | AX_MEMR_MDC, memr_addr); 327 if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
348 udelay(1); 328 (ax->duplex != phy_dev->duplex))) {
349 329
350 /* clock low */ 330 ax->speed = phy_dev->speed;
351 ei_outb(memr, memr_addr); 331 ax->duplex = phy_dev->duplex;
332 status_change = 1;
352 } 333 }
353 334
354 /* leaves the clock line low, mdir input */ 335 if (phy_dev->link != ax->link) {
355 memr |= AX_MEMR_MDIR; 336 if (!phy_dev->link) {
356 ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR); 337 ax->speed = 0;
357} 338 ax->duplex = -1;
358 339 }
359/* ax_phy_ei_inbits 340 ax->link = phy_dev->link;
360 *
361 * read a specified number of bits from the phy
362*/
363
364static unsigned int
365ax_phy_ei_inbits(struct net_device *dev, int no)
366{
367 struct ei_device *ei_local = netdev_priv(dev);
368 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
369 unsigned int memr;
370 unsigned int result = 0;
371
372 /* clock low, data to input mode */
373 memr = ei_inb(memr_addr);
374 memr &= ~AX_MEMR_MDC;
375 memr |= AX_MEMR_MDIR;
376 ei_outb(memr, memr_addr);
377
378 for (no--; no >= 0; no--) {
379 ei_outb(memr | AX_MEMR_MDC, memr_addr);
380
381 udelay(1);
382
383 if (ei_inb(memr_addr) & AX_MEMR_MDI)
384 result |= (1<<no);
385 341
386 ei_outb(memr, memr_addr); 342 status_change = 1;
387 } 343 }
388 344
389 return result; 345 if (status_change)
390} 346 phy_print_status(phy_dev);
391
392/* ax_phy_issueaddr
393 *
394 * use the low level bit shifting routines to send the address
395 * and command to the specified phy
396*/
397
398static void
399ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
400{
401 if (phy_debug)
402 pr_debug("%s: dev %p, %04x, %04x, %d\n",
403 __func__, dev, phy_addr, reg, opc);
404
405 ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */
406 ax_mii_ei_outbits(dev, 1, 2); /* frame-start */
407 ax_mii_ei_outbits(dev, opc, 2); /* op code */
408 ax_mii_ei_outbits(dev, phy_addr, 5); /* phy address */
409 ax_mii_ei_outbits(dev, reg, 5); /* reg address */
410} 347}
411 348
412static int 349static int ax_mii_probe(struct net_device *dev)
413ax_phy_read(struct net_device *dev, int phy_addr, int reg)
414{ 350{
415 struct ei_device *ei_local = netdev_priv(dev); 351 struct ax_device *ax = to_ax_dev(dev);
416 unsigned long flags; 352 struct phy_device *phy_dev = NULL;
417 unsigned int result; 353 int ret;
418 354
419 spin_lock_irqsave(&ei_local->page_lock, flags); 355 /* find the first phy */
356 phy_dev = phy_find_first(ax->mii_bus);
357 if (!phy_dev) {
358 netdev_err(dev, "no PHY found\n");
359 return -ENODEV;
360 }
420 361
421 ax_phy_issueaddr(dev, phy_addr, reg, 2); 362 ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
363 PHY_INTERFACE_MODE_MII);
364 if (ret) {
365 netdev_err(dev, "Could not attach to PHY\n");
366 return ret;
367 }
422 368
423 result = ax_phy_ei_inbits(dev, 17); 369 /* mask with MAC supported features */
424 result &= ~(3<<16); 370 phy_dev->supported &= PHY_BASIC_FEATURES;
371 phy_dev->advertising = phy_dev->supported;
425 372
426 spin_unlock_irqrestore(&ei_local->page_lock, flags); 373 ax->phy_dev = phy_dev;
427 374
428 if (phy_debug) 375 netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
429 pr_debug("%s: %04x.%04x => read %04x\n", __func__, 376 phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
430 phy_addr, reg, result);
431 377
432 return result; 378 return 0;
433} 379}
434 380
435static void 381static void ax_phy_switch(struct net_device *dev, int on)
436ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
437{ 382{
438 struct ei_device *ei = netdev_priv(dev); 383 struct ei_device *ei_local = netdev_priv(dev);
439 struct ax_device *ax = to_ax_dev(dev); 384 struct ax_device *ax = to_ax_dev(dev);
440 unsigned long flags;
441
442 dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
443 __func__, dev, phy_addr, reg, value);
444
445 spin_lock_irqsave(&ei->page_lock, flags);
446
447 ax_phy_issueaddr(dev, phy_addr, reg, 1);
448 ax_mii_ei_outbits(dev, 2, 2); /* send TA */
449 ax_mii_ei_outbits(dev, value, 16);
450
451 spin_unlock_irqrestore(&ei->page_lock, flags);
452}
453 385
454static void ax_mii_expiry(unsigned long data) 386 u8 reg_gpoc = ax->plat->gpoc_val;
455{
456 struct net_device *dev = (struct net_device *)data;
457 struct ax_device *ax = to_ax_dev(dev);
458 unsigned long flags;
459 387
460 spin_lock_irqsave(&ax->mii_lock, flags); 388 if (!!on)
461 mii_check_media(&ax->mii, netif_msg_link(ax), 0); 389 reg_gpoc &= ~AX_GPOC_PPDSET;
462 spin_unlock_irqrestore(&ax->mii_lock, flags); 390 else
391 reg_gpoc |= AX_GPOC_PPDSET;
463 392
464 if (ax->running) { 393 ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
465 ax->mii_timer.expires = jiffies + HZ*2;
466 add_timer(&ax->mii_timer);
467 }
468} 394}
469 395
470static int ax_open(struct net_device *dev) 396static int ax_open(struct net_device *dev)
471{ 397{
472 struct ax_device *ax = to_ax_dev(dev); 398 struct ax_device *ax = to_ax_dev(dev);
473 struct ei_device *ei_local = netdev_priv(dev);
474 int ret; 399 int ret;
475 400
476 dev_dbg(&ax->dev->dev, "%s: open\n", dev->name); 401 netdev_dbg(dev, "open\n");
477 402
478 ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, 403 ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
479 dev->name, dev); 404 dev->name, dev);
480 if (ret) 405 if (ret)
481 return ret; 406 goto failed_request_irq;
482
483 ret = ax_ei_open(dev);
484 if (ret) {
485 free_irq(dev->irq, dev);
486 return ret;
487 }
488 407
489 /* turn the phy on (if turned off) */ 408 /* turn the phy on (if turned off) */
409 ax_phy_switch(dev, 1);
490 410
491 ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17)); 411 ret = ax_mii_probe(dev);
492 ax->running = 1; 412 if (ret)
493 413 goto failed_mii_probe;
494 /* start the MII timer */ 414 phy_start(ax->phy_dev);
495
496 init_timer(&ax->mii_timer);
497 415
498 ax->mii_timer.expires = jiffies+1; 416 ret = ax_ei_open(dev);
499 ax->mii_timer.data = (unsigned long) dev; 417 if (ret)
500 ax->mii_timer.function = ax_mii_expiry; 418 goto failed_ax_ei_open;
501 419
502 add_timer(&ax->mii_timer); 420 ax->running = 1;
503 421
504 return 0; 422 return 0;
423
424 failed_ax_ei_open:
425 phy_disconnect(ax->phy_dev);
426 failed_mii_probe:
427 ax_phy_switch(dev, 0);
428 free_irq(dev->irq, dev);
429 failed_request_irq:
430 return ret;
505} 431}
506 432
507static int ax_close(struct net_device *dev) 433static int ax_close(struct net_device *dev)
508{ 434{
509 struct ax_device *ax = to_ax_dev(dev); 435 struct ax_device *ax = to_ax_dev(dev);
510 struct ei_device *ei_local = netdev_priv(dev);
511 436
512 dev_dbg(&ax->dev->dev, "%s: close\n", dev->name); 437 netdev_dbg(dev, "close\n");
513
514 /* turn the phy off */
515
516 ei_outb(ax->plat->gpoc_val | (1<<6),
517 ei_local->mem + EI_SHIFT(0x17));
518 438
519 ax->running = 0; 439 ax->running = 0;
520 wmb(); 440 wmb();
521 441
522 del_timer_sync(&ax->mii_timer);
523 ax_ei_close(dev); 442 ax_ei_close(dev);
524 443
444 /* turn the phy off */
445 ax_phy_switch(dev, 0);
446 phy_disconnect(ax->phy_dev);
447
525 free_irq(dev->irq, dev); 448 free_irq(dev->irq, dev);
526 return 0; 449 return 0;
527} 450}
@@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev)
529static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 452static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
530{ 453{
531 struct ax_device *ax = to_ax_dev(dev); 454 struct ax_device *ax = to_ax_dev(dev);
532 unsigned long flags; 455 struct phy_device *phy_dev = ax->phy_dev;
533 int rc;
534 456
535 if (!netif_running(dev)) 457 if (!netif_running(dev))
536 return -EINVAL; 458 return -EINVAL;
537 459
538 spin_lock_irqsave(&ax->mii_lock, flags); 460 if (!phy_dev)
539 rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL); 461 return -ENODEV;
540 spin_unlock_irqrestore(&ax->mii_lock, flags);
541 462
542 return rc; 463 return phy_mii_ioctl(phy_dev, req, cmd);
543} 464}
544 465
545/* ethtool ops */ 466/* ethtool ops */
@@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
547static void ax_get_drvinfo(struct net_device *dev, 468static void ax_get_drvinfo(struct net_device *dev,
548 struct ethtool_drvinfo *info) 469 struct ethtool_drvinfo *info)
549{ 470{
550 struct ax_device *ax = to_ax_dev(dev); 471 struct platform_device *pdev = to_platform_device(dev->dev.parent);
551 472
552 strcpy(info->driver, DRV_NAME); 473 strcpy(info->driver, DRV_NAME);
553 strcpy(info->version, DRV_VERSION); 474 strcpy(info->version, DRV_VERSION);
554 strcpy(info->bus_info, ax->dev->name); 475 strcpy(info->bus_info, pdev->name);
555} 476}
556 477
557static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 478static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
558{ 479{
559 struct ax_device *ax = to_ax_dev(dev); 480 struct ax_device *ax = to_ax_dev(dev);
560 unsigned long flags; 481 struct phy_device *phy_dev = ax->phy_dev;
561 482
562 spin_lock_irqsave(&ax->mii_lock, flags); 483 if (!phy_dev)
563 mii_ethtool_gset(&ax->mii, cmd); 484 return -ENODEV;
564 spin_unlock_irqrestore(&ax->mii_lock, flags);
565 485
566 return 0; 486 return phy_ethtool_gset(phy_dev, cmd);
567} 487}
568 488
569static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 489static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
570{ 490{
571 struct ax_device *ax = to_ax_dev(dev); 491 struct ax_device *ax = to_ax_dev(dev);
572 unsigned long flags; 492 struct phy_device *phy_dev = ax->phy_dev;
573 int rc;
574 493
575 spin_lock_irqsave(&ax->mii_lock, flags); 494 if (!phy_dev)
576 rc = mii_ethtool_sset(&ax->mii, cmd); 495 return -ENODEV;
577 spin_unlock_irqrestore(&ax->mii_lock, flags);
578
579 return rc;
580}
581
582static int ax_nway_reset(struct net_device *dev)
583{
584 struct ax_device *ax = to_ax_dev(dev);
585 return mii_nway_restart(&ax->mii);
586}
587 496
588static u32 ax_get_link(struct net_device *dev) 497 return phy_ethtool_sset(phy_dev, cmd);
589{
590 struct ax_device *ax = to_ax_dev(dev);
591 return mii_link_ok(&ax->mii);
592} 498}
593 499
594static const struct ethtool_ops ax_ethtool_ops = { 500static const struct ethtool_ops ax_ethtool_ops = {
595 .get_drvinfo = ax_get_drvinfo, 501 .get_drvinfo = ax_get_drvinfo,
596 .get_settings = ax_get_settings, 502 .get_settings = ax_get_settings,
597 .set_settings = ax_set_settings, 503 .set_settings = ax_set_settings,
598 .nway_reset = ax_nway_reset, 504 .get_link = ethtool_op_get_link,
599 .get_link = ax_get_link,
600}; 505};
601 506
602#ifdef CONFIG_AX88796_93CX6 507#ifdef CONFIG_AX88796_93CX6
@@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = {
640 .ndo_get_stats = ax_ei_get_stats, 545 .ndo_get_stats = ax_ei_get_stats,
641 .ndo_set_multicast_list = ax_ei_set_multicast_list, 546 .ndo_set_multicast_list = ax_ei_set_multicast_list,
642 .ndo_validate_addr = eth_validate_addr, 547 .ndo_validate_addr = eth_validate_addr,
643 .ndo_set_mac_address = eth_mac_addr, 548 .ndo_set_mac_address = eth_mac_addr,
644 .ndo_change_mtu = eth_change_mtu, 549 .ndo_change_mtu = eth_change_mtu,
645#ifdef CONFIG_NET_POLL_CONTROLLER 550#ifdef CONFIG_NET_POLL_CONTROLLER
646 .ndo_poll_controller = ax_ei_poll, 551 .ndo_poll_controller = ax_ei_poll,
647#endif 552#endif
648}; 553};
649 554
555static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
556{
557 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
558
559 if (level)
560 ax->reg_memr |= AX_MEMR_MDC;
561 else
562 ax->reg_memr &= ~AX_MEMR_MDC;
563
564 ei_outb(ax->reg_memr, ax->addr_memr);
565}
566
567static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
568{
569 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
570
571 if (output)
572 ax->reg_memr &= ~AX_MEMR_MDIR;
573 else
574 ax->reg_memr |= AX_MEMR_MDIR;
575
576 ei_outb(ax->reg_memr, ax->addr_memr);
577}
578
579static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
580{
581 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
582
583 if (value)
584 ax->reg_memr |= AX_MEMR_MDO;
585 else
586 ax->reg_memr &= ~AX_MEMR_MDO;
587
588 ei_outb(ax->reg_memr, ax->addr_memr);
589}
590
591static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
592{
593 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
594 int reg_memr = ei_inb(ax->addr_memr);
595
596 return reg_memr & AX_MEMR_MDI ? 1 : 0;
597}
598
599static struct mdiobb_ops bb_ops = {
600 .owner = THIS_MODULE,
601 .set_mdc = ax_bb_mdc,
602 .set_mdio_dir = ax_bb_dir,
603 .set_mdio_data = ax_bb_set_data,
604 .get_mdio_data = ax_bb_get_data,
605};
606
650/* setup code */ 607/* setup code */
651 608
609static int ax_mii_init(struct net_device *dev)
610{
611 struct platform_device *pdev = to_platform_device(dev->dev.parent);
612 struct ei_device *ei_local = netdev_priv(dev);
613 struct ax_device *ax = to_ax_dev(dev);
614 int err, i;
615
616 ax->bb_ctrl.ops = &bb_ops;
617 ax->addr_memr = ei_local->mem + AX_MEMR;
618 ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
619 if (!ax->mii_bus) {
620 err = -ENOMEM;
621 goto out;
622 }
623
624 ax->mii_bus->name = "ax88796_mii_bus";
625 ax->mii_bus->parent = dev->dev.parent;
626 snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
627
628 ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
629 if (!ax->mii_bus->irq) {
630 err = -ENOMEM;
631 goto out_free_mdio_bitbang;
632 }
633
634 for (i = 0; i < PHY_MAX_ADDR; i++)
635 ax->mii_bus->irq[i] = PHY_POLL;
636
637 err = mdiobus_register(ax->mii_bus);
638 if (err)
639 goto out_free_irq;
640
641 return 0;
642
643 out_free_irq:
644 kfree(ax->mii_bus->irq);
645 out_free_mdio_bitbang:
646 free_mdio_bitbang(ax->mii_bus);
647 out:
648 return err;
649}
650
652static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) 651static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
653{ 652{
654 void __iomem *ioaddr = ei_local->mem; 653 void __iomem *ioaddr = ei_local->mem;
655 struct ax_device *ax = to_ax_dev(dev); 654 struct ax_device *ax = to_ax_dev(dev);
656 655
657 /* Select page 0*/ 656 /* Select page 0 */
658 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD); 657 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
659 658
660 /* set to byte access */ 659 /* set to byte access */
661 ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG); 660 ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
662 ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17)); 661 ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
663} 662}
664 663
665/* ax_init_dev 664/*
665 * ax_init_dev
666 * 666 *
667 * initialise the specified device, taking care to note the MAC 667 * initialise the specified device, taking care to note the MAC
668 * address it may already have (if configured), ensure 668 * address it may already have (if configured), ensure
669 * the device is ready to be used by lib8390.c and registered with 669 * the device is ready to be used by lib8390.c and registered with
670 * the network layer. 670 * the network layer.
671 */ 671 */
672 672static int ax_init_dev(struct net_device *dev)
673static int ax_init_dev(struct net_device *dev, int first_init)
674{ 673{
675 struct ei_device *ei_local = netdev_priv(dev); 674 struct ei_device *ei_local = netdev_priv(dev);
676 struct ax_device *ax = to_ax_dev(dev); 675 struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init)
690 689
691 /* read the mac from the card prom if we need it */ 690 /* read the mac from the card prom if we need it */
692 691
693 if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) { 692 if (ax->plat->flags & AXFLG_HAS_EEPROM) {
694 unsigned char SA_prom[32]; 693 unsigned char SA_prom[32];
695 694
696 for(i = 0; i < sizeof(SA_prom); i+=2) { 695 for (i = 0; i < sizeof(SA_prom); i += 2) {
697 SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); 696 SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
698 SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT); 697 SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
699 } 698 }
700 699
701 if (ax->plat->wordlength == 2) 700 if (ax->plat->wordlength == 2)
702 for (i = 0; i < 16; i++) 701 for (i = 0; i < 16; i++)
703 SA_prom[i] = SA_prom[i+i]; 702 SA_prom[i] = SA_prom[i+i];
704 703
705 memcpy(dev->dev_addr, SA_prom, 6); 704 memcpy(dev->dev_addr, SA_prom, 6);
706 } 705 }
707 706
708#ifdef CONFIG_AX88796_93CX6 707#ifdef CONFIG_AX88796_93CX6
709 if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) { 708 if (ax->plat->flags & AXFLG_HAS_93CX6) {
710 unsigned char mac_addr[6]; 709 unsigned char mac_addr[6];
711 struct eeprom_93cx6 eeprom; 710 struct eeprom_93cx6 eeprom;
712 711
@@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
719 (__le16 __force *)mac_addr, 718 (__le16 __force *)mac_addr,
720 sizeof(mac_addr) >> 1); 719 sizeof(mac_addr) >> 1);
721 720
722 memcpy(dev->dev_addr, mac_addr, 6); 721 memcpy(dev->dev_addr, mac_addr, 6);
723 } 722 }
724#endif 723#endif
725 if (ax->plat->wordlength == 2) { 724 if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init)
732 stop_page = NE1SM_STOP_PG; 731 stop_page = NE1SM_STOP_PG;
733 } 732 }
734 733
735 /* load the mac-address from the device if this is the 734 /* load the mac-address from the device */
736 * first time we've initialised */ 735 if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
737 736 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
738 if (first_init) { 737 ei_local->mem + E8390_CMD); /* 0x61 */
739 if (ax->plat->flags & AXFLG_MAC_FROMDEV) { 738 for (i = 0; i < ETHER_ADDR_LEN; i++)
740 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, 739 dev->dev_addr[i] =
741 ei_local->mem + E8390_CMD); /* 0x61 */ 740 ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
742 for (i = 0; i < ETHER_ADDR_LEN; i++)
743 dev->dev_addr[i] =
744 ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
745 }
746
747 if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
748 ax->plat->mac_addr)
749 memcpy(dev->dev_addr, ax->plat->mac_addr,
750 ETHER_ADDR_LEN);
751 } 741 }
752 742
743 if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
744 ax->plat->mac_addr)
745 memcpy(dev->dev_addr, ax->plat->mac_addr,
746 ETHER_ADDR_LEN);
747
753 ax_reset_8390(dev); 748 ax_reset_8390(dev);
754 749
755 ei_status.name = "AX88796"; 750 ei_local->name = "AX88796";
756 ei_status.tx_start_page = start_page; 751 ei_local->tx_start_page = start_page;
757 ei_status.stop_page = stop_page; 752 ei_local->stop_page = stop_page;
758 ei_status.word16 = (ax->plat->wordlength == 2); 753 ei_local->word16 = (ax->plat->wordlength == 2);
759 ei_status.rx_start_page = start_page + TX_PAGES; 754 ei_local->rx_start_page = start_page + TX_PAGES;
760 755
761#ifdef PACKETBUF_MEMSIZE 756#ifdef PACKETBUF_MEMSIZE
762 /* Allow the packet buffer size to be overridden by know-it-alls. */ 757 /* Allow the packet buffer size to be overridden by know-it-alls. */
763 ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; 758 ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
764#endif 759#endif
765 760
766 ei_status.reset_8390 = &ax_reset_8390; 761 ei_local->reset_8390 = &ax_reset_8390;
767 ei_status.block_input = &ax_block_input; 762 ei_local->block_input = &ax_block_input;
768 ei_status.block_output = &ax_block_output; 763 ei_local->block_output = &ax_block_output;
769 ei_status.get_8390_hdr = &ax_get_8390_hdr; 764 ei_local->get_8390_hdr = &ax_get_8390_hdr;
770 ei_status.priv = 0; 765 ei_local->priv = 0;
771
772 dev->netdev_ops = &ax_netdev_ops;
773 dev->ethtool_ops = &ax_ethtool_ops;
774
775 ax->msg_enable = NETIF_MSG_LINK;
776 ax->mii.phy_id_mask = 0x1f;
777 ax->mii.reg_num_mask = 0x1f;
778 ax->mii.phy_id = 0x10; /* onboard phy */
779 ax->mii.force_media = 0;
780 ax->mii.full_duplex = 0;
781 ax->mii.mdio_read = ax_phy_read;
782 ax->mii.mdio_write = ax_phy_write;
783 ax->mii.dev = dev;
784 766
785 ax_NS8390_init(dev, 0); 767 dev->netdev_ops = &ax_netdev_ops;
768 dev->ethtool_ops = &ax_ethtool_ops;
786 769
787 if (first_init) 770 ret = ax_mii_init(dev);
788 dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n", 771 if (ret)
789 ei_status.word16 ? 16:8, dev->irq, dev->base_addr, 772 goto out_irq;
790 dev->dev_addr); 773
774 ax_NS8390_init(dev, 0);
791 775
792 ret = register_netdev(dev); 776 ret = register_netdev(dev);
793 if (ret) 777 if (ret)
794 goto out_irq; 778 goto out_irq;
795 779
780 netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
781 ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
782 dev->dev_addr);
783
796 return 0; 784 return 0;
797 785
798 out_irq: 786 out_irq:
@@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init)
802 return ret; 790 return ret;
803} 791}
804 792
805static int ax_remove(struct platform_device *_dev) 793static int ax_remove(struct platform_device *pdev)
806{ 794{
807 struct net_device *dev = platform_get_drvdata(_dev); 795 struct net_device *dev = platform_get_drvdata(pdev);
808 struct ax_device *ax; 796 struct ei_device *ei_local = netdev_priv(dev);
809 797 struct ax_device *ax = to_ax_dev(dev);
810 ax = to_ax_dev(dev); 798 struct resource *mem;
811 799
812 unregister_netdev(dev); 800 unregister_netdev(dev);
813 free_irq(dev->irq, dev); 801 free_irq(dev->irq, dev);
814 802
815 iounmap(ei_status.mem); 803 iounmap(ei_local->mem);
816 release_resource(ax->mem); 804 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
817 kfree(ax->mem); 805 release_mem_region(mem->start, resource_size(mem));
818 806
819 if (ax->map2) { 807 if (ax->map2) {
820 iounmap(ax->map2); 808 iounmap(ax->map2);
821 release_resource(ax->mem2); 809 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
822 kfree(ax->mem2); 810 release_mem_region(mem->start, resource_size(mem));
823 } 811 }
824 812
825 free_netdev(dev); 813 free_netdev(dev);
@@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev)
827 return 0; 815 return 0;
828} 816}
829 817
830/* ax_probe 818/*
819 * ax_probe
831 * 820 *
832 * This is the entry point the platform device system uses to 821 * This is the entry point the platform device system uses to
833 * notify us of a new device to attach to. Allocate memory, find 822 * notify us of a new device to attach to. Allocate memory, find the
834 * the resources and information passed, and map the necessary registers. 823 * resources and information passed, and map the necessary registers.
835*/ 824 */
836
837static int ax_probe(struct platform_device *pdev) 825static int ax_probe(struct platform_device *pdev)
838{ 826{
839 struct net_device *dev; 827 struct net_device *dev;
840 struct ax_device *ax; 828 struct ei_device *ei_local;
841 struct resource *res; 829 struct ax_device *ax;
842 size_t size; 830 struct resource *irq, *mem, *mem2;
831 resource_size_t mem_size, mem2_size = 0;
843 int ret = 0; 832 int ret = 0;
844 833
845 dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); 834 dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev)
847 return -ENOMEM; 836 return -ENOMEM;
848 837
849 /* ok, let's setup our device */ 838 /* ok, let's setup our device */
839 SET_NETDEV_DEV(dev, &pdev->dev);
840 ei_local = netdev_priv(dev);
850 ax = to_ax_dev(dev); 841 ax = to_ax_dev(dev);
851 842
852 memset(ax, 0, sizeof(struct ax_device));
853
854 spin_lock_init(&ax->mii_lock);
855
856 ax->dev = pdev;
857 ax->plat = pdev->dev.platform_data; 843 ax->plat = pdev->dev.platform_data;
858 platform_set_drvdata(pdev, dev); 844 platform_set_drvdata(pdev, dev);
859 845
860 ei_status.rxcr_base = ax->plat->rcr_val; 846 ei_local->rxcr_base = ax->plat->rcr_val;
861 847
862 /* find the platform resources */ 848 /* find the platform resources */
863 849 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
864 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 850 if (!irq) {
865 if (res == NULL) {
866 dev_err(&pdev->dev, "no IRQ specified\n"); 851 dev_err(&pdev->dev, "no IRQ specified\n");
867 ret = -ENXIO; 852 ret = -ENXIO;
868 goto exit_mem; 853 goto exit_mem;
869 } 854 }
870 855
871 dev->irq = res->start; 856 dev->irq = irq->start;
872 ax->irqflags = res->flags & IRQF_TRIGGER_MASK; 857 ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
873 858
874 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 859 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
875 if (res == NULL) { 860 if (!mem) {
876 dev_err(&pdev->dev, "no MEM specified\n"); 861 dev_err(&pdev->dev, "no MEM specified\n");
877 ret = -ENXIO; 862 ret = -ENXIO;
878 goto exit_mem; 863 goto exit_mem;
879 } 864 }
880 865
881 size = (res->end - res->start) + 1; 866 mem_size = resource_size(mem);
882
883 /* setup the register offsets from either the platform data
884 * or by using the size of the resource provided */
885 867
868 /*
869 * setup the register offsets from either the platform data or
870 * by using the size of the resource provided
871 */
886 if (ax->plat->reg_offsets) 872 if (ax->plat->reg_offsets)
887 ei_status.reg_offset = ax->plat->reg_offsets; 873 ei_local->reg_offset = ax->plat->reg_offsets;
888 else { 874 else {
889 ei_status.reg_offset = ax->reg_offsets; 875 ei_local->reg_offset = ax->reg_offsets;
890 for (ret = 0; ret < 0x18; ret++) 876 for (ret = 0; ret < 0x18; ret++)
891 ax->reg_offsets[ret] = (size / 0x18) * ret; 877 ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
892 } 878 }
893 879
894 ax->mem = request_mem_region(res->start, size, pdev->name); 880 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
895 if (ax->mem == NULL) {
896 dev_err(&pdev->dev, "cannot reserve registers\n"); 881 dev_err(&pdev->dev, "cannot reserve registers\n");
897 ret = -ENXIO; 882 ret = -ENXIO;
898 goto exit_mem; 883 goto exit_mem;
899 } 884 }
900 885
901 ei_status.mem = ioremap(res->start, size); 886 ei_local->mem = ioremap(mem->start, mem_size);
902 dev->base_addr = (unsigned long)ei_status.mem; 887 dev->base_addr = (unsigned long)ei_local->mem;
903 888
904 if (ei_status.mem == NULL) { 889 if (ei_local->mem == NULL) {
905 dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n", 890 dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
906 (unsigned long long)res->start,
907 (unsigned long long)res->end);
908 891
909 ret = -ENXIO; 892 ret = -ENXIO;
910 goto exit_req; 893 goto exit_req;
911 } 894 }
912 895
913 /* look for reset area */ 896 /* look for reset area */
914 897 mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
915 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 898 if (!mem2) {
916 if (res == NULL) {
917 if (!ax->plat->reg_offsets) { 899 if (!ax->plat->reg_offsets) {
918 for (ret = 0; ret < 0x20; ret++) 900 for (ret = 0; ret < 0x20; ret++)
919 ax->reg_offsets[ret] = (size / 0x20) * ret; 901 ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
920 } 902 }
921
922 ax->map2 = NULL;
923 } else { 903 } else {
924 size = (res->end - res->start) + 1; 904 mem2_size = resource_size(mem2);
925 905
926 ax->mem2 = request_mem_region(res->start, size, pdev->name); 906 if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
927 if (ax->mem2 == NULL) {
928 dev_err(&pdev->dev, "cannot reserve registers\n"); 907 dev_err(&pdev->dev, "cannot reserve registers\n");
929 ret = -ENXIO; 908 ret = -ENXIO;
930 goto exit_mem1; 909 goto exit_mem1;
931 } 910 }
932 911
933 ax->map2 = ioremap(res->start, size); 912 ax->map2 = ioremap(mem2->start, mem2_size);
934 if (ax->map2 == NULL) { 913 if (!ax->map2) {
935 dev_err(&pdev->dev, "cannot map reset register\n"); 914 dev_err(&pdev->dev, "cannot map reset register\n");
936 ret = -ENXIO; 915 ret = -ENXIO;
937 goto exit_mem2; 916 goto exit_mem2;
938 } 917 }
939 918
940 ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem; 919 ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
941 } 920 }
942 921
943 /* got resources, now initialise and register device */ 922 /* got resources, now initialise and register device */
944 923 ret = ax_init_dev(dev);
945 ret = ax_init_dev(dev, 1);
946 if (!ret) 924 if (!ret)
947 return 0; 925 return 0;
948 926
949 if (ax->map2 == NULL) 927 if (!ax->map2)
950 goto exit_mem1; 928 goto exit_mem1;
951 929
952 iounmap(ax->map2); 930 iounmap(ax->map2);
953 931
954 exit_mem2: 932 exit_mem2:
955 release_resource(ax->mem2); 933 release_mem_region(mem2->start, mem2_size);
956 kfree(ax->mem2);
957 934
958 exit_mem1: 935 exit_mem1:
959 iounmap(ei_status.mem); 936 iounmap(ei_local->mem);
960 937
961 exit_req: 938 exit_req:
962 release_resource(ax->mem); 939 release_mem_region(mem->start, mem_size);
963 kfree(ax->mem);
964 940
965 exit_mem: 941 exit_mem:
966 free_netdev(dev); 942 free_netdev(dev);
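For context, the conversions in this hunk follow the usual platform-driver resource pattern: resource_size() replaces the open-coded end - start + 1, and request_mem_region()/ioremap() are unwound with iounmap()/release_mem_region() rather than release_resource() plus kfree(). A minimal sketch of that pattern, assuming a hypothetical driver (my_probe is illustrative, not from this patch):

static int my_probe(struct platform_device *pdev)
{
	struct resource *mem;
	resource_size_t mem_size;
	void __iomem *base;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENXIO;

	mem_size = resource_size(mem);	/* mem->end - mem->start + 1 */

	if (!request_mem_region(mem->start, mem_size, pdev->name))
		return -EBUSY;

	base = ioremap(mem->start, mem_size);
	if (!base) {
		release_mem_region(mem->start, mem_size);
		return -ENOMEM;
	}

	/* ... device setup using 'base'; unwind in reverse order on error ... */
	return 0;
}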
@@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev)
974static int ax_suspend(struct platform_device *dev, pm_message_t state) 950static int ax_suspend(struct platform_device *dev, pm_message_t state)
975{ 951{
976 struct net_device *ndev = platform_get_drvdata(dev); 952 struct net_device *ndev = platform_get_drvdata(dev);
977 struct ax_device *ax = to_ax_dev(ndev); 953 struct ax_device *ax = to_ax_dev(ndev);
978 954
979 ax->resume_open = ax->running; 955 ax->resume_open = ax->running;
980 956
@@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state)
987static int ax_resume(struct platform_device *pdev) 963static int ax_resume(struct platform_device *pdev)
988{ 964{
989 struct net_device *ndev = platform_get_drvdata(pdev); 965 struct net_device *ndev = platform_get_drvdata(pdev);
990 struct ax_device *ax = to_ax_dev(ndev); 966 struct ax_device *ax = to_ax_dev(ndev);
991 967
992 ax_initial_setup(ndev, netdev_priv(ndev)); 968 ax_initial_setup(ndev, netdev_priv(ndev));
993 ax_NS8390_init(ndev, ax->resume_open); 969 ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev)
1001 977
1002#else 978#else
1003#define ax_suspend NULL 979#define ax_suspend NULL
1004#define ax_resume NULL 980#define ax_resume NULL
1005#endif 981#endif
1006 982
1007static struct platform_driver axdrv = { 983static struct platform_driver axdrv = {
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index add0b93350d..f803c58b941 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#ifndef BE_H 18#ifndef BE_H
@@ -33,7 +33,7 @@
33 33
34#include "be_hw.h" 34#include "be_hw.h"
35 35
36#define DRV_VER "2.103.175u" 36#define DRV_VER "4.0.100u"
37#define DRV_NAME "be2net" 37#define DRV_NAME "be2net"
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -67,7 +67,7 @@ static inline char *nic_name(struct pci_dev *pdev)
67} 67}
68 68
69/* Number of bytes of an RX frame that are copied to skb->data */ 69/* Number of bytes of an RX frame that are copied to skb->data */
70#define BE_HDR_LEN 64 70#define BE_HDR_LEN ((u16) 64)
71#define BE_MAX_JUMBO_FRAME_SIZE 9018 71#define BE_MAX_JUMBO_FRAME_SIZE 9018
72#define BE_MIN_MTU 256 72#define BE_MIN_MTU 256
73 73
@@ -211,18 +211,40 @@ struct be_rx_stats {
211 u32 rx_fps; /* Rx frags per second */ 211 u32 rx_fps; /* Rx frags per second */
212}; 212};
213 213
214struct be_rx_compl_info {
215 u32 rss_hash;
216 u16 vid;
217 u16 pkt_size;
218 u16 rxq_idx;
219 u16 mac_id;
220 u8 vlanf;
221 u8 num_rcvd;
222 u8 err;
223 u8 ipf;
224 u8 tcpf;
225 u8 udpf;
226 u8 ip_csum;
227 u8 l4_csum;
228 u8 ipv6;
229 u8 vtm;
230 u8 pkt_type;
231};
232
214struct be_rx_obj { 233struct be_rx_obj {
215 struct be_adapter *adapter; 234 struct be_adapter *adapter;
216 struct be_queue_info q; 235 struct be_queue_info q;
217 struct be_queue_info cq; 236 struct be_queue_info cq;
237 struct be_rx_compl_info rxcp;
218 struct be_rx_page_info page_info_tbl[RX_Q_LEN]; 238 struct be_rx_page_info page_info_tbl[RX_Q_LEN];
219 struct be_eq_obj rx_eq; 239 struct be_eq_obj rx_eq;
220 struct be_rx_stats stats; 240 struct be_rx_stats stats;
221 u8 rss_id; 241 u8 rss_id;
222 bool rx_post_starved; /* Zero rx frags have been posted to BE */ 242 bool rx_post_starved; /* Zero rx frags have been posted to BE */
223 u16 last_frag_index; 243 u32 cache_line_barrier[16];
224 u16 rsvd; 244};
225 u32 cache_line_barrier[15]; 245
246struct be_drv_stats {
247 u8 be_on_die_temperature;
226}; 248};
227 249
228struct be_vf_cfg { 250struct be_vf_cfg {
@@ -234,6 +256,7 @@ struct be_vf_cfg {
234}; 256};
235 257
236#define BE_INVALID_PMAC_ID 0xffffffff 258#define BE_INVALID_PMAC_ID 0xffffffff
259
237struct be_adapter { 260struct be_adapter {
238 struct pci_dev *pdev; 261 struct pci_dev *pdev;
239 struct net_device *netdev; 262 struct net_device *netdev;
@@ -269,6 +292,7 @@ struct be_adapter {
269 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 292 u32 big_page_size; /* Compounded page size shared by rx wrbs */
270 293
271 u8 msix_vec_next_idx; 294 u8 msix_vec_next_idx;
295 struct be_drv_stats drv_stats;
272 296
273 struct vlan_group *vlan_grp; 297 struct vlan_group *vlan_grp;
274 u16 vlans_added; 298 u16 vlans_added;
@@ -281,6 +305,7 @@ struct be_adapter {
281 struct be_dma_mem stats_cmd; 305 struct be_dma_mem stats_cmd;
282 /* Work queue used to perform periodic tasks like getting statistics */ 306 /* Work queue used to perform periodic tasks like getting statistics */
283 struct delayed_work work; 307 struct delayed_work work;
308 u16 work_counter;
284 309
285 /* Ethtool knobs and info */ 310 /* Ethtool knobs and info */
286 bool rx_csum; /* BE card must perform rx-checksumming */ 311 bool rx_csum; /* BE card must perform rx-checksumming */
@@ -298,7 +323,7 @@ struct be_adapter {
298 u32 rx_fc; /* Rx flow control */ 323 u32 rx_fc; /* Rx flow control */
299 u32 tx_fc; /* Tx flow control */ 324 u32 tx_fc; /* Tx flow control */
300 bool ue_detected; 325 bool ue_detected;
301 bool stats_ioctl_sent; 326 bool stats_cmd_sent;
302 int link_speed; 327 int link_speed;
303 u8 port_type; 328 u8 port_type;
304 u8 transceiver; 329 u8 transceiver;
@@ -307,10 +332,13 @@ struct be_adapter {
307 u32 flash_status; 332 u32 flash_status;
308 struct completion flash_compl; 333 struct completion flash_compl;
309 334
335 bool be3_native;
310 bool sriov_enabled; 336 bool sriov_enabled;
311 struct be_vf_cfg vf_cfg[BE_MAX_VF]; 337 struct be_vf_cfg vf_cfg[BE_MAX_VF];
312 u8 is_virtfn; 338 u8 is_virtfn;
313 u32 sli_family; 339 u32 sli_family;
340 u8 hba_port_num;
341 u16 pvid;
314}; 342};
315 343
316#define be_physfn(adapter) (!adapter->is_virtfn) 344#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -450,9 +478,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
450 mac[5] = (u8)(addr & 0xFF); 478 mac[5] = (u8)(addr & 0xFF);
451 mac[4] = (u8)((addr >> 8) & 0xFF); 479 mac[4] = (u8)((addr >> 8) & 0xFF);
452 mac[3] = (u8)((addr >> 16) & 0xFF); 480 mac[3] = (u8)((addr >> 16) & 0xFF);
453 mac[2] = 0xC9; 481 /* Use the OUI from the current MAC address */
454 mac[1] = 0x00; 482 memcpy(mac, adapter->netdev->dev_addr, 3);
455 mac[0] = 0x00;
456} 483}
457 484
458extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 485extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
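The be_vf_eth_addr_generate() change in the hunk above stops hardcoding the 00:00:C9 OUI and instead reuses the first three bytes of the adapter's current MAC. A standalone sketch of the resulting scheme (all names here are illustrative, not driver code):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void vf_mac_generate(uint8_t mac[6], const uint8_t pf_mac[6],
			    uint32_t addr)
{
	mac[5] = (uint8_t)(addr & 0xFF);	/* 24-bit NIC-specific part */
	mac[4] = (uint8_t)((addr >> 8) & 0xFF);
	mac[3] = (uint8_t)((addr >> 16) & 0xFF);
	memcpy(mac, pf_mac, 3);			/* reuse the PF's OUI */
}

int main(void)
{
	const uint8_t pf[6] = { 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };
	uint8_t vf[6];

	vf_mac_generate(vf, pf, 0x0a0b0c);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       vf[0], vf[1], vf[2], vf[3], vf[4], vf[5]);
	return 0;
}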
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811faf72..5a4a87e7c5e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,21 +8,30 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
19#include "be_cmds.h" 19#include "be_cmds.h"
20 20
21/* Must be a power of 2 or else MODULO will BUG_ON */
22static int be_get_temp_freq = 32;
23
21static void be_mcc_notify(struct be_adapter *adapter) 24static void be_mcc_notify(struct be_adapter *adapter)
22{ 25{
23 struct be_queue_info *mccq = &adapter->mcc_obj.q; 26 struct be_queue_info *mccq = &adapter->mcc_obj.q;
24 u32 val = 0; 27 u32 val = 0;
25 28
29 if (adapter->eeh_err) {
30 dev_info(&adapter->pdev->dev,
31 "Error in Card Detected! Cannot issue commands\n");
32 return;
33 }
34
26 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 35 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 36 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28 37
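Why the new be_get_temp_freq must stay a power of 2: a masked modulo x & (n - 1) only matches x % n when n is a power of 2. A hedged standalone illustration (the driver's actual MODULO macro may be defined differently):

#include <assert.h>
#include <stdio.h>

#define IS_POW2(n)	((n) != 0 && ((n) & ((n) - 1)) == 0)
#define MODULO(x, n)	(assert(IS_POW2(n)), (x) & ((n) - 1))

int main(void)
{
	unsigned int work_counter = 96;

	/* 96 & 31 == 0, so every 32nd pass would trigger the query */
	if (MODULO(work_counter, 32) == 0)
		printf("would issue die-temperature query now\n");
	return 0;
}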
@@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
75 be_dws_le_to_cpu(&resp->hw_stats, 84 be_dws_le_to_cpu(&resp->hw_stats,
76 sizeof(resp->hw_stats)); 85 sizeof(resp->hw_stats));
77 netdev_stats_update(adapter); 86 netdev_stats_update(adapter);
78 adapter->stats_ioctl_sent = false; 87 adapter->stats_cmd_sent = false;
79 } 88 }
80 } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) && 89 } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
81 (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) { 90 (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
102{ 111{
103 if (evt->valid) { 112 if (evt->valid) {
104 adapter->vlan_prio_bmap = evt->available_priority_bmap; 113 adapter->vlan_prio_bmap = evt->available_priority_bmap;
114 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
105 adapter->recommended_prio = 115 adapter->recommended_prio =
106 evt->reco_default_priority << VLAN_PRIO_SHIFT; 116 evt->reco_default_priority << VLAN_PRIO_SHIFT;
107 } 117 }
@@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
117 } 127 }
118} 128}
119 129
 130/* Grp5 PVID evt */
131static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
132 struct be_async_event_grp5_pvid_state *evt)
133{
134 if (evt->enabled)
135 adapter->pvid = evt->tag;
136 else
137 adapter->pvid = 0;
138}
139
120static void be_async_grp5_evt_process(struct be_adapter *adapter, 140static void be_async_grp5_evt_process(struct be_adapter *adapter,
121 u32 trailer, struct be_mcc_compl *evt) 141 u32 trailer, struct be_mcc_compl *evt)
122{ 142{
@@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
134 be_async_grp5_qos_speed_process(adapter, 154 be_async_grp5_qos_speed_process(adapter,
135 (struct be_async_event_grp5_qos_link_speed *)evt); 155 (struct be_async_event_grp5_qos_link_speed *)evt);
136 break; 156 break;
157 case ASYNC_EVENT_PVID_STATE:
158 be_async_grp5_pvid_state_process(adapter,
159 (struct be_async_event_grp5_pvid_state *)evt);
160 break;
137 default: 161 default:
138 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); 162 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
139 break; 163 break;
@@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
216 int i, num, status = 0; 240 int i, num, status = 0;
217 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 241 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
218 242
243 if (adapter->eeh_err)
244 return -EIO;
245
219 for (i = 0; i < mcc_timeout; i++) { 246 for (i = 0; i < mcc_timeout; i++) {
220 num = be_process_mcc(adapter, &status); 247 num = be_process_mcc(adapter, &status);
221 if (num) 248 if (num)
@@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
245 int msecs = 0; 272 int msecs = 0;
246 u32 ready; 273 u32 ready;
247 274
275 if (adapter->eeh_err) {
276 dev_err(&adapter->pdev->dev,
277 "Error detected in card.Cannot issue commands\n");
278 return -EIO;
279 }
280
248 do { 281 do {
249 ready = ioread32(db); 282 ready = ioread32(db);
250 if (ready == 0xffffffff) { 283 if (ready == 0xffffffff) {
@@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
598 631
599/* Uses synchronous MCCQ */ 632/* Uses synchronous MCCQ */
600int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 633int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
601 u32 if_id, u32 *pmac_id) 634 u32 if_id, u32 *pmac_id, u32 domain)
602{ 635{
603 struct be_mcc_wrb *wrb; 636 struct be_mcc_wrb *wrb;
604 struct be_cmd_req_pmac_add *req; 637 struct be_cmd_req_pmac_add *req;
@@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
619 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 652 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
620 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); 653 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
621 654
655 req->hdr.domain = domain;
622 req->if_id = cpu_to_le32(if_id); 656 req->if_id = cpu_to_le32(if_id);
623 memcpy(req->mac_address, mac_addr, ETH_ALEN); 657 memcpy(req->mac_address, mac_addr, ETH_ALEN);
624 658
@@ -634,7 +668,7 @@ err:
634} 668}
635 669
636/* Uses synchronous MCCQ */ 670/* Uses synchronous MCCQ */
637int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) 671int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
638{ 672{
639 struct be_mcc_wrb *wrb; 673 struct be_mcc_wrb *wrb;
640 struct be_cmd_req_pmac_del *req; 674 struct be_cmd_req_pmac_del *req;
@@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
655 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 689 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
656 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); 690 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
657 691
692 req->hdr.domain = dom;
658 req->if_id = cpu_to_le32(if_id); 693 req->if_id = cpu_to_le32(if_id);
659 req->pmac_id = cpu_to_le32(pmac_id); 694 req->pmac_id = cpu_to_le32(pmac_id);
660 695
@@ -691,7 +726,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
691 726
692 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 727 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
693 if (lancer_chip(adapter)) { 728 if (lancer_chip(adapter)) {
694 req->hdr.version = 1; 729 req->hdr.version = 2;
695 req->page_size = 1; /* 1 for 4K */ 730 req->page_size = 1; /* 1 for 4K */
696 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt, 731 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
697 coalesce_wm); 732 coalesce_wm);
@@ -827,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter,
827 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, 862 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
828 sizeof(*req)); 863 sizeof(*req));
829 864
865 if (lancer_chip(adapter)) {
866 req->hdr.version = 1;
867 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
868 adapter->if_handle);
869 }
870
830 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 871 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
831 req->ulp_num = BE_ULP1_NUM; 872 req->ulp_num = BE_ULP1_NUM;
832 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 873 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -995,7 +1036,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
995} 1036}
996 1037
997/* Uses mbox */ 1038/* Uses mbox */
998int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) 1039int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
999{ 1040{
1000 struct be_mcc_wrb *wrb; 1041 struct be_mcc_wrb *wrb;
1001 struct be_cmd_req_if_destroy *req; 1042 struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1057,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
1016 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1057 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1017 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); 1058 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1018 1059
1060 req->hdr.domain = domain;
1019 req->interface_id = cpu_to_le32(interface_id); 1061 req->interface_id = cpu_to_le32(interface_id);
1020 1062
1021 status = be_mbox_notify_wait(adapter); 1063 status = be_mbox_notify_wait(adapter);
@@ -1036,6 +1078,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1036 struct be_sge *sge; 1078 struct be_sge *sge;
1037 int status = 0; 1079 int status = 0;
1038 1080
1081 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1082 be_cmd_get_die_temperature(adapter);
1083
1039 spin_lock_bh(&adapter->mcc_lock); 1084 spin_lock_bh(&adapter->mcc_lock);
1040 1085
1041 wrb = wrb_from_mccq(adapter); 1086 wrb = wrb_from_mccq(adapter);
@@ -1056,7 +1101,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1056 sge->len = cpu_to_le32(nonemb_cmd->size); 1101 sge->len = cpu_to_le32(nonemb_cmd->size);
1057 1102
1058 be_mcc_notify(adapter); 1103 be_mcc_notify(adapter);
1059 adapter->stats_ioctl_sent = true; 1104 adapter->stats_cmd_sent = true;
1060 1105
1061err: 1106err:
1062 spin_unlock_bh(&adapter->mcc_lock); 1107 spin_unlock_bh(&adapter->mcc_lock);
@@ -1103,6 +1148,44 @@ err:
1103 return status; 1148 return status;
1104} 1149}
1105 1150
1151/* Uses synchronous mcc */
1152int be_cmd_get_die_temperature(struct be_adapter *adapter)
1153{
1154 struct be_mcc_wrb *wrb;
1155 struct be_cmd_req_get_cntl_addnl_attribs *req;
1156 int status;
1157
1158 spin_lock_bh(&adapter->mcc_lock);
1159
1160 wrb = wrb_from_mccq(adapter);
1161 if (!wrb) {
1162 status = -EBUSY;
1163 goto err;
1164 }
1165 req = embedded_payload(wrb);
1166
1167 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1168 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
1169
1170 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1171 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
1172
1173 status = be_mcc_notify_wait(adapter);
1174 if (!status) {
1175 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
1176 embedded_payload(wrb);
1177 adapter->drv_stats.be_on_die_temperature =
1178 resp->on_die_temperature;
1179 }
1180 /* If IOCTL fails once, do not bother issuing it again */
1181 else
1182 be_get_temp_freq = 0;
1183
1184err:
1185 spin_unlock_bh(&adapter->mcc_lock);
1186 return status;
1187}
1188
1106/* Uses Mbox */ 1189/* Uses Mbox */
1107int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) 1190int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1108{ 1191{
@@ -1786,6 +1869,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1786 spin_lock_bh(&adapter->mcc_lock); 1869 spin_lock_bh(&adapter->mcc_lock);
1787 1870
1788 wrb = wrb_from_mccq(adapter); 1871 wrb = wrb_from_mccq(adapter);
1872 if (!wrb) {
1873 status = -EBUSY;
1874 goto err;
1875 }
1789 req = nonemb_cmd->va; 1876 req = nonemb_cmd->va;
1790 sge = nonembedded_sgl(wrb); 1877 sge = nonembedded_sgl(wrb);
1791 1878
@@ -1801,6 +1888,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1801 1888
1802 status = be_mcc_notify_wait(adapter); 1889 status = be_mcc_notify_wait(adapter);
1803 1890
1891err:
1804 spin_unlock_bh(&adapter->mcc_lock); 1892 spin_unlock_bh(&adapter->mcc_lock);
1805 return status; 1893 return status;
1806} 1894}
@@ -1863,8 +1951,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
1863 OPCODE_COMMON_SET_QOS, sizeof(*req)); 1951 OPCODE_COMMON_SET_QOS, sizeof(*req));
1864 1952
1865 req->hdr.domain = domain; 1953 req->hdr.domain = domain;
1866 req->valid_bits = BE_QOS_BITS_NIC; 1954 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
1867 req->max_bps_nic = bps; 1955 req->max_bps_nic = cpu_to_le32(bps);
1868 1956
1869 status = be_mcc_notify_wait(adapter); 1957 status = be_mcc_notify_wait(adapter);
1870 1958
@@ -1872,3 +1960,96 @@ err:
1872 spin_unlock_bh(&adapter->mcc_lock); 1960 spin_unlock_bh(&adapter->mcc_lock);
1873 return status; 1961 return status;
1874} 1962}
1963
1964int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
1965{
1966 struct be_mcc_wrb *wrb;
1967 struct be_cmd_req_cntl_attribs *req;
1968 struct be_cmd_resp_cntl_attribs *resp;
1969 struct be_sge *sge;
1970 int status;
1971 int payload_len = max(sizeof(*req), sizeof(*resp));
1972 struct mgmt_controller_attrib *attribs;
1973 struct be_dma_mem attribs_cmd;
1974
1975 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
1976 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
1977 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
1978 &attribs_cmd.dma);
1979 if (!attribs_cmd.va) {
1980 dev_err(&adapter->pdev->dev,
1981 "Memory allocation failure\n");
1982 return -ENOMEM;
1983 }
1984
1985 if (mutex_lock_interruptible(&adapter->mbox_lock))
1986 return -1;
1987
1988 wrb = wrb_from_mbox(adapter);
1989 if (!wrb) {
1990 status = -EBUSY;
1991 goto err;
1992 }
1993 req = attribs_cmd.va;
1994 sge = nonembedded_sgl(wrb);
1995
1996 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1997 OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
1998 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1999 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
2000 sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
2001 sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
2002 sge->len = cpu_to_le32(attribs_cmd.size);
2003
2004 status = be_mbox_notify_wait(adapter);
2005 if (!status) {
 2006 attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
2007 sizeof(struct be_cmd_resp_hdr));
2008 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2009 }
2010
2011err:
2012 mutex_unlock(&adapter->mbox_lock);
2013 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2014 attribs_cmd.dma);
2015 return status;
2016}
2017
2018/* Uses mbox */
2019int be_cmd_check_native_mode(struct be_adapter *adapter)
2020{
2021 struct be_mcc_wrb *wrb;
2022 struct be_cmd_req_set_func_cap *req;
2023 int status;
2024
2025 if (mutex_lock_interruptible(&adapter->mbox_lock))
2026 return -1;
2027
2028 wrb = wrb_from_mbox(adapter);
2029 if (!wrb) {
2030 status = -EBUSY;
2031 goto err;
2032 }
2033
2034 req = embedded_payload(wrb);
2035
2036 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2037 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
2038
2039 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2040 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
2041
2042 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2043 CAPABILITY_BE3_NATIVE_ERX_API);
2044 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2045
2046 status = be_mbox_notify_wait(adapter);
2047 if (!status) {
2048 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2049 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2050 CAPABILITY_BE3_NATIVE_ERX_API;
2051 }
2052err:
2053 mutex_unlock(&adapter->mbox_lock);
2054 return status;
2055}
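Both functions added above follow the driver's embedded-mailbox command pattern. Stripped to its skeleton, and using only helpers visible in this patch, it looks like the sketch below (OPCODE_EXAMPLE and the response handling are placeholders, not part of this patch):

static int be_cmd_example(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);		/* single outstanding mbox cmd */
	req = embedded_payload(wrb);		/* request lives inside the wrb */

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_EXAMPLE);
	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, OPCODE_EXAMPLE,
			   sizeof(*req));

	status = be_mbox_notify_wait(adapter);	/* post and poll completion */
	if (!status) {
		/* response overlays the same embedded payload */
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}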
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 83d15c8a9fa..4f254cfaabe 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18/* 18/*
@@ -88,6 +88,7 @@ struct be_mcc_compl {
88#define ASYNC_EVENT_CODE_GRP_5 0x5 88#define ASYNC_EVENT_CODE_GRP_5 0x5
89#define ASYNC_EVENT_QOS_SPEED 0x1 89#define ASYNC_EVENT_QOS_SPEED 0x1
90#define ASYNC_EVENT_COS_PRIORITY 0x2 90#define ASYNC_EVENT_COS_PRIORITY 0x2
91#define ASYNC_EVENT_PVID_STATE 0x3
91struct be_async_event_trailer { 92struct be_async_event_trailer {
92 u32 code; 93 u32 code;
93}; 94};
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
134 struct be_async_event_trailer trailer; 135 struct be_async_event_trailer trailer;
135} __packed; 136} __packed;
136 137
138/* When the event code of an async trailer is GRP5 and event type is
139 * PVID state, the mcc_compl must be interpreted as follows
140 */
141struct be_async_event_grp5_pvid_state {
142 u8 enabled;
143 u8 rsvd0;
144 u16 tag;
145 u32 event_tag;
146 u32 rsvd1;
147 struct be_async_event_trailer trailer;
148} __packed;
149
137struct be_mcc_mailbox { 150struct be_mcc_mailbox {
138 struct be_mcc_wrb wrb; 151 struct be_mcc_wrb wrb;
139 struct be_mcc_compl compl; 152 struct be_mcc_compl compl;
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
156#define OPCODE_COMMON_SET_QOS 28 169#define OPCODE_COMMON_SET_QOS 28
157#define OPCODE_COMMON_MCC_CREATE_EXT 90 170#define OPCODE_COMMON_MCC_CREATE_EXT 90
158#define OPCODE_COMMON_SEEPROM_READ 30 171#define OPCODE_COMMON_SEEPROM_READ 30
172#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
159#define OPCODE_COMMON_NTWK_RX_FILTER 34 173#define OPCODE_COMMON_NTWK_RX_FILTER 34
160#define OPCODE_COMMON_GET_FW_VERSION 35 174#define OPCODE_COMMON_GET_FW_VERSION 35
161#define OPCODE_COMMON_SET_FLOW_CONTROL 36 175#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -176,6 +190,8 @@ struct be_mcc_mailbox {
176#define OPCODE_COMMON_GET_BEACON_STATE 70 190#define OPCODE_COMMON_GET_BEACON_STATE 70
177#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 191#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
178#define OPCODE_COMMON_GET_PHY_DETAILS 102 192#define OPCODE_COMMON_GET_PHY_DETAILS 102
193#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
194#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
179 195
180#define OPCODE_ETH_RSS_CONFIG 1 196#define OPCODE_ETH_RSS_CONFIG 1
181#define OPCODE_ETH_ACPI_CONFIG 2 197#define OPCODE_ETH_ACPI_CONFIG 2
@@ -415,7 +431,7 @@ struct be_cmd_resp_mcc_create {
415/* Pseudo amap definition in which each bit of the actual structure is defined 431/* Pseudo amap definition in which each bit of the actual structure is defined
416 * as a byte: used to calculate offset/shift/mask of each field */ 432 * as a byte: used to calculate offset/shift/mask of each field */
417struct amap_tx_context { 433struct amap_tx_context {
418 u8 rsvd0[16]; /* dword 0 */ 434 u8 if_id[16]; /* dword 0 */
419 u8 tx_ring_size[4]; /* dword 0 */ 435 u8 tx_ring_size[4]; /* dword 0 */
420 u8 rsvd1[26]; /* dword 0 */ 436 u8 rsvd1[26]; /* dword 0 */
421 u8 pci_func_id[8]; /* dword 1 */ 437 u8 pci_func_id[8]; /* dword 1 */
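The "pseudo amap" comment above is the key to these structures: each bit of the hardware descriptor is declared as one byte, so offsetof() yields a bit position and sizeof() a bit width at compile time. A self-contained sketch of the trick (the driver's AMAP_SET_BITS/AMAP_GET_BITS macros are more general):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct amap_tx_context_sketch {		/* mirrors dword 0 above */
	uint8_t if_id[16];
	uint8_t tx_ring_size[4];
	uint8_t rsvd1[26];
};

#define BIT_OFFSET(s, f)	(offsetof(struct s, f))		/* in bits */
#define BIT_WIDTH(s, f)		(sizeof(((struct s *)0)->f))	/* in bits */

int main(void)
{
	uint32_t dword0 = 0;
	unsigned int shift =
		BIT_OFFSET(amap_tx_context_sketch, tx_ring_size) % 32;
	uint32_t mask =
		(1u << BIT_WIDTH(amap_tx_context_sketch, tx_ring_size)) - 1;

	dword0 |= (5u & mask) << shift;		/* set tx_ring_size = 5 */
	printf("dword0 = 0x%08x (shift %u, mask 0x%x)\n",
	       (unsigned int)dword0, shift, (unsigned int)mask);
	return 0;
}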
@@ -503,7 +519,8 @@ enum be_if_flags {
503 BE_IF_FLAGS_VLAN = 0x100, 519 BE_IF_FLAGS_VLAN = 0x100,
504 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, 520 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
505 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, 521 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
506 BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800 522 BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
523 BE_IF_FLAGS_MULTICAST = 0x1000
507}; 524};
508 525
509/* An RX interface is an object with one or more MAC addresses and 526/* An RX interface is an object with one or more MAC addresses and
@@ -619,7 +636,10 @@ struct be_rxf_stats {
619 u32 rx_drops_invalid_ring; /* dword 145*/ 636 u32 rx_drops_invalid_ring; /* dword 145*/
620 u32 forwarded_packets; /* dword 146*/ 637 u32 forwarded_packets; /* dword 146*/
621 u32 rx_drops_mtu; /* dword 147*/ 638 u32 rx_drops_mtu; /* dword 147*/
622 u32 rsvd0[15]; 639 u32 rsvd0[7];
640 u32 port0_jabber_events;
641 u32 port1_jabber_events;
642 u32 rsvd1[6];
623}; 643};
624 644
625struct be_erx_stats { 645struct be_erx_stats {
@@ -630,11 +650,16 @@ struct be_erx_stats {
630 u32 debug_pmem_pbuf_dealloc; /* dword 47*/ 650 u32 debug_pmem_pbuf_dealloc; /* dword 47*/
631}; 651};
632 652
653struct be_pmem_stats {
654 u32 eth_red_drops;
655 u32 rsvd[4];
656};
657
633struct be_hw_stats { 658struct be_hw_stats {
634 struct be_rxf_stats rxf; 659 struct be_rxf_stats rxf;
635 u32 rsvd[48]; 660 u32 rsvd[48];
636 struct be_erx_stats erx; 661 struct be_erx_stats erx;
637 u32 rsvd1[6]; 662 struct be_pmem_stats pmem;
638}; 663};
639 664
640struct be_cmd_req_get_stats { 665struct be_cmd_req_get_stats {
@@ -647,6 +672,20 @@ struct be_cmd_resp_get_stats {
647 struct be_hw_stats hw_stats; 672 struct be_hw_stats hw_stats;
648}; 673};
649 674
675struct be_cmd_req_get_cntl_addnl_attribs {
676 struct be_cmd_req_hdr hdr;
677 u8 rsvd[8];
678};
679
680struct be_cmd_resp_get_cntl_addnl_attribs {
681 struct be_cmd_resp_hdr hdr;
682 u16 ipl_file_number;
683 u8 ipl_file_version;
684 u8 rsvd0;
 685 u8 on_die_temperature; /* in degrees centigrade */
686 u8 rsvd1[3];
687};
688
650struct be_cmd_req_vlan_config { 689struct be_cmd_req_vlan_config {
651 struct be_cmd_req_hdr hdr; 690 struct be_cmd_req_hdr hdr;
652 u8 interface_id; 691 u8 interface_id;
@@ -994,17 +1033,47 @@ struct be_cmd_resp_set_qos {
994 u32 rsvd; 1033 u32 rsvd;
995}; 1034};
996 1035
1036/*********************** Controller Attributes ***********************/
1037struct be_cmd_req_cntl_attribs {
1038 struct be_cmd_req_hdr hdr;
1039};
1040
1041struct be_cmd_resp_cntl_attribs {
1042 struct be_cmd_resp_hdr hdr;
1043 struct mgmt_controller_attrib attribs;
1044};
1045
1046/*********************** Set driver function ***********************/
1047#define CAPABILITY_SW_TIMESTAMPS 2
1048#define CAPABILITY_BE3_NATIVE_ERX_API 4
1049
1050struct be_cmd_req_set_func_cap {
1051 struct be_cmd_req_hdr hdr;
1052 u32 valid_cap_flags;
1053 u32 cap_flags;
1054 u8 rsvd[212];
1055};
1056
1057struct be_cmd_resp_set_func_cap {
1058 struct be_cmd_resp_hdr hdr;
1059 u32 valid_cap_flags;
1060 u32 cap_flags;
1061 u8 rsvd[212];
1062};
1063
997extern int be_pci_fnum_get(struct be_adapter *adapter); 1064extern int be_pci_fnum_get(struct be_adapter *adapter);
998extern int be_cmd_POST(struct be_adapter *adapter); 1065extern int be_cmd_POST(struct be_adapter *adapter);
999extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1066extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1000 u8 type, bool permanent, u32 if_handle); 1067 u8 type, bool permanent, u32 if_handle);
1001extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 1068extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1002 u32 if_id, u32 *pmac_id); 1069 u32 if_id, u32 *pmac_id, u32 domain);
1003extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); 1070extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
1071 u32 pmac_id, u32 domain);
1004extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, 1072extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
1005 u32 en_flags, u8 *mac, bool pmac_invalid, 1073 u32 en_flags, u8 *mac, bool pmac_invalid,
1006 u32 *if_handle, u32 *pmac_id, u32 domain); 1074 u32 *if_handle, u32 *pmac_id, u32 domain);
1007extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); 1075extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
1076 u32 domain);
1008extern int be_cmd_eq_create(struct be_adapter *adapter, 1077extern int be_cmd_eq_create(struct be_adapter *adapter,
1009 struct be_queue_info *eq, int eq_delay); 1078 struct be_queue_info *eq, int eq_delay);
1010extern int be_cmd_cq_create(struct be_adapter *adapter, 1079extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1076,4 +1145,7 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
1076 struct be_dma_mem *cmd); 1145 struct be_dma_mem *cmd);
1077extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 1146extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1078extern void be_detect_dump_ue(struct be_adapter *adapter); 1147extern void be_detect_dump_ue(struct be_adapter *adapter);
1148extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
1149extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1150extern int be_cmd_check_native_mode(struct be_adapter *adapter);
1079 1151
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index b4be0271efe..aac248fbd18 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
26 int offset; 26 int offset;
27}; 27};
28 28
29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT}; 29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
30 PMEMSTAT, DRVSTAT};
30#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ 31#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 offsetof(_struct, field) 32 offsetof(_struct, field)
32#define NETSTAT_INFO(field) #field, NETSTAT,\ 33#define NETSTAT_INFO(field) #field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
43 field) 44 field)
44#define ERXSTAT_INFO(field) #field, ERXSTAT,\ 45#define ERXSTAT_INFO(field) #field, ERXSTAT,\
45 FIELDINFO(struct be_erx_stats, field) 46 FIELDINFO(struct be_erx_stats, field)
47#define PMEMSTAT_INFO(field) #field, PMEMSTAT,\
48 FIELDINFO(struct be_pmem_stats, field)
49#define DRVSTAT_INFO(field) #field, DRVSTAT,\
50 FIELDINFO(struct be_drv_stats, \
51 field)
46 52
47static const struct be_ethtool_stat et_stats[] = { 53static const struct be_ethtool_stat et_stats[] = {
48 {NETSTAT_INFO(rx_packets)}, 54 {NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
99 {MISCSTAT_INFO(rx_drops_too_many_frags)}, 105 {MISCSTAT_INFO(rx_drops_too_many_frags)},
100 {MISCSTAT_INFO(rx_drops_invalid_ring)}, 106 {MISCSTAT_INFO(rx_drops_invalid_ring)},
101 {MISCSTAT_INFO(forwarded_packets)}, 107 {MISCSTAT_INFO(forwarded_packets)},
102 {MISCSTAT_INFO(rx_drops_mtu)} 108 {MISCSTAT_INFO(rx_drops_mtu)},
109 {MISCSTAT_INFO(port0_jabber_events)},
110 {MISCSTAT_INFO(port1_jabber_events)},
111 {PMEMSTAT_INFO(eth_red_drops)},
112 {DRVSTAT_INFO(be_on_die_temperature)}
103}; 113};
104#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) 114#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
105 115
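The PMEMSTAT/DRVSTAT additions extend the usual ethtool stat-table idiom: each entry records the source struct type, byte offset, and field size, and be_get_ethtool_stats() just picks a base pointer per type and copies from base + offset. A minimal standalone sketch of the same idea with made-up structs (the real code dispatches on field size; one size is enough here):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct drv_stats { uint8_t on_die_temperature; };

enum { DRVSTAT };

struct stat_desc {
	const char *name;
	int type;
	size_t size, offset;
};

#define DRVSTAT_INFO(f) { #f, DRVSTAT, sizeof(((struct drv_stats *)0)->f), \
			  offsetof(struct drv_stats, f) }

static const struct stat_desc et_stats[] = { DRVSTAT_INFO(on_die_temperature) };

int main(void)
{
	struct drv_stats ds = { .on_die_temperature = 55 };
	const struct stat_desc *d = &et_stats[0];
	const uint8_t *p = (const uint8_t *)&ds + d->offset;
	uint64_t val = (d->size == 1) ? *p : 0;	/* real code switches on size */

	printf("%s = %llu\n", d->name, (unsigned long long)val);
	return 0;
}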
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
121 "MAC Loopback test", 131 "MAC Loopback test",
122 "PHY Loopback test", 132 "PHY Loopback test",
123 "External Loopback test", 133 "External Loopback test",
124 "DDR DMA test" 134 "DDR DMA test",
125 "Link test" 135 "Link test"
126}; 136};
127 137
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
276 case MISCSTAT: 286 case MISCSTAT:
277 p = &hw_stats->rxf; 287 p = &hw_stats->rxf;
278 break; 288 break;
289 case PMEMSTAT:
290 p = &hw_stats->pmem;
291 break;
292 case DRVSTAT:
293 p = &adapter->drv_stats;
294 break;
279 } 295 }
280 296
281 p = (u8 *)p + et_stats[i].offset; 297 p = (u8 *)p + et_stats[i].offset;
@@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
376 } 392 }
377 393
378 phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info); 394 phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
379 phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size, 395 phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
380 &phy_cmd.dma); 396 phy_cmd.size, &phy_cmd.dma,
397 GFP_KERNEL);
381 if (!phy_cmd.va) { 398 if (!phy_cmd.va) {
382 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 399 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
383 return -ENOMEM; 400 return -ENOMEM;
@@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
416 adapter->port_type = ecmd->port; 433 adapter->port_type = ecmd->port;
417 adapter->transceiver = ecmd->transceiver; 434 adapter->transceiver = ecmd->transceiver;
418 adapter->autoneg = ecmd->autoneg; 435 adapter->autoneg = ecmd->autoneg;
419 pci_free_consistent(adapter->pdev, phy_cmd.size, 436 dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
420 phy_cmd.va, phy_cmd.dma); 437 phy_cmd.dma);
421 } else { 438 } else {
422 ecmd->speed = adapter->link_speed; 439 ecmd->speed = adapter->link_speed;
423 ecmd->port = adapter->port_type; 440 ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
496 int status; 513 int status;
497 u32 cur; 514 u32 cur;
498 515
499 be_cmd_get_beacon_state(adapter, adapter->port_num, &cur); 516 be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
500 517
501 if (cur == BEACON_STATE_ENABLED) 518 if (cur == BEACON_STATE_ENABLED)
502 return 0; 519 return 0;
@@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
504 if (data < 2) 521 if (data < 2)
505 data = 2; 522 data = 2;
506 523
507 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0, 524 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
508 BEACON_STATE_ENABLED); 525 BEACON_STATE_ENABLED);
509 set_current_state(TASK_INTERRUPTIBLE); 526 set_current_state(TASK_INTERRUPTIBLE);
510 schedule_timeout(data*HZ); 527 schedule_timeout(data*HZ);
511 528
512 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0, 529 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
513 BEACON_STATE_DISABLED); 530 BEACON_STATE_DISABLED);
514 531
515 return status; 532 return status;
516} 533}
517 534
535static bool
536be_is_wol_supported(struct be_adapter *adapter)
537{
538 if (!be_physfn(adapter))
539 return false;
540 else
541 return true;
542}
543
518static void 544static void
519be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 545be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
520{ 546{
521 struct be_adapter *adapter = netdev_priv(netdev); 547 struct be_adapter *adapter = netdev_priv(netdev);
522 548
523 wol->supported = WAKE_MAGIC; 549 if (be_is_wol_supported(adapter))
550 wol->supported = WAKE_MAGIC;
551
524 if (adapter->wol) 552 if (adapter->wol)
525 wol->wolopts = WAKE_MAGIC; 553 wol->wolopts = WAKE_MAGIC;
526 else 554 else
@@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
536 if (wol->wolopts & ~WAKE_MAGIC) 564 if (wol->wolopts & ~WAKE_MAGIC)
537 return -EINVAL; 565 return -EINVAL;
538 566
539 if (wol->wolopts & WAKE_MAGIC) 567 if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
540 adapter->wol = true; 568 adapter->wol = true;
541 else 569 else
542 adapter->wol = false; 570 adapter->wol = false;
@@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
554 }; 582 };
555 583
556 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 584 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
557 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, 585 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
558 &ddrdma_cmd.dma); 586 &ddrdma_cmd.dma, GFP_KERNEL);
559 if (!ddrdma_cmd.va) { 587 if (!ddrdma_cmd.va) {
560 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 588 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
561 return -ENOMEM; 589 return -ENOMEM;
@@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
569 } 597 }
570 598
571err: 599err:
572 pci_free_consistent(adapter->pdev, ddrdma_cmd.size, 600 dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
573 ddrdma_cmd.va, ddrdma_cmd.dma); 601 ddrdma_cmd.dma);
574 return ret; 602 return ret;
575} 603}
576 604
577static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, 605static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
578 u64 *status) 606 u64 *status)
579{ 607{
580 be_cmd_set_loopback(adapter, adapter->port_num, 608 be_cmd_set_loopback(adapter, adapter->hba_port_num,
581 loopback_type, 1); 609 loopback_type, 1);
582 *status = be_cmd_loopback_test(adapter, adapter->port_num, 610 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
583 loopback_type, 1500, 611 loopback_type, 1500,
584 2, 0xabc); 612 2, 0xabc);
585 be_cmd_set_loopback(adapter, adapter->port_num, 613 be_cmd_set_loopback(adapter, adapter->hba_port_num,
586 BE_NO_LOOPBACK, 1); 614 BE_NO_LOOPBACK, 1);
587 return *status; 615 return *status;
588} 616}
@@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
621 &qos_link_speed) != 0) { 649 &qos_link_speed) != 0) {
622 test->flags |= ETH_TEST_FL_FAILED; 650 test->flags |= ETH_TEST_FL_FAILED;
623 data[4] = -1; 651 data[4] = -1;
624 } else if (mac_speed) { 652 } else if (!mac_speed) {
653 test->flags |= ETH_TEST_FL_FAILED;
625 data[4] = 1; 654 data[4] = 1;
626 } 655 }
627} 656}
@@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
662 691
663 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 692 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
664 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 693 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
665 eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size, 694 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
666 &eeprom_cmd.dma); 695 &eeprom_cmd.dma, GFP_KERNEL);
667 696
668 if (!eeprom_cmd.va) { 697 if (!eeprom_cmd.va) {
669 dev_err(&adapter->pdev->dev, 698 dev_err(&adapter->pdev->dev,
@@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
677 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va; 706 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
678 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len); 707 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
679 } 708 }
680 pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va, 709 dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
681 eeprom_cmd.dma); 710 eeprom_cmd.dma);
682 711
683 return status; 712 return status;
684} 713}
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 4096d977823..d4344a06090 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18/********* Mailbox door bell *************/ 18/********* Mailbox door bell *************/
@@ -44,6 +44,18 @@
44#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */ 44#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */ 45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46 46
47
48/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
49#define SLIPORT_STATUS_OFFSET 0x404
50#define SLIPORT_CONTROL_OFFSET 0x408
51
52#define SLIPORT_STATUS_ERR_MASK 0x80000000
53#define SLIPORT_STATUS_RN_MASK 0x01000000
54#define SLIPORT_STATUS_RDY_MASK 0x00800000
55
56
57#define SLI_PORT_CONTROL_IP_MASK 0x08000000
58
47/********* Memory BAR register ************/ 59/********* Memory BAR register ************/
48#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 60#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
49/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 61/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -289,10 +301,10 @@ struct be_eth_rx_d {
289 301
290/* RX Compl Queue Descriptor */ 302/* RX Compl Queue Descriptor */
291 303
292/* Pseudo amap definition for eth_rx_compl in which each bit of the 304/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
293 * actual structure is defined as a byte: used to calculate 305 * each bit of the actual structure is defined as a byte: used to calculate
294 * offset/shift/mask of each field */ 306 * offset/shift/mask of each field */
295struct amap_eth_rx_compl { 307struct amap_eth_rx_compl_v0 {
296 u8 vlan_tag[16]; /* dword 0 */ 308 u8 vlan_tag[16]; /* dword 0 */
297 u8 pktsize[14]; /* dword 0 */ 309 u8 pktsize[14]; /* dword 0 */
298 u8 port; /* dword 0 */ 310 u8 port; /* dword 0 */
@@ -323,10 +335,92 @@ struct amap_eth_rx_compl {
323 u8 rsshash[32]; /* dword 3 */ 335 u8 rsshash[32]; /* dword 3 */
324} __packed; 336} __packed;
325 337
338/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
339 * each bit of the actual structure is defined as a byte: used to calculate
340 * offset/shift/mask of each field */
341struct amap_eth_rx_compl_v1 {
342 u8 vlan_tag[16]; /* dword 0 */
343 u8 pktsize[14]; /* dword 0 */
344 u8 vtp; /* dword 0 */
345 u8 ip_opt; /* dword 0 */
346 u8 err; /* dword 1 */
347 u8 rsshp; /* dword 1 */
348 u8 ipf; /* dword 1 */
349 u8 tcpf; /* dword 1 */
350 u8 udpf; /* dword 1 */
351 u8 ipcksm; /* dword 1 */
352 u8 l4_cksm; /* dword 1 */
353 u8 ip_version; /* dword 1 */
354 u8 macdst[7]; /* dword 1 */
355 u8 rsvd0; /* dword 1 */
356 u8 fragndx[10]; /* dword 1 */
357 u8 ct[2]; /* dword 1 */
358 u8 sw; /* dword 1 */
359 u8 numfrags[3]; /* dword 1 */
360 u8 rss_flush; /* dword 2 */
361 u8 cast_enc[2]; /* dword 2 */
362 u8 vtm; /* dword 2 */
363 u8 rss_bank; /* dword 2 */
364 u8 port[2]; /* dword 2 */
365 u8 vntagp; /* dword 2 */
366 u8 header_len[8]; /* dword 2 */
367 u8 header_split[2]; /* dword 2 */
368 u8 rsvd1[13]; /* dword 2 */
369 u8 valid; /* dword 2 */
370 u8 rsshash[32]; /* dword 3 */
371} __packed;
372
326struct be_eth_rx_compl { 373struct be_eth_rx_compl {
327 u32 dw[4]; 374 u32 dw[4];
328}; 375};
329 376
377struct mgmt_hba_attribs {
378 u8 flashrom_version_string[32];
379 u8 manufacturer_name[32];
380 u32 supported_modes;
381 u32 rsvd0[3];
382 u8 ncsi_ver_string[12];
383 u32 default_extended_timeout;
384 u8 controller_model_number[32];
385 u8 controller_description[64];
386 u8 controller_serial_number[32];
387 u8 ip_version_string[32];
388 u8 firmware_version_string[32];
389 u8 bios_version_string[32];
390 u8 redboot_version_string[32];
391 u8 driver_version_string[32];
392 u8 fw_on_flash_version_string[32];
393 u32 functionalities_supported;
394 u16 max_cdblength;
395 u8 asic_revision;
396 u8 generational_guid[16];
397 u8 hba_port_count;
398 u16 default_link_down_timeout;
399 u8 iscsi_ver_min_max;
400 u8 multifunction_device;
401 u8 cache_valid;
402 u8 hba_status;
403 u8 max_domains_supported;
404 u8 phy_port;
405 u32 firmware_post_status;
406 u32 hba_mtu[8];
407 u32 rsvd1[4];
408};
409
410struct mgmt_controller_attrib {
411 struct mgmt_hba_attribs hba_attribs;
412 u16 pci_vendor_id;
413 u16 pci_device_id;
414 u16 pci_sub_vendor_id;
415 u16 pci_sub_system_id;
416 u8 pci_bus_number;
417 u8 pci_device_number;
418 u8 pci_function_number;
419 u8 interface_type;
420 u64 unique_identifier;
421 u32 rsvd0[5];
422};
423
330struct controller_id { 424struct controller_id {
331 u32 vendor; 425 u32 vendor;
332 u32 device; 426 u32 device;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b7152..a71163f1e34 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
@@ -25,9 +25,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation"); 25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27 27
28static unsigned int rx_frag_size = 2048; 28static ushort rx_frag_size = 2048;
29static unsigned int num_vfs; 29static unsigned int num_vfs;
30module_param(rx_frag_size, uint, S_IRUGO); 30module_param(rx_frag_size, ushort, S_IRUGO);
31module_param(num_vfs, uint, S_IRUGO); 31module_param(num_vfs, uint, S_IRUGO);
32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); 32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125{ 125{
126 struct be_dma_mem *mem = &q->dma_mem; 126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va) 127 if (mem->va)
128 pci_free_consistent(adapter->pdev, mem->size, 128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->va, mem->dma); 129 mem->dma);
130} 130}
131 131
132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, 132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 q->len = len; 138 q->len = len;
139 q->entry_size = entry_size; 139 q->entry_size = entry_size;
140 mem->size = len * entry_size; 140 mem->size = len * entry_size;
141 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma); 141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
142 if (!mem->va) 143 if (!mem->va)
143 return -1; 144 return -1;
144 memset(mem->va, 0, mem->size); 145 memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
235 if (!be_physfn(adapter)) 236 if (!be_physfn(adapter))
236 goto netdev_addr; 237 goto netdev_addr;
237 238
238 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); 239 status = be_cmd_pmac_del(adapter, adapter->if_handle,
240 adapter->pmac_id, 0);
239 if (status) 241 if (status)
240 return status; 242 return status;
241 243
242 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, 244 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
243 adapter->if_handle, &adapter->pmac_id); 245 adapter->if_handle, &adapter->pmac_id, 0);
244netdev_addr: 246netdev_addr:
245 if (!status) 247 if (!status)
246 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 248 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -312,11 +314,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
312 if (adapter->link_up != link_up) { 314 if (adapter->link_up != link_up) {
313 adapter->link_speed = -1; 315 adapter->link_speed = -1;
314 if (link_up) { 316 if (link_up) {
315 netif_start_queue(netdev);
316 netif_carrier_on(netdev); 317 netif_carrier_on(netdev);
317 printk(KERN_INFO "%s: Link up\n", netdev->name); 318 printk(KERN_INFO "%s: Link up\n", netdev->name);
318 } else { 319 } else {
319 netif_stop_queue(netdev);
320 netif_carrier_off(netdev); 320 netif_carrier_off(netdev);
321 printk(KERN_INFO "%s: Link down\n", netdev->name); 321 printk(KERN_INFO "%s: Link down\n", netdev->name);
322 } 322 }
@@ -486,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
486 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); 486 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
487} 487}
488 488
489static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb, 489static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
490 bool unmap_single) 490 bool unmap_single)
491{ 491{
492 dma_addr_t dma; 492 dma_addr_t dma;
@@ -496,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
496 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; 496 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
497 if (wrb->frag_len) { 497 if (wrb->frag_len) {
498 if (unmap_single) 498 if (unmap_single)
499 pci_unmap_single(pdev, dma, wrb->frag_len, 499 dma_unmap_single(dev, dma, wrb->frag_len,
500 PCI_DMA_TODEVICE); 500 DMA_TO_DEVICE);
501 else 501 else
502 pci_unmap_page(pdev, dma, wrb->frag_len, 502 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
503 PCI_DMA_TODEVICE);
504 } 503 }
505} 504}
506 505
@@ -509,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
509{ 508{
510 dma_addr_t busaddr; 509 dma_addr_t busaddr;
511 int i, copied = 0; 510 int i, copied = 0;
512 struct pci_dev *pdev = adapter->pdev; 511 struct device *dev = &adapter->pdev->dev;
513 struct sk_buff *first_skb = skb; 512 struct sk_buff *first_skb = skb;
514 struct be_queue_info *txq = &adapter->tx_obj.q; 513 struct be_queue_info *txq = &adapter->tx_obj.q;
515 struct be_eth_wrb *wrb; 514 struct be_eth_wrb *wrb;
@@ -523,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
523 522
524 if (skb->len > skb->data_len) { 523 if (skb->len > skb->data_len) {
525 int len = skb_headlen(skb); 524 int len = skb_headlen(skb);
526 busaddr = pci_map_single(pdev, skb->data, len, 525 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
527 PCI_DMA_TODEVICE); 526 if (dma_mapping_error(dev, busaddr))
528 if (pci_dma_mapping_error(pdev, busaddr))
529 goto dma_err; 527 goto dma_err;
530 map_single = true; 528 map_single = true;
531 wrb = queue_head_node(txq); 529 wrb = queue_head_node(txq);
@@ -538,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
538 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 536 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
539 struct skb_frag_struct *frag = 537 struct skb_frag_struct *frag =
540 &skb_shinfo(skb)->frags[i]; 538 &skb_shinfo(skb)->frags[i];
541 busaddr = pci_map_page(pdev, frag->page, 539 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
542 frag->page_offset, 540 frag->size, DMA_TO_DEVICE);
543 frag->size, PCI_DMA_TODEVICE); 541 if (dma_mapping_error(dev, busaddr))
544 if (pci_dma_mapping_error(pdev, busaddr))
545 goto dma_err; 542 goto dma_err;
546 wrb = queue_head_node(txq); 543 wrb = queue_head_node(txq);
547 wrb_fill(wrb, busaddr, frag->size); 544 wrb_fill(wrb, busaddr, frag->size);
@@ -565,7 +562,7 @@ dma_err:
565 txq->head = map_head; 562 txq->head = map_head;
566 while (copied) { 563 while (copied) {
567 wrb = queue_head_node(txq); 564 wrb = queue_head_node(txq);
568 unmap_tx_frag(pdev, wrb, map_single); 565 unmap_tx_frag(dev, wrb, map_single);
569 map_single = false; 566 map_single = false;
570 copied -= wrb->frag_len; 567 copied -= wrb->frag_len;
571 queue_head_inc(txq); 568 queue_head_inc(txq);
@@ -745,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
745 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) 742 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
746 status = be_cmd_pmac_del(adapter, 743 status = be_cmd_pmac_del(adapter,
747 adapter->vf_cfg[vf].vf_if_handle, 744 adapter->vf_cfg[vf].vf_if_handle,
748 adapter->vf_cfg[vf].vf_pmac_id); 745 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
749 746
750 status = be_cmd_pmac_add(adapter, mac, 747 status = be_cmd_pmac_add(adapter, mac,
751 adapter->vf_cfg[vf].vf_if_handle, 748 adapter->vf_cfg[vf].vf_if_handle,
752 &adapter->vf_cfg[vf].vf_pmac_id); 749 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
753 750
754 if (status) 751 if (status)
755 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", 752 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -824,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
824 rate = 10000; 821 rate = 10000;
825 822
826 adapter->vf_cfg[vf].vf_tx_rate = rate; 823 adapter->vf_cfg[vf].vf_tx_rate = rate;
827 status = be_cmd_set_qos(adapter, rate / 10, vf); 824 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
828 825
829 if (status) 826 if (status)
830 dev_info(&adapter->pdev->dev, 827 dev_info(&adapter->pdev->dev,
@@ -854,28 +851,26 @@ static void be_rx_rate_update(struct be_rx_obj *rxo)
854} 851}
855 852
856static void be_rx_stats_update(struct be_rx_obj *rxo, 853static void be_rx_stats_update(struct be_rx_obj *rxo,
857 u32 pktsize, u16 numfrags, u8 pkt_type) 854 struct be_rx_compl_info *rxcp)
858{ 855{
859 struct be_rx_stats *stats = &rxo->stats; 856 struct be_rx_stats *stats = &rxo->stats;
860 857
861 stats->rx_compl++; 858 stats->rx_compl++;
862 stats->rx_frags += numfrags; 859 stats->rx_frags += rxcp->num_rcvd;
863 stats->rx_bytes += pktsize; 860 stats->rx_bytes += rxcp->pkt_size;
864 stats->rx_pkts++; 861 stats->rx_pkts++;
865 if (pkt_type == BE_MULTICAST_PACKET) 862 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
866 stats->rx_mcast_pkts++; 863 stats->rx_mcast_pkts++;
864 if (rxcp->err)
865 stats->rxcp_err++;
867} 866}
868 867
869static inline bool csum_passed(struct be_eth_rx_compl *rxcp) 868static inline bool csum_passed(struct be_rx_compl_info *rxcp)
870{ 869{
871 u8 l4_cksm, ipv6, ipcksm; 870 /* L4 checksum is not reliable for non TCP/UDP packets.
872 871 * Also ignore ipcksm for ipv6 pkts */
873 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); 872 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
874 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp); 873 (rxcp->ip_csum || rxcp->ipv6);
875 ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
876
877 /* Ignore ipcksm for ipv6 pkts */
878 return l4_cksm && (ipcksm || ipv6);
879} 874}
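The rewritten csum_passed() trusts the hardware L4 checksum only for TCP/UDP frames and ignores the IP-header checksum bit for IPv6, which carries no header checksum. A standalone restatement with plain booleans (field names mirror struct be_rx_compl_info; this is a sketch, not driver code):

	#include <stdbool.h>

	static bool csum_passed_sketch(bool tcpf, bool udpf, bool l4_csum,
				       bool ip_csum, bool ipv6)
	{
		/* Non-TCP/UDP frames (tcpf == udpf == false) always return
		 * false, so the stack verifies their checksums in software. */
		return (tcpf || udpf) && l4_csum && (ip_csum || ipv6);
	}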
880 875
881static struct be_rx_page_info * 876static struct be_rx_page_info *
@@ -890,8 +885,9 @@ get_rx_page_info(struct be_adapter *adapter,
890 BUG_ON(!rx_page_info->page); 885 BUG_ON(!rx_page_info->page);
891 886
892 if (rx_page_info->last_page_user) { 887 if (rx_page_info->last_page_user) {
893 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus), 888 dma_unmap_page(&adapter->pdev->dev,
894 adapter->big_page_size, PCI_DMA_FROMDEVICE); 889 dma_unmap_addr(rx_page_info, bus),
890 adapter->big_page_size, DMA_FROM_DEVICE);
895 rx_page_info->last_page_user = false; 891 rx_page_info->last_page_user = false;
896 } 892 }
897 893
@@ -902,26 +898,17 @@ get_rx_page_info(struct be_adapter *adapter,
902/* Throw away the data in the Rx completion */ 898/* Throw away the data in the Rx completion */
903static void be_rx_compl_discard(struct be_adapter *adapter, 899static void be_rx_compl_discard(struct be_adapter *adapter,
904 struct be_rx_obj *rxo, 900 struct be_rx_obj *rxo,
905 struct be_eth_rx_compl *rxcp) 901 struct be_rx_compl_info *rxcp)
906{ 902{
907 struct be_queue_info *rxq = &rxo->q; 903 struct be_queue_info *rxq = &rxo->q;
908 struct be_rx_page_info *page_info; 904 struct be_rx_page_info *page_info;
909 u16 rxq_idx, i, num_rcvd; 905 u16 i, num_rcvd = rxcp->num_rcvd;
910 906
911 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 907 for (i = 0; i < num_rcvd; i++) {
912 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 908 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
913 909 put_page(page_info->page);
914 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */ 910 memset(page_info, 0, sizeof(*page_info));
915 if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) { 911 index_inc(&rxcp->rxq_idx, rxq->len);
916
917 rxo->last_frag_index = rxq_idx;
918
919 for (i = 0; i < num_rcvd; i++) {
920 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
921 put_page(page_info->page);
922 memset(page_info, 0, sizeof(*page_info));
923 index_inc(&rxq_idx, rxq->len);
924 }
925 } 912 }
926} 913}
927 914
@@ -930,30 +917,23 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
930 * indicated by rxcp. 917 * indicated by rxcp.
931 */ 918 */
932static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, 919static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
933 struct sk_buff *skb, struct be_eth_rx_compl *rxcp, 920 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
934 u16 num_rcvd)
935{ 921{
936 struct be_queue_info *rxq = &rxo->q; 922 struct be_queue_info *rxq = &rxo->q;
937 struct be_rx_page_info *page_info; 923 struct be_rx_page_info *page_info;
938 u16 rxq_idx, i, j; 924 u16 i, j;
939 u32 pktsize, hdr_len, curr_frag_len, size; 925 u16 hdr_len, curr_frag_len, remaining;
940 u8 *start; 926 u8 *start;
941 u8 pkt_type;
942
943 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
944 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
945 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
946
947 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
948 927
928 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
949 start = page_address(page_info->page) + page_info->page_offset; 929 start = page_address(page_info->page) + page_info->page_offset;
950 prefetch(start); 930 prefetch(start);
951 931
952 /* Copy data in the first descriptor of this completion */ 932 /* Copy data in the first descriptor of this completion */
953 curr_frag_len = min(pktsize, rx_frag_size); 933 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
954 934
955 /* Copy the header portion into skb_data */ 935 /* Copy the header portion into skb_data */
956 hdr_len = min((u32)BE_HDR_LEN, curr_frag_len); 936 hdr_len = min(BE_HDR_LEN, curr_frag_len);
957 memcpy(skb->data, start, hdr_len); 937 memcpy(skb->data, start, hdr_len);
958 skb->len = curr_frag_len; 938 skb->len = curr_frag_len;
959 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ 939 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
@@ -972,19 +952,17 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
972 } 952 }
973 page_info->page = NULL; 953 page_info->page = NULL;
974 954
975 if (pktsize <= rx_frag_size) { 955 if (rxcp->pkt_size <= rx_frag_size) {
976 BUG_ON(num_rcvd != 1); 956 BUG_ON(rxcp->num_rcvd != 1);
977 goto done; 957 return;
978 } 958 }
979 959
980 /* More frags present for this completion */ 960 /* More frags present for this completion */
981 size = pktsize; 961 index_inc(&rxcp->rxq_idx, rxq->len);
982 for (i = 1, j = 0; i < num_rcvd; i++) { 962 remaining = rxcp->pkt_size - curr_frag_len;
983 size -= curr_frag_len; 963 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
984 index_inc(&rxq_idx, rxq->len); 964 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
985 page_info = get_rx_page_info(adapter, rxo, rxq_idx); 965 curr_frag_len = min(remaining, rx_frag_size);
986
987 curr_frag_len = min(size, rx_frag_size);
988 966
989 /* Coalesce all frags from the same physical page in one slot */ 967 /* Coalesce all frags from the same physical page in one slot */
990 if (page_info->page_offset == 0) { 968 if (page_info->page_offset == 0) {
@@ -1003,25 +981,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1003 skb->len += curr_frag_len; 981 skb->len += curr_frag_len;
1004 skb->data_len += curr_frag_len; 982 skb->data_len += curr_frag_len;
1005 983
984 remaining -= curr_frag_len;
985 index_inc(&rxcp->rxq_idx, rxq->len);
1006 page_info->page = NULL; 986 page_info->page = NULL;
1007 } 987 }
1008 BUG_ON(j > MAX_SKB_FRAGS); 988 BUG_ON(j > MAX_SKB_FRAGS);
1009
1010done:
1011 be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
1012} 989}
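skb_fill_rx_data() now walks the RX ring through rxcp->rxq_idx, advancing with index_inc(), which wraps the index at the queue depth. A sketch of that helper's effect (the real index_inc() lives in be.h; this restatement assumes it reduces to a modulo):

	static inline void index_inc_sketch(u16 *index, u16 len)
	{
		*index = (*index + 1) % len;	/* wrap at queue depth */
	}

Keeping the running index inside rxcp lets be_rx_compl_discard(), skb_fill_rx_data() and the GRO path share one cursor instead of each re-reading fragndx from the raw completion.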
1013 990
1014/* Process the RX completion indicated by rxcp when GRO is disabled */ 991/* Process the RX completion indicated by rxcp when GRO is disabled */
1015static void be_rx_compl_process(struct be_adapter *adapter, 992static void be_rx_compl_process(struct be_adapter *adapter,
1016 struct be_rx_obj *rxo, 993 struct be_rx_obj *rxo,
1017 struct be_eth_rx_compl *rxcp) 994 struct be_rx_compl_info *rxcp)
1018{ 995{
1019 struct sk_buff *skb; 996 struct sk_buff *skb;
1020 u32 vlanf, vid;
1021 u16 num_rcvd;
1022 u8 vtm;
1023
1024 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1025 997
1026 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); 998 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1027 if (unlikely(!skb)) { 999 if (unlikely(!skb)) {
@@ -1031,7 +1003,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1031 return; 1003 return;
1032 } 1004 }
1033 1005
1034 skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd); 1006 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1035 1007
1036 if (likely(adapter->rx_csum && csum_passed(rxcp))) 1008 if (likely(adapter->rx_csum && csum_passed(rxcp)))
1037 skb->ip_summed = CHECKSUM_UNNECESSARY; 1009 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1041,23 +1013,12 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1041 skb->truesize = skb->len + sizeof(struct sk_buff); 1013 skb->truesize = skb->len + sizeof(struct sk_buff);
1042 skb->protocol = eth_type_trans(skb, adapter->netdev); 1014 skb->protocol = eth_type_trans(skb, adapter->netdev);
1043 1015
1044 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 1016 if (unlikely(rxcp->vlanf)) {
1045 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1046
1047 /* vlanf could be wrongly set in some cards.
1048 * ignore if vtm is not set */
1049 if ((adapter->function_mode & 0x400) && !vtm)
1050 vlanf = 0;
1051
1052 if (unlikely(vlanf)) {
1053 if (!adapter->vlan_grp || adapter->vlans_added == 0) { 1017 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1054 kfree_skb(skb); 1018 kfree_skb(skb);
1055 return; 1019 return;
1056 } 1020 }
1057 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1021 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
1058 if (!lancer_chip(adapter))
1059 vid = swab16(vid);
1060 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1061 } else { 1022 } else {
1062 netif_receive_skb(skb); 1023 netif_receive_skb(skb);
1063 } 1024 }
@@ -1066,28 +1027,14 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1066/* Process the RX completion indicated by rxcp when GRO is enabled */ 1027/* Process the RX completion indicated by rxcp when GRO is enabled */
1067static void be_rx_compl_process_gro(struct be_adapter *adapter, 1028static void be_rx_compl_process_gro(struct be_adapter *adapter,
1068 struct be_rx_obj *rxo, 1029 struct be_rx_obj *rxo,
1069 struct be_eth_rx_compl *rxcp) 1030 struct be_rx_compl_info *rxcp)
1070{ 1031{
1071 struct be_rx_page_info *page_info; 1032 struct be_rx_page_info *page_info;
1072 struct sk_buff *skb = NULL; 1033 struct sk_buff *skb = NULL;
1073 struct be_queue_info *rxq = &rxo->q; 1034 struct be_queue_info *rxq = &rxo->q;
1074 struct be_eq_obj *eq_obj = &rxo->rx_eq; 1035 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1075 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 1036 u16 remaining, curr_frag_len;
1076 u16 i, rxq_idx = 0, vid, j; 1037 u16 i, j;
1077 u8 vtm;
1078 u8 pkt_type;
1079
1080 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1081 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1082 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1083 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1084 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1085 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1086
1087 /* vlanf could be wrongly set in some cards.
1088 * ignore if vtm is not set */
1089 if ((adapter->function_mode & 0x400) && !vtm)
1090 vlanf = 0;
1091 1038
1092 skb = napi_get_frags(&eq_obj->napi); 1039 skb = napi_get_frags(&eq_obj->napi);
1093 if (!skb) { 1040 if (!skb) {
@@ -1095,9 +1042,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1095 return; 1042 return;
1096 } 1043 }
1097 1044
1098 remaining = pkt_size; 1045 remaining = rxcp->pkt_size;
1099 for (i = 0, j = -1; i < num_rcvd; i++) { 1046 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1100 page_info = get_rx_page_info(adapter, rxo, rxq_idx); 1047 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1101 1048
1102 curr_frag_len = min(remaining, rx_frag_size); 1049 curr_frag_len = min(remaining, rx_frag_size);
1103 1050
@@ -1115,70 +1062,125 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1115 skb_shinfo(skb)->frags[j].size += curr_frag_len; 1062 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1116 1063
1117 remaining -= curr_frag_len; 1064 remaining -= curr_frag_len;
1118 index_inc(&rxq_idx, rxq->len); 1065 index_inc(&rxcp->rxq_idx, rxq->len);
1119 memset(page_info, 0, sizeof(*page_info)); 1066 memset(page_info, 0, sizeof(*page_info));
1120 } 1067 }
1121 BUG_ON(j > MAX_SKB_FRAGS); 1068 BUG_ON(j > MAX_SKB_FRAGS);
1122 1069
1123 skb_shinfo(skb)->nr_frags = j + 1; 1070 skb_shinfo(skb)->nr_frags = j + 1;
1124 skb->len = pkt_size; 1071 skb->len = rxcp->pkt_size;
1125 skb->data_len = pkt_size; 1072 skb->data_len = rxcp->pkt_size;
1126 skb->truesize += pkt_size; 1073 skb->truesize += rxcp->pkt_size;
1127 skb->ip_summed = CHECKSUM_UNNECESSARY; 1074 skb->ip_summed = CHECKSUM_UNNECESSARY;
1128 1075
1129 if (likely(!vlanf)) { 1076 if (likely(!rxcp->vlanf))
1130 napi_gro_frags(&eq_obj->napi); 1077 napi_gro_frags(&eq_obj->napi);
1131 } else { 1078 else
1132 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1079 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
1133 if (!lancer_chip(adapter)) 1080}
1134 vid = swab16(vid); 1081
1082static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1083 struct be_eth_rx_compl *compl,
1084 struct be_rx_compl_info *rxcp)
1085{
1086 rxcp->pkt_size =
1087 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1088 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1089 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1090 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1091 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1092 rxcp->ip_csum =
1093 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1094 rxcp->l4_csum =
1095 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1096 rxcp->ipv6 =
1097 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1098 rxcp->rxq_idx =
1099 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1100 rxcp->num_rcvd =
1101 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1102 rxcp->pkt_type =
1103 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1104 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
1105 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
1106}
1107
1108static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1109 struct be_eth_rx_compl *compl,
1110 struct be_rx_compl_info *rxcp)
1111{
1112 rxcp->pkt_size =
1113 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1114 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1115 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1116 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1117 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1118 rxcp->ip_csum =
1119 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1120 rxcp->l4_csum =
1121 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1122 rxcp->ipv6 =
1123 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1124 rxcp->rxq_idx =
1125 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1126 rxcp->num_rcvd =
1127 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1128 rxcp->pkt_type =
1129 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1130 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
1131 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
1132}
1133
1134static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1135{
1136 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1137 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1138 struct be_adapter *adapter = rxo->adapter;
1135 1139
1136 if (!adapter->vlan_grp || adapter->vlans_added == 0) 1140 /* For checking the valid bit it is Ok to use either definition as the
1137 return; 1141 * valid bit is at the same position in both v0 and v1 Rx compl */
1142 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1143 return NULL;
1138 1144
1139 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 1145 rmb();
1140 } 1146 be_dws_le_to_cpu(compl, sizeof(*compl));
1141 1147
1142 be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type); 1148 if (adapter->be3_native)
1143} 1149 be_parse_rx_compl_v1(adapter, compl, rxcp);
1150 else
1151 be_parse_rx_compl_v0(adapter, compl, rxcp);
1144 1152
1145static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo) 1153 /* vlanf could be wrongly set in some cards. ignore if vtm is not set */
1146{ 1154 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1147 struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq); 1155 rxcp->vlanf = 0;
1148 1156
1149 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) 1157 if (!lancer_chip(adapter))
1150 return NULL; 1158 rxcp->vid = swab16(rxcp->vid);
1151 1159
1152 rmb(); 1160 if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
1153 be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); 1161 rxcp->vlanf = 0;
1162
1163	/* As the compl has been parsed, reset it; we won't touch it again */
1164 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1154 1165
1155 queue_tail_inc(&rxo->cq); 1166 queue_tail_inc(&rxo->cq);
1156 return rxcp; 1167 return rxcp;
1157} 1168}
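be_rx_compl_get() implements the usual valid-bit completion protocol: poll the tail descriptor, order the valid-bit check before reading the body with rmb(), byte-swap, parse into the v0 or v1 layout, then zero the word holding the valid bit so a stale entry is never re-consumed. A generic sketch of that protocol (names and the explicit bit position are illustrative; the driver above tests and clears the whole dword containing the valid bit):

	#include <linux/types.h>
	#include <asm/barrier.h>

	struct compl_desc { u32 dw[4]; };	/* dw[3] holds the valid bit */

	static struct compl_desc *compl_get_sketch(struct compl_desc *ring,
						   u16 *tail, u16 len)
	{
		struct compl_desc *d = &ring[*tail];

		if (!(d->dw[3] & (1u << 31)))	/* not yet written by the NIC */
			return NULL;

		rmb();				/* read body only after valid bit */

		d->dw[3] = 0;			/* reset for the next lap */
		*tail = (*tail + 1) % len;
		return d;
	}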
1158 1169
1159/* To reset the valid bit, we need to reset the whole word as 1170static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1160 * when walking the queue the valid entries are little-endian
1161 * and invalid entries are host endian
1162 */
1163static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1164{ 1171{
1165 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1166}
1167
1168static inline struct page *be_alloc_pages(u32 size)
1169{
1170 gfp_t alloc_flags = GFP_ATOMIC;
1171 u32 order = get_order(size); 1172 u32 order = get_order(size);
1173
1172 if (order > 0) 1174 if (order > 0)
1173 alloc_flags |= __GFP_COMP; 1175 gfp |= __GFP_COMP;
1174 return alloc_pages(alloc_flags, order); 1176 return alloc_pages(gfp, order);
1175} 1177}
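be_post_rx_frags() now takes an explicit gfp_t: the NAPI poll loop passes GFP_ATOMIC (softirq context, no sleeping), while be_open() and the worker pass GFP_KERNEL, as the later hunks show. The __GFP_COMP flag matters because one high-order page is split into rx_frag_size fragments and each fragment takes a get_page() reference; that is only legal on a compound page. A sketch of the allocation, equivalent in effect to be_alloc_pages() above:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static struct page *rx_page_alloc_sketch(unsigned int size, gfp_t gfp)
	{
		unsigned int order = get_order(size);

		if (order > 0)
			gfp |= __GFP_COMP;	/* refcounting on the head page */
		return alloc_pages(gfp, order);
	}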
1176 1178
1177/* 1179/*
1178 * Allocate a page, split it to fragments of size rx_frag_size and post as 1180 * Allocate a page, split it to fragments of size rx_frag_size and post as
1179 * receive buffers to BE 1181 * receive buffers to BE
1180 */ 1182 */
1181static void be_post_rx_frags(struct be_rx_obj *rxo) 1183static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1182{ 1184{
1183 struct be_adapter *adapter = rxo->adapter; 1185 struct be_adapter *adapter = rxo->adapter;
1184 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; 1186 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1192,14 +1194,14 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
1192 page_info = &rxo->page_info_tbl[rxq->head]; 1194 page_info = &rxo->page_info_tbl[rxq->head];
1193 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { 1195 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1194 if (!pagep) { 1196 if (!pagep) {
1195 pagep = be_alloc_pages(adapter->big_page_size); 1197 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1196 if (unlikely(!pagep)) { 1198 if (unlikely(!pagep)) {
1197 rxo->stats.rx_post_fail++; 1199 rxo->stats.rx_post_fail++;
1198 break; 1200 break;
1199 } 1201 }
1200 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, 1202 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1201 adapter->big_page_size, 1203 0, adapter->big_page_size,
1202 PCI_DMA_FROMDEVICE); 1204 DMA_FROM_DEVICE);
1203 page_info->page_offset = 0; 1205 page_info->page_offset = 0;
1204 } else { 1206 } else {
1205 get_page(pagep); 1207 get_page(pagep);
@@ -1272,8 +1274,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1272 do { 1274 do {
1273 cur_index = txq->tail; 1275 cur_index = txq->tail;
1274 wrb = queue_tail_node(txq); 1276 wrb = queue_tail_node(txq);
1275 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr && 1277 unmap_tx_frag(&adapter->pdev->dev, wrb,
1276 skb_headlen(sent_skb))); 1278 (unmap_skb_hdr && skb_headlen(sent_skb)));
1277 unmap_skb_hdr = false; 1279 unmap_skb_hdr = false;
1278 1280
1279 num_wrbs++; 1281 num_wrbs++;
@@ -1341,13 +1343,12 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1341 struct be_rx_page_info *page_info; 1343 struct be_rx_page_info *page_info;
1342 struct be_queue_info *rxq = &rxo->q; 1344 struct be_queue_info *rxq = &rxo->q;
1343 struct be_queue_info *rx_cq = &rxo->cq; 1345 struct be_queue_info *rx_cq = &rxo->cq;
1344 struct be_eth_rx_compl *rxcp; 1346 struct be_rx_compl_info *rxcp;
1345 u16 tail; 1347 u16 tail;
1346 1348
1347 /* First cleanup pending rx completions */ 1349 /* First cleanup pending rx completions */
1348 while ((rxcp = be_rx_compl_get(rxo)) != NULL) { 1350 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1349 be_rx_compl_discard(adapter, rxo, rxcp); 1351 be_rx_compl_discard(adapter, rxo, rxcp);
1350 be_rx_compl_reset(rxcp);
1351 be_cq_notify(adapter, rx_cq->id, false, 1); 1352 be_cq_notify(adapter, rx_cq->id, false, 1);
1352 } 1353 }
1353 1354
@@ -1575,9 +1576,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1575 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1576 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1576 for_all_rx_queues(adapter, rxo, i) { 1577 for_all_rx_queues(adapter, rxo, i) {
1577 rxo->adapter = adapter; 1578 rxo->adapter = adapter;
1578 /* Init last_frag_index so that the frag index in the first
1579 * completion will never match */
1580 rxo->last_frag_index = 0xffff;
1581 rxo->rx_eq.max_eqd = BE_MAX_EQD; 1579 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1582 rxo->rx_eq.enable_aic = true; 1580 rxo->rx_eq.enable_aic = true;
1583 1581
@@ -1699,15 +1697,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1699 return IRQ_HANDLED; 1697 return IRQ_HANDLED;
1700} 1698}
1701 1699
1702static inline bool do_gro(struct be_rx_obj *rxo, 1700static inline bool do_gro(struct be_rx_compl_info *rxcp)
1703 struct be_eth_rx_compl *rxcp, u8 err)
1704{ 1701{
1705 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); 1702 return (rxcp->tcpf && !rxcp->err) ? true : false;
1706
1707 if (err)
1708 rxo->stats.rxcp_err++;
1709
1710 return (tcp_frame && !err) ? true : false;
1711} 1703}
1712 1704
1713static int be_poll_rx(struct napi_struct *napi, int budget) 1705static int be_poll_rx(struct napi_struct *napi, int budget)
@@ -1716,10 +1708,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1716 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq); 1708 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1717 struct be_adapter *adapter = rxo->adapter; 1709 struct be_adapter *adapter = rxo->adapter;
1718 struct be_queue_info *rx_cq = &rxo->cq; 1710 struct be_queue_info *rx_cq = &rxo->cq;
1719 struct be_eth_rx_compl *rxcp; 1711 struct be_rx_compl_info *rxcp;
1720 u32 work_done; 1712 u32 work_done;
1721 u16 frag_index, num_rcvd;
1722 u8 err;
1723 1713
1724 rxo->stats.rx_polls++; 1714 rxo->stats.rx_polls++;
1725 for (work_done = 0; work_done < budget; work_done++) { 1715 for (work_done = 0; work_done < budget; work_done++) {
@@ -1727,29 +1717,19 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1727 if (!rxcp) 1717 if (!rxcp)
1728 break; 1718 break;
1729 1719
1730 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); 1720 /* Ignore flush completions */
1731 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, 1721 if (rxcp->num_rcvd) {
1732 rxcp); 1722 if (do_gro(rxcp))
1733 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1734 rxcp);
1735
1736 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
1737 if (likely(frag_index != rxo->last_frag_index &&
1738 num_rcvd != 0)) {
1739 rxo->last_frag_index = frag_index;
1740
1741 if (do_gro(rxo, rxcp, err))
1742 be_rx_compl_process_gro(adapter, rxo, rxcp); 1723 be_rx_compl_process_gro(adapter, rxo, rxcp);
1743 else 1724 else
1744 be_rx_compl_process(adapter, rxo, rxcp); 1725 be_rx_compl_process(adapter, rxo, rxcp);
1745 } 1726 }
1746 1727 be_rx_stats_update(rxo, rxcp);
1747 be_rx_compl_reset(rxcp);
1748 } 1728 }
1749 1729
1750 /* Refill the queue */ 1730 /* Refill the queue */
1751 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) 1731 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1752 be_post_rx_frags(rxo); 1732 be_post_rx_frags(rxo, GFP_ATOMIC);
1753 1733
1754 /* All consumed */ 1734 /* All consumed */
1755 if (work_done < budget) { 1735 if (work_done < budget) {
@@ -1829,6 +1809,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
1829 1809
1830 if (ue_status_lo || ue_status_hi) { 1810 if (ue_status_lo || ue_status_hi) {
1831 adapter->ue_detected = true; 1811 adapter->ue_detected = true;
1812 adapter->eeh_err = true;
1832 dev_err(&adapter->pdev->dev, "UE Detected!!\n"); 1813 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1833 } 1814 }
1834 1815
@@ -1867,10 +1848,14 @@ static void be_worker(struct work_struct *work)
1867 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 1848 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1868 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); 1849 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1869 } 1850 }
1851
1852 if (!adapter->ue_detected && !lancer_chip(adapter))
1853 be_detect_dump_ue(adapter);
1854
1870 goto reschedule; 1855 goto reschedule;
1871 } 1856 }
1872 1857
1873 if (!adapter->stats_ioctl_sent) 1858 if (!adapter->stats_cmd_sent)
1874 be_cmd_get_stats(adapter, &adapter->stats_cmd); 1859 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1875 1860
1876 be_tx_rate_update(adapter); 1861 be_tx_rate_update(adapter);
@@ -1881,7 +1866,7 @@ static void be_worker(struct work_struct *work)
1881 1866
1882 if (rxo->rx_post_starved) { 1867 if (rxo->rx_post_starved) {
1883 rxo->rx_post_starved = false; 1868 rxo->rx_post_starved = false;
1884 be_post_rx_frags(rxo); 1869 be_post_rx_frags(rxo, GFP_KERNEL);
1885 } 1870 }
1886 } 1871 }
1887 if (!adapter->ue_detected && !lancer_chip(adapter)) 1872 if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2085,13 +2070,24 @@ static int be_close(struct net_device *netdev)
2085 2070
2086 be_async_mcc_disable(adapter); 2071 be_async_mcc_disable(adapter);
2087 2072
2088 netif_stop_queue(netdev);
2089 netif_carrier_off(netdev); 2073 netif_carrier_off(netdev);
2090 adapter->link_up = false; 2074 adapter->link_up = false;
2091 2075
2092 if (!lancer_chip(adapter)) 2076 if (!lancer_chip(adapter))
2093 be_intr_set(adapter, false); 2077 be_intr_set(adapter, false);
2094 2078
2079 for_all_rx_queues(adapter, rxo, i)
2080 napi_disable(&rxo->rx_eq.napi);
2081
2082 napi_disable(&tx_eq->napi);
2083
2084 if (lancer_chip(adapter)) {
2085 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2086 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2087 for_all_rx_queues(adapter, rxo, i)
2088 be_cq_notify(adapter, rxo->cq.id, false, 0);
2089 }
2090
2095 if (adapter->msix_enabled) { 2091 if (adapter->msix_enabled) {
2096 vec = be_msix_vec_get(adapter, tx_eq); 2092 vec = be_msix_vec_get(adapter, tx_eq);
2097 synchronize_irq(vec); 2093 synchronize_irq(vec);
@@ -2105,11 +2101,6 @@ static int be_close(struct net_device *netdev)
2105 } 2101 }
2106 be_irq_unregister(adapter); 2102 be_irq_unregister(adapter);
2107 2103
2108 for_all_rx_queues(adapter, rxo, i)
2109 napi_disable(&rxo->rx_eq.napi);
2110
2111 napi_disable(&tx_eq->napi);
2112
2113 /* Wait for all pending tx completions to arrive so that 2104 /* Wait for all pending tx completions to arrive so that
2114 * all tx skbs are freed. 2105 * all tx skbs are freed.
2115 */ 2106 */
@@ -2129,7 +2120,7 @@ static int be_open(struct net_device *netdev)
2129 u16 link_speed; 2120 u16 link_speed;
2130 2121
2131 for_all_rx_queues(adapter, rxo, i) { 2122 for_all_rx_queues(adapter, rxo, i) {
2132 be_post_rx_frags(rxo); 2123 be_post_rx_frags(rxo, GFP_KERNEL);
2133 napi_enable(&rxo->rx_eq.napi); 2124 napi_enable(&rxo->rx_eq.napi);
2134 } 2125 }
2135 napi_enable(&tx_eq->napi); 2126 napi_enable(&tx_eq->napi);
@@ -2181,7 +2172,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2181 memset(mac, 0, ETH_ALEN); 2172 memset(mac, 0, ETH_ALEN);
2182 2173
2183 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 2174 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2184 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2175 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2176 GFP_KERNEL);
2185 if (cmd.va == NULL) 2177 if (cmd.va == NULL)
2186 return -1; 2178 return -1;
2187 memset(cmd.va, 0, cmd.size); 2179 memset(cmd.va, 0, cmd.size);
@@ -2192,8 +2184,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2192 if (status) { 2184 if (status) {
2193 dev_err(&adapter->pdev->dev, 2185 dev_err(&adapter->pdev->dev,
2194 "Could not enable Wake-on-lan\n"); 2186 "Could not enable Wake-on-lan\n");
2195 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, 2187 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2196 cmd.dma); 2188 cmd.dma);
2197 return status; 2189 return status;
2198 } 2190 }
2199 status = be_cmd_enable_magic_wol(adapter, 2191 status = be_cmd_enable_magic_wol(adapter,
@@ -2206,7 +2198,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2206 pci_enable_wake(adapter->pdev, PCI_D3cold, 0); 2198 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2207 } 2199 }
2208 2200
2209 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2201 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2210 return status; 2202 return status;
2211} 2203}
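The WoL path moves from pci_alloc_consistent()/pci_free_consistent(), which hard-code atomic allocation, to dma_alloc_coherent() with an explicit GFP_KERNEL; that is fine here since be_setup_wol() runs in process context. A sketch of the coherent command-buffer lifecycle (struct and function names are illustrative; the fields mirror struct be_dma_mem):

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	struct cmd_buf_sketch {
		void		*va;
		dma_addr_t	dma;
		size_t		size;
	};

	static int cmd_buf_alloc_sketch(struct device *dev,
					struct cmd_buf_sketch *cmd, size_t size)
	{
		cmd->size = size;
		cmd->va = dma_alloc_coherent(dev, size, &cmd->dma, GFP_KERNEL);
		if (!cmd->va)
			return -ENOMEM;
		memset(cmd->va, 0, size);	/* driver zeroes it, as above */
		return 0;
	}

	static void cmd_buf_free_sketch(struct device *dev,
					struct cmd_buf_sketch *cmd)
	{
		dma_free_coherent(dev, cmd->size, cmd->va, cmd->dma);
	}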
2212 2204
@@ -2227,7 +2219,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2227 for (vf = 0; vf < num_vfs; vf++) { 2219 for (vf = 0; vf < num_vfs; vf++) {
2228 status = be_cmd_pmac_add(adapter, mac, 2220 status = be_cmd_pmac_add(adapter, mac,
2229 adapter->vf_cfg[vf].vf_if_handle, 2221 adapter->vf_cfg[vf].vf_if_handle,
2230 &adapter->vf_cfg[vf].vf_pmac_id); 2222 &adapter->vf_cfg[vf].vf_pmac_id,
2223 vf + 1);
2231 if (status) 2224 if (status)
2232 dev_err(&adapter->pdev->dev, 2225 dev_err(&adapter->pdev->dev,
2233 "Mac address add failed for VF %d\n", vf); 2226 "Mac address add failed for VF %d\n", vf);
@@ -2247,7 +2240,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2247 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) 2240 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2248 be_cmd_pmac_del(adapter, 2241 be_cmd_pmac_del(adapter,
2249 adapter->vf_cfg[vf].vf_if_handle, 2242 adapter->vf_cfg[vf].vf_if_handle,
2250 adapter->vf_cfg[vf].vf_pmac_id); 2243 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2251 } 2244 }
2252} 2245}
2253 2246
@@ -2258,7 +2251,9 @@ static int be_setup(struct be_adapter *adapter)
2258 int status; 2251 int status;
2259 u8 mac[ETH_ALEN]; 2252 u8 mac[ETH_ALEN];
2260 2253
2261 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; 2254 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2255 BE_IF_FLAGS_BROADCAST |
2256 BE_IF_FLAGS_MULTICAST;
2262 2257
2263 if (be_physfn(adapter)) { 2258 if (be_physfn(adapter)) {
2264 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS | 2259 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2279,22 +2274,26 @@ static int be_setup(struct be_adapter *adapter)
2279 goto do_none; 2274 goto do_none;
2280 2275
2281 if (be_physfn(adapter)) { 2276 if (be_physfn(adapter)) {
2282 while (vf < num_vfs) { 2277 if (adapter->sriov_enabled) {
2283 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED 2278 while (vf < num_vfs) {
2284 | BE_IF_FLAGS_BROADCAST; 2279 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2285 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2280 BE_IF_FLAGS_BROADCAST;
2286 mac, true, 2281 status = be_cmd_if_create(adapter, cap_flags,
2282 en_flags, mac, true,
2287 &adapter->vf_cfg[vf].vf_if_handle, 2283 &adapter->vf_cfg[vf].vf_if_handle,
2288 NULL, vf+1); 2284 NULL, vf+1);
2289 if (status) { 2285 if (status) {
2290 dev_err(&adapter->pdev->dev, 2286 dev_err(&adapter->pdev->dev,
2291 "Interface Create failed for VF %d\n", vf); 2287 "Interface Create failed for VF %d\n",
2292 goto if_destroy; 2288 vf);
2289 goto if_destroy;
2290 }
2291 adapter->vf_cfg[vf].vf_pmac_id =
2292 BE_INVALID_PMAC_ID;
2293 vf++;
2293 } 2294 }
2294 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2295 vf++;
2296 } 2295 }
2297 } else if (!be_physfn(adapter)) { 2296 } else {
2298 status = be_cmd_mac_addr_query(adapter, mac, 2297 status = be_cmd_mac_addr_query(adapter, mac,
2299 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); 2298 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2300 if (!status) { 2299 if (!status) {
@@ -2315,44 +2314,46 @@ static int be_setup(struct be_adapter *adapter)
2315 if (status != 0) 2314 if (status != 0)
2316 goto rx_qs_destroy; 2315 goto rx_qs_destroy;
2317 2316
2318 if (be_physfn(adapter)) {
2319 status = be_vf_eth_addr_config(adapter);
2320 if (status)
2321 goto mcc_q_destroy;
2322 }
2323
2324 adapter->link_speed = -1; 2317 adapter->link_speed = -1;
2325 2318
2326 return 0; 2319 return 0;
2327 2320
2328mcc_q_destroy:
2329 if (be_physfn(adapter))
2330 be_vf_eth_addr_rem(adapter);
2331 be_mcc_queues_destroy(adapter); 2321 be_mcc_queues_destroy(adapter);
2332rx_qs_destroy: 2322rx_qs_destroy:
2333 be_rx_queues_destroy(adapter); 2323 be_rx_queues_destroy(adapter);
2334tx_qs_destroy: 2324tx_qs_destroy:
2335 be_tx_queues_destroy(adapter); 2325 be_tx_queues_destroy(adapter);
2336if_destroy: 2326if_destroy:
2337 for (vf = 0; vf < num_vfs; vf++) 2327 if (be_physfn(adapter) && adapter->sriov_enabled)
2338 if (adapter->vf_cfg[vf].vf_if_handle) 2328 for (vf = 0; vf < num_vfs; vf++)
2339 be_cmd_if_destroy(adapter, 2329 if (adapter->vf_cfg[vf].vf_if_handle)
2340 adapter->vf_cfg[vf].vf_if_handle); 2330 be_cmd_if_destroy(adapter,
2341 be_cmd_if_destroy(adapter, adapter->if_handle); 2331 adapter->vf_cfg[vf].vf_if_handle,
2332 vf + 1);
2333 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2342do_none: 2334do_none:
2343 return status; 2335 return status;
2344} 2336}
2345 2337
2346static int be_clear(struct be_adapter *adapter) 2338static int be_clear(struct be_adapter *adapter)
2347{ 2339{
2348 if (be_physfn(adapter)) 2340 int vf;
2341
2342 if (be_physfn(adapter) && adapter->sriov_enabled)
2349 be_vf_eth_addr_rem(adapter); 2343 be_vf_eth_addr_rem(adapter);
2350 2344
2351 be_mcc_queues_destroy(adapter); 2345 be_mcc_queues_destroy(adapter);
2352 be_rx_queues_destroy(adapter); 2346 be_rx_queues_destroy(adapter);
2353 be_tx_queues_destroy(adapter); 2347 be_tx_queues_destroy(adapter);
2354 2348
2355 be_cmd_if_destroy(adapter, adapter->if_handle); 2349 if (be_physfn(adapter) && adapter->sriov_enabled)
2350 for (vf = 0; vf < num_vfs; vf++)
2351 if (adapter->vf_cfg[vf].vf_if_handle)
2352 be_cmd_if_destroy(adapter,
2353 adapter->vf_cfg[vf].vf_if_handle,
2354 vf + 1);
2355
2356 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2356 2357
2357 /* tell fw we're done with firing cmds */ 2358 /* tell fw we're done with firing cmds */
2358 be_cmd_fw_clean(adapter); 2359 be_cmd_fw_clean(adapter);
@@ -2455,8 +2456,8 @@ static int be_flash_data(struct be_adapter *adapter,
2455 continue; 2456 continue;
2456 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && 2457 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2457 (!be_flash_redboot(adapter, fw->data, 2458 (!be_flash_redboot(adapter, fw->data,
2458 pflashcomp[i].offset, pflashcomp[i].size, 2459 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2459 filehdr_size))) 2460 (num_of_images * sizeof(struct image_hdr)))))
2460 continue; 2461 continue;
2461 p = fw->data; 2462 p = fw->data;
2462 p += filehdr_size + pflashcomp[i].offset 2463 p += filehdr_size + pflashcomp[i].offset
@@ -2530,8 +2531,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2530 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); 2531 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2531 2532
2532 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; 2533 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2533 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size, 2534 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2534 &flash_cmd.dma); 2535 &flash_cmd.dma, GFP_KERNEL);
2535 if (!flash_cmd.va) { 2536 if (!flash_cmd.va) {
2536 status = -ENOMEM; 2537 status = -ENOMEM;
2537 dev_err(&adapter->pdev->dev, 2538 dev_err(&adapter->pdev->dev,
@@ -2560,8 +2561,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2560 status = -1; 2561 status = -1;
2561 } 2562 }
2562 2563
2563 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va, 2564 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2564 flash_cmd.dma); 2565 flash_cmd.dma);
2565 if (status) { 2566 if (status) {
2566 dev_err(&adapter->pdev->dev, "Firmware load error\n"); 2567 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2567 goto fw_exit; 2568 goto fw_exit;
@@ -2628,8 +2629,6 @@ static void be_netdev_init(struct net_device *netdev)
2628 2629
2629 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, 2630 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2630 BE_NAPI_WEIGHT); 2631 BE_NAPI_WEIGHT);
2631
2632 netif_stop_queue(netdev);
2633} 2632}
2634 2633
2635static void be_unmap_pci_bars(struct be_adapter *adapter) 2634static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -2704,13 +2703,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
2704 be_unmap_pci_bars(adapter); 2703 be_unmap_pci_bars(adapter);
2705 2704
2706 if (mem->va) 2705 if (mem->va)
2707 pci_free_consistent(adapter->pdev, mem->size, 2706 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2708 mem->va, mem->dma); 2707 mem->dma);
2709 2708
2710 mem = &adapter->mc_cmd_mem; 2709 mem = &adapter->mc_cmd_mem;
2711 if (mem->va) 2710 if (mem->va)
2712 pci_free_consistent(adapter->pdev, mem->size, 2711 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2713 mem->va, mem->dma); 2712 mem->dma);
2714} 2713}
2715 2714
2716static int be_ctrl_init(struct be_adapter *adapter) 2715static int be_ctrl_init(struct be_adapter *adapter)
@@ -2725,8 +2724,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
2725 goto done; 2724 goto done;
2726 2725
2727 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 2726 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2728 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, 2727 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2729 mbox_mem_alloc->size, &mbox_mem_alloc->dma); 2728 mbox_mem_alloc->size,
2729 &mbox_mem_alloc->dma,
2730 GFP_KERNEL);
2730 if (!mbox_mem_alloc->va) { 2731 if (!mbox_mem_alloc->va) {
2731 status = -ENOMEM; 2732 status = -ENOMEM;
2732 goto unmap_pci_bars; 2733 goto unmap_pci_bars;
@@ -2738,8 +2739,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
2738 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 2739 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2739 2740
2740 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config); 2741 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2741 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size, 2742 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2742 &mc_cmd_mem->dma); 2743 mc_cmd_mem->size, &mc_cmd_mem->dma,
2744 GFP_KERNEL);
2743 if (mc_cmd_mem->va == NULL) { 2745 if (mc_cmd_mem->va == NULL) {
2744 status = -ENOMEM; 2746 status = -ENOMEM;
2745 goto free_mbox; 2747 goto free_mbox;
@@ -2755,8 +2757,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
2755 return 0; 2757 return 0;
2756 2758
2757free_mbox: 2759free_mbox:
2758 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size, 2760 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2759 mbox_mem_alloc->va, mbox_mem_alloc->dma); 2761 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2760 2762
2761unmap_pci_bars: 2763unmap_pci_bars:
2762 be_unmap_pci_bars(adapter); 2764 be_unmap_pci_bars(adapter);
@@ -2770,8 +2772,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
2770 struct be_dma_mem *cmd = &adapter->stats_cmd; 2772 struct be_dma_mem *cmd = &adapter->stats_cmd;
2771 2773
2772 if (cmd->va) 2774 if (cmd->va)
2773 pci_free_consistent(adapter->pdev, cmd->size, 2775 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2774 cmd->va, cmd->dma); 2776 cmd->va, cmd->dma);
2775} 2777}
2776 2778
2777static int be_stats_init(struct be_adapter *adapter) 2779static int be_stats_init(struct be_adapter *adapter)
@@ -2779,7 +2781,8 @@ static int be_stats_init(struct be_adapter *adapter)
2779 struct be_dma_mem *cmd = &adapter->stats_cmd; 2781 struct be_dma_mem *cmd = &adapter->stats_cmd;
2780 2782
2781 cmd->size = sizeof(struct be_cmd_req_get_stats); 2783 cmd->size = sizeof(struct be_cmd_req_get_stats);
2782 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); 2784 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2785 GFP_KERNEL);
2783 if (cmd->va == NULL) 2786 if (cmd->va == NULL)
2784 return -1; 2787 return -1;
2785 memset(cmd->va, 0, cmd->size); 2788 memset(cmd->va, 0, cmd->size);
@@ -2849,6 +2852,11 @@ static int be_get_config(struct be_adapter *adapter)
2849 else 2852 else
2850 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; 2853 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2851 2854
2855 status = be_cmd_get_cntl_attributes(adapter);
2856 if (status)
2857 return status;
2858
2859 be_cmd_check_native_mode(adapter);
2852 return 0; 2860 return 0;
2853} 2861}
2854 2862
@@ -2890,6 +2898,54 @@ static int be_dev_family_check(struct be_adapter *adapter)
2890 return 0; 2898 return 0;
2891} 2899}
2892 2900
2901static int lancer_wait_ready(struct be_adapter *adapter)
2902{
2903#define SLIPORT_READY_TIMEOUT 500
2904 u32 sliport_status;
2905 int status = 0, i;
2906
2907 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2908 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2909 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2910 break;
2911
2912 msleep(20);
2913 }
2914
2915 if (i == SLIPORT_READY_TIMEOUT)
2916 status = -1;
2917
2918 return status;
2919}
2920
2921static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2922{
2923 int status;
2924 u32 sliport_status, err, reset_needed;
2925 status = lancer_wait_ready(adapter);
2926 if (!status) {
2927 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2928 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2929 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2930 if (err && reset_needed) {
2931 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2932 adapter->db + SLIPORT_CONTROL_OFFSET);
2933
2934 /* check adapter has corrected the error */
2935 status = lancer_wait_ready(adapter);
2936 sliport_status = ioread32(adapter->db +
2937 SLIPORT_STATUS_OFFSET);
2938 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2939 SLIPORT_STATUS_RN_MASK);
2940 if (status || sliport_status)
2941 status = -1;
2942 } else if (err || reset_needed) {
2943 status = -1;
2944 }
2945 }
2946 return status;
2947}
2948
2893static int __devinit be_probe(struct pci_dev *pdev, 2949static int __devinit be_probe(struct pci_dev *pdev,
2894 const struct pci_device_id *pdev_id) 2950 const struct pci_device_id *pdev_id)
2895{ 2951{
@@ -2922,11 +2978,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
2922 adapter->netdev = netdev; 2978 adapter->netdev = netdev;
2923 SET_NETDEV_DEV(netdev, &pdev->dev); 2979 SET_NETDEV_DEV(netdev, &pdev->dev);
2924 2980
2925 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2981 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2926 if (!status) { 2982 if (!status) {
2927 netdev->features |= NETIF_F_HIGHDMA; 2983 netdev->features |= NETIF_F_HIGHDMA;
2928 } else { 2984 } else {
2929 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2985 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2930 if (status) { 2986 if (status) {
2931 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); 2987 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2932 goto free_netdev; 2988 goto free_netdev;
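The probe path asks for a 64-bit DMA mask first and only advertises NETIF_F_HIGHDMA when that succeeds, falling back to 32-bit otherwise. The shape of that negotiation, as a sketch (dev is &pdev->dev; a full driver would usually set the coherent mask as well):

	#include <linux/dma-mapping.h>

	static int set_dma_mask_sketch(struct device *dev, bool *highdma)
	{
		if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
			*highdma = true;	/* can DMA above 4 GiB */
			return 0;
		}
		*highdma = false;
		return dma_set_mask(dev, DMA_BIT_MASK(32));
	}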
@@ -2939,6 +2995,14 @@ static int __devinit be_probe(struct pci_dev *pdev,
2939 if (status) 2995 if (status)
2940 goto free_netdev; 2996 goto free_netdev;
2941 2997
2998 if (lancer_chip(adapter)) {
2999 status = lancer_test_and_set_rdy_state(adapter);
3000 if (status) {
3001 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3002 goto free_netdev;
3003 }
3004 }
3005
2942 /* sync up with fw's ready state */ 3006 /* sync up with fw's ready state */
2943 if (be_physfn(adapter)) { 3007 if (be_physfn(adapter)) {
2944 status = be_cmd_POST(adapter); 3008 status = be_cmd_POST(adapter);
@@ -2951,11 +3015,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
2951 if (status) 3015 if (status)
2952 goto ctrl_clean; 3016 goto ctrl_clean;
2953 3017
2954 if (be_physfn(adapter)) { 3018 status = be_cmd_reset_function(adapter);
2955 status = be_cmd_reset_function(adapter); 3019 if (status)
2956 if (status) 3020 goto ctrl_clean;
2957 goto ctrl_clean;
2958 }
2959 3021
2960 status = be_stats_init(adapter); 3022 status = be_stats_init(adapter);
2961 if (status) 3023 if (status)
@@ -2979,10 +3041,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
2979 goto unsetup; 3041 goto unsetup;
2980 netif_carrier_off(netdev); 3042 netif_carrier_off(netdev);
2981 3043
3044 if (be_physfn(adapter) && adapter->sriov_enabled) {
3045 status = be_vf_eth_addr_config(adapter);
3046 if (status)
3047 goto unreg_netdev;
3048 }
3049
2982 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); 3050 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2983 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); 3051 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2984 return 0; 3052 return 0;
2985 3053
3054unreg_netdev:
3055 unregister_netdev(netdev);
2986unsetup: 3056unsetup:
2987 be_clear(adapter); 3057 be_clear(adapter);
2988msix_disable: 3058msix_disable:
@@ -3009,6 +3079,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3009 struct be_adapter *adapter = pci_get_drvdata(pdev); 3079 struct be_adapter *adapter = pci_get_drvdata(pdev);
3010 struct net_device *netdev = adapter->netdev; 3080 struct net_device *netdev = adapter->netdev;
3011 3081
3082 cancel_delayed_work_sync(&adapter->work);
3012 if (adapter->wol) 3083 if (adapter->wol)
3013 be_setup_wol(adapter, true); 3084 be_setup_wol(adapter, true);
3014 3085
@@ -3021,6 +3092,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3021 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc); 3092 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3022 be_clear(adapter); 3093 be_clear(adapter);
3023 3094
3095 be_msix_disable(adapter);
3024 pci_save_state(pdev); 3096 pci_save_state(pdev);
3025 pci_disable_device(pdev); 3097 pci_disable_device(pdev);
3026 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3098 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3042,6 +3114,7 @@ static int be_resume(struct pci_dev *pdev)
3042 pci_set_power_state(pdev, 0); 3114 pci_set_power_state(pdev, 0);
3043 pci_restore_state(pdev); 3115 pci_restore_state(pdev);
3044 3116
3117 be_msix_enable(adapter);
3045 /* tell fw we're ready to fire cmds */ 3118 /* tell fw we're ready to fire cmds */
3046 status = be_cmd_fw_init(adapter); 3119 status = be_cmd_fw_init(adapter);
3047 if (status) 3120 if (status)
@@ -3057,6 +3130,8 @@ static int be_resume(struct pci_dev *pdev)
3057 3130
3058 if (adapter->wol) 3131 if (adapter->wol)
3059 be_setup_wol(adapter, false); 3132 be_setup_wol(adapter, false);
3133
3134 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3060 return 0; 3135 return 0;
3061} 3136}
3062 3137
@@ -3068,6 +3143,9 @@ static void be_shutdown(struct pci_dev *pdev)
3068 struct be_adapter *adapter = pci_get_drvdata(pdev); 3143 struct be_adapter *adapter = pci_get_drvdata(pdev);
3069 struct net_device *netdev = adapter->netdev; 3144 struct net_device *netdev = adapter->netdev;
3070 3145
3146 if (netif_running(netdev))
3147 cancel_delayed_work_sync(&adapter->work);
3148
3071 netif_device_detach(netdev); 3149 netif_device_detach(netdev);
3072 3150
3073 be_cmd_reset_function(adapter); 3151 be_cmd_reset_function(adapter);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 22abfb39d81..68d45ba2d9b 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1237,8 +1237,17 @@ static int bfin_mac_enable(struct phy_device *phydev)
1237 1237
1238 if (phydev->interface == PHY_INTERFACE_MODE_RMII) { 1238 if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
1239 opmode |= RMII; /* For Now only 100MBit are supported */ 1239 opmode |= RMII; /* For Now only 100MBit are supported */
1240#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2 1240#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
1241 opmode |= TE; 1241 if (__SILICON_REVISION__ < 3) {
1242 /*
1243 * This isn't publicly documented (fun times!), but in
1244 * silicon <=0.2, the RX and TX pins are clocked together.
1245 * So in order to recv, we must enable the transmit side
1246 * as well. This will cause a spurious TX interrupt too,
1247 * but we can easily consume that.
1248 */
1249 opmode |= TE;
1250 }
1242#endif 1251#endif
1243 } 1252 }
1244 1253
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad912656fe..9f356d5d0f3 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
126 } 126 }
127 unmap_array[unmap_cons].skb = NULL; 127 unmap_array[unmap_cons].skb = NULL;
128 128
129 pci_unmap_single(bnad->pcidev, 129 dma_unmap_single(&bnad->pcidev->dev,
130 pci_unmap_addr(&unmap_array[unmap_cons], 130 dma_unmap_addr(&unmap_array[unmap_cons],
131 dma_addr), skb_headlen(skb), 131 dma_addr), skb_headlen(skb),
132 PCI_DMA_TODEVICE); 132 DMA_TO_DEVICE);
133 133
134 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 134 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
135 if (++unmap_cons >= unmap_q->q_depth) 135 if (++unmap_cons >= unmap_q->q_depth)
136 break; 136 break;
137 137
138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
139 pci_unmap_page(bnad->pcidev, 139 dma_unmap_page(&bnad->pcidev->dev,
140 pci_unmap_addr(&unmap_array[unmap_cons], 140 dma_unmap_addr(&unmap_array[unmap_cons],
141 dma_addr), 141 dma_addr),
142 skb_shinfo(skb)->frags[i].size, 142 skb_shinfo(skb)->frags[i].size,
143 PCI_DMA_TODEVICE); 143 DMA_TO_DEVICE);
144 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 144 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
145 0); 145 0);
146 if (++unmap_cons >= unmap_q->q_depth) 146 if (++unmap_cons >= unmap_q->q_depth)
147 break; 147 break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
199 sent_bytes += skb->len; 199 sent_bytes += skb->len;
200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags); 200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
201 201
202 pci_unmap_single(bnad->pcidev, 202 dma_unmap_single(&bnad->pcidev->dev,
203 pci_unmap_addr(&unmap_array[unmap_cons], 203 dma_unmap_addr(&unmap_array[unmap_cons],
204 dma_addr), skb_headlen(skb), 204 dma_addr), skb_headlen(skb),
205 PCI_DMA_TODEVICE); 205 DMA_TO_DEVICE);
206 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 206 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); 207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
208 208
209 prefetch(&unmap_array[unmap_cons + 1]); 209 prefetch(&unmap_array[unmap_cons + 1]);
210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
211 prefetch(&unmap_array[unmap_cons + 1]); 211 prefetch(&unmap_array[unmap_cons + 1]);
212 212
213 pci_unmap_page(bnad->pcidev, 213 dma_unmap_page(&bnad->pcidev->dev,
214 pci_unmap_addr(&unmap_array[unmap_cons], 214 dma_unmap_addr(&unmap_array[unmap_cons],
215 dma_addr), 215 dma_addr),
216 skb_shinfo(skb)->frags[i].size, 216 skb_shinfo(skb)->frags[i].size,
217 PCI_DMA_TODEVICE); 217 DMA_TO_DEVICE);
218 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 218 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
219 0); 219 0);
220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); 220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
221 } 221 }
@@ -340,19 +340,22 @@ static void
340bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 340bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
341{ 341{
342 struct bnad_unmap_q *unmap_q; 342 struct bnad_unmap_q *unmap_q;
343 struct bnad_skb_unmap *unmap_array;
343 struct sk_buff *skb; 344 struct sk_buff *skb;
344 int unmap_cons; 345 int unmap_cons;
345 346
346 unmap_q = rcb->unmap_q; 347 unmap_q = rcb->unmap_q;
348 unmap_array = unmap_q->unmap_array;
347 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { 349 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
348 skb = unmap_q->unmap_array[unmap_cons].skb; 350 skb = unmap_array[unmap_cons].skb;
349 if (!skb) 351 if (!skb)
350 continue; 352 continue;
351 unmap_q->unmap_array[unmap_cons].skb = NULL; 353 unmap_array[unmap_cons].skb = NULL;
352 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> 354 dma_unmap_single(&bnad->pcidev->dev,
353 unmap_array[unmap_cons], 355 dma_unmap_addr(&unmap_array[unmap_cons],
354 dma_addr), rcb->rxq->buffer_size, 356 dma_addr),
355 PCI_DMA_FROMDEVICE); 357 rcb->rxq->buffer_size,
358 DMA_FROM_DEVICE);
356 dev_kfree_skb(skb); 359 dev_kfree_skb(skb);
357 } 360 }
358 bnad_reset_rcb(bnad, rcb); 361 bnad_reset_rcb(bnad, rcb);
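The bnad hunks apply the same conversion and also switch the unmap bookkeeping from pci_unmap_addr*() to dma_unmap_addr*(). Those macros store the DMA address in the unmap structure only on configurations that need it at unmap time and compile away elsewhere. A sketch of the pattern (struct and function names are illustrative, modeled on struct bnad_skb_unmap):

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	struct rx_unmap_sketch {
		struct sk_buff *skb;
		DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* may be zero-sized */
	};

	static void rx_unmap_one_sketch(struct device *dev,
					struct rx_unmap_sketch *u, size_t len)
	{
		dma_unmap_single(dev, dma_unmap_addr(u, dma_addr), len,
				 DMA_FROM_DEVICE);
		dma_unmap_addr_set(u, dma_addr, 0);
	}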
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
391 skb->dev = bnad->netdev; 394 skb->dev = bnad->netdev;
392 skb_reserve(skb, NET_IP_ALIGN); 395 skb_reserve(skb, NET_IP_ALIGN);
393 unmap_array[unmap_prod].skb = skb; 396 unmap_array[unmap_prod].skb = skb;
394 dma_addr = pci_map_single(bnad->pcidev, skb->data, 397 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
395 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE); 398 rcb->rxq->buffer_size,
396 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, 399 DMA_FROM_DEVICE);
400 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
397 dma_addr); 401 dma_addr);
398 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); 402 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
399 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 403 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
434 struct bna_rcb *rcb = NULL; 438 struct bna_rcb *rcb = NULL;
435 unsigned int wi_range, packets = 0, wis = 0; 439 unsigned int wi_range, packets = 0, wis = 0;
436 struct bnad_unmap_q *unmap_q; 440 struct bnad_unmap_q *unmap_q;
441 struct bnad_skb_unmap *unmap_array;
437 struct sk_buff *skb; 442 struct sk_buff *skb;
438 u32 flags; 443 u32 flags, unmap_cons;
439 u32 qid0 = ccb->rcb[0]->rxq->rxq_id; 444 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
440 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 445 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
441 446
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
456 rcb = ccb->rcb[1]; 461 rcb = ccb->rcb[1];
457 462
458 unmap_q = rcb->unmap_q; 463 unmap_q = rcb->unmap_q;
464 unmap_array = unmap_q->unmap_array;
465 unmap_cons = unmap_q->consumer_index;
459 466
460 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb; 467 skb = unmap_array[unmap_cons].skb;
461 BUG_ON(!(skb)); 468 BUG_ON(!(skb));
462 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL; 469 unmap_array[unmap_cons].skb = NULL;
463 pci_unmap_single(bnad->pcidev, 470 dma_unmap_single(&bnad->pcidev->dev,
464 pci_unmap_addr(&unmap_q-> 471 dma_unmap_addr(&unmap_array[unmap_cons],
465 unmap_array[unmap_q->
466 consumer_index],
467 dma_addr), 472 dma_addr),
468 rcb->rxq->buffer_size, 473 rcb->rxq->buffer_size,
469 PCI_DMA_FROMDEVICE); 474 DMA_FROM_DEVICE);
470 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); 475 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
471 476
472 /* Should be more efficient ? Performance ? */ 477 /* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
1015 if (mem_info->mem_type == BNA_MEM_T_DMA) { 1020 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1016 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), 1021 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1017 dma_pa); 1022 dma_pa);
1018 pci_free_consistent(bnad->pcidev, 1023 dma_free_coherent(&bnad->pcidev->dev,
1019 mem_info->mdl[i].len, 1024 mem_info->mdl[i].len,
1020 mem_info->mdl[i].kva, dma_pa); 1025 mem_info->mdl[i].kva, dma_pa);
1021 } else 1026 } else
1022 kfree(mem_info->mdl[i].kva); 1027 kfree(mem_info->mdl[i].kva);
1023 } 1028 }
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
1047 for (i = 0; i < mem_info->num; i++) { 1052 for (i = 0; i < mem_info->num; i++) {
1048 mem_info->mdl[i].len = mem_info->len; 1053 mem_info->mdl[i].len = mem_info->len;
1049 mem_info->mdl[i].kva = 1054 mem_info->mdl[i].kva =
1050 pci_alloc_consistent(bnad->pcidev, 1055 dma_alloc_coherent(&bnad->pcidev->dev,
1051 mem_info->len, &dma_pa); 1056 mem_info->len, &dma_pa,
1057 GFP_KERNEL);
1052 1058
1053 if (mem_info->mdl[i].kva == NULL) 1059 if (mem_info->mdl[i].kva == NULL)
1054 goto err_return; 1060 goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2600 unmap_q->unmap_array[unmap_prod].skb = skb; 2606 unmap_q->unmap_array[unmap_prod].skb = skb;
2601 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR)); 2607 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2602 txqent->vector[vect_id].length = htons(skb_headlen(skb)); 2608 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2603 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb), 2609 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2604 PCI_DMA_TODEVICE); 2610 skb_headlen(skb), DMA_TO_DEVICE);
2605 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2611 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2606 dma_addr); 2612 dma_addr);
2607 2613
2608 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2614 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2630 2636
2631 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); 2637 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2632 txqent->vector[vect_id].length = htons(size); 2638 txqent->vector[vect_id].length = htons(size);
2633 dma_addr = 2639 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2634 pci_map_page(bnad->pcidev, frag->page, 2640 frag->page_offset, size, DMA_TO_DEVICE);
2635 frag->page_offset, size, 2641 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2636 PCI_DMA_TODEVICE);
2637 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2638 dma_addr); 2642 dma_addr);
2639 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2643 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2640 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 2644 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
3022 err = pci_request_regions(pdev, BNAD_NAME); 3026 err = pci_request_regions(pdev, BNAD_NAME);
3023 if (err) 3027 if (err)
3024 goto disable_device; 3028 goto disable_device;
3025 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 3029 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3026 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3030 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3027 *using_dac = 1; 3031 *using_dac = 1;
3028 } else { 3032 } else {
3029 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3033 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3030 if (err) { 3034 if (err) {
3031 err = pci_set_consistent_dma_mask(pdev, 3035 err = dma_set_coherent_mask(&pdev->dev,
3032 DMA_BIT_MASK(32)); 3036 DMA_BIT_MASK(32));
3033 if (err) 3037 if (err)
3034 goto release_regions; 3038 goto release_regions;
3035 } 3039 }
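
The bnad.c hunks above are a mechanical conversion from the PCI-specific DMA wrappers to the generic DMA API: every pci_* call gains an explicit &pcidev->dev argument, PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE become DMA_TO_DEVICE/DMA_FROM_DEVICE, and the coherent allocator takes an explicit GFP flag. A minimal sketch of the pattern, assuming illustrative names (rx_map_sketch, buf, buf_len are not from the driver):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Old form:  pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE)
 * New form:  dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE)
 */
static dma_addr_t rx_map_sketch(struct pci_dev *pdev, void *buf, size_t buf_len)
{
	dma_addr_t mapping;

	mapping = dma_map_single(&pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, mapping))
		return 0;	/* caller treats 0 as "mapping failed" in this sketch */
	return mapping;
}
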
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d51557de..a89117fa497 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@ struct bnad_rx_info {
 /* Unmap queues for Tx / Rx cleanup */
 struct bnad_skb_unmap {
 	struct sk_buff		*skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 
 struct bnad_unmap_q {
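
The header change pairs with the conversion above: DEFINE_DMA_UNMAP_ADDR() reserves space for the bus address only on platforms that need unmap state (CONFIG_NEED_DMA_MAP_STATE), so the field must be touched through its accessors. A fragment-level sketch, assuming ent, dev, mapping and len are in scope:

struct unmap_entry {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* may expand to nothing */
};

/* at map time */
dma_unmap_addr_set(&ent, dma_addr, mapping);
/* at unmap time */
dma_unmap_single(dev, dma_unmap_addr(&ent, dma_addr), len, DMA_FROM_DEVICE);
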
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf3464..8e6d618b530 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
 /* bnx2.c: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2010 Broadcom Corporation
+ * Copyright (c) 2004-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,11 +56,11 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define DRV_MODULE_VERSION	"2.0.21"
-#define DRV_MODULE_RELDATE	"Dec 23, 2010"
+#define DRV_MODULE_VERSION	"2.1.6"
+#define DRV_MODULE_RELDATE	"Mar 7, 2011"
 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.1.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1.fw"
+#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1a.fw"
 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
 
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
 	struct cnic_ctl_info info;
 
 	mutex_lock(&bp->cnic_lock);
-	c_ops = bp->cnic_ops;
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
 	if (c_ops) {
 		info.cmd = CNIC_CTL_STOP_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
 	struct cnic_ctl_info info;
 
 	mutex_lock(&bp->cnic_lock);
-	c_ops = bp->cnic_ops;
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
 	if (c_ops) {
 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
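
Both hunks apply the same update-side RCU idiom: a writer holding cnic_lock may fetch the __rcu-annotated pointer with rcu_dereference_protected(), and lockdep_is_held() both documents the locking claim and, with lockdep enabled, verifies it. The registration side is not shown in this excerpt, but it would typically publish and retract the pointer roughly like this (a sketch, assuming ops is the new cnic_ops table):

/* publish: pairs with readers doing rcu_dereference() */
rcu_assign_pointer(bp->cnic_ops, ops);

/* retract: clear the pointer, then wait for in-flight readers */
rcu_assign_pointer(bp->cnic_ops, NULL);
synchronize_rcu();
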
@@ -7553,6 +7555,10 @@ bnx2_set_flags(struct net_device *dev, u32 data)
 	    !(data & ETH_FLAG_RXVLAN))
 		return -EINVAL;
 
+	/* TSO with VLAN tag won't work with current firmware */
+	if (!(data & ETH_FLAG_TXVLAN))
+		return -EINVAL;
+
 	rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
 				  ETH_FLAG_TXVLAN);
 	if (rc)
@@ -7962,11 +7968,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 		/* AER (Advanced Error Reporting) hooks */
 		err = pci_enable_pcie_error_reporting(pdev);
-		if (err) {
-			dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-				"failed 0x%x\n", err);
-			/* non-fatal, continue */
-		}
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
 
 	} else {
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8232,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	return 0;
 
 err_out_unmap:
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	if (bp->regview) {
 		iounmap(bp->regview);
@@ -8312,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
 #endif
 };
 
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static inline void vlan_features_add(struct net_device *dev, u32 flags)
 {
 	dev->vlan_features |= flags;
 }
@@ -8418,8 +8423,10 @@ bnx2_remove_one(struct pci_dev *pdev)
 
 	kfree(bp->temp_stats_blk);
 
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	free_netdev(dev);
 
@@ -8535,7 +8542,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 	}
 	rtnl_unlock();
 
-	if (!(bp->flags & BNX2_FLAG_PCIE))
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
 		return result;
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
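
The AER hunks replace the indirect BNX2_FLAG_PCIE test with a flag that records whether pci_enable_pcie_error_reporting() actually succeeded, so teardown and the io_slot_reset handler only undo what probe really enabled. The pattern, consolidated from the hunks above:

/* probe */
err = pci_enable_pcie_error_reporting(pdev);
if (!err)
	bp->flags |= BNX2_FLAG_AER_ENABLED;

/* remove / error path */
if (bp->flags & BNX2_FLAG_AER_ENABLED) {
	pci_disable_pcie_error_reporting(pdev);
	bp->flags &= ~BNX2_FLAG_AER_ENABLED;
}
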
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e82fe..68020451dc4 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -1,6 +1,6 @@
 /* bnx2.h: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2009 Broadcom Corporation
+ * Copyright (c) 2004-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
 
 #define BNX2_CP_SCRATCH				0x001a0000
 
+#define BNX2_FW_MAX_ISCSI_CONN			0x001a0080
+
 
 /*
  *  mcp_reg definition
@@ -6741,6 +6743,7 @@ struct bnx2 {
 #define BNX2_FLAG_JUMBO_BROKEN		0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
 #define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
 
 	struct bnx2_napi	bnx2_napi[BNX2_MAX_MSIX_VEC];
 
@@ -6758,7 +6761,7 @@ struct bnx2 {
 	u32			tx_wake_thresh;
 
 #ifdef BCM_CNIC
-	struct cnic_ops		*cnic_ops;
+	struct cnic_ops	__rcu	*cnic_ops;
 	void			*cnic_data;
 #endif
 
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index a6cd335c943..b7ff87b35fb 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-3"
-#define DRV_MODULE_RELDATE      "2010/12/21"
+#define DRV_MODULE_VERSION      "1.62.11-0"
+#define DRV_MODULE_RELDATE      "2011/01/31"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -31,7 +31,7 @@
 #define BNX2X_NEW_NAPI
 
 #if defined(CONFIG_DCB)
-#define BCM_DCB
+#define BCM_DCBNL
 #endif
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 #endif
 
 #define bnx2x_mc_addr(ha)      ((ha)->addr)
+#define bnx2x_uc_addr(ha)      ((ha)->addr)
 
 #define U64_LO(x)			(u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)			(u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@ struct bnx2x_fastpath {
 	/* chip independed shortcut into rx_prods_offset memory */
 	u32			ustorm_rx_prods_offset;
 
+	u32			rx_buf_size;
+
 	dma_addr_t		status_blk_mapping;
 
 	struct sw_tx_bd		*tx_buf_ring;
@@ -428,6 +431,10 @@ struct bnx2x_fastpath {
 };
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU	2500
+
 #ifdef BCM_CNIC
 /* FCoE L2 `fastpath' is right after the eth entries */
 #define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@ struct bnx2x_slowpath {
 	struct eth_stats_query		fw_stats;
 	struct mac_configuration_cmd	mac_config;
 	struct mac_configuration_cmd	mcast_config;
+	struct mac_configuration_cmd	uc_mac_config;
 	struct client_init_ramrod_data	client_init_data;
 
 	/* used by dmae command executer */
@@ -911,7 +919,6 @@ struct bnx2x {
 	int			tx_ring_size;
 
 	u32			rx_csum;
-	u32			rx_buf_size;
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE	60
@@ -939,7 +946,7 @@ struct bnx2x {
 	struct eth_spe		*spq_prod_bd;
 	struct eth_spe		*spq_last_bd;
 	__le16			*dsb_sp_prod;
-	atomic_t		spq_left; /* serialize spq */
+	atomic_t		cq_spq_left; /* ETH_XXX ramrods credit */
 	/* used to synchronize spq accesses */
 	spinlock_t		spq_lock;
 
@@ -949,6 +956,7 @@ struct bnx2x {
 	u16			eq_prod;
 	u16			eq_cons;
 	__le16			*eq_cons_sb;
+	atomic_t		eq_spq_left; /* COMMON_XXX ramrods credit */
 
 	/* Flags for marking that there is a STAT_QUERY or
 	   SET_MAC ramrod pending */
@@ -976,8 +984,12 @@ struct bnx2x {
 #define MF_FUNC_DIS			0x1000
 #define FCOE_MACS_SET			0x2000
 #define NO_FCOE_FLAG			0x4000
+#define NO_ISCSI_OOO_FLAG		0x8000
+#define NO_ISCSI_FLAG			0x10000
 
 #define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
 
 	int			pf_num;	/* absolute PF number */
 	int			pfid;	/* per-path PF number */
@@ -1064,6 +1076,7 @@ struct bnx2x {
 	int			num_queues;
 	int			disable_tpa;
 	int			int_mode;
+	u32			*rx_indir_table;
 
 	struct tstorm_eth_mac_filter_config	mac_filters;
 #define BNX2X_ACCEPT_NONE		0x0000
@@ -1110,7 +1123,7 @@ struct bnx2x {
 #define BNX2X_CNIC_FLAG_MAC_SET		1
 	void			*t2;
 	dma_addr_t		t2_mapping;
-	struct cnic_ops		*cnic_ops;
+	struct cnic_ops	__rcu	*cnic_ops;
 	void			*cnic_data;
 	u32			cnic_tag;
 	struct cnic_eth_dev	cnic_eth_dev;
@@ -1125,13 +1138,12 @@ struct bnx2x {
 	u16			cnic_kwq_pending;
 	u16			cnic_spq_pending;
 	struct mutex		cnic_mutex;
-	u8			iscsi_mac[ETH_ALEN];
 	u8			fip_mac[ETH_ALEN];
 #endif
 
 	int			dmae_ready;
 	/* used to synchronize dmae accesses */
-	struct mutex		dmae_mutex;
+	spinlock_t		dmae_lock;
 
 	/* used to protect the FW mail box */
 	struct mutex		fw_mb_mutex;
@@ -1211,6 +1223,7 @@ struct bnx2x {
 	/* DCBX Negotation results */
 	struct dcbx_features			dcbx_local_feat;
 	u32					dcbx_error;
+	u32					pending_max;
 };
 
 /**
@@ -1447,6 +1460,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 		  u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
 void bnx2x_update_coalesce(struct bnx2x *bp);
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
 
@@ -1613,19 +1632,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_BTR			4
 #define MAX_SPQ_PENDING			8
 
-
-/* CMNG constants
-   derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE			100
-/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC	100
-/* resolution of fairness algorithm in usecs -
-   coefficient for calculating the actual t fair */
-#define T_FAIR_COEF			10000000
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE			100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC	400
 /* number of bytes in single QM arbitration cycle -
- coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES			40000
-#define FAIR_MEM			2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES			160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES				100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH		32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF	((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm . 2 cycles */
+#define FAIR_MEM			2
 
 
 #define ATTN_NIG_FOR_FUNC		(1L << 8)
@@ -1782,5 +1805,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
 
 extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_push_indir_table(struct bnx2x *bp);
 
 #endif /* bnx2x.h */
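
With the new constants, the fairness coefficient is no longer a magic number; it works out as follows:

/* T_FAIR_COEF = (MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES
 *             = (32768 + 160000) * 8 * 100
 *             = 154214400
 * versus the old hard-coded 10000000.
 */
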
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d04c5..e83ac6dd6fc 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, DMA_FROM_DEVICE);
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 #endif
 }
 
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ *		nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN	12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using the first packet of it.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ *		     aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+				    u16 len_on_bd)
+{
+	/* TPA arrgregation won't have an IP options and TCP options
+	 * other than timestamp.
+	 */
+	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+	/* Check if there was a TCP timestamp, if there is it's will
+	 * always be 12 bytes length: nop nop kind length echo val.
+	 *
+	 * Otherwise FW would close the aggregation.
+	 */
+	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+		hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+	return len_on_bd - hdrs_len;
+}
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       struct sk_buff *skb,
 			       struct eth_fast_path_rx_cqe *fp_cqe,
-			       u16 cqe_idx)
+			       u16 cqe_idx, u16 parsing_flags)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
 	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
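
bnx2x_set_lro_mss() above relies on the firmware guarantee that an aggregated flow carries no IP options and, at most, the 12-byte TCP timestamp option. A worked example with assumed numbers:

/* First packet of the aggregation is 1514 bytes on the BD, with the
 * timestamp option present:
 *
 *   hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr)
 *            = 14 + 20 + 20          = 54
 *   hdrs_len += TPA_TSTAMP_OPT_LEN   = 66
 *   mss      = 1514 - 66             = 1448
 *
 * 1448 is the familiar MSS of a timestamped TCP session over a
 * 1500-byte MTU.
 */
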
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
-					       max(frag_size, (u32)len_on_bd));
+		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+							      len_on_bd);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -333,26 +367,28 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
 	struct sk_buff *skb = rx_buf->skb;
 	/* alloc new skb */
-	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, DMA_FROM_DEVICE);
+			 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
 		/* (no need to map the new skb) */
+		u16 parsing_flags =
+			le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
 
 		prefetch(skb);
 		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 #ifdef BNX2X_STOP_ON_ERROR
-		if (pad + len > bp->rx_buf_size) {
+		if (pad + len > fp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail... "
 				  "pad %d  len %d  rx_buf_size %d\n",
-				  pad, len, bp->rx_buf_size);
+				  pad, len, fp->rx_buf_size);
 			bnx2x_panic();
 			return;
 		}
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 
 		if (!bnx2x_fill_frag_skb(bp, fp, skb,
-					 &cqe->fast_path_cqe, cqe_idx)) {
-			if ((le16_to_cpu(cqe->fast_path_cqe.
-			    pars_flags.flags) & PARSING_FLAGS_VLAN))
+					 &cqe->fast_path_cqe, cqe_idx,
+					 parsing_flags)) {
+			if (parsing_flags & PARSING_FLAGS_VLAN)
 				__vlan_hwaccel_put_tag(skb,
 						 le16_to_cpu(cqe->fast_path_cqe.
 							     vlan_tag));
@@ -582,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 				dma_unmap_single(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
-						 bp->rx_buf_size,
+						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 {
 	u16 line_speed = bp->link_vars.line_speed;
 	if (IS_MF(bp)) {
-		u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
-				FUNC_MF_CFG_MAX_BW_MASK) >>
-				FUNC_MF_CFG_MAX_BW_SHIFT;
-		/* Calculate the current MAX line speed limit for the DCC
-		 * capable devices
+		u16 maxCfg = bnx2x_extract_max_cfg(bp,
+						   bp->mf_config[BP_VN(bp)]);
+
+		/* Calculate the current MAX line speed limit for the MF
+		 * devices
 		 */
-		if (IS_MF_SD(bp)) {
+		if (IS_MF_SI(bp))
+			line_speed = (line_speed * maxCfg) / 100;
+		else { /* SD mode */
 			u16 vn_max_rate = maxCfg * 100;
 
 			if (vn_max_rate < line_speed)
 				line_speed = vn_max_rate;
-		} else /* IS_MF_SI(bp)) */
-			line_speed = (line_speed * maxCfg) / 100;
+		}
 	}
 
 	return line_speed;
@@ -821,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 	u16 ring_prod;
 	int i, j;
 
-	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-		IP_HEADER_ALIGNMENT_PADDING;
-
-	DP(NETIF_MSG_IFUP,
-	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
 	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
+		DP(NETIF_MSG_IFUP,
+		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
 		if (!fp->disable_tpa) {
 			for (i = 0; i < max_agg_queues; i++) {
 				fp->tpa_pool[i].skb =
-				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 				if (!fp->tpa_pool[i].skb) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
@@ -941,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -959,6 +993,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
 	bnx2x_free_rx_skbs(bp);
 }
 
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+	/* load old values */
+	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+		/* leave all but MAX value */
+		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+		/* set new MAX value */
+		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+				& FUNC_MF_CFG_MAX_BW_MASK;
+
+		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+	}
+}
+
 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 {
 	int i, offset = 1;
@@ -1249,6 +1300,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 	return rc;
 }
 
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
+		if (IS_FCOE_IDX(i))
+			/*
+			 * Although there are no IP frames expected to arrive to
+			 * this ring we still want to add an
+			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+			 * overrun attack.
+			 */
+			fp->rx_buf_size =
+				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+		else
+			fp->rx_buf_size =
+				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+				IP_HEADER_ALIGNMENT_PADDING;
+	}
+}
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1272,6 +1348,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* must be called before memory allocation and HW init */
 	bnx2x_ilt_set_info(bp);
 
+	/* Set the receive queues buffer size */
+	bnx2x_set_rx_buf_size(bp);
+
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
 
@@ -1427,28 +1506,40 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	bnx2x_set_eth_mac(bp, 1);
 
+	/* Clear MC configuration */
+	if (CHIP_IS_E1(bp))
+		bnx2x_invalidate_e1_mc_list(bp);
+	else
+		bnx2x_invalidate_e1h_mc_list(bp);
+
+	/* Clear UC lists configuration */
+	bnx2x_invalidate_uc_list(bp);
+
+	if (bp->pending_max) {
+		bnx2x_update_max_mf_config(bp, bp->pending_max);
+		bp->pending_max = 0;
+	}
+
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp, load_mode);
 
+	/* Initialize Rx filtering */
+	bnx2x_set_rx_mode(bp->dev);
+
 	/* Start fast path */
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only reenabled */
 		netif_tx_wake_all_queues(bp->dev);
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		break;
 
 	case LOAD_OPEN:
 		netif_tx_start_all_queues(bp->dev);
 		smp_mb__after_clear_bit();
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		break;
 
 	case LOAD_DIAG:
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		bp->state = BNX2X_STATE_DIAG;
 		break;
 
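
With rx_buf_size now tracked per fastpath, each ring can size its buffers independently. An illustrative sizing for a 1500-byte MTU, using ETH_OVREHEAD = 30 (ETH_HLEN + 2 VLANs + LLC SNAP, from the bnx2x.h hunk above); the values of BNX2X_RX_ALIGN and IP_HEADER_ALIGNMENT_PADDING are not shown in this excerpt and are left symbolic:

/* regular L2 ring:
 *   fp->rx_buf_size = 1500 + 30 + BNX2X_RX_ALIGN
 *                     + IP_HEADER_ALIGNMENT_PADDING;
 * FCoE L2 ring: same formula, but BNX2X_FCOE_MINI_JUMBO_MTU (2500)
 * replaces the netdev MTU, and the padding is kept to prevent a
 * buffer overrun even though no IP frames are expected there.
 */
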
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d68e6b..ef37b98d614 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp);
  */
 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
 
+/**
+ * Updates MAX part of MF configuration in HW
+ * (if required)
+ *
+ * @param bp
+ * @param value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+
 /* dev_close main block */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
 
@@ -822,11 +831,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
@@ -892,7 +901,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
@@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+			      FUNC_MF_CFG_MAX_BW_SHIFT;
+	if (!max_cfg) {
+		BNX2X_ERR("Illegal configuration detected for Max BW - "
+			  "using 100 instead\n");
+		max_cfg = 100;
+	}
+	return max_cfg;
+}
+
 #endif /* BNX2X_CMN_H */
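
bnx2x_extract_max_cfg() returns the configured ceiling as a percentage (1-100) of the line rate, substituting 100 for an illegal zero. A sketch of how a caller such as bnx2x_get_mf_speed() applies it in SI mode; the 10000 Mbps link speed and the resulting 4000 are assumed example values:

u16 line_speed = 10000;	/* 10G link, in Mbps - assumed for the example */
u16 max_cfg = bnx2x_extract_max_cfg(bp, bp->mf_config[BP_VN(bp)]);

line_speed = (line_speed * max_cfg) / 100;	/* max_cfg = 40 -> 4000 Mbps */
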
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index fb60021f81f..9a24d79c71d 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -19,6 +19,9 @@
 #include <linux/netdevice.h>
 #include <linux/types.h>
 #include <linux/errno.h>
+#ifdef BCM_DCBNL
+#include <linux/dcbnl.h>
+#endif
 
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
@@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
 	return 0;
 }
 
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+	u8 pri;
+
+	/* Choose the highest priority */
+	for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+		if (ent->pri_bitmap & (1 << pri))
+			break;
+	return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+	return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+		DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+		DCB_APP_IDTYPE_ETHTYPE;
+}
+
+static inline
+void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
+{
+	int i;
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+		bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
+							~DCBX_APP_ENTRY_VALID;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+	int i, err = 0;
+
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+		struct dcbx_app_priority_entry *ent =
+			&bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+			u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+			/* avoid invalid user-priority */
+			if (up) {
+				struct dcb_app app;
+				app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+				app.protocol = ent->app_id;
+				app.priority = delall ? 0 : up;
+				err = dcb_setapp(bp->dev, &app);
+			}
+		}
+	}
+	return err;
+}
+#endif
+
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 {
 	switch (state) {
 	case BNX2X_DCBX_STATE_NEG_RECEIVED:
 	{
 		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
-
+#ifdef BCM_DCBNL
+		/**
+		 * Delete app tlvs from dcbnl before reading new
+		 * negotiation results
+		 */
+		bnx2x_dcbnl_update_applist(bp, true);
+#endif
 		/* Read neg results if dcbx is in the FW */
 		if (bnx2x_dcbx_read_shmem_neg_results(bp))
 			return;
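
bnx2x_dcbx_dcbnl_app_up() scans the priority bitmap from the top down, so an entry's highest configured priority wins; priority 0 is deliberately indistinguishable from "none" and is filtered out by the caller. For example:

/* pri_bitmap = 0x32 -> bits 1, 4 and 5 set.
 * The loop starts at MAX_PFC_PRIORITIES - 1 (7) and stops at the first
 * set bit, returning 5. A bitmap with only bit 0 set returns 0, which
 * bnx2x_dcbnl_update_applist() then skips as an invalid user-priority.
 */
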
@@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 			 bp->dcbx_error);
 
 		if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+#ifdef BCM_DCBNL
+			/**
+			 * Add new app tlvs to dcbnl
+			 */
+			bnx2x_dcbnl_update_applist(bp, false);
+#endif
 			bnx2x_dcbx_stop_hw_tx(bp);
 			return;
 		}
 		/* fall through */
+#ifdef BCM_DCBNL
+		/**
+		 * Invalidate the local app tlvs if they are not added
+		 * to the dcbnl app list to avoid deleting them from
+		 * the list later on
+		 */
+		bnx2x_dcbx_invalidate_local_apps(bp);
+#endif
 	}
 	case BNX2X_DCBX_STATE_TX_PAUSED:
 		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
@@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
 	bnx2x_dcbx_print_cos_params(bp,	pfc_fw_cfg);
 }
 /* DCB netlink */
-#ifdef BCM_DCB
-#include <linux/dcbnl.h>
+#ifdef BCM_DCBNL
 
 #define BNX2X_DCBX_CAPS		(DCB_CAP_DCBX_LLD_MANAGED | \
 				DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
@@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
 	bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
 }
 
-static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
-			       u8 idtype, u16 idval)
-{
-	if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
-		return false;
-
-	switch (idtype) {
-	case DCB_APP_IDTYPE_ETHTYPE:
-		if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
-			DCBX_APP_SF_ETH_TYPE)
-			return false;
-		break;
-	case DCB_APP_IDTYPE_PORTNUM:
-		if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
-			DCBX_APP_SF_PORT)
-			return false;
-		break;
-	default:
-		return false;
-	}
-	if (app_ent->app_id != idval)
-		return false;
-
-	return true;
-}
-
 static void bnx2x_admin_app_set_ent(
 	struct bnx2x_admin_priority_app_table *app_ent,
 	u8 idtype, u16 idval, u8 up)
@@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
 	return bnx2x_set_admin_app_up(bp, idtype, idval, up);
 }
 
-static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
-				 u16 idval)
-{
-	int i;
-	u8 up = 0;
-
-	struct bnx2x *bp = netdev_priv(netdev);
-	DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
-
-	/* iterate over the app entries looking for idtype and idval */
-	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
-		if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
-				       idtype, idval))
-			break;
-
-	if (i < DCBX_MAX_APP_PROTOCOL)
-		/* if found return up */
-		up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
-	else
-		DP(NETIF_MSG_LINK, "app not found\n");
-
-	return up;
-}
-
 static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
 {
 	struct bnx2x *bp = netdev_priv(netdev);
@@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
 	.setnumtcs	= bnx2x_dcbnl_set_numtcs,
 	.getpfcstate	= bnx2x_dcbnl_get_pfc_state,
 	.setpfcstate	= bnx2x_dcbnl_set_pfc_state,
-	.getapp		= bnx2x_dcbnl_get_app_up,
 	.setapp		= bnx2x_dcbnl_set_app_up,
 	.getdcbx	= bnx2x_dcbnl_get_dcbx,
 	.setdcbx	= bnx2x_dcbnl_set_dcbx,
@@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
 	.setfeatcfg	= bnx2x_dcbnl_set_featcfg,
 };
 
-#endif /* BCM_DCB */
+#endif /* BCM_DCBNL */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index f650f98e409..71b8eda43bd 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -189,8 +189,9 @@ enum {
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
 
 /* DCB netlink */
-#ifdef BCM_DCB
+#ifdef BCM_DCBNL
 extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
-#endif /* BCM_DCB */
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
 
 #endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b4850..f5050155c6b 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	speed |= (cmd->speed_hi << 16);
 
 	if (IS_MF_SI(bp)) {
-		u32 param = 0;
+		u32 part;
 		u32 line_speed = bp->link_vars.line_speed;
 
 		/* use 10G if no link detected */
@@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 				       REQ_BC_VER_4_SET_MF_BW);
 			return -EINVAL;
 		}
-		if (line_speed < speed) {
-			BNX2X_DEV_INFO("New speed should be less or equal "
-				       "to actual line speed\n");
+
+		part = (speed * 100) / line_speed;
+
+		if (line_speed < speed || !part) {
+			BNX2X_DEV_INFO("Speed setting should be in a range "
+				       "from 1%% to 100%% "
+				       "of actual line speed\n");
 			return -EINVAL;
 		}
-		/* load old values */
-		param = bp->mf_config[BP_VN(bp)];
 
-		/* leave only MIN value */
-		param &= FUNC_MF_CFG_MIN_BW_MASK;
+		if (bp->state != BNX2X_STATE_OPEN)
+			/* store value for following "load" */
+			bp->pending_max = part;
+		else
+			bnx2x_update_max_mf_config(bp, part);
 
-		/* set new MAX value */
-		param |= (((speed * 100) / line_speed)
-				 << FUNC_MF_CFG_MAX_BW_SHIFT)
-				 & FUNC_MF_CFG_MAX_BW_MASK;
-
-		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
 		return 0;
 	}
 
@@ -1618,7 +1617,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	/* prepare the loopback packet */
 	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
 		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
 	if (!skb) {
 		rc = -ENOMEM;
 		goto test_loopback_exit;
@@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
 		{     0x100,   0x350 }, /* manuf_info */
 		{     0x450,    0xf0 }, /* feature_info */
 		{     0x640,    0x64 }, /* upgrade_key_info */
-		{     0x6a4,    0x64 },
 		{     0x708,    0x70 }, /* manuf_key_info */
-		{     0x778,    0x70 },
 		{        0,       0 }
 	};
 	__be32 buf[0x350 / 4];
@@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev,
 		buf[4] = 1;
 		etest->flags |= ETH_TEST_FL_FAILED;
 	}
-	if (bp->port.pmf)
-		if (bnx2x_link_test(bp, is_serdes) != 0) {
-			buf[5] = 1;
-			etest->flags |= ETH_TEST_FL_FAILED;
-		}
+
+	if (bnx2x_link_test(bp, is_serdes) != 0) {
+		buf[5] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
 
 #ifdef BNX2X_EXTRA_DEBUG
 	bnx2x_panic_dump(bp);
@@ -2134,6 +2131,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
 	return 0;
 }
 
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			   void *rules __always_unused)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = BNX2X_NUM_ETH_QUEUES(bp);
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+				struct ethtool_rxfh_indir *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t copy_size =
+		min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return -EOPNOTSUPP;
+
+	indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
+	memcpy(indir->ring_index, bp->rx_indir_table,
+	       copy_size * sizeof(bp->rx_indir_table[0]));
+	return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+				const struct ethtool_rxfh_indir *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t i;
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return -EOPNOTSUPP;
+
+	/* Validate size and indices */
+	if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+		return -EINVAL;
+	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+		if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+			return -EINVAL;
+
+	memcpy(bp->rx_indir_table, indir->ring_index,
+	       indir->size * sizeof(bp->rx_indir_table[0]));
+	bnx2x_push_indir_table(bp);
+	return 0;
+}
+
 static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_settings		= bnx2x_get_settings,
 	.set_settings		= bnx2x_set_settings,
@@ -2170,6 +2220,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_strings		= bnx2x_get_strings,
 	.phys_id		= bnx2x_phys_id,
 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+	.get_rxnfc		= bnx2x_get_rxnfc,
+	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
+	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
 };
 
 void bnx2x_set_ethtool_ops(struct net_device *netdev)
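
The new ethtool hooks expose the RSS indirection table. A rough sketch of what the table does (the rss_hash variable is illustrative; the exact hash-to-index mapping is performed by the chip, not by driver code like this):

/* queue = bp->rx_indir_table[rss_hash % TSTORM_INDIRECTION_TABLE_SIZE];
 *
 * set_rxfh_indir validates every ring_index entry against
 * BNX2X_NUM_ETH_QUEUES() before committing the table to hardware via
 * bnx2x_push_indir_table().
 */
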
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f6398..be503cc0a50 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,20 +11,27 @@
 
 #include "bnx2x_fw_defs.h"
 
+#define FW_ENCODE_32BIT_PATTERN		0x1e1e1e1e
+
 struct license_key {
 	u32 reserved[6];
 
-#if defined(__BIG_ENDIAN)
-	u16 max_iscsi_init_conn;
-	u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
-	u16 max_iscsi_trgt_conn;
-	u16 max_iscsi_init_conn;
-#endif
+	u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT	16
 
-	u32 reserved_a[6];
-};
+	u32 reserved_a;
+
+	u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT	16
 
+	u32 reserved_b[4];
+};
 
 #define PORT_0			0
 #define PORT_1			1
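
The reworked license_key packs the initiator and target connection limits into the halves of one u32 instead of endian-dependent u16 pairs, with mask/shift pairs for extraction. A sketch of unpacking one field (the helper name is illustrative, not part of the driver):

static inline u16 iscsi_init_conn_limit(const struct license_key *lic)
{
	return (lic->max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
}
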
@@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
237#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 244#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
238 245
239 246
240 u32 Reserved0[16]; /* 0x158 */ 247 u32 Reserved0[3]; /* 0x158 */
241 248 /* Controls the TX laser of the SFP+ module */
249 u32 sfp_ctrl; /* 0x164 */
250#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
251#define PORT_HW_CFG_TX_LASER_SHIFT 0
252#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
253#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
254#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
255#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
256#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
257
258 /* Controls the fault module LED of the SFP+ */
259#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
260#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
261#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
262#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
263#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
264#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
265#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
266 u32 Reserved01[12]; /* 0x158 */
242 /* for external PHY, or forced mode or during AN */ 267 /* for external PHY, or forced mode or during AN */
243 u16 xgxs_config_rx[4]; /* 0x198 */ 268 u16 xgxs_config_rx[4]; /* 0x198 */
244 269
@@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
246 271
247 u32 Reserved1[56]; /* 0x1A8 */ 272 u32 Reserved1[56]; /* 0x1A8 */
248 u32 default_cfg; /* 0x288 */ 273 u32 default_cfg; /* 0x288 */
274#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
275#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
276#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
277#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
278#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
279#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
280
281#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
282#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
283#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
284#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
285#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
286#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
287
288#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
289#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
290#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
291#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
292#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
293#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
294
295#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
296#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
297#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
298#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
299#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
300#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
301
302 /*
303 * When KR link is required to be set to force which is not
304 * KR-compliant, this parameter determine what is the trigger for it.
305 * When GPIO is selected, low input will force the speed. Currently
306 * default speed is 1G. In the future, it may be widen to select the
307 * forced speed in with another parameter. Note when force-1G is
308 * enabled, it override option 56: Link Speed option.
309 */
310#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
311#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
312#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
313#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
314#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
315#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
316#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
317#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
318#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
319#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
320#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
321#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
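Decoded, the enabler field (bits 8-11 of default_cfg) enumerates (GPIO, port) pairs: values 1-8 map linearly to GPIO 0-3 on port 0 and then port 1, 0 means not forced and 9 means unconditionally forced. A small illustrative decode (helper and output format are not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define FORCE_KR_MASK	0x00000F00	/* PORT_HW_CFG_FORCE_KR_ENABLER_MASK */
#define FORCE_KR_SHIFT	8

int main(void)
{
	uint32_t default_cfg = 0x00000700;	/* example: GPIO2_P1 */
	uint32_t v = (default_cfg & FORCE_KR_MASK) >> FORCE_KR_SHIFT;

	if (v == 0)
		printf("KR speed not forced\n");
	else if (v <= 8)
		printf("forced via GPIO%u, port %u\n",
		       (unsigned)((v - 1) % 4), (unsigned)((v - 1) / 4));
	else
		printf("unconditionally forced\n");
	return 0;
}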
 322	/* Determines which GPIO, if any, is used to reset the external phy */
323#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
324#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
325#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
326#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
327#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
328#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
329#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
330#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
331#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
332#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
333#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
249 /* Enable BAM on KR */ 334 /* Enable BAM on KR */
250#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 335#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
251#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 336#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
252#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 337#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
253#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 338#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
254 339
340 /* Enable Common Mode Sense */
341#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
342#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
343#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
344#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
345
255 u32 speed_capability_mask2; /* 0x28C */ 346 u32 speed_capability_mask2; /* 0x28C */
256#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF 347#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
257#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 348#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -352,6 +443,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
352#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 443#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
353 /* forced only */ 444 /* forced only */
354#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 445#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
446 /* Indicate whether to swap the external phy polarity */
447#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
448#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
449#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
355 450
356 u32 external_phy_config; 451 u32 external_phy_config;
357#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 452#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
@@ -377,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
377#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 472#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
378#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 473#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
379#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 474#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
475#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
380#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 476#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
381#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 477#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
382 478
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 5a268e9a089..fa6dbe3f205 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -241,7 +241,7 @@ static const struct {
241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't 241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
242 * want to handle "system kill" flow at the moment. 242 * want to handle "system kill" flow at the moment.
243 */ 243 */
244 BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), 244 BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), 245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), 246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), 247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de24f39..f2f367d4e74 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
1/* Copyright 2008-2009 Broadcom Corporation 1/* Copyright 2008-2011 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
28 28
29/********************************************************/ 29/********************************************************/
30#define ETH_HLEN 14 30#define ETH_HLEN 14
31#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */ 31/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
32#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
32#define ETH_MIN_PACKET_SIZE 60 33#define ETH_MIN_PACKET_SIZE 60
33#define ETH_MAX_PACKET_SIZE 1500 34#define ETH_MAX_PACKET_SIZE 1500
34#define ETH_MAX_JUMBO_PACKET_SIZE 9600 35#define ETH_MAX_JUMBO_PACKET_SIZE 9600
35#define MDIO_ACCESS_TIMEOUT 1000 36#define MDIO_ACCESS_TIMEOUT 1000
36#define BMAC_CONTROL_RX_ENABLE 2 37#define BMAC_CONTROL_RX_ENABLE 2
37 38
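The rewritten comment makes the arithmetic explicit: ETH_OVREHEAD is 14 + 8 + 8 = 30 bytes, so the jumbo-frame size programmed into the BigMac later (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD) comes to 9630 bytes. A one-file check using only the constants above:

#include <stdio.h>

#define ETH_HLEN 14
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)	/* 2 VLANs + LLC SNAP */
#define ETH_MAX_JUMBO_PACKET_SIZE 9600

int main(void)
{
	/* value written to the BigMac RX/TX_MAX_SIZE registers: 9630 */
	printf("%d\n", ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD);
	return 0;
}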
38/***********************************************************/ 39/***********************************************************/
39/* Shortcut definitions */ 40/* Shortcut definitions */
@@ -79,7 +80,7 @@
79 80
80#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 81#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
81#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 82#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
82#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM 83#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
83#define AUTONEG_PARALLEL \ 84#define AUTONEG_PARALLEL \
84 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 85 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
85#define AUTONEG_SGMII_FIBER_AUTODET \ 86#define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
112#define GP_STATUS_10G_KX4 \ 113#define GP_STATUS_10G_KX4 \
113 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 114 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
114 115
115#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD 116#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
116#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD 117#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
117#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD 118#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
118#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 119#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
119#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD 120#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
120#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD 121#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
121#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD 122#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
123#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD 124#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
124#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD 125#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
125#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD 126#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
126#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD 127#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
127#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD 128#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
128#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD 129#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
129#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD 130#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
130#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD 131#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
131#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD 132#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
132#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD 133#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
133#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD 134#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
134#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD 135#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
135#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD 136#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
136#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD 137#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
137#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD 138#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
138 139
139#define PHY_XGXS_FLAG 0x1 140#define PHY_XGXS_FLAG 0x1
140#define PHY_SGMII_FLAG 0x2 141#define PHY_SGMII_FLAG 0x2
@@ -142,7 +143,7 @@
142 143
143/* */ 144/* */
144#define SFP_EEPROM_CON_TYPE_ADDR 0x2 145#define SFP_EEPROM_CON_TYPE_ADDR 0x2
145 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 146 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
146 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 147 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
147 148
148 149
@@ -153,15 +154,15 @@
153 154
154#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 155#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
155 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 157 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
157 158
158#define SFP_EEPROM_OPTIONS_ADDR 0x40 159#define SFP_EEPROM_OPTIONS_ADDR 0x40
159 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 160 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
160#define SFP_EEPROM_OPTIONS_SIZE 2 161#define SFP_EEPROM_OPTIONS_SIZE 2
161 162
162#define EDC_MODE_LINEAR 0x0022 163#define EDC_MODE_LINEAR 0x0022
163#define EDC_MODE_LIMITING 0x0044 164#define EDC_MODE_LIMITING 0x0044
164#define EDC_MODE_PASSIVE_DAC 0x0055 165#define EDC_MODE_PASSIVE_DAC 0x0055
165 166
166 167
167#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) 168#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -170,24 +171,18 @@
170/* INTERFACE */ 171/* INTERFACE */
171/**********************************************************/ 172/**********************************************************/
172 173
173#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ 174#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
174 bnx2x_cl45_write(_bp, _phy, \ 175 bnx2x_cl45_write(_bp, _phy, \
175 (_phy)->def_md_devad, \ 176 (_phy)->def_md_devad, \
176 (_bank + (_addr & 0xf)), \ 177 (_bank + (_addr & 0xf)), \
177 _val) 178 _val)
178 179
179#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ 180#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
180 bnx2x_cl45_read(_bp, _phy, \ 181 bnx2x_cl45_read(_bp, _phy, \
181 (_phy)->def_md_devad, \ 182 (_phy)->def_md_devad, \
182 (_bank + (_addr & 0xf)), \ 183 (_bank + (_addr & 0xf)), \
183 _val) 184 _val)
184 185
185static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
186 u8 devad, u16 reg, u16 *ret_val);
187
188static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
189 u8 devad, u16 reg, u16 val);
190
191static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) 186static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
192{ 187{
193 u32 val = REG_RD(bp, reg); 188 u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)
216 211
217 DP(NETIF_MSG_LINK, "ETS disabled configuration\n"); 212 DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
218 213
219 /** 214 /*
 220	 * mapping from entry priority to client number (0,1,2 - debug and 215	 * mapping from entry priority to client number (0,1,2 - debug and
 221	 * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST) 216	 * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
222 * 3bits client num. 217 * 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
225 */ 220 */
226 221
227 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); 222 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
228 /** 223 /*
229 * Bitmap of 5bits length. Each bit specifies whether the entry behaves 224 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
230 * as strict. Bits 0,1,2 - debug and management entries, 3 - 225 * as strict. Bits 0,1,2 - debug and management entries, 3 -
231 * COS0 entry, 4 - COS1 entry. 226 * COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
237 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 232 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
238 /* defines which entries (clients) are subjected to WFQ arbitration */ 233 /* defines which entries (clients) are subjected to WFQ arbitration */
239 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 234 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
240 /** 235 /*
241 * For strict priority entries defines the number of consecutive 236 * For strict priority entries defines the number of consecutive
242 * slots for the highest priority. 237 * slots for the highest priority.
243 */ 238 */
244 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 239 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
245 /** 240 /*
246 * mapping between the CREDIT_WEIGHT registers and actual client 241 * mapping between the CREDIT_WEIGHT registers and actual client
247 * numbers 242 * numbers
248 */ 243 */
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
255 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); 250 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
256 /* ETS mode disable */ 251 /* ETS mode disable */
257 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); 252 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
258 /** 253 /*
259 * If ETS mode is enabled (there is no strict priority) defines a WFQ 254 * If ETS mode is enabled (there is no strict priority) defines a WFQ
260 * weight for COS0/COS1. 255 * weight for COS0/COS1.
261 */ 256 */
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
268 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 263 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
269} 264}
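The priority-to-client words written above pack five 3-bit client numbers with PRI0 in the least significant bits. Decoding confirms the mappings quoted in the comments: 0x4688 is the identity map (PRIn maps to client n), and the same decode applies to 0x2318/0x22E0 in bnx2x_ets_strict() and, with 3-bit register indices instead of clients, to the credit map 0x111A below. A standalone decode sketch (function name illustrative):

#include <stdint.h>
#include <stdio.h>

static void decode_prio_client(uint32_t val)
{
	unsigned int pri;

	/* five 3-bit fields, PRI0 at bits [2:0] */
	for (pri = 0; pri < 5; pri++)
		printf("PRI%u -> client %u\n", pri, (val >> (3 * pri)) & 0x7);
}

int main(void)
{
	decode_prio_client(0x4688);	/* disabled cfg: PRIn -> client n */
	decode_prio_client(0x2318);	/* strict-COS0 variant used later */
	return 0;
}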
270 265
271void bnx2x_ets_bw_limit_common(const struct link_params *params) 266static void bnx2x_ets_bw_limit_common(const struct link_params *params)
272{ 267{
273 /* ETS disabled configuration */ 268 /* ETS disabled configuration */
274 struct bnx2x *bp = params->bp; 269 struct bnx2x *bp = params->bp;
275 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); 270 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
276 /** 271 /*
277 * defines which entries (clients) are subjected to WFQ arbitration 272 * defines which entries (clients) are subjected to WFQ arbitration
278 * COS0 0x8 273 * COS0 0x8
279 * COS1 0x10 274 * COS1 0x10
280 */ 275 */
281 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); 276 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
282 /** 277 /*
283 * mapping between the ARB_CREDIT_WEIGHT registers and actual 278 * mapping between the ARB_CREDIT_WEIGHT registers and actual
284 * client numbers (WEIGHT_0 does not actually have to represent 279 * client numbers (WEIGHT_0 does not actually have to represent
285 * client 0) 280 * client 0)
286 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 281 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
287 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 282 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
288 */ 283 */
289 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); 284 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
290 285
291 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 286 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)
298 293
299 /* Defines the number of consecutive slots for the strict priority */ 294 /* Defines the number of consecutive slots for the strict priority */
300 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 295 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
301 /** 296 /*
302 * Bitmap of 5bits length. Each bit specifies whether the entry behaves 297 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
303 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 298 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
304 * entry, 4 - COS1 entry. 299 * entry, 4 - COS1 entry.
305 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT 300 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
306 * bit4 bit3 bit2 bit1 bit0 301 * bit4 bit3 bit2 bit1 bit0
307 * MCP and debug are strict 302 * MCP and debug are strict
308 */ 303 */
309 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 304 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
310 305
311 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/ 306 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
329 if ((0 == total_bw) || 324 if ((0 == total_bw) ||
330 (0 == cos0_bw) || 325 (0 == cos0_bw) ||
331 (0 == cos1_bw)) { 326 (0 == cos1_bw)) {
332 DP(NETIF_MSG_LINK, 327 DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
333 "bnx2x_ets_bw_limit: Total BW can't be zero\n");
334 return; 328 return;
335 } 329 }
336 330
@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
355 u32 val = 0; 349 u32 val = 0;
356 350
357 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); 351 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
358 /** 352 /*
359 * Bitmap of 5bits length. Each bit specifies whether the entry behaves 353 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
360 * as strict. Bits 0,1,2 - debug and management entries, 354 * as strict. Bits 0,1,2 - debug and management entries,
361 * 3 - COS0 entry, 4 - COS1 entry. 355 * 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
364 * MCP and debug are strict 358 * MCP and debug are strict
365 */ 359 */
366 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); 360 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
367 /** 361 /*
368 * For strict priority entries defines the number of consecutive slots 362 * For strict priority entries defines the number of consecutive slots
369 * for the highest priority. 363 * for the highest priority.
370 */ 364 */
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
377 /* Defines the number of consecutive slots for the strict priority */ 371 /* Defines the number of consecutive slots for the strict priority */
378 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); 372 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
379 373
380 /** 374 /*
 381	 * mapping from entry priority to client number (0,1,2 - debug and 375	 * mapping from entry priority to client number (0,1,2 - debug and
 382	 * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST) 376	 * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
383 * 3bits client num. 377 * 3bits client num.
384 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 378 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
385 * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 379 * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
386 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 380 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
387 */ 381 */
388 val = (0 == strict_cos) ? 0x2318 : 0x22E0; 382 val = (0 == strict_cos) ? 0x2318 : 0x22E0;
389 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); 383 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
390 384
@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
471/* MAC/PBF section */ 465/* MAC/PBF section */
472/******************************************************************/ 466/******************************************************************/
473static void bnx2x_emac_init(struct link_params *params, 467static void bnx2x_emac_init(struct link_params *params,
474 struct link_vars *vars) 468 struct link_vars *vars)
475{ 469{
476 /* reset and unreset the emac core */ 470 /* reset and unreset the emac core */
477 struct bnx2x *bp = params->bp; 471 struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
481 u16 timeout; 475 u16 timeout;
482 476
483 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 477 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
484 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 478 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
485 udelay(5); 479 udelay(5);
486 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 480 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
487 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 481 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
488 482
489 /* init emac - use read-modify-write */ 483 /* init emac - use read-modify-write */
490 /* self clear reset */ 484 /* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
515} 509}
516 510
517static u8 bnx2x_emac_enable(struct link_params *params, 511static u8 bnx2x_emac_enable(struct link_params *params,
518 struct link_vars *vars, u8 lb) 512 struct link_vars *vars, u8 lb)
519{ 513{
520 struct bnx2x *bp = params->bp; 514 struct bnx2x *bp = params->bp;
521 u8 port = params->port; 515 u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
527 /* enable emac and not bmac */ 521 /* enable emac and not bmac */
528 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); 522 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
529 523
530 /* for paladium */
531 if (CHIP_REV_IS_EMUL(bp)) {
532 /* Use lane 1 (of lanes 0-3) */
533 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
534 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
535 port*4, 1);
536 }
537 /* for fpga */
538 else
539
540 if (CHIP_REV_IS_FPGA(bp)) {
541 /* Use lane 1 (of lanes 0-3) */
542 DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
543
544 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
545 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
546 0);
547 } else
548 /* ASIC */ 524 /* ASIC */
549 if (vars->phy_flags & PHY_XGXS_FLAG) { 525 if (vars->phy_flags & PHY_XGXS_FLAG) {
550 u32 ser_lane = ((params->lane_config & 526 u32 ser_lane = ((params->lane_config &
551 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 527 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
552 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 528 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
553 529
554 DP(NETIF_MSG_LINK, "XGXS\n"); 530 DP(NETIF_MSG_LINK, "XGXS\n");
555 /* select the master lanes (out of 0-3) */ 531 /* select the master lanes (out of 0-3) */
556 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + 532 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
557 port*4, ser_lane);
558 /* select XGXS */ 533 /* select XGXS */
559 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + 534 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
560 port*4, 1);
561 535
562 } else { /* SerDes */ 536 } else { /* SerDes */
563 DP(NETIF_MSG_LINK, "SerDes\n"); 537 DP(NETIF_MSG_LINK, "SerDes\n");
564 /* select SerDes */ 538 /* select SerDes */
565 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + 539 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
566 port*4, 0);
567 } 540 }
568 541
569 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 542 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
570 EMAC_RX_MODE_RESET); 543 EMAC_RX_MODE_RESET);
571 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 544 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
572 EMAC_TX_MODE_RESET); 545 EMAC_TX_MODE_RESET);
573 546
574 if (CHIP_REV_IS_SLOW(bp)) { 547 if (CHIP_REV_IS_SLOW(bp)) {
575 /* config GMII mode */ 548 /* config GMII mode */
576 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 549 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
577 EMAC_WR(bp, EMAC_REG_EMAC_MODE, 550 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
578 (val | EMAC_MODE_PORT_GMII));
579 } else { /* ASIC */ 551 } else { /* ASIC */
580 /* pause enable/disable */ 552 /* pause enable/disable */
581 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 553 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
605 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 577 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
606 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 578 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
607 579
608 /** 580 /*
609 * Setting this bit causes MAC control frames (except for pause 581 * Setting this bit causes MAC control frames (except for pause
610 * frames) to be passed on for processing. This setting has no 582 * frames) to be passed on for processing. This setting has no
611 * affect on the operation of the pause frames. This bit effects 583 * affect on the operation of the pause frames. This bit effects
612 * all packets regardless of RX Parser packet sorting logic. 584 * all packets regardless of RX Parser packet sorting logic.
613 * Turn the PFC off to make sure we are in Xon state before 585 * Turn the PFC off to make sure we are in Xon state before
614 * enabling it. 586 * enabling it.
615 */ 587 */
616 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); 588 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
617 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { 589 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
618 DP(NETIF_MSG_LINK, "PFC is enabled\n"); 590 DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
666 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); 638 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
667 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); 639 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
668 640
669 if (CHIP_REV_IS_EMUL(bp)) { 641 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
670 /* take the BigMac out of reset */
671 REG_WR(bp,
672 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
673 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
674
675 /* enable access for bmac registers */
676 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
677 } else
678 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
679 642
680 vars->mac_type = MAC_TYPE_EMAC; 643 vars->mac_type = MAC_TYPE_EMAC;
681 return 0; 644 return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
731 val |= (1<<5); 694 val |= (1<<5);
732 wb_data[0] = val; 695 wb_data[0] = val;
733 wb_data[1] = 0; 696 wb_data[1] = 0;
734 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, 697 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
735 wb_data, 2);
736 udelay(30); 698 udelay(30);
737 699
738 /* Tx control */ 700 /* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
768 730
769 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); 731 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
770 732
771 /** 733 /*
 772	 * Set Time (base unit is 512 bit times) between automatic 734	 * Set Time (base unit is 512 bit times) between automatic
 773	 * re-sending of PP packets and enable automatic re-send of 735	 * re-sending of PP packets and enable automatic re-send of
 774	 * Per-Priority Packet as long as pp_gen is asserted and 736	 * Per-Priority Packet as long as pp_gen is asserted and
775 * pp_disable is low. 737 * pp_disable is low.
776 */ 738 */
777 val = 0x8000; 739 val = 0x8000;
778 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) 740 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
779 val |= (1<<16); /* enable automatic re-send */ 741 val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
781 wb_data[0] = val; 743 wb_data[0] = val;
782 wb_data[1] = 0; 744 wb_data[1] = 0;
783 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, 745 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
784 wb_data, 2); 746 wb_data, 2);
785 747
786 /* mac control */ 748 /* mac control */
787 val = 0x3; /* Enable RX and TX */ 749 val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
795 757
796 wb_data[0] = val; 758 wb_data[0] = val;
797 wb_data[1] = 0; 759 wb_data[1] = 0;
798 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, 760 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
799 wb_data, 2);
800} 761}
801 762
802static void bnx2x_update_pfc_brb(struct link_params *params, 763static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
825 full_xon_th = 786 full_xon_th =
826 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; 787 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
827 } 788 }
828 /* The number of free blocks below which the pause signal to class 0 789 /*
829 of MAC #n is asserted. n=0,1 */ 790 * The number of free blocks below which the pause signal to class 0
791 * of MAC #n is asserted. n=0,1
792 */
830 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th); 793 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
831 /* The number of free blocks above which the pause signal to class 0 794 /*
832 of MAC #n is de-asserted. n=0,1 */ 795 * The number of free blocks above which the pause signal to class 0
796 * of MAC #n is de-asserted. n=0,1
797 */
833 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th); 798 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
834 /* The number of free blocks below which the full signal to class 0 799 /*
835 of MAC #n is asserted. n=0,1 */ 800 * The number of free blocks below which the full signal to class 0
801 * of MAC #n is asserted. n=0,1
802 */
836 REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th); 803 REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
837 /* The number of free blocks above which the full signal to class 0 804 /*
838 of MAC #n is de-asserted. n=0,1 */ 805 * The number of free blocks above which the full signal to class 0
806 * of MAC #n is de-asserted. n=0,1
807 */
839 REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th); 808 REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
840 809
841 if (set_pfc && pfc_params) { 810 if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
859 full_xon_th = 828 full_xon_th =
860 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; 829 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
861 } 830 }
862 /** 831 /*
863 * The number of free blocks below which the pause signal to 832 * The number of free blocks below which the pause signal to
864 * class 1 of MAC #n is asserted. n=0,1 833 * class 1 of MAC #n is asserted. n=0,1
865 **/ 834 */
866 REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th); 835 REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
867 /** 836 /*
868 * The number of free blocks above which the pause signal to 837 * The number of free blocks above which the pause signal to
869 * class 1 of MAC #n is de-asserted. n=0,1 838 * class 1 of MAC #n is de-asserted. n=0,1
870 **/ 839 */
871 REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th); 840 REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
872 /** 841 /*
873 * The number of free blocks below which the full signal to 842 * The number of free blocks below which the full signal to
874 * class 1 of MAC #n is asserted. n=0,1 843 * class 1 of MAC #n is asserted. n=0,1
875 **/ 844 */
876 REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th); 845 REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
877 /** 846 /*
878 * The number of free blocks above which the full signal to 847 * The number of free blocks above which the full signal to
879 * class 1 of MAC #n is de-asserted. n=0,1 848 * class 1 of MAC #n is de-asserted. n=0,1
880 **/ 849 */
881 REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th); 850 REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
882 } 851 }
883} 852}
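The four thresholds per class form a hysteresis pair: pause (or full) asserts when the free-block count drops below the XOFF threshold and deasserts only once it climbs back above the XON threshold. A minimal model of that behavior (threshold values and state handling are illustrative, not the PFC_BRB_* constants):

#include <stdbool.h>
#include <stdio.h>

struct brb_class {
	unsigned int xoff_th;	/* assert below this many free blocks */
	unsigned int xon_th;	/* deassert above this many free blocks */
	bool paused;
};

static void brb_update(struct brb_class *c, unsigned int free_blocks)
{
	if (!c->paused && free_blocks < c->xoff_th)
		c->paused = true;
	else if (c->paused && free_blocks > c->xon_th)
		c->paused = false;
}

int main(void)
{
	struct brb_class c = { .xoff_th = 170, .xon_th = 250, .paused = false };
	unsigned int samples[] = { 300, 160, 200, 260 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		brb_update(&c, samples[i]);
		printf("free=%u paused=%d\n", samples[i], c.paused);
	}
	return 0;
}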
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
896 FEATURE_CONFIG_PFC_ENABLED; 865 FEATURE_CONFIG_PFC_ENABLED;
897 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); 866 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
898 867
899 /** 868 /*
900 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set 869 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
901 * MAC control frames (that are not pause packets) 870 * MAC control frames (that are not pause packets)
902 * will be forwarded to the XCM. 871 * will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
904 xcm_mask = REG_RD(bp, 873 xcm_mask = REG_RD(bp,
905 port ? NIG_REG_LLH1_XCM_MASK : 874 port ? NIG_REG_LLH1_XCM_MASK :
906 NIG_REG_LLH0_XCM_MASK); 875 NIG_REG_LLH0_XCM_MASK);
907 /** 876 /*
908 * nig params will override non PFC params, since it's possible to 877 * nig params will override non PFC params, since it's possible to
909 * do transition from PFC to SAFC 878 * do transition from PFC to SAFC
910 */ 879 */
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
994 struct link_vars *vars, 963 struct link_vars *vars,
995 struct bnx2x_nig_brb_pfc_port_params *pfc_params) 964 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
996{ 965{
997 /** 966 /*
998 * The PFC and pause are orthogonal to one another, meaning when 967 * The PFC and pause are orthogonal to one another, meaning when
 999	 * PFC is enabled, pause is disabled, and when PFC is 968	 * PFC is enabled, pause is disabled, and when PFC is
 1000	 * disabled, pause is set according to the pause result. 969	 * disabled, pause is set according to the pause result.
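In other words the resolution is strictly either/or. A compact model of the selection the comment describes (struct and field names are hypothetical, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct fc_state {
	bool pfc_enabled;
	bool pause_rx, pause_tx;
};

/*
 * The rule from the comment above: PFC on means pause off;
 * PFC off means pause follows the negotiated pause result.
 */
static void resolve_flow_control(struct fc_state *fc,
				 bool pause_result_rx, bool pause_result_tx)
{
	if (fc->pfc_enabled) {
		fc->pause_rx = false;
		fc->pause_tx = false;
	} else {
		fc->pause_rx = pause_result_rx;
		fc->pause_tx = pause_result_tx;
	}
}

int main(void)
{
	struct fc_state fc = { .pfc_enabled = true };

	resolve_flow_control(&fc, true, true);
	printf("pause_rx=%d pause_tx=%d\n", fc.pause_rx, fc.pause_tx);
	return 0;
}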
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,
1035 1004
1036static u8 bnx2x_bmac1_enable(struct link_params *params, 1005static u8 bnx2x_bmac1_enable(struct link_params *params,
1037 struct link_vars *vars, 1006 struct link_vars *vars,
1038 u8 is_lb) 1007 u8 is_lb)
1039{ 1008{
1040 struct bnx2x *bp = params->bp; 1009 struct bnx2x *bp = params->bp;
1041 u8 port = params->port; 1010 u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1049 /* XGXS control */ 1018 /* XGXS control */
1050 wb_data[0] = 0x3c; 1019 wb_data[0] = 0x3c;
1051 wb_data[1] = 0; 1020 wb_data[1] = 0;
1052 REG_WR_DMAE(bp, bmac_addr + 1021 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
1053 BIGMAC_REGISTER_BMAC_XGXS_CONTROL, 1022 wb_data, 2);
1054 wb_data, 2);
1055 1023
1056 /* tx MAC SA */ 1024 /* tx MAC SA */
1057 wb_data[0] = ((params->mac_addr[2] << 24) | 1025 wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1060 params->mac_addr[5]); 1028 params->mac_addr[5]);
1061 wb_data[1] = ((params->mac_addr[0] << 8) | 1029 wb_data[1] = ((params->mac_addr[0] << 8) |
1062 params->mac_addr[1]); 1030 params->mac_addr[1]);
1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, 1031 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
1064 wb_data, 2);
1065 1032
1066 /* mac control */ 1033 /* mac control */
1067 val = 0x3; 1034 val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1071 } 1038 }
1072 wb_data[0] = val; 1039 wb_data[0] = val;
1073 wb_data[1] = 0; 1040 wb_data[1] = 0;
1074 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 1041 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
1075 wb_data, 2);
1076 1042
1077 /* set rx mtu */ 1043 /* set rx mtu */
1078 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1044 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1079 wb_data[1] = 0; 1045 wb_data[1] = 0;
1080 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, 1046 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
1081 wb_data, 2);
1082 1047
1083 bnx2x_update_pfc_bmac1(params, vars); 1048 bnx2x_update_pfc_bmac1(params, vars);
1084 1049
1085 /* set tx mtu */ 1050 /* set tx mtu */
1086 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1051 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1087 wb_data[1] = 0; 1052 wb_data[1] = 0;
1088 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, 1053 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
1089 wb_data, 2);
1090 1054
1091 /* set cnt max size */ 1055 /* set cnt max size */
1092 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1056 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1093 wb_data[1] = 0; 1057 wb_data[1] = 0;
1094 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, 1058 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1095 wb_data, 2);
1096 1059
1097 /* configure safc */ 1060 /* configure safc */
1098 wb_data[0] = 0x1000200; 1061 wb_data[0] = 0x1000200;
1099 wb_data[1] = 0; 1062 wb_data[1] = 0;
1100 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
1101 wb_data, 2); 1064 wb_data, 2);
1102 /* fix for emulation */
1103 if (CHIP_REV_IS_EMUL(bp)) {
1104 wb_data[0] = 0xf000;
1105 wb_data[1] = 0;
1106 REG_WR_DMAE(bp,
1107 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
1108 wb_data, 2);
1109 }
1110
1111 1065
1112 return 0; 1066 return 0;
1113} 1067}
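The two-word REG_WR_DMAE writes above follow the wide-bus convention used throughout this function: a 64-bit BigMac register is supplied as a pair of u32 words. For TX_SOURCE_ADDR the six MAC bytes pack with bytes 2..5 in word 0 and bytes 0..1 in word 1, exactly as the shifts in the code show. A standalone check of that packing (the address is an example only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };	/* example */
	uint32_t wb_data[2];

	/* same packing as the TX_SOURCE_ADDR write above */
	wb_data[0] = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		     ((uint32_t)mac[4] << 8) | mac[5];
	wb_data[1] = ((uint32_t)mac[0] << 8) | mac[1];

	printf("wb_data[0]=0x%08x wb_data[1]=0x%08x\n", wb_data[0], wb_data[1]);
	return 0;
}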
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1126 1080
1127 wb_data[0] = 0; 1081 wb_data[0] = 0;
1128 wb_data[1] = 0; 1082 wb_data[1] = 0;
1129 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, 1083 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
1130 wb_data, 2);
1131 udelay(30); 1084 udelay(30);
1132 1085
1133 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ 1086 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
1134 wb_data[0] = 0x3c; 1087 wb_data[0] = 0x3c;
1135 wb_data[1] = 0; 1088 wb_data[1] = 0;
1136 REG_WR_DMAE(bp, bmac_addr + 1089 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
1137 BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, 1090 wb_data, 2);
1138 wb_data, 2);
1139 1091
1140 udelay(30); 1092 udelay(30);
1141 1093
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1147 wb_data[1] = ((params->mac_addr[0] << 8) | 1099 wb_data[1] = ((params->mac_addr[0] << 8) |
1148 params->mac_addr[1]); 1100 params->mac_addr[1]);
1149 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, 1101 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
1150 wb_data, 2); 1102 wb_data, 2);
1151 1103
1152 udelay(30); 1104 udelay(30);
1153 1105
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1155 wb_data[0] = 0x1000200; 1107 wb_data[0] = 0x1000200;
1156 wb_data[1] = 0; 1108 wb_data[1] = 0;
1157 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, 1109 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
1158 wb_data, 2); 1110 wb_data, 2);
1159 udelay(30); 1111 udelay(30);
1160 1112
1161 /* set rx mtu */ 1113 /* set rx mtu */
1162 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1114 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1163 wb_data[1] = 0; 1115 wb_data[1] = 0;
1164 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, 1116 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
1165 wb_data, 2);
1166 udelay(30); 1117 udelay(30);
1167 1118
1168 /* set tx mtu */ 1119 /* set tx mtu */
1169 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1120 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1170 wb_data[1] = 0; 1121 wb_data[1] = 0;
1171 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, 1122 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
1172 wb_data, 2);
1173 udelay(30); 1123 udelay(30);
1174 /* set cnt max size */ 1124 /* set cnt max size */
1175 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; 1125 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
1176 wb_data[1] = 0; 1126 wb_data[1] = 0;
1177 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, 1127 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1178 wb_data, 2);
1179 udelay(30); 1128 udelay(30);
1180 bnx2x_update_pfc_bmac2(params, vars, is_lb); 1129 bnx2x_update_pfc_bmac2(params, vars, is_lb);
1181 1130
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
1191 u32 val; 1140 u32 val;
1192 /* reset and unreset the BigMac */ 1141 /* reset and unreset the BigMac */
1193 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1142 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1194 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 1143 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1195 msleep(1); 1144 msleep(1);
1196 1145
1197 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1146 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1198 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 1147 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1199 1148
1200 /* enable access for bmac registers */ 1149 /* enable access for bmac registers */
1201 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); 1150 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
1230 struct bnx2x *bp = params->bp; 1179 struct bnx2x *bp = params->bp;
1231 1180
1232 REG_WR(bp, params->shmem_base + 1181 REG_WR(bp, params->shmem_base +
1233 offsetof(struct shmem_region, 1182 offsetof(struct shmem_region,
1234 port_mb[params->port].link_status), 1183 port_mb[params->port].link_status), link_status);
1235 link_status);
1236} 1184}
1237 1185
1238static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) 1186static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1239{ 1187{
1240 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 1188 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
1241 NIG_REG_INGRESS_BMAC0_MEM; 1189 NIG_REG_INGRESS_BMAC0_MEM;
1242 u32 wb_data[2]; 1190 u32 wb_data[2];
1243 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 1191 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
1244 1192
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1250 if (CHIP_IS_E2(bp)) { 1198 if (CHIP_IS_E2(bp)) {
1251 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1199 /* Clear Rx Enable bit in BMAC_CONTROL register */
1252 REG_RD_DMAE(bp, bmac_addr + 1200 REG_RD_DMAE(bp, bmac_addr +
1253 BIGMAC2_REGISTER_BMAC_CONTROL, 1201 BIGMAC2_REGISTER_BMAC_CONTROL,
1254 wb_data, 2); 1202 wb_data, 2);
1255 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 1203 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
1256 REG_WR_DMAE(bp, bmac_addr + 1204 REG_WR_DMAE(bp, bmac_addr +
1257 BIGMAC2_REGISTER_BMAC_CONTROL, 1205 BIGMAC2_REGISTER_BMAC_CONTROL,
1258 wb_data, 2); 1206 wb_data, 2);
1259 } else { 1207 } else {
1260 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1208 /* Clear Rx Enable bit in BMAC_CONTROL register */
1261 REG_RD_DMAE(bp, bmac_addr + 1209 REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1271} 1219}
1272 1220
1273static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, 1221static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1274 u32 line_speed) 1222 u32 line_speed)
1275{ 1223{
1276 struct bnx2x *bp = params->bp; 1224 struct bnx2x *bp = params->bp;
1277 u8 port = params->port; 1225 u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1308 /* update threshold */ 1256 /* update threshold */
1309 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 1257 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
1310 /* update init credit */ 1258 /* update init credit */
1311 init_crd = 778; /* (800-18-4) */ 1259 init_crd = 778; /* (800-18-4) */
1312 1260
1313 } else { 1261 } else {
1314 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 1262 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1353 return 0; 1301 return 0;
1354} 1302}
1355 1303
1304/*
1305 * get_emac_base
1306 *
 1307 * @param bp
1308 * @param mdc_mdio_access
1309 * @param port
1310 *
1311 * @return u32
1312 *
 1313 * This function selects the MDC/MDIO access (through emac0 or
 1314 * emac1) depending on the mdc_mdio_access, port and port-swap
 1315 * settings. Each phy has a default access mode, which may also
 1316 * be overridden by the nvram configuration. That parameter,
 1317 * whether it is the default phy configuration or the nvram
 1318 * override, is passed here as mdc_mdio_access and selects
 1319 * the emac_base for the CL45 read/write operations
1320 */
1356static u32 bnx2x_get_emac_base(struct bnx2x *bp, 1321static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1357 u32 mdc_mdio_access, u8 port) 1322 u32 mdc_mdio_access, u8 port)
1358{ 1323{
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1385 1350
1386} 1351}
1387 1352
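A hedged model of the selection the comment describes, not the driver's actual switch (whose SHARED_HW_CFG_MDC_MDIO_ACCESS1_* constants and register bases live in bnx2x_hsi.h and bnx2x_reg.h), might look like:

#include <stdbool.h>
#include <stdint.h>

/* illustrative stand-ins for GRCBASE_EMAC0/GRCBASE_EMAC1 */
#define EMAC0_BASE 0x8000
#define EMAC1_BASE 0x8400

enum mdc_mdio_access { ACCESS_EMAC0, ACCESS_EMAC1, ACCESS_BOTH };

/*
 * Model only: a fixed EMAC selection is flipped when the ports are
 * swapped; ACCESS_BOTH follows the (effective) port number instead.
 */
static uint32_t pick_emac_base(enum mdc_mdio_access access, unsigned int port,
			       bool ports_swapped)
{
	unsigned int eff_port = ports_swapped ? port ^ 1 : port;

	switch (access) {
	case ACCESS_EMAC0:
		return ports_swapped ? EMAC1_BASE : EMAC0_BASE;
	case ACCESS_EMAC1:
		return ports_swapped ? EMAC0_BASE : EMAC1_BASE;
	case ACCESS_BOTH:
	default:
		return eff_port ? EMAC1_BASE : EMAC0_BASE;
	}
}

int main(void)
{
	/* with ports swapped, a fixed EMAC0 access lands on EMAC1 */
	return pick_emac_base(ACCESS_EMAC0, 0, true) == EMAC1_BASE ? 0 : 1;
}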
1388u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, 1353/******************************************************************/
1389 u8 devad, u16 reg, u16 val) 1354/* CL45 access functions */
1355/******************************************************************/
1356static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1357 u8 devad, u16 reg, u16 val)
1390{ 1358{
1391 u32 tmp, saved_mode; 1359 u32 tmp, saved_mode;
1392 u8 i, rc = 0; 1360 u8 i, rc = 0;
1393 1361 /*
1394 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 1362 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1395 * (a value of 49==0x31) and make sure that the AUTO poll is off 1363 * (a value of 49==0x31) and make sure that the AUTO poll is off
1396 */ 1364 */
1397 1365
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1414 for (i = 0; i < 50; i++) { 1382 for (i = 0; i < 50; i++) {
1415 udelay(10); 1383 udelay(10);
1416 1384
1417 tmp = REG_RD(bp, phy->mdio_ctrl + 1385 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1418 EMAC_REG_EMAC_MDIO_COMM);
1419 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1386 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1420 udelay(5); 1387 udelay(5);
1421 break; 1388 break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1423 } 1390 }
1424 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1391 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1425 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1392 DP(NETIF_MSG_LINK, "write phy register failed\n");
1393 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1426 rc = -EFAULT; 1394 rc = -EFAULT;
1427 } else { 1395 } else {
1428 /* data */ 1396 /* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1435 udelay(10); 1403 udelay(10);
1436 1404
1437 tmp = REG_RD(bp, phy->mdio_ctrl + 1405 tmp = REG_RD(bp, phy->mdio_ctrl +
1438 EMAC_REG_EMAC_MDIO_COMM); 1406 EMAC_REG_EMAC_MDIO_COMM);
1439 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1407 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1440 udelay(5); 1408 udelay(5);
1441 break; 1409 break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1443 } 1411 }
1444 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1412 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1445 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1413 DP(NETIF_MSG_LINK, "write phy register failed\n");
1414 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1446 rc = -EFAULT; 1415 rc = -EFAULT;
1447 } 1416 }
1448 } 1417 }
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1453 return rc; 1422 return rc;
1454} 1423}
1455 1424
1456u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, 1425static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1457 u8 devad, u16 reg, u16 *ret_val) 1426 u8 devad, u16 reg, u16 *ret_val)
1458{ 1427{
1459 u32 val, saved_mode; 1428 u32 val, saved_mode;
1460 u16 i; 1429 u16 i;
1461 u8 rc = 0; 1430 u8 rc = 0;
1462 1431 /*
1463 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 1432 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1464 * (a value of 49==0x31) and make sure that the AUTO poll is off 1433 * (a value of 49==0x31) and make sure that the AUTO poll is off
1465 */ 1434 */
1466 1435
1467 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 1436 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1468 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL | 1437 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
1469 EMAC_MDIO_MODE_CLOCK_CNT)); 1438 EMAC_MDIO_MODE_CLOCK_CNT));
1470 val |= (EMAC_MDIO_MODE_CLAUSE_45 | 1439 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1471 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 1440 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1472 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); 1441 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1490 } 1459 }
1491 if (val & EMAC_MDIO_COMM_START_BUSY) { 1460 if (val & EMAC_MDIO_COMM_START_BUSY) {
1492 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1461 DP(NETIF_MSG_LINK, "read phy register failed\n");
1493 1462 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1494 *ret_val = 0; 1463 *ret_val = 0;
1495 rc = -EFAULT; 1464 rc = -EFAULT;
1496 1465
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1505 udelay(10); 1474 udelay(10);
1506 1475
1507 val = REG_RD(bp, phy->mdio_ctrl + 1476 val = REG_RD(bp, phy->mdio_ctrl +
1508 EMAC_REG_EMAC_MDIO_COMM); 1477 EMAC_REG_EMAC_MDIO_COMM);
1509 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1478 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1510 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 1479 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
1511 break; 1480 break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1513 } 1482 }
1514 if (val & EMAC_MDIO_COMM_START_BUSY) { 1483 if (val & EMAC_MDIO_COMM_START_BUSY) {
1515 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1484 DP(NETIF_MSG_LINK, "read phy register failed\n");
1516 1485 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1517 *ret_val = 0; 1486 *ret_val = 0;
1518 rc = -EFAULT; 1487 rc = -EFAULT;
1519 } 1488 }
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
1529 u8 devad, u16 reg, u16 *ret_val) 1498 u8 devad, u16 reg, u16 *ret_val)
1530{ 1499{
1531 u8 phy_index; 1500 u8 phy_index;
1532 /** 1501 /*
1533 * Probe for the phy according to the given phy_addr, and execute 1502 * Probe for the phy according to the given phy_addr, and execute
1534 * the read request on it 1503 * the read request on it
1535 */ 1504 */
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
1547 u8 devad, u16 reg, u16 val) 1516 u8 devad, u16 reg, u16 val)
1548{ 1517{
1549 u8 phy_index; 1518 u8 phy_index;
1550 /** 1519 /*
1551 * Probe for the phy according to the given phy_addr, and execute 1520 * Probe for the phy according to the given phy_addr, and execute
1552 * the write request on it 1521 * the write request on it
1553 */ 1522 */
@@ -1573,19 +1542,18 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
1573 1542
1574 offset = phy->addr + ser_lane; 1543 offset = phy->addr + ser_lane;
1575 if (CHIP_IS_E2(bp)) 1544 if (CHIP_IS_E2(bp))
1576 aer_val = 0x2800 + offset - 1; 1545 aer_val = 0x3800 + offset - 1;
1577 else 1546 else
1578 aer_val = 0x3800 + offset; 1547 aer_val = 0x3800 + offset;
1579 CL45_WR_OVER_CL22(bp, phy, 1548 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
1580 MDIO_REG_BANK_AER_BLOCK, 1549 MDIO_AER_BLOCK_AER_REG, aer_val);
1581 MDIO_AER_BLOCK_AER_REG, aer_val);
1582} 1550}
1583static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp, 1551static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
1584 struct bnx2x_phy *phy) 1552 struct bnx2x_phy *phy)
1585{ 1553{
1586 CL45_WR_OVER_CL22(bp, phy, 1554 CL22_WR_OVER_CL45(bp, phy,
1587 MDIO_REG_BANK_AER_BLOCK, 1555 MDIO_REG_BANK_AER_BLOCK,
1588 MDIO_AER_BLOCK_AER_REG, 0x3800); 1556 MDIO_AER_BLOCK_AER_REG, 0x3800);
1589} 1557}
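The E2 branch above moves the per-lane AER value from a 0x2800 base to 0x3800 while keeping the -1 lane adjustment. A worked check that mirrors the fixed code (address and lane values are examples only):

#include <stdint.h>
#include <stdio.h>

static uint16_t aer_val(int chip_is_e2, uint16_t phy_addr, uint16_t ser_lane)
{
	uint16_t offset = phy_addr + ser_lane;

	/* mirrors bnx2x_set_aer_mmd_xgxs() after the fix above */
	return chip_is_e2 ? 0x3800 + offset - 1 : 0x3800 + offset;
}

int main(void)
{
	printf("E2:  0x%04x\n", aer_val(1, 0x11, 2));	/* example addr/lane */
	printf("E1x: 0x%04x\n", aer_val(0, 0x11, 2));
	return 0;
}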
1590 1558
1591/******************************************************************/ 1559/******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
1621 1589
1622 bnx2x_set_serdes_access(bp, port); 1590 bnx2x_set_serdes_access(bp, port);
1623 1591
1624 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + 1592 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
1625 port*0x10, 1593 DEFAULT_PHY_DEV_ADDR);
1626 DEFAULT_PHY_DEV_ADDR);
1627} 1594}
1628 1595
1629static void bnx2x_xgxs_deassert(struct link_params *params) 1596static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
1641 udelay(500); 1608 udelay(500);
1642 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 1609 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
1643 1610
1644 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + 1611 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
1645 port*0x18, 0);
1646 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 1612 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
1647 params->phy[INT_PHY].def_md_devad); 1613 params->phy[INT_PHY].def_md_devad);
1648} 1614}
1649 1615
1650 1616
1651void bnx2x_link_status_update(struct link_params *params, 1617void bnx2x_link_status_update(struct link_params *params,
1652 struct link_vars *vars) 1618 struct link_vars *vars)
1653{ 1619{
1654 struct bnx2x *bp = params->bp; 1620 struct bnx2x *bp = params->bp;
1655 u8 link_10g; 1621 u8 link_10g;
1656 u8 port = params->port; 1622 u8 port = params->port;
1657 1623
1658 vars->link_status = REG_RD(bp, params->shmem_base + 1624 vars->link_status = REG_RD(bp, params->shmem_base +
1659 offsetof(struct shmem_region, 1625 offsetof(struct shmem_region,
1660 port_mb[port].link_status)); 1626 port_mb[port].link_status));
1661 1627
1662 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); 1628 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
1663 1629
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
1667 vars->phy_link_up = 1; 1633 vars->phy_link_up = 1;
1668 vars->duplex = DUPLEX_FULL; 1634 vars->duplex = DUPLEX_FULL;
1669 switch (vars->link_status & 1635 switch (vars->link_status &
1670 LINK_STATUS_SPEED_AND_DUPLEX_MASK) { 1636 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
1671 case LINK_10THD: 1637 case LINK_10THD:
1672 vars->duplex = DUPLEX_HALF; 1638 vars->duplex = DUPLEX_HALF;
1673 /* fall thru */ 1639 /* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
1779{ 1745{
1780 struct bnx2x *bp = params->bp; 1746 struct bnx2x *bp = params->bp;
1781 u16 new_master_ln, ser_lane; 1747 u16 new_master_ln, ser_lane;
1782 ser_lane = ((params->lane_config & 1748 ser_lane = ((params->lane_config &
1783 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1749 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1784 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1750 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1785 1751
1786 /* set the master_ln for AN */ 1752 /* set the master_ln for AN */
1787 CL45_RD_OVER_CL22(bp, phy, 1753 CL22_RD_OVER_CL45(bp, phy,
1788 MDIO_REG_BANK_XGXS_BLOCK2, 1754 MDIO_REG_BANK_XGXS_BLOCK2,
1789 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1755 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1790 &new_master_ln); 1756 &new_master_ln);
1791 1757
1792 CL45_WR_OVER_CL22(bp, phy, 1758 CL22_WR_OVER_CL45(bp, phy,
1793 MDIO_REG_BANK_XGXS_BLOCK2 , 1759 MDIO_REG_BANK_XGXS_BLOCK2 ,
1794 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1760 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1795 (new_master_ln | ser_lane)); 1761 (new_master_ln | ser_lane));
1796} 1762}
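
bnx2x_set_master_ln() above and bnx2x_set_swap_lanes() below unpack every field of params->lane_config with the same (value & MASK) >> SHIFT idiom. Reduced to a one-liner, with invented mask/shift values just to make the example checkable:

#include <assert.h>
#include <stdint.h>

static uint16_t get_cfg_field(uint32_t cfg, uint32_t mask, unsigned int shift)
{
        return (uint16_t)((cfg & mask) >> shift);
}

int main(void)
{
        /* a field occupying bits [7:4] of the config word */
        assert(get_cfg_field(0x1230, 0xf0, 4) == 0x3);
        return 0;
}
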
1797 1763
1798static u8 bnx2x_reset_unicore(struct link_params *params, 1764static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1802 struct bnx2x *bp = params->bp; 1768 struct bnx2x *bp = params->bp;
1803 u16 mii_control; 1769 u16 mii_control;
1804 u16 i; 1770 u16 i;
1805 1771 CL22_RD_OVER_CL45(bp, phy,
1806 CL45_RD_OVER_CL22(bp, phy, 1772 MDIO_REG_BANK_COMBO_IEEE0,
1807 MDIO_REG_BANK_COMBO_IEEE0, 1773 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1808 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1809 1774
1810 /* reset the unicore */ 1775 /* reset the unicore */
1811 CL45_WR_OVER_CL22(bp, phy, 1776 CL22_WR_OVER_CL45(bp, phy,
1812 MDIO_REG_BANK_COMBO_IEEE0, 1777 MDIO_REG_BANK_COMBO_IEEE0,
1813 MDIO_COMBO_IEEE0_MII_CONTROL, 1778 MDIO_COMBO_IEEE0_MII_CONTROL,
1814 (mii_control | 1779 (mii_control |
1815 MDIO_COMBO_IEEO_MII_CONTROL_RESET)); 1780 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1816 if (set_serdes) 1781 if (set_serdes)
1817 bnx2x_set_serdes_access(bp, params->port); 1782 bnx2x_set_serdes_access(bp, params->port);
1818 1783
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1821 udelay(5); 1786 udelay(5);
1822 1787
1823 /* the reset erased the previous bank value */ 1788 /* the reset erased the previous bank value */
1824 CL45_RD_OVER_CL22(bp, phy, 1789 CL22_RD_OVER_CL45(bp, phy,
1825 MDIO_REG_BANK_COMBO_IEEE0, 1790 MDIO_REG_BANK_COMBO_IEEE0,
1826 MDIO_COMBO_IEEE0_MII_CONTROL, 1791 MDIO_COMBO_IEEE0_MII_CONTROL,
1827 &mii_control); 1792 &mii_control);
1828 1793
1829 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { 1794 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
1830 udelay(5); 1795 udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1832 } 1797 }
1833 } 1798 }
1834 1799
1800 netdev_err(bp->dev, "Warning: PHY was not initialized,"
1801 " Port %d\n",
1802 params->port);
1835 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); 1803 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
1836 return -EINVAL; 1804 return -EINVAL;
1837 1805
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
1841 struct bnx2x_phy *phy) 1809 struct bnx2x_phy *phy)
1842{ 1810{
1843 struct bnx2x *bp = params->bp; 1811 struct bnx2x *bp = params->bp;
1844 /* Each two bits represents a lane number: 1812 /*
1845 No swap is 0123 => 0x1b no need to enable the swap */ 1813 * Each two bits represents a lane number:
1814 * No swap is 0123 => 0x1b no need to enable the swap
1815 */
1846 u16 ser_lane, rx_lane_swap, tx_lane_swap; 1816 u16 ser_lane, rx_lane_swap, tx_lane_swap;
1847 1817
1848 ser_lane = ((params->lane_config & 1818 ser_lane = ((params->lane_config &
1849 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1819 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1850 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1820 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1851 rx_lane_swap = ((params->lane_config & 1821 rx_lane_swap = ((params->lane_config &
1852 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> 1822 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
1853 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); 1823 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
1854 tx_lane_swap = ((params->lane_config & 1824 tx_lane_swap = ((params->lane_config &
1855 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> 1825 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
1856 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); 1826 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1857 1827
1858 if (rx_lane_swap != 0x1b) { 1828 if (rx_lane_swap != 0x1b) {
1859 CL45_WR_OVER_CL22(bp, phy, 1829 CL22_WR_OVER_CL45(bp, phy,
1860 MDIO_REG_BANK_XGXS_BLOCK2, 1830 MDIO_REG_BANK_XGXS_BLOCK2,
1861 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 1831 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1862 (rx_lane_swap | 1832 (rx_lane_swap |
1863 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | 1833 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1864 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); 1834 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1865 } else { 1835 } else {
1866 CL45_WR_OVER_CL22(bp, phy, 1836 CL22_WR_OVER_CL45(bp, phy,
1867 MDIO_REG_BANK_XGXS_BLOCK2, 1837 MDIO_REG_BANK_XGXS_BLOCK2,
1868 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); 1838 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1869 } 1839 }
1870 1840
1871 if (tx_lane_swap != 0x1b) { 1841 if (tx_lane_swap != 0x1b) {
1872 CL45_WR_OVER_CL22(bp, phy, 1842 CL22_WR_OVER_CL45(bp, phy,
1873 MDIO_REG_BANK_XGXS_BLOCK2, 1843 MDIO_REG_BANK_XGXS_BLOCK2,
1874 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 1844 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1875 (tx_lane_swap | 1845 (tx_lane_swap |
1876 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); 1846 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1877 } else { 1847 } else {
1878 CL45_WR_OVER_CL22(bp, phy, 1848 CL22_WR_OVER_CL45(bp, phy,
1879 MDIO_REG_BANK_XGXS_BLOCK2, 1849 MDIO_REG_BANK_XGXS_BLOCK2,
1880 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); 1850 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1881 } 1851 }
1882} 1852}
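
The "No swap is 0123 => 0x1b" comment is worth unpacking: the swap word carries four two-bit lane indices, and the identity map 0,1,2,3 encodes as 0x1b (0b00011011) only if lane 0 occupies the top field. That ordering is inferred from the comment's single example, so treat the packing below as a reading of it rather than a statement of the register layout:

#include <assert.h>
#include <stdint.h>

/* Pack four two-bit lane indices; the identity map must yield 0x1b. */
static uint16_t pack_lane_map(unsigned int l0, unsigned int l1,
                              unsigned int l2, unsigned int l3)
{
        return (uint16_t)((l0 << 6) | (l1 << 4) | (l2 << 2) | l3);
}

int main(void)
{
        assert(pack_lane_map(0, 1, 2, 3) == 0x1b); /* no swap needed */
        return 0;
}
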
1883 1853
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
1886{ 1856{
1887 struct bnx2x *bp = params->bp; 1857 struct bnx2x *bp = params->bp;
1888 u16 control2; 1858 u16 control2;
1889 CL45_RD_OVER_CL22(bp, phy, 1859 CL22_RD_OVER_CL45(bp, phy,
1890 MDIO_REG_BANK_SERDES_DIGITAL, 1860 MDIO_REG_BANK_SERDES_DIGITAL,
1891 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1861 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1892 &control2); 1862 &control2);
1893 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1863 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1894 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1864 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1895 else 1865 else
1896 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1866 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1897 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", 1867 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1898 phy->speed_cap_mask, control2); 1868 phy->speed_cap_mask, control2);
1899 CL45_WR_OVER_CL22(bp, phy, 1869 CL22_WR_OVER_CL45(bp, phy,
1900 MDIO_REG_BANK_SERDES_DIGITAL, 1870 MDIO_REG_BANK_SERDES_DIGITAL,
1901 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1871 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1902 control2); 1872 control2);
1903 1873
1904 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 1874 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
1905 (phy->speed_cap_mask & 1875 (phy->speed_cap_mask &
1906 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 1876 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1907 DP(NETIF_MSG_LINK, "XGXS\n"); 1877 DP(NETIF_MSG_LINK, "XGXS\n");
1908 1878
1909 CL45_WR_OVER_CL22(bp, phy, 1879 CL22_WR_OVER_CL45(bp, phy,
1910 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1880 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1911 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 1881 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1912 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); 1882 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1913 1883
1914 CL45_RD_OVER_CL22(bp, phy, 1884 CL22_RD_OVER_CL45(bp, phy,
1915 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1885 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1916 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1886 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1917 &control2); 1887 &control2);
1918 1888
1919 1889
1920 control2 |= 1890 control2 |=
1921 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 1891 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1922 1892
1923 CL45_WR_OVER_CL22(bp, phy, 1893 CL22_WR_OVER_CL45(bp, phy,
1924 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1894 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1925 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1895 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1926 control2); 1896 control2);
1927 1897
1928 /* Disable parallel detection of HiG */ 1898 /* Disable parallel detection of HiG */
1929 CL45_WR_OVER_CL22(bp, phy, 1899 CL22_WR_OVER_CL45(bp, phy,
1930 MDIO_REG_BANK_XGXS_BLOCK2, 1900 MDIO_REG_BANK_XGXS_BLOCK2,
1931 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, 1901 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1932 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | 1902 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
1933 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); 1903 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
1934 } 1904 }
1935} 1905}
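
bnx2x_set_parallel_detection() is built from read-modify-write cycles over the banked registers: fetch control2, flip PRL_DT_EN according to the 1G capability bit, write it back, then repeat for the 10G parallel-detect control. The skeleton of one such cycle, with function pointers standing in for the CL22_RD_OVER_CL45/CL22_WR_OVER_CL45 accessors:

#include <stdint.h>

void rmw_bit(uint16_t (*rd)(uint16_t reg),
             void (*wr)(uint16_t reg, uint16_t val),
             uint16_t reg, uint16_t bit, int set)
{
        uint16_t val = rd(reg);        /* read   */

        if (set)
                val |= bit;            /* modify: enable  */
        else
                val &= (uint16_t)~bit; /* modify: disable */
        wr(reg, val);                  /* write  */
}
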
1936 1906
1937static void bnx2x_set_autoneg(struct bnx2x_phy *phy, 1907static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1938 struct link_params *params, 1908 struct link_params *params,
1939 struct link_vars *vars, 1909 struct link_vars *vars,
1940 u8 enable_cl73) 1910 u8 enable_cl73)
1941{ 1911{
1942 struct bnx2x *bp = params->bp; 1912 struct bnx2x *bp = params->bp;
1943 u16 reg_val; 1913 u16 reg_val;
1944 1914
1945 /* CL37 Autoneg */ 1915 /* CL37 Autoneg */
1946 CL45_RD_OVER_CL22(bp, phy, 1916 CL22_RD_OVER_CL45(bp, phy,
1947 MDIO_REG_BANK_COMBO_IEEE0, 1917 MDIO_REG_BANK_COMBO_IEEE0,
1948 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1918 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1949 1919
1950 /* CL37 Autoneg Enabled */ 1920 /* CL37 Autoneg Enabled */
1951 if (vars->line_speed == SPEED_AUTO_NEG) 1921 if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1954 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1924 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1955 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); 1925 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1956 1926
1957 CL45_WR_OVER_CL22(bp, phy, 1927 CL22_WR_OVER_CL45(bp, phy,
1958 MDIO_REG_BANK_COMBO_IEEE0, 1928 MDIO_REG_BANK_COMBO_IEEE0,
1959 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 1929 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1960 1930
1961 /* Enable/Disable Autodetection */ 1931 /* Enable/Disable Autodetection */
1962 1932
1963 CL45_RD_OVER_CL22(bp, phy, 1933 CL22_RD_OVER_CL45(bp, phy,
1964 MDIO_REG_BANK_SERDES_DIGITAL, 1934 MDIO_REG_BANK_SERDES_DIGITAL,
1965 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1935 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1966 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | 1936 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
1967 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); 1937 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
1968 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; 1938 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1971 else 1941 else
1972 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1942 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1973 1943
1974 CL45_WR_OVER_CL22(bp, phy, 1944 CL22_WR_OVER_CL45(bp, phy,
1975 MDIO_REG_BANK_SERDES_DIGITAL, 1945 MDIO_REG_BANK_SERDES_DIGITAL,
1976 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); 1946 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1977 1947
1978 /* Enable TetonII and BAM autoneg */ 1948 /* Enable TetonII and BAM autoneg */
1979 CL45_RD_OVER_CL22(bp, phy, 1949 CL22_RD_OVER_CL45(bp, phy,
1980 MDIO_REG_BANK_BAM_NEXT_PAGE, 1950 MDIO_REG_BANK_BAM_NEXT_PAGE,
1981 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1951 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1982 &reg_val); 1952 &reg_val);
1983 if (vars->line_speed == SPEED_AUTO_NEG) { 1953 if (vars->line_speed == SPEED_AUTO_NEG) {
1984 /* Enable BAM aneg Mode and TetonII aneg Mode */ 1954 /* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1989 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1959 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1990 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1960 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1991 } 1961 }
1992 CL45_WR_OVER_CL22(bp, phy, 1962 CL22_WR_OVER_CL45(bp, phy,
1993 MDIO_REG_BANK_BAM_NEXT_PAGE, 1963 MDIO_REG_BANK_BAM_NEXT_PAGE,
1994 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1964 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1995 reg_val); 1965 reg_val);
1996 1966
1997 if (enable_cl73) { 1967 if (enable_cl73) {
1998 /* Enable Cl73 FSM status bits */ 1968 /* Enable Cl73 FSM status bits */
1999 CL45_WR_OVER_CL22(bp, phy, 1969 CL22_WR_OVER_CL45(bp, phy,
2000 MDIO_REG_BANK_CL73_USERB0, 1970 MDIO_REG_BANK_CL73_USERB0,
2001 MDIO_CL73_USERB0_CL73_UCTRL, 1971 MDIO_CL73_USERB0_CL73_UCTRL,
2002 0xe); 1972 0xe);
2003 1973
2004 /* Enable BAM Station Manager*/ 1974 /* Enable BAM Station Manager*/
2005 CL45_WR_OVER_CL22(bp, phy, 1975 CL22_WR_OVER_CL45(bp, phy,
2006 MDIO_REG_BANK_CL73_USERB0, 1976 MDIO_REG_BANK_CL73_USERB0,
2007 MDIO_CL73_USERB0_CL73_BAM_CTRL1, 1977 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2008 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | 1978 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2010 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1980 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
2011 1981
2012 /* Advertise CL73 link speeds */ 1982 /* Advertise CL73 link speeds */
2013 CL45_RD_OVER_CL22(bp, phy, 1983 CL22_RD_OVER_CL45(bp, phy,
2014 MDIO_REG_BANK_CL73_IEEEB1, 1984 MDIO_REG_BANK_CL73_IEEEB1,
2015 MDIO_CL73_IEEEB1_AN_ADV2, 1985 MDIO_CL73_IEEEB1_AN_ADV2,
2016 &reg_val); 1986 &reg_val);
2017 if (phy->speed_cap_mask & 1987 if (phy->speed_cap_mask &
2018 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1988 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2019 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; 1989 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2021 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1991 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
2022 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; 1992 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2023 1993
2024 CL45_WR_OVER_CL22(bp, phy, 1994 CL22_WR_OVER_CL45(bp, phy,
2025 MDIO_REG_BANK_CL73_IEEEB1, 1995 MDIO_REG_BANK_CL73_IEEEB1,
2026 MDIO_CL73_IEEEB1_AN_ADV2, 1996 MDIO_CL73_IEEEB1_AN_ADV2,
2027 reg_val); 1997 reg_val);
2028 1998
2029 /* CL73 Autoneg Enabled */ 1999 /* CL73 Autoneg Enabled */
2030 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; 2000 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2032 } else /* CL73 Autoneg Disabled */ 2002 } else /* CL73 Autoneg Disabled */
2033 reg_val = 0; 2003 reg_val = 0;
2034 2004
2035 CL45_WR_OVER_CL22(bp, phy, 2005 CL22_WR_OVER_CL45(bp, phy,
2036 MDIO_REG_BANK_CL73_IEEEB0, 2006 MDIO_REG_BANK_CL73_IEEEB0,
2037 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); 2007 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2038} 2008}
2039 2009
2040/* program SerDes, forced speed */ 2010/* program SerDes, forced speed */
2041static void bnx2x_program_serdes(struct bnx2x_phy *phy, 2011static void bnx2x_program_serdes(struct bnx2x_phy *phy,
2042 struct link_params *params, 2012 struct link_params *params,
2043 struct link_vars *vars) 2013 struct link_vars *vars)
2044{ 2014{
2045 struct bnx2x *bp = params->bp; 2015 struct bnx2x *bp = params->bp;
2046 u16 reg_val; 2016 u16 reg_val;
2047 2017
2048 /* program duplex, disable autoneg and sgmii*/ 2018 /* program duplex, disable autoneg and sgmii*/
2049 CL45_RD_OVER_CL22(bp, phy, 2019 CL22_RD_OVER_CL45(bp, phy,
2050 MDIO_REG_BANK_COMBO_IEEE0, 2020 MDIO_REG_BANK_COMBO_IEEE0,
2051 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 2021 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2052 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | 2022 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2053 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2023 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2054 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); 2024 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
2055 if (phy->req_duplex == DUPLEX_FULL) 2025 if (phy->req_duplex == DUPLEX_FULL)
2056 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2026 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2057 CL45_WR_OVER_CL22(bp, phy, 2027 CL22_WR_OVER_CL45(bp, phy,
2058 MDIO_REG_BANK_COMBO_IEEE0, 2028 MDIO_REG_BANK_COMBO_IEEE0,
2059 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 2029 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2060 2030
2061 /* program speed 2031 /*
2062 - needed only if the speed is greater than 1G (2.5G or 10G) */ 2032 * program speed
2063 CL45_RD_OVER_CL22(bp, phy, 2033 * - needed only if the speed is greater than 1G (2.5G or 10G)
2064 MDIO_REG_BANK_SERDES_DIGITAL, 2034 */
2065 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 2035 CL22_RD_OVER_CL45(bp, phy,
2036 MDIO_REG_BANK_SERDES_DIGITAL,
2037 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2066 /* clearing the speed value before setting the right speed */ 2038 /* clearing the speed value before setting the right speed */
2067 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); 2039 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
2068 2040
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
2083 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 2055 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
2084 } 2056 }
2085 2057
2086 CL45_WR_OVER_CL22(bp, phy, 2058 CL22_WR_OVER_CL45(bp, phy,
2087 MDIO_REG_BANK_SERDES_DIGITAL, 2059 MDIO_REG_BANK_SERDES_DIGITAL,
2088 MDIO_SERDES_DIGITAL_MISC1, reg_val); 2060 MDIO_SERDES_DIGITAL_MISC1, reg_val);
2089 2061
2090} 2062}
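
bnx2x_program_serdes() follows the usual forced-speed sequence: clear autoneg and the SGMII speed bits in MII control, set duplex, and — only for speeds above 1G — clear the speed field of SERDES_DIGITAL_MISC1 before writing the forced-speed code (the 13G constant is visible in the hunk above). A compressed model of that last step; the field mask here is illustrative, not the real register layout:

#include <stdint.h>

#define MISC1_FORCE_SPEED_MASK 0xf /* illustrative: assume low nibble */

/* Clear the stale forced-speed code, then install the new one. */
uint16_t set_forced_speed(uint16_t misc1, uint16_t speed_code)
{
        misc1 &= (uint16_t)~MISC1_FORCE_SPEED_MASK;
        return misc1 | (speed_code & MISC1_FORCE_SPEED_MASK);
}
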
2091 2063
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
2102 val |= MDIO_OVER_1G_UP1_2_5G; 2074 val |= MDIO_OVER_1G_UP1_2_5G;
2103 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2075 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2104 val |= MDIO_OVER_1G_UP1_10G; 2076 val |= MDIO_OVER_1G_UP1_10G;
2105 CL45_WR_OVER_CL22(bp, phy, 2077 CL22_WR_OVER_CL45(bp, phy,
2106 MDIO_REG_BANK_OVER_1G, 2078 MDIO_REG_BANK_OVER_1G,
2107 MDIO_OVER_1G_UP1, val); 2079 MDIO_OVER_1G_UP1, val);
2108 2080
2109 CL45_WR_OVER_CL22(bp, phy, 2081 CL22_WR_OVER_CL45(bp, phy,
2110 MDIO_REG_BANK_OVER_1G, 2082 MDIO_REG_BANK_OVER_1G,
2111 MDIO_OVER_1G_UP3, 0x400); 2083 MDIO_OVER_1G_UP3, 0x400);
2112} 2084}
2113 2085
2114static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, 2086static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2116{ 2088{
2117 struct bnx2x *bp = params->bp; 2089 struct bnx2x *bp = params->bp;
2118 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 2090 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2119 /* resolve pause mode and advertisement 2091 /*
2120 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 2092 * Resolve pause mode and advertisement.
2093 * Please refer to Table 28B-3 of the 802.3ab-1999 spec
2094 */
2121 2095
2122 switch (phy->req_flow_ctrl) { 2096 switch (phy->req_flow_ctrl) {
2123 case BNX2X_FLOW_CTRL_AUTO: 2097 case BNX2X_FLOW_CTRL_AUTO:
2124 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { 2098 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
2125 *ieee_fc |= 2099 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2126 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 2100 else
2127 } else {
2128 *ieee_fc |= 2101 *ieee_fc |=
2129 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 2102 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2130 }
2131 break; 2103 break;
2132 case BNX2X_FLOW_CTRL_TX: 2104 case BNX2X_FLOW_CTRL_TX:
2133 *ieee_fc |= 2105 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2134 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2135 break; 2106 break;
2136 2107
2137 case BNX2X_FLOW_CTRL_RX: 2108 case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2149 2120
2150static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy, 2121static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
2151 struct link_params *params, 2122 struct link_params *params,
2152 u16 ieee_fc) 2123 u16 ieee_fc)
2153{ 2124{
2154 struct bnx2x *bp = params->bp; 2125 struct bnx2x *bp = params->bp;
2155 u16 val; 2126 u16 val;
2156 /* for AN, we are always publishing full duplex */ 2127 /* for AN, we are always publishing full duplex */
2157 2128
2158 CL45_WR_OVER_CL22(bp, phy, 2129 CL22_WR_OVER_CL45(bp, phy,
2159 MDIO_REG_BANK_COMBO_IEEE0, 2130 MDIO_REG_BANK_COMBO_IEEE0,
2160 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 2131 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
2161 CL45_RD_OVER_CL22(bp, phy, 2132 CL22_RD_OVER_CL45(bp, phy,
2162 MDIO_REG_BANK_CL73_IEEEB1, 2133 MDIO_REG_BANK_CL73_IEEEB1,
2163 MDIO_CL73_IEEEB1_AN_ADV1, &val); 2134 MDIO_CL73_IEEEB1_AN_ADV1, &val);
2164 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; 2135 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
2165 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); 2136 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
2166 CL45_WR_OVER_CL22(bp, phy, 2137 CL22_WR_OVER_CL45(bp, phy,
2167 MDIO_REG_BANK_CL73_IEEEB1, 2138 MDIO_REG_BANK_CL73_IEEEB1,
2168 MDIO_CL73_IEEEB1_AN_ADV1, val); 2139 MDIO_CL73_IEEEB1_AN_ADV1, val);
2169} 2140}
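
The val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) line is the CL37-to-CL73 translation: the same pause/asymmetric-pause pair exists in both advertisement words, three bit positions apart, so one shift re-homes both bits at once. Assuming the conventional positions (PAUSE at bit 7 and ASM_DIR at bit 8 in the CL37 word; bits 10 and 11 in CL73 AN_ADV1 — the diff does not spell these out), the shift checks out:

#include <assert.h>
#include <stdint.h>

#define CL37_PAUSE      (1u << 7)  /* assumed bit positions */
#define CL37_ASM_DIR    (1u << 8)
#define CL73_PAUSE      (1u << 10)
#define CL73_ASM_DIR    (1u << 11)
#define CL73_PAUSE_MASK (CL73_PAUSE | CL73_ASM_DIR)

int main(void)
{
        uint16_t ieee_fc = CL37_PAUSE | CL37_ASM_DIR;

        /* the <<3 from the hunk above moves both bits into place */
        assert(((ieee_fc << 3) & CL73_PAUSE_MASK)
               == (CL73_PAUSE | CL73_ASM_DIR));
        return 0;
}
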
2170 2141
2171static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, 2142static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
2179 /* Enable and restart BAM/CL37 aneg */ 2150 /* Enable and restart BAM/CL37 aneg */
2180 2151
2181 if (enable_cl73) { 2152 if (enable_cl73) {
2182 CL45_RD_OVER_CL22(bp, phy, 2153 CL22_RD_OVER_CL45(bp, phy,
2183 MDIO_REG_BANK_CL73_IEEEB0, 2154 MDIO_REG_BANK_CL73_IEEEB0,
2184 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2155 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2185 &mii_control); 2156 &mii_control);
2186 2157
2187 CL45_WR_OVER_CL22(bp, phy, 2158 CL22_WR_OVER_CL45(bp, phy,
2188 MDIO_REG_BANK_CL73_IEEEB0, 2159 MDIO_REG_BANK_CL73_IEEEB0,
2189 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2160 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2190 (mii_control | 2161 (mii_control |
2191 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | 2162 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
2192 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); 2163 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
2193 } else { 2164 } else {
2194 2165
2195 CL45_RD_OVER_CL22(bp, phy, 2166 CL22_RD_OVER_CL45(bp, phy,
2196 MDIO_REG_BANK_COMBO_IEEE0, 2167 MDIO_REG_BANK_COMBO_IEEE0,
2197 MDIO_COMBO_IEEE0_MII_CONTROL, 2168 MDIO_COMBO_IEEE0_MII_CONTROL,
2198 &mii_control); 2169 &mii_control);
2199 DP(NETIF_MSG_LINK, 2170 DP(NETIF_MSG_LINK,
2200 "bnx2x_restart_autoneg mii_control before = 0x%x\n", 2171 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
2201 mii_control); 2172 mii_control);
2202 CL45_WR_OVER_CL22(bp, phy, 2173 CL22_WR_OVER_CL45(bp, phy,
2203 MDIO_REG_BANK_COMBO_IEEE0, 2174 MDIO_REG_BANK_COMBO_IEEE0,
2204 MDIO_COMBO_IEEE0_MII_CONTROL, 2175 MDIO_COMBO_IEEE0_MII_CONTROL,
2205 (mii_control | 2176 (mii_control |
2206 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2177 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2207 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); 2178 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
2208 } 2179 }
2209} 2180}
2210 2181
2211static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, 2182static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2212 struct link_params *params, 2183 struct link_params *params,
2213 struct link_vars *vars) 2184 struct link_vars *vars)
2214{ 2185{
2215 struct bnx2x *bp = params->bp; 2186 struct bnx2x *bp = params->bp;
2216 u16 control1; 2187 u16 control1;
2217 2188
2218 /* in SGMII mode, the unicore is always slave */ 2189 /* in SGMII mode, the unicore is always slave */
2219 2190
2220 CL45_RD_OVER_CL22(bp, phy, 2191 CL22_RD_OVER_CL45(bp, phy,
2221 MDIO_REG_BANK_SERDES_DIGITAL, 2192 MDIO_REG_BANK_SERDES_DIGITAL,
2222 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2193 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2223 &control1); 2194 &control1);
2224 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; 2195 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
2225 /* set sgmii mode (and not fiber) */ 2196 /* set sgmii mode (and not fiber) */
2226 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 2197 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
2227 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 2198 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
2228 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 2199 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
2229 CL45_WR_OVER_CL22(bp, phy, 2200 CL22_WR_OVER_CL45(bp, phy,
2230 MDIO_REG_BANK_SERDES_DIGITAL, 2201 MDIO_REG_BANK_SERDES_DIGITAL,
2231 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2202 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2232 control1); 2203 control1);
2233 2204
2234 /* if forced speed */ 2205 /* if forced speed */
2235 if (!(vars->line_speed == SPEED_AUTO_NEG)) { 2206 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
2236 /* set speed, disable autoneg */ 2207 /* set speed, disable autoneg */
2237 u16 mii_control; 2208 u16 mii_control;
2238 2209
2239 CL45_RD_OVER_CL22(bp, phy, 2210 CL22_RD_OVER_CL45(bp, phy,
2240 MDIO_REG_BANK_COMBO_IEEE0, 2211 MDIO_REG_BANK_COMBO_IEEE0,
2241 MDIO_COMBO_IEEE0_MII_CONTROL, 2212 MDIO_COMBO_IEEE0_MII_CONTROL,
2242 &mii_control); 2213 &mii_control);
2243 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2214 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2244 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 2215 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
2245 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 2216 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2267 if (phy->req_duplex == DUPLEX_FULL) 2238 if (phy->req_duplex == DUPLEX_FULL)
2268 mii_control |= 2239 mii_control |=
2269 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2240 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2270 CL45_WR_OVER_CL22(bp, phy, 2241 CL22_WR_OVER_CL45(bp, phy,
2271 MDIO_REG_BANK_COMBO_IEEE0, 2242 MDIO_REG_BANK_COMBO_IEEE0,
2272 MDIO_COMBO_IEEE0_MII_CONTROL, 2243 MDIO_COMBO_IEEE0_MII_CONTROL,
2273 mii_control); 2244 mii_control);
2274 2245
2275 } else { /* AN mode */ 2246 } else { /* AN mode */
2276 /* enable and restart AN */ 2247 /* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2285 2256
2286static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 2257static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
2287{ /* LD LP */ 2258{ /* LD LP */
2288 switch (pause_result) { /* ASYM P ASYM P */ 2259 switch (pause_result) { /* ASYM P ASYM P */
2289 case 0xb: /* 1 0 1 1 */ 2260 case 0xb: /* 1 0 1 1 */
2290 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; 2261 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
2291 break; 2262 break;
2292 2263
2293 case 0xe: /* 1 1 1 0 */ 2264 case 0xe: /* 1 1 1 0 */
2294 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; 2265 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
2295 break; 2266 break;
2296 2267
2297 case 0x5: /* 0 1 0 1 */ 2268 case 0x5: /* 0 1 0 1 */
2298 case 0x7: /* 0 1 1 1 */ 2269 case 0x7: /* 0 1 1 1 */
2299 case 0xd: /* 1 1 0 1 */ 2270 case 0xd: /* 1 1 0 1 */
2300 case 0xf: /* 1 1 1 1 */ 2271 case 0xf: /* 1 1 1 1 */
2301 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; 2272 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
2302 break; 2273 break;
2303 2274
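
The pause_result nibble decoded above packs the local (LD) and link-partner (LP) advertisements as [LD_ASYM, LD_PAUSE, LP_ASYM, LP_PAUSE] from bit 3 down to bit 0, exactly as the column comments say. Worked through case 0xb (binary 1011): the local side advertised asymmetric-only while the partner advertised both, which 802.3 Table 28B-3 resolves to TX-only pause — matching the switch. The same decode as a self-checking sketch (the no-pause default is elided from the hunk, so it is an assumption here):

#include <assert.h>

enum flow { FC_NONE, FC_TX, FC_RX, FC_BOTH };

/* nibble: bit3 LD_ASYM, bit2 LD_PAUSE, bit1 LP_ASYM, bit0 LP_PAUSE */
static enum flow resolve(unsigned int pr)
{
        int ld_asym = pr & 8, ld_p = pr & 4, lp_asym = pr & 2, lp_p = pr & 1;

        if (ld_p && lp_p)
                return FC_BOTH;                  /* 0x5, 0x7, 0xd, 0xf */
        if (ld_asym && !ld_p && lp_asym && lp_p)
                return FC_TX;                    /* 0xb */
        if (ld_asym && ld_p && lp_asym && !lp_p)
                return FC_RX;                    /* 0xe */
        return FC_NONE;
}

int main(void)
{
        assert(resolve(0xb) == FC_TX);
        assert(resolve(0xe) == FC_RX);
        assert(resolve(0xf) == FC_BOTH);
        assert(resolve(0x0) == FC_NONE);
        return 0;
}
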
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
2317 u16 pd_10g, status2_1000x; 2288 u16 pd_10g, status2_1000x;
2318 if (phy->req_line_speed != SPEED_AUTO_NEG) 2289 if (phy->req_line_speed != SPEED_AUTO_NEG)
2319 return 0; 2290 return 0;
2320 CL45_RD_OVER_CL22(bp, phy, 2291 CL22_RD_OVER_CL45(bp, phy,
2321 MDIO_REG_BANK_SERDES_DIGITAL, 2292 MDIO_REG_BANK_SERDES_DIGITAL,
2322 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2293 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
2323 &status2_1000x); 2294 &status2_1000x);
2324 CL45_RD_OVER_CL22(bp, phy, 2295 CL22_RD_OVER_CL45(bp, phy,
2325 MDIO_REG_BANK_SERDES_DIGITAL, 2296 MDIO_REG_BANK_SERDES_DIGITAL,
2326 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2297 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
2327 &status2_1000x); 2298 &status2_1000x);
2328 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { 2299 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
2329 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", 2300 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
2330 params->port); 2301 params->port);
2331 return 1; 2302 return 1;
2332 } 2303 }
2333 2304
2334 CL45_RD_OVER_CL22(bp, phy, 2305 CL22_RD_OVER_CL45(bp, phy,
2335 MDIO_REG_BANK_10G_PARALLEL_DETECT, 2306 MDIO_REG_BANK_10G_PARALLEL_DETECT,
2336 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, 2307 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
2337 &pd_10g); 2308 &pd_10g);
2338 2309
2339 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { 2310 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
2340 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", 2311 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
2373 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 2344 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
2374 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { 2345 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
2375 2346
2376 CL45_RD_OVER_CL22(bp, phy, 2347 CL22_RD_OVER_CL45(bp, phy,
2377 MDIO_REG_BANK_CL73_IEEEB1, 2348 MDIO_REG_BANK_CL73_IEEEB1,
2378 MDIO_CL73_IEEEB1_AN_ADV1, 2349 MDIO_CL73_IEEEB1_AN_ADV1,
2379 &ld_pause); 2350 &ld_pause);
2380 CL45_RD_OVER_CL22(bp, phy, 2351 CL22_RD_OVER_CL45(bp, phy,
2381 MDIO_REG_BANK_CL73_IEEEB1, 2352 MDIO_REG_BANK_CL73_IEEEB1,
2382 MDIO_CL73_IEEEB1_AN_LP_ADV1, 2353 MDIO_CL73_IEEEB1_AN_LP_ADV1,
2383 &lp_pause); 2354 &lp_pause);
2384 pause_result = (ld_pause & 2355 pause_result = (ld_pause &
2385 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) 2356 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
2386 >> 8; 2357 >> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
2390 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", 2361 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
2391 pause_result); 2362 pause_result);
2392 } else { 2363 } else {
2393 CL45_RD_OVER_CL22(bp, phy, 2364 CL22_RD_OVER_CL45(bp, phy,
2394 MDIO_REG_BANK_COMBO_IEEE0, 2365 MDIO_REG_BANK_COMBO_IEEE0,
2395 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 2366 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
2396 &ld_pause); 2367 &ld_pause);
2397 CL45_RD_OVER_CL22(bp, phy, 2368 CL22_RD_OVER_CL45(bp, phy,
2398 MDIO_REG_BANK_COMBO_IEEE0, 2369 MDIO_REG_BANK_COMBO_IEEE0,
2399 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 2370 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
2400 &lp_pause); 2371 &lp_pause);
2401 pause_result = (ld_pause & 2372 pause_result = (ld_pause &
2402 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 2373 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
2403 pause_result |= (lp_pause & 2374 pause_result |= (lp_pause &
2404 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 2375 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
2405 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", 2376 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
2406 pause_result); 2377 pause_result);
2407 } 2378 }
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2417 u16 rx_status, ustat_val, cl37_fsm_recieved; 2388 u16 rx_status, ustat_val, cl37_fsm_recieved;
2418 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); 2389 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
2419 /* Step 1: Make sure signal is detected */ 2390 /* Step 1: Make sure signal is detected */
2420 CL45_RD_OVER_CL22(bp, phy, 2391 CL22_RD_OVER_CL45(bp, phy,
2421 MDIO_REG_BANK_RX0, 2392 MDIO_REG_BANK_RX0,
2422 MDIO_RX0_RX_STATUS, 2393 MDIO_RX0_RX_STATUS,
2423 &rx_status); 2394 &rx_status);
2424 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != 2395 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
2425 (MDIO_RX0_RX_STATUS_SIGDET)) { 2396 (MDIO_RX0_RX_STATUS_SIGDET)) {
2426 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 2397 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
2427 "rx_status(0x80b0) = 0x%x\n", rx_status); 2398 "rx_status(0x80b0) = 0x%x\n", rx_status);
2428 CL45_WR_OVER_CL22(bp, phy, 2399 CL22_WR_OVER_CL45(bp, phy,
2429 MDIO_REG_BANK_CL73_IEEEB0, 2400 MDIO_REG_BANK_CL73_IEEEB0,
2430 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2401 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2431 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); 2402 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
2432 return; 2403 return;
2433 } 2404 }
2434 /* Step 2: Check CL73 state machine */ 2405 /* Step 2: Check CL73 state machine */
2435 CL45_RD_OVER_CL22(bp, phy, 2406 CL22_RD_OVER_CL45(bp, phy,
2436 MDIO_REG_BANK_CL73_USERB0, 2407 MDIO_REG_BANK_CL73_USERB0,
2437 MDIO_CL73_USERB0_CL73_USTAT1, 2408 MDIO_CL73_USERB0_CL73_USTAT1,
2438 &ustat_val); 2409 &ustat_val);
2439 if ((ustat_val & 2410 if ((ustat_val &
2440 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | 2411 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
2441 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != 2412 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2445 "ustat_val(0x8371) = 0x%x\n", ustat_val); 2416 "ustat_val(0x8371) = 0x%x\n", ustat_val);
2446 return; 2417 return;
2447 } 2418 }
2448 /* Step 3: Check CL37 Message Pages received to indicate LP 2419 /*
2449 supports only CL37 */ 2420 * Step 3: Check CL37 Message Pages received to indicate LP
2450 CL45_RD_OVER_CL22(bp, phy, 2421 * supports only CL37
2451 MDIO_REG_BANK_REMOTE_PHY, 2422 */
2452 MDIO_REMOTE_PHY_MISC_RX_STATUS, 2423 CL22_RD_OVER_CL45(bp, phy,
2453 &cl37_fsm_recieved); 2424 MDIO_REG_BANK_REMOTE_PHY,
2425 MDIO_REMOTE_PHY_MISC_RX_STATUS,
2426 &cl37_fsm_recieved);
2454 if ((cl37_fsm_recieved & 2427 if ((cl37_fsm_recieved &
2455 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | 2428 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
2456 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != 2429 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2461 cl37_fsm_recieved); 2434 cl37_fsm_recieved);
2462 return; 2435 return;
2463 } 2436 }
2464 /* The combined cl37/cl73 fsm state information indicating that we are 2437 /*
2465 connected to a device which does not support cl73, but does support 2438 * The combined cl37/cl73 fsm state information indicating that
2466 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */ 2439 * we are connected to a device which does not support cl73, but
2440 * does support cl37 BAM. In this case we disable cl73 and
2441 * restart cl37 auto-neg
2442 */
2443
2467 /* Disable CL73 */ 2444 /* Disable CL73 */
2468 CL45_WR_OVER_CL22(bp, phy, 2445 CL22_WR_OVER_CL45(bp, phy,
2469 MDIO_REG_BANK_CL73_IEEEB0, 2446 MDIO_REG_BANK_CL73_IEEEB0,
2470 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2447 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2471 0); 2448 0);
2472 /* Restart CL37 autoneg */ 2449 /* Restart CL37 autoneg */
2473 bnx2x_restart_autoneg(phy, params, 0); 2450 bnx2x_restart_autoneg(phy, params, 0);
2474 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); 2451 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
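
bnx2x_check_fallback_to_cl37() is a three-gate filter, and every gate bails out without falling back: no signal detected (step 1) restores CL73 and returns; a CL73 FSM outside the link-status/BAM37 check states (step 2) returns; no CL37 OVER1G/BRCM-OUI message pages from the partner (step 3) returns. Only when all three pass does it conclude the partner is CL37-BAM-only, disable CL73 and restart CL37. The control flow compressed into a runnable sketch — the predicates are descriptive stubs for the register checks, not real driver calls:

#include <stdbool.h>
#include <stdio.h>

static bool signal_detected(void)             { return true; } /* stub */
static bool cl73_fsm_in_check_state(void)     { return true; } /* stub */
static bool cl37_message_pages_received(void) { return true; } /* stub */

static void check_fallback_to_cl37(void)
{
        if (!signal_detected())
                return;         /* step 1: restore CL73, keep waiting */
        if (!cl73_fsm_in_check_state())
                return;         /* step 2: CL73 may still succeed */
        if (!cl37_message_pages_received())
                return;         /* step 3: partner never spoke CL37 BAM */

        /* all gates passed: partner is CL37-BAM-only */
        puts("disable CL73, restart CL37 autoneg");
}

int main(void)
{
        check_fallback_to_cl37();
        return 0;
}
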
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
2493 struct link_vars *vars) 2470 struct link_vars *vars)
2494{ 2471{
2495 struct bnx2x *bp = params->bp; 2472 struct bnx2x *bp = params->bp;
2496 u16 new_line_speed , gp_status; 2473 u16 new_line_speed, gp_status;
2497 u8 rc = 0; 2474 u8 rc = 0;
2498 2475
2499 /* Read gp_status */ 2476 /* Read gp_status */
2500 CL45_RD_OVER_CL22(bp, phy, 2477 CL22_RD_OVER_CL45(bp, phy,
2501 MDIO_REG_BANK_GP_STATUS, 2478 MDIO_REG_BANK_GP_STATUS,
2502 MDIO_GP_STATUS_TOP_AN_STATUS1, 2479 MDIO_GP_STATUS_TOP_AN_STATUS1,
2503 &gp_status); 2480 &gp_status);
2504 2481
2505 if (phy->req_line_speed == SPEED_AUTO_NEG) 2482 if (phy->req_line_speed == SPEED_AUTO_NEG)
2506 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; 2483 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2637 u16 bank; 2614 u16 bank;
2638 2615
2639 /* read precomp */ 2616 /* read precomp */
2640 CL45_RD_OVER_CL22(bp, phy, 2617 CL22_RD_OVER_CL45(bp, phy,
2641 MDIO_REG_BANK_OVER_1G, 2618 MDIO_REG_BANK_OVER_1G,
2642 MDIO_OVER_1G_LP_UP2, &lp_up2); 2619 MDIO_OVER_1G_LP_UP2, &lp_up2);
2643 2620
2644 /* bits [10:7] at lp_up2, positioned at [15:12] */ 2621 /* bits [10:7] at lp_up2, positioned at [15:12] */
2645 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> 2622 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2651 2628
2652 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; 2629 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
2653 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { 2630 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
2654 CL45_RD_OVER_CL22(bp, phy, 2631 CL22_RD_OVER_CL45(bp, phy,
2655 bank, 2632 bank,
2656 MDIO_TX0_TX_DRIVER, &tx_driver); 2633 MDIO_TX0_TX_DRIVER, &tx_driver);
2657 2634
2658 /* replace tx_driver bits [15:12] */ 2635 /* replace tx_driver bits [15:12] */
2659 if (lp_up2 != 2636 if (lp_up2 !=
2660 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 2637 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
2661 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 2638 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2662 tx_driver |= lp_up2; 2639 tx_driver |= lp_up2;
2663 CL45_WR_OVER_CL22(bp, phy, 2640 CL22_WR_OVER_CL45(bp, phy,
2664 bank, 2641 bank,
2665 MDIO_TX0_TX_DRIVER, tx_driver); 2642 MDIO_TX0_TX_DRIVER, tx_driver);
2666 } 2643 }
2667 } 2644 }
2668} 2645}
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
2676 2653
2677 DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); 2654 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2678 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + 2655 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
2679 EMAC_REG_EMAC_MODE, 2656 EMAC_REG_EMAC_MODE,
2680 (EMAC_MODE_25G_MODE | 2657 (EMAC_MODE_25G_MODE |
2681 EMAC_MODE_PORT_MII_10M | 2658 EMAC_MODE_PORT_MII_10M |
2682 EMAC_MODE_HALF_DUPLEX)); 2659 EMAC_MODE_HALF_DUPLEX));
2683 switch (vars->line_speed) { 2660 switch (vars->line_speed) {
2684 case SPEED_10: 2661 case SPEED_10:
2685 mode |= EMAC_MODE_PORT_MII_10M; 2662 mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
2707 if (vars->duplex == DUPLEX_HALF) 2684 if (vars->duplex == DUPLEX_HALF)
2708 mode |= EMAC_MODE_HALF_DUPLEX; 2685 mode |= EMAC_MODE_HALF_DUPLEX;
2709 bnx2x_bits_en(bp, 2686 bnx2x_bits_en(bp,
2710 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 2687 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2711 mode); 2688 mode);
2712 2689
2713 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 2690 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
2714 return 0; 2691 return 0;
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2723 2700
2724 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; 2701 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
2725 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { 2702 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
2726 CL45_WR_OVER_CL22(bp, phy, 2703 CL22_WR_OVER_CL45(bp, phy,
2727 bank, 2704 bank,
2728 MDIO_RX0_RX_EQ_BOOST, 2705 MDIO_RX0_RX_EQ_BOOST,
2729 phy->rx_preemphasis[i]); 2706 phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2731 2708
2732 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; 2709 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
2733 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { 2710 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
2734 CL45_WR_OVER_CL22(bp, phy, 2711 CL22_WR_OVER_CL45(bp, phy,
2735 bank, 2712 bank,
2736 MDIO_TX0_TX_DRIVER, 2713 MDIO_TX0_TX_DRIVER,
2737 phy->tx_preemphasis[i]); 2714 phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2754 /* forced speed requested? */ 2731 /* forced speed requested? */
2755 if (vars->line_speed != SPEED_AUTO_NEG || 2732 if (vars->line_speed != SPEED_AUTO_NEG ||
2756 (SINGLE_MEDIA_DIRECT(params) && 2733 (SINGLE_MEDIA_DIRECT(params) &&
2757 params->loopback_mode == LOOPBACK_EXT)) { 2734 params->loopback_mode == LOOPBACK_EXT)) {
2758 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 2735 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2759 2736
2760 /* disable autoneg */ 2737 /* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2771 2748
2772 /* program duplex & pause advertisement (for aneg) */ 2749 /* program duplex & pause advertisement (for aneg) */
2773 bnx2x_set_ieee_aneg_advertisment(phy, params, 2750 bnx2x_set_ieee_aneg_advertisment(phy, params,
2774 vars->ieee_fc); 2751 vars->ieee_fc);
2775 2752
2776 /* enable autoneg */ 2753 /* enable autoneg */
2777 bnx2x_set_autoneg(phy, params, vars, enable_cl73); 2754 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2842} 2819}
2843 2820
2844static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, 2821static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2845 struct bnx2x_phy *phy) 2822 struct bnx2x_phy *phy,
2823 struct link_params *params)
2846{ 2824{
2847 u16 cnt, ctrl; 2825 u16 cnt, ctrl;
2848 /* Wait for soft reset to get cleared upto 1 sec */ 2826 /* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2853 break; 2831 break;
2854 msleep(1); 2832 msleep(1);
2855 } 2833 }
2834
2835 if (cnt == 1000)
2836 netdev_err(bp->dev, "Warning: PHY was not initialized,"
2837 " Port %d\n",
2838 params->port);
2856 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); 2839 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
2857 return cnt; 2840 return cnt;
2858} 2841}
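
The warning added here hangs off the standard bounded-poll shape: up to 1000 iterations of read, test, msleep(1), with cnt == 1000 meaning the reset bit never cleared within the advertised second. The same shape in miniature — the poll source is a stand-in for the PHY control-register read, which this hunk does not show:

#include <stdio.h>

#define RESET_BIT 0x8000u

/* Stand-in for the hardware read: pretend reset clears on the 3rd poll. */
static unsigned int read_ctrl(unsigned int iter)
{
        return iter < 3 ? RESET_BIT : 0;
}

static unsigned int wait_reset_complete(void)
{
        unsigned int cnt, ctrl = 0;

        for (cnt = 0; cnt < 1000; cnt++) {
                ctrl = read_ctrl(cnt);
                if (!(ctrl & RESET_BIT))
                        break;
                /* msleep(1) in the kernel version */
        }
        if (cnt == 1000)
                fprintf(stderr, "Warning: PHY was not initialized\n");
        return cnt;
}

int main(void)
{
        printf("reset cleared after %u polls\n", wait_reset_complete());
        return 0;
}
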
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
2863 u32 mask; 2846 u32 mask;
2864 struct bnx2x *bp = params->bp; 2847 struct bnx2x *bp = params->bp;
2865 2848
2866 /* setting the status to report on link up 2849 /* Setting the status to report on link up for either XGXS or SerDes */
2867 for either XGXS or SerDes */
2868
2869 if (params->switch_cfg == SWITCH_CFG_10G) { 2850 if (params->switch_cfg == SWITCH_CFG_10G) {
2870 mask = (NIG_MASK_XGXS0_LINK10G | 2851 mask = (NIG_MASK_XGXS0_LINK10G |
2871 NIG_MASK_XGXS0_LINK_STATUS); 2852 NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2908{ 2889{
2909 u32 latch_status = 0; 2890 u32 latch_status = 0;
2910 2891
2911 /** 2892 /*
2912 * Disable the MI INT ( external phy int ) by writing 1 to the 2893 * Disable the MI INT ( external phy int ) by writing 1 to the
2913 * status register. Link down indication is high-active-signal, 2894 * status register. Link down indication is high-active-signal,
2914 * so in this case we need to write the status to clear the XOR 2895 * so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2933 2914
2934 /* For all latched-signal=up : Re-Arm Latch signals */ 2915 /* For all latched-signal=up : Re-Arm Latch signals */
2935 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, 2916 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
2936 (latch_status & 0xfffe) | (latch_status & 1)); 2917 (latch_status & 0xfffe) | (latch_status & 1));
2937 } 2918 }
2938 /* For all latched-signal=up,Write original_signal to status */ 2919 /* For all latched-signal=up,Write original_signal to status */
2939} 2920}
2940 2921
2941static void bnx2x_link_int_ack(struct link_params *params, 2922static void bnx2x_link_int_ack(struct link_params *params,
2942 struct link_vars *vars, u8 is_10g) 2923 struct link_vars *vars, u8 is_10g)
2943{ 2924{
2944 struct bnx2x *bp = params->bp; 2925 struct bnx2x *bp = params->bp;
2945 u8 port = params->port; 2926 u8 port = params->port;
2946 2927
2947 /* first reset all status 2928 /*
2948 * we assume only one line will be change at a time */ 2929 * First reset all status we assume only one line will be
2930 * change at a time
2931 */
2949 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 2932 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2950 (NIG_STATUS_XGXS0_LINK10G | 2933 (NIG_STATUS_XGXS0_LINK10G |
2951 NIG_STATUS_XGXS0_LINK_STATUS | 2934 NIG_STATUS_XGXS0_LINK_STATUS |
2952 NIG_STATUS_SERDES0_LINK_STATUS)); 2935 NIG_STATUS_SERDES0_LINK_STATUS));
2953 if (vars->phy_link_up) { 2936 if (vars->phy_link_up) {
2954 if (is_10g) { 2937 if (is_10g) {
2955 /* Disable the 10G link interrupt 2938 /*
2956 * by writing 1 to the status register 2939 * Disable the 10G link interrupt by writing 1 to the
2940 * status register
2957 */ 2941 */
2958 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n"); 2942 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
2959 bnx2x_bits_en(bp, 2943 bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
2961 NIG_STATUS_XGXS0_LINK10G); 2945 NIG_STATUS_XGXS0_LINK10G);
2962 2946
2963 } else if (params->switch_cfg == SWITCH_CFG_10G) { 2947 } else if (params->switch_cfg == SWITCH_CFG_10G) {
2964 /* Disable the link interrupt 2948 /*
2965 * by writing 1 to the relevant lane 2949 * Disable the link interrupt by writing 1 to the
2966 * in the status register 2950 * relevant lane in the status register
2967 */ 2951 */
2968 u32 ser_lane = ((params->lane_config & 2952 u32 ser_lane = ((params->lane_config &
2969 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 2953 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
2978 2962
2979 } else { /* SerDes */ 2963 } else { /* SerDes */
2980 DP(NETIF_MSG_LINK, "SerDes phy link up\n"); 2964 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
2981 /* Disable the link interrupt 2965 /*
2982 * by writing 1 to the status register 2966 * Disable the link interrupt by writing 1 to the status
2967 * register
2983 */ 2968 */
2984 bnx2x_bits_en(bp, 2969 bnx2x_bits_en(bp,
2985 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 2970 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3059 } 3044 }
3060 if ((params->num_phys == MAX_PHYS) && 3045 if ((params->num_phys == MAX_PHYS) &&
3061 (params->phy[EXT_PHY2].ver_addr != 0)) { 3046 (params->phy[EXT_PHY2].ver_addr != 0)) {
3062 spirom_ver = REG_RD(bp, 3047 spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
3063 params->phy[EXT_PHY2].ver_addr);
3064 if (params->phy[EXT_PHY2].format_fw_ver) { 3048 if (params->phy[EXT_PHY2].format_fw_ver) {
3065 *ver_p = '/'; 3049 *ver_p = '/';
3066 ver_p++; 3050 ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
3089 3073
3090 /* change the uni_phy_addr in the nig */ 3074 /* change the uni_phy_addr in the nig */
3091 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + 3075 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
3092 port*0x18)); 3076 port*0x18));
3093 3077
3094 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); 3078 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3095 3079
3096 bnx2x_cl45_write(bp, phy, 3080 bnx2x_cl45_write(bp, phy,
3097 5, 3081 5,
3098 (MDIO_REG_BANK_AER_BLOCK + 3082 (MDIO_REG_BANK_AER_BLOCK +
3099 (MDIO_AER_BLOCK_AER_REG & 0xf)), 3083 (MDIO_AER_BLOCK_AER_REG & 0xf)),
3100 0x2800); 3084 0x2800);
3101 3085
3102 bnx2x_cl45_write(bp, phy, 3086 bnx2x_cl45_write(bp, phy,
3103 5, 3087 5,
3104 (MDIO_REG_BANK_CL73_IEEEB0 + 3088 (MDIO_REG_BANK_CL73_IEEEB0 +
3105 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 3089 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
3106 0x6041); 3090 0x6041);
3107 msleep(200); 3091 msleep(200);
3108 /* set aer mmd back */ 3092 /* set aer mmd back */
3109 bnx2x_set_aer_mmd_xgxs(params, phy); 3093 bnx2x_set_aer_mmd_xgxs(params, phy);
3110 3094
3111 /* and md_devad */ 3095 /* and md_devad */
3112 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 3096 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3113 md_devad);
3114
3115 } else { 3097 } else {
3116 u16 mii_ctrl; 3098 u16 mii_ctrl;
3117 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); 3099 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,56 +3134,71 @@ u8 bnx2x_set_led(struct link_params *params,
3152 case LED_MODE_OFF: 3134 case LED_MODE_OFF:
3153 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); 3135 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
3154 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3136 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
3155 SHARED_HW_CFG_LED_MAC1); 3137 SHARED_HW_CFG_LED_MAC1);
3156 3138
3157 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 3139 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3158 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); 3140 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
3159 break; 3141 break;
3160 3142
3161 case LED_MODE_OPER: 3143 case LED_MODE_OPER:
3162 /** 3144 /*
3163 * For all other phys, OPER mode is same as ON, so in case 3145 * For all other phys, OPER mode is same as ON, so in case
3164 * link is down, do nothing 3146 * link is down, do nothing
3165 **/ 3147 */
3166 if (!vars->link_up) 3148 if (!vars->link_up)
3167 break; 3149 break;
3168 case LED_MODE_ON: 3150 case LED_MODE_ON:
3169 if (SINGLE_MEDIA_DIRECT(params)) { 3151 if (params->phy[EXT_PHY1].type ==
3170 /** 3152 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
3171 * This is a work-around for HW issue found when link 3153 CHIP_IS_E2(bp) && params->num_phys == 2) {
3172 * is up in CL73 3154 /*
3173 */ 3155 * This is a work-around for E2+8727 Configurations
3156 */
3157 if (mode == LED_MODE_ON ||
3158 speed == SPEED_10000){
3159 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
3160 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
3161
3162 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3163 EMAC_WR(bp, EMAC_REG_EMAC_LED,
3164 (tmp | EMAC_LED_OVERRIDE));
3165 return rc;
3166 }
3167 } else if (SINGLE_MEDIA_DIRECT(params)) {
3168 /*
3169 * This is a work-around for HW issue found when link
3170 * is up in CL73
3171 */
3174 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 3172 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
3175 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 3173 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
3176 } else { 3174 } else {
3177 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3175 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
3178 hw_led_mode);
3179 } 3176 }
3180 3177
3181 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + 3178 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
3182 port*4, 0);
3183 /* Set blinking rate to ~15.9Hz */ 3179 /* Set blinking rate to ~15.9Hz */
3184 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, 3180 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
3185 LED_BLINK_RATE_VAL); 3181 LED_BLINK_RATE_VAL);
3186 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + 3182 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
3187 port*4, 1); 3183 port*4, 1);
3188 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 3184 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3189 EMAC_WR(bp, EMAC_REG_EMAC_LED, 3185 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
3190 (tmp & (~EMAC_LED_OVERRIDE)));
3191 3186
3192 if (CHIP_IS_E1(bp) && 3187 if (CHIP_IS_E1(bp) &&
3193 ((speed == SPEED_2500) || 3188 ((speed == SPEED_2500) ||
3194 (speed == SPEED_1000) || 3189 (speed == SPEED_1000) ||
3195 (speed == SPEED_100) || 3190 (speed == SPEED_100) ||
3196 (speed == SPEED_10))) { 3191 (speed == SPEED_10))) {
3197 /* On Everest 1 Ax chip versions for speeds less than 3192 /*
3198 10G LED scheme is different */ 3193 * On Everest 1 Ax chip versions for speeds less than
3194 * 10G LED scheme is different
3195 */
3199 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 3196 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
3200 + port*4, 1); 3197 + port*4, 1);
3201 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + 3198 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
3202 port*4, 0); 3199 port*4, 0);
3203 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + 3200 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
3204 port*4, 1); 3201 port*4, 1);
3205 } 3202 }
3206 break; 3203 break;
3207 3204
@@ -3215,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
3215 3212
3216} 3213}
3217 3214
3218/** 3215/*
3219 * This function comes to reflect the actual link state read DIRECTLY from the 3216 * This function comes to reflect the actual link state read DIRECTLY from the
3220 * HW 3217 * HW
3221 */ 3218 */
@@ -3227,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
3227 u8 ext_phy_link_up = 0, serdes_phy_type; 3224 u8 ext_phy_link_up = 0, serdes_phy_type;
3228 struct link_vars temp_vars; 3225 struct link_vars temp_vars;
3229 3226
3230 CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY], 3227 CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
3231 MDIO_REG_BANK_GP_STATUS, 3228 MDIO_REG_BANK_GP_STATUS,
3232 MDIO_GP_STATUS_TOP_AN_STATUS1, 3229 MDIO_GP_STATUS_TOP_AN_STATUS1,
3233 &gp_status); 3230 &gp_status);
3234 /* link is up only if both local phy and external phy are up */ 3231 /* link is up only if both local phy and external phy are up */
3235 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) 3232 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
3236 return -ESRCH; 3233 return -ESRCH;
@@ -3274,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3274 u8 rc = 0; 3271 u8 rc = 0;
3275 u8 phy_index, non_ext_phy; 3272 u8 phy_index, non_ext_phy;
3276 struct bnx2x *bp = params->bp; 3273 struct bnx2x *bp = params->bp;
3277 /** 3274 /*
3278 * In case of external phy existence, the line speed would be the 3275 * In case of external phy existence, the line speed would be the
3279 * line speed linked up by the external phy. In case it is direct 3276 * line speed linked up by the external phy. In case it is direct
3280 * only, then the line_speed during initialization will be 3277 * only, then the line_speed during initialization will be
3281 * equal to the req_line_speed 3278 * equal to the req_line_speed
3282 */ 3279 */
3283 vars->line_speed = params->phy[INT_PHY].req_line_speed; 3280 vars->line_speed = params->phy[INT_PHY].req_line_speed;
3284 3281
3285 /** 3282 /*
3286 * Initialize the internal phy in case this is a direct board 3283 * Initialize the internal phy in case this is a direct board
3287 * (no external phys), or this board has external phy which requires 3284 * (no external phys), or this board has external phy which requires
3288 * to first. 3285 * to first.
@@ -3310,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3310 if (!non_ext_phy) 3307 if (!non_ext_phy)
3311 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 3308 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3312 phy_index++) { 3309 phy_index++) {
3313 /** 3310 /*
3314 * No need to initialize second phy in case of first 3311 * No need to initialize second phy in case of first
3315 * phy only selection. In case of second phy, we do 3312 * phy only selection. In case of second phy, we do
3316 * need to initialize the first phy, since they are 3313 * need to initialize the first phy, since they are
3317 * connected. 3314 * connected.
3318 **/ 3315 */
3319 if (phy_index == EXT_PHY2 && 3316 if (phy_index == EXT_PHY2 &&
3320 (bnx2x_phy_selection(params) == 3317 (bnx2x_phy_selection(params) ==
3321 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { 3318 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
3322 DP(NETIF_MSG_LINK, "Not initializing" 3319 DP(NETIF_MSG_LINK, "Ignoring second phy\n");
3323 "second phy\n");
3324 continue; 3320 continue;
3325 } 3321 }
3326 params->phy[phy_index].config_init( 3322 params->phy[phy_index].config_init(
@@ -3342,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
3342 struct link_params *params) 3338 struct link_params *params)
3343{ 3339{
3344 /* reset the SerDes/XGXS */ 3340 /* reset the SerDes/XGXS */
3345 REG_WR(params->bp, GRCBASE_MISC + 3341 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3346 MISC_REGISTERS_RESET_REG_3_CLEAR, 3342 (0x1ff << (params->port*16)));
3347 (0x1ff << (params->port*16)));
3348} 3343}
3349 3344
3350static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, 3345static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3358,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
3358 else 3353 else
3359 gpio_port = params->port; 3354 gpio_port = params->port;
3360 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3355 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3361 MISC_REGISTERS_GPIO_OUTPUT_LOW, 3356 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3362 gpio_port); 3357 gpio_port);
3363 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 3358 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3364 MISC_REGISTERS_GPIO_OUTPUT_LOW, 3359 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3365 gpio_port); 3360 gpio_port);
3366 DP(NETIF_MSG_LINK, "reset external PHY\n"); 3361 DP(NETIF_MSG_LINK, "reset external PHY\n");
3367} 3362}
3368 3363
@@ -3393,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
3393 3388
3394 /* reset BigMac */ 3389 /* reset BigMac */
3395 bnx2x_bmac_rx_disable(bp, params->port); 3390 bnx2x_bmac_rx_disable(bp, params->port);
3396 REG_WR(bp, GRCBASE_MISC + 3391 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3397 MISC_REGISTERS_RESET_REG_2_CLEAR, 3392 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3398 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3399 return 0; 3393 return 0;
3400} 3394}
3401 3395
@@ -3446,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
3446 msleep(20); 3440 msleep(20);
3447 return rc; 3441 return rc;
3448} 3442}
3449/** 3443/*
3450 * The bnx2x_link_update function should be called upon link 3444 * The bnx2x_link_update function should be called upon link
3451 * interrupt. 3445 * interrupt.
3452 * Link is considered up as follows: 3446 * Link is considered up as follows:
@@ -3485,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3485 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); 3479 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
3486 3480
3487 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + 3481 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
3488 port*0x18) > 0); 3482 port*0x18) > 0);
3489 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n", 3483 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
3490 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), 3484 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3491 is_mi_int, 3485 is_mi_int,
3492 REG_RD(bp, 3486 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
3493 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
3494 3487
3495 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", 3488 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
3496 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 3489 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3499,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3499 /* disable emac */ 3492 /* disable emac */
3500 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 3493 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
3501 3494
3502 /** 3495 /*
3503 * Step 1: 3496 * Step 1:
3504 * Check external link change only for external phys, and apply 3497 * Check external link change only for external phys, and apply
3505 * priority selection between them in case the link on both phys 3498 * priority selection between them in case the link on both phys
3506 * is up. Note that instead of the common vars, a temporary 3499 * is up. Note that instead of the common vars, a temporary
3507 * vars argument is used since each phy may have different link/ 3500 * vars argument is used since each phy may have different link/
3508 * speed/duplex result 3501 * speed/duplex result
3509 */ 3502 */
3510 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 3503 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3511 phy_index++) { 3504 phy_index++) {
3512 struct bnx2x_phy *phy = &params->phy[phy_index]; 3505 struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3531,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3531 switch (bnx2x_phy_selection(params)) { 3524 switch (bnx2x_phy_selection(params)) {
3532 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 3525 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3533 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 3526 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3534 /** 3527 /*
3535 * In this option, the first PHY makes sure to pass the 3528 * In this option, the first PHY makes sure to pass the
3536 * traffic through itself only. 3529 * traffic through itself only.
3537 * It's not clear how to reset the link on the second phy 3530 * It's not clear how to reset the link on the second phy
3538 **/ 3531 */
3539 active_external_phy = EXT_PHY1; 3532 active_external_phy = EXT_PHY1;
3540 break; 3533 break;
3541 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 3534 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3542 /** 3535 /*
3543 * In this option, the first PHY makes sure to pass the 3536 * In this option, the first PHY makes sure to pass the
3544 * traffic through the second PHY. 3537 * traffic through the second PHY.
3545 **/ 3538 */
3546 active_external_phy = EXT_PHY2; 3539 active_external_phy = EXT_PHY2;
3547 break; 3540 break;
3548 default: 3541 default:
3549 /** 3542 /*
3550 * Link indication on both PHYs with the following cases 3543 * Link indication on both PHYs with the following cases
3551 * is invalid: 3544 * is invalid:
3552 * - FIRST_PHY means that second phy wasn't initialized, 3545 * - FIRST_PHY means that second phy wasn't initialized,
@@ -3554,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3554 * - SECOND_PHY means that first phy should not be able 3547 * - SECOND_PHY means that first phy should not be able
3555 * to link up by itself (using configuration) 3548 * to link up by itself (using configuration)
3556 * - DEFAULT should be overridden during initialization 3549 * - DEFAULT should be overridden during initialization
3557 **/ 3550 */
3558 DP(NETIF_MSG_LINK, "Invalid link indication" 3551 DP(NETIF_MSG_LINK, "Invalid link indication"
3559 "mpc=0x%x. DISABLING LINK !!!\n", 3552 "mpc=0x%x. DISABLING LINK !!!\n",
3560 params->multi_phy_config); 3553 params->multi_phy_config);
@@ -3564,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3564 } 3557 }
3565 } 3558 }
3566 prev_line_speed = vars->line_speed; 3559 prev_line_speed = vars->line_speed;
3567 /** 3560 /*
3568 * Step 2: 3561 * Step 2:
3569 * Read the status of the internal phy. In case of 3562 * Read the status of the internal phy. In case of
3570 * DIRECT_SINGLE_MEDIA board, this link is the external link, 3563 * DIRECT_SINGLE_MEDIA board, this link is the external link,
3571 * otherwise this is the link between the 577xx and the first 3564 * otherwise this is the link between the 577xx and the first
3572 * external phy 3565 * external phy
3573 */ 3566 */
3574 if (params->phy[INT_PHY].read_status) 3567 if (params->phy[INT_PHY].read_status)
3575 params->phy[INT_PHY].read_status( 3568 params->phy[INT_PHY].read_status(
3576 &params->phy[INT_PHY], 3569 &params->phy[INT_PHY],
3577 params, vars); 3570 params, vars);
3578 /** 3571 /*
3579 * The INT_PHY flow control reside in the vars. This include the 3572 * The INT_PHY flow control reside in the vars. This include the
3580 * case where the speed or flow control are not set to AUTO. 3573 * case where the speed or flow control are not set to AUTO.
3581 * Otherwise, the active external phy flow control result is set 3574 * Otherwise, the active external phy flow control result is set
@@ -3585,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3585 */ 3578 */
3586 if (active_external_phy > INT_PHY) { 3579 if (active_external_phy > INT_PHY) {
3587 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; 3580 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
3588 /** 3581 /*
3589 * Link speed is taken from the XGXS. AN and FC result from 3582 * Link speed is taken from the XGXS. AN and FC result from
3590 * the external phy. 3583 * the external phy.
3591 */ 3584 */
3592 vars->link_status |= phy_vars[active_external_phy].link_status; 3585 vars->link_status |= phy_vars[active_external_phy].link_status;
3593 3586
3594 /** 3587 /*
3595 * if active_external_phy is first PHY and link is up - disable 3588 * if active_external_phy is first PHY and link is up - disable
3596 * TX on the second external PHY 3589 * TX on the second external PHY
3597 */ 3590 */
@@ -3627,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3627 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," 3620 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
3628 " ext_phy_line_speed = %d\n", vars->flow_ctrl, 3621 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
3629 vars->link_status, ext_phy_line_speed); 3622 vars->link_status, ext_phy_line_speed);
3630 /** 3623 /*
3631 * Upon link speed change set the NIG into drain mode. This comes to 3624 * Upon link speed change set the NIG into drain mode. This comes to
3632 * deal with a possible FIFO glitch due to clk change when speed 3625 * deal with a possible FIFO glitch due to clk change when speed
3633 * is decreased without a link down indicator 3626 * is decreased without a link down indicator
@@ -3642,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3642 ext_phy_line_speed); 3635 ext_phy_line_speed);
3643 vars->phy_link_up = 0; 3636 vars->phy_link_up = 0;
3644 } else if (prev_line_speed != vars->line_speed) { 3637 } else if (prev_line_speed != vars->line_speed) {
3645 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE 3638 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
3646 + params->port*4, 0); 3639 0);
3647 msleep(1); 3640 msleep(1);
3648 } 3641 }
3649 } 3642 }
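The drain-mode write above exists because a speed decrease without a link-down event can leave stale data in the NIG FIFO clocked at the old rate. A sketch of the overall pattern, assuming the matching assert (writing 1) happens elsewhere in link.c before the MACs are reprogrammed:

/* Sketch: drain NIG egress around a live speed change. */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);	/* assert drain */
/* ... reprogram the MAC for the new line speed ... */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);	/* release drain */
msleep(1);	/* let the FIFO settle, as done above */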
@@ -3658,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3658 3651
3659 bnx2x_link_int_ack(params, vars, link_10g); 3652 bnx2x_link_int_ack(params, vars, link_10g);
3660 3653
3661 /** 3654 /*
3662 * In case the external phy link is up and the internal link is down 3655 * In case the external phy link is up and the internal link is down
3663 * (not initialized yet, probably right after link initialization), it 3656 * (not initialized yet, probably right after link initialization), it
3664 * needs to be initialized. 3657 * needs to be initialized.
3665 * Note that after a link down-up as a result of a cable plug, the xgxs 3658 * Note that after a link down-up as a result of a cable plug, the xgxs
3666 * link would probably come up again without the need to 3659 * link would probably come up again without the need to
3667 * initialize it 3660 * initialize it
3668 */ 3661 */
3669 if (!(SINGLE_MEDIA_DIRECT(params))) { 3662 if (!(SINGLE_MEDIA_DIRECT(params))) {
3670 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d," 3663 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
3671 " init_preceding = %d\n", ext_phy_link_up, 3664 " init_preceding = %d\n", ext_phy_link_up,
@@ -3685,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3685 vars); 3678 vars);
3686 } 3679 }
3687 } 3680 }
3688 /** 3681 /*
3689 * Link is up only if both local phy and external phy (in case of 3682 * Link is up only if both local phy and external phy (in case of
3690 * non-direct board) are up 3683 * non-direct board) are up
3691 */ 3684 */
3692 vars->link_up = (vars->phy_link_up && 3685 vars->link_up = (vars->phy_link_up &&
3693 (ext_phy_link_up || 3686 (ext_phy_link_up ||
@@ -3708,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3708void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) 3701void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
3709{ 3702{
3710 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3703 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3711 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 3704 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3712 msleep(1); 3705 msleep(1);
3713 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3706 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3714 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 3707 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
3715} 3708}
3716 3709
3717static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, 3710static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3731,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
3731 u16 fw_ver1, fw_ver2; 3724 u16 fw_ver1, fw_ver2;
3732 3725
3733 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 3726 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
3734 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 3727 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
3735 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 3728 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
3736 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 3729 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
3737 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), 3730 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
3738 phy->ver_addr); 3731 phy->ver_addr);
3739} 3732}
@@ -3754,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
3754 if ((vars->ieee_fc & 3747 if ((vars->ieee_fc &
3755 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == 3748 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3756 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 3749 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3757 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; 3750 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3758 } 3751 }
3759 if ((vars->ieee_fc & 3752 if ((vars->ieee_fc &
3760 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == 3753 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3785,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3785 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 3778 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
3786 ret = 1; 3779 ret = 1;
3787 bnx2x_cl45_read(bp, phy, 3780 bnx2x_cl45_read(bp, phy,
3788 MDIO_AN_DEVAD, 3781 MDIO_AN_DEVAD,
3789 MDIO_AN_REG_ADV_PAUSE, &ld_pause); 3782 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
3790 bnx2x_cl45_read(bp, phy, 3783 bnx2x_cl45_read(bp, phy,
3791 MDIO_AN_DEVAD, 3784 MDIO_AN_DEVAD,
3792 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); 3785 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
3793 pause_result = (ld_pause & 3786 pause_result = (ld_pause &
3794 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; 3787 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
3795 pause_result |= (lp_pause & 3788 pause_result |= (lp_pause &
@@ -3854,90 +3847,82 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
3854 pause_result); 3847 pause_result);
3855 } 3848 }
3856} 3849}
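Both resolve paths pack the local and link-partner pause advertisements into a 4-bit pause_result (local nibble in bits 3:2, partner nibble in bits 1:0) before picking the flow-control mode. The decision itself is the standard 802.3 clause 28 resolution; restated as a stand-alone sketch with hypothetical names:

/* Hedged restatement of the pause resolution the driver applies.
 * ld/lp carry PAUSE (bit 0) and ASM_DIR (bit 1) for each side. */
static void resolve_pause_sketch(u16 ld, u16 lp, u8 *rx_en, u8 *tx_en)
{
	*rx_en = *tx_en = 0;
	if (ld & 1) {				/* local advertises PAUSE */
		if (lp & 1)			/* partner too: symmetric */
			*rx_en = *tx_en = 1;
		else if ((ld & 2) && (lp & 2))	/* both ASM_DIR: rx only */
			*rx_en = 1;
	} else if ((ld & 2) && (lp & 1) && (lp & 2)) {
		*tx_en = 1;			/* asymmetric: tx only */
	}
}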
3857 3850static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3858static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3859 struct bnx2x_phy *phy, 3851 struct bnx2x_phy *phy,
3860 u8 port) 3852 u8 port)
3861{ 3853{
3854 u32 count = 0;
3855 u16 fw_ver1, fw_msgout;
3856 u8 rc = 0;
3857
3862 /* Boot port from external ROM */ 3858 /* Boot port from external ROM */
3863 /* EDC grst */ 3859 /* EDC grst */
3864 bnx2x_cl45_write(bp, phy, 3860 bnx2x_cl45_write(bp, phy,
3865 MDIO_PMA_DEVAD, 3861 MDIO_PMA_DEVAD,
3866 MDIO_PMA_REG_GEN_CTRL, 3862 MDIO_PMA_REG_GEN_CTRL,
3867 0x0001); 3863 0x0001);
3868 3864
3869 /* ucode reboot and rst */ 3865 /* ucode reboot and rst */
3870 bnx2x_cl45_write(bp, phy, 3866 bnx2x_cl45_write(bp, phy,
3871 MDIO_PMA_DEVAD, 3867 MDIO_PMA_DEVAD,
3872 MDIO_PMA_REG_GEN_CTRL, 3868 MDIO_PMA_REG_GEN_CTRL,
3873 0x008c); 3869 0x008c);
3874 3870
3875 bnx2x_cl45_write(bp, phy, 3871 bnx2x_cl45_write(bp, phy,
3876 MDIO_PMA_DEVAD, 3872 MDIO_PMA_DEVAD,
3877 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 3873 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
3878 3874
3879 /* Reset internal microprocessor */ 3875 /* Reset internal microprocessor */
3880 bnx2x_cl45_write(bp, phy, 3876 bnx2x_cl45_write(bp, phy,
3881 MDIO_PMA_DEVAD, 3877 MDIO_PMA_DEVAD,
3882 MDIO_PMA_REG_GEN_CTRL, 3878 MDIO_PMA_REG_GEN_CTRL,
3883 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 3879 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
3884 3880
3885 /* Release srst bit */ 3881 /* Release srst bit */
3886 bnx2x_cl45_write(bp, phy, 3882 bnx2x_cl45_write(bp, phy,
3887 MDIO_PMA_DEVAD, 3883 MDIO_PMA_DEVAD,
3888 MDIO_PMA_REG_GEN_CTRL, 3884 MDIO_PMA_REG_GEN_CTRL,
3889 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 3885 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
3890 3886
3891 /* wait for 120ms for code download via SPI port */ 3887 /* Delay 100ms per the PHY specifications */
3892 msleep(120); 3888 msleep(100);
3893 3889
3894 /* Clear ser_boot_ctl bit */ 3890 /* The 8073 sometimes takes longer to download */
3895 bnx2x_cl45_write(bp, phy, 3891 do {
3896 MDIO_PMA_DEVAD, 3892 count++;
3897 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 3893 if (count > 300) {
3898 bnx2x_save_bcm_spirom_ver(bp, phy, port); 3894 DP(NETIF_MSG_LINK,
3899} 3895 "bnx2x_8073_8727_external_rom_boot port %x:"
3896 "Download failed. fw version = 0x%x\n",
3897 port, fw_ver1);
3898 rc = -EINVAL;
3899 break;
3900 }
3900 3901
3901static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp, 3902 bnx2x_cl45_read(bp, phy,
3902 struct bnx2x_phy *phy) 3903 MDIO_PMA_DEVAD,
3903{ 3904 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
3904 u16 val; 3905 bnx2x_cl45_read(bp, phy,
3905 bnx2x_cl45_read(bp, phy, 3906 MDIO_PMA_DEVAD,
3906 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val); 3907 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
3907 3908
3908 if (val == 0) { 3909 msleep(1);
3909 /* Mustn't set low power mode in 8073 A0 */ 3910 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
3910 return; 3911 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
3911 } 3912 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
3912 3913
3913 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */ 3914 /* Clear ser_boot_ctl bit */
3914 bnx2x_cl45_read(bp, phy,
3915 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3916 val &= ~(1<<13);
3917 bnx2x_cl45_write(bp, phy, 3915 bnx2x_cl45_write(bp, phy,
3918 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); 3916 MDIO_PMA_DEVAD,
3919 3917 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
3920 /* PLL controls */ 3918 bnx2x_save_bcm_spirom_ver(bp, phy, port);
3921 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
3922 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
3923 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
3924 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
3925 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
3926
3927 /* Tx Controls */
3928 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3929 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
3930 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
3931 3919
3932 /* Rx Controls */ 3920 DP(NETIF_MSG_LINK,
3933 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4); 3921 "bnx2x_8073_8727_external_rom_boot port %x: "
3934 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249); 3922 "Download complete. fw version = 0x%x\n",
3935 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015); 3923 port, fw_ver1);
3936 3924
3937 /* Enable PLL sequencer (use read-modify-write to set bit 13) */ 3925 return rc;
3938 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3939 val |= (1<<13);
3940 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3941} 3926}
3942 3927
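The rewritten boot flow above replaces the old fixed 120ms wait with a bounded poll: after the microcontroller is released, the firmware version is re-read until a plausible value appears (non-zero and not the 0x4321 power-on pattern) and, on the 8073 specifically, until the message-out register reports 0x03, giving up after 300 iterations. Reduced to a skeleton, with read_fw_regs() as a hypothetical stand-in for the two bnx2x_cl45_read() calls:

/* Poll-until-downloaded skeleton mirroring the loop above. */
static int wait_fw_download(struct bnx2x_phy *phy, int is_8073)
{
	u32 count = 0;
	u16 fw_ver1 = 0, fw_msgout = 0;

	do {
		if (++count > 300)
			return -EINVAL;		/* download timed out */
		read_fw_regs(phy, &fw_ver1, &fw_msgout);
		msleep(1);
	} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
		 (is_8073 && (fw_msgout & 0xff) != 0x03));
	return 0;
}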
3943/******************************************************************/ 3928/******************************************************************/
@@ -3950,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
3950 3935
3951 /* Read 8073 HW revision*/ 3936 /* Read 8073 HW revision*/
3952 bnx2x_cl45_read(bp, phy, 3937 bnx2x_cl45_read(bp, phy,
3953 MDIO_PMA_DEVAD, 3938 MDIO_PMA_DEVAD,
3954 MDIO_PMA_REG_8073_CHIP_REV, &val); 3939 MDIO_PMA_REG_8073_CHIP_REV, &val);
3955 3940
3956 if (val != 1) { 3941 if (val != 1) {
3957 /* No need to workaround in 8073 A1 */ 3942 /* No need to workaround in 8073 A1 */
@@ -3959,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
3959 } 3944 }
3960 3945
3961 bnx2x_cl45_read(bp, phy, 3946 bnx2x_cl45_read(bp, phy,
3962 MDIO_PMA_DEVAD, 3947 MDIO_PMA_DEVAD,
3963 MDIO_PMA_REG_ROM_VER2, &val); 3948 MDIO_PMA_REG_ROM_VER2, &val);
3964 3949
3965 /* SNR should be applied only for version 0x102 */ 3950 /* SNR should be applied only for version 0x102 */
3966 if (val != 0x102) 3951 if (val != 0x102)
@@ -3974,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
3974 u16 val, cnt, cnt1 ; 3959 u16 val, cnt, cnt1 ;
3975 3960
3976 bnx2x_cl45_read(bp, phy, 3961 bnx2x_cl45_read(bp, phy,
3977 MDIO_PMA_DEVAD, 3962 MDIO_PMA_DEVAD,
3978 MDIO_PMA_REG_8073_CHIP_REV, &val); 3963 MDIO_PMA_REG_8073_CHIP_REV, &val);
3979 3964
3980 if (val > 0) { 3965 if (val > 0) {
3981 /* No need to workaround in 8073 A1 */ 3966 /* No need to workaround in 8073 A1 */
@@ -3983,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
3983 } 3968 }
3984 /* XAUI workaround in 8073 A0: */ 3969 /* XAUI workaround in 8073 A0: */
3985 3970
3986 /* After loading the boot ROM and restarting Autoneg, 3971 /*
3987 poll Dev1, Reg $C820: */ 3972 * After loading the boot ROM and restarting Autoneg, poll
3973 * Dev1, Reg $C820:
3974 */
3988 3975
3989 for (cnt = 0; cnt < 1000; cnt++) { 3976 for (cnt = 0; cnt < 1000; cnt++) {
3990 bnx2x_cl45_read(bp, phy, 3977 bnx2x_cl45_read(bp, phy,
3991 MDIO_PMA_DEVAD, 3978 MDIO_PMA_DEVAD,
3992 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 3979 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
3993 &val); 3980 &val);
3994 /* If bit [14] = 0 or bit [13] = 0, continue on with 3981 /*
3995 system initialization (XAUI work-around not required, 3982 * If bit [14] = 0 or bit [13] = 0, continue on with
3996 as these bits indicate 2.5G or 1G link up). */ 3983 * system initialization (XAUI work-around not required, as
3984 * these bits indicate 2.5G or 1G link up).
3985 */
3997 if (!(val & (1<<14)) || !(val & (1<<13))) { 3986 if (!(val & (1<<14)) || !(val & (1<<13))) {
3998 DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); 3987 DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
3999 return 0; 3988 return 0;
4000 } else if (!(val & (1<<15))) { 3989 } else if (!(val & (1<<15))) {
4001 DP(NETIF_MSG_LINK, "clc bit 15 went off\n"); 3990 DP(NETIF_MSG_LINK, "bit 15 went off\n");
4002 /* If bit 15 is 0, then poll Dev1, Reg $C841 until 3991 /*
4003 it's MSB (bit 15) goes to 1 (indicating that the 3992 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
4004 XAUI workaround has completed), 3993 * MSB (bit15) goes to 1 (indicating that the XAUI
4005 then continue on with system initialization.*/ 3994 * workaround has completed), then continue on with
3995 * system initialization.
3996 */
4006 for (cnt1 = 0; cnt1 < 1000; cnt1++) { 3997 for (cnt1 = 0; cnt1 < 1000; cnt1++) {
4007 bnx2x_cl45_read(bp, phy, 3998 bnx2x_cl45_read(bp, phy,
4008 MDIO_PMA_DEVAD, 3999 MDIO_PMA_DEVAD,
@@ -4085,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4085 gpio_port = params->port; 4076 gpio_port = params->port;
4086 /* Restore normal power mode*/ 4077 /* Restore normal power mode*/
4087 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4078 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4088 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 4079 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
4089 4080
4090 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 4081 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4091 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 4082 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
4092 4083
4093 /* enable LASI */ 4084 /* enable LASI */
4094 bnx2x_cl45_write(bp, phy, 4085 bnx2x_cl45_write(bp, phy,
@@ -4098,8 +4089,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4098 4089
4099 bnx2x_8073_set_pause_cl37(params, phy, vars); 4090 bnx2x_8073_set_pause_cl37(params, phy, vars);
4100 4091
4101 bnx2x_8073_set_xaui_low_power_mode(bp, phy);
4102
4103 bnx2x_cl45_read(bp, phy, 4092 bnx2x_cl45_read(bp, phy,
4104 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); 4093 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
4105 4094
@@ -4108,6 +4097,21 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4108 4097
4109 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 4098 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
4110 4099
4100 /* Swap polarity if required - Must be done only in non-1G mode */
4101 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4102 /* Configure the 8073 to swap _P and _N of the KR lines */
4103 DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
4104 /* 10G Rx/Tx and 1G Tx signal polarity swap */
4105 bnx2x_cl45_read(bp, phy,
4106 MDIO_PMA_DEVAD,
4107 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
4108 bnx2x_cl45_write(bp, phy,
4109 MDIO_PMA_DEVAD,
4110 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
4111 (val | (3<<9)));
4112 }
4113
4114
4111 /* Enable CL37 BAM */ 4115 /* Enable CL37 BAM */
4112 if (REG_RD(bp, params->shmem_base + 4116 if (REG_RD(bp, params->shmem_base +
4113 offsetof(struct shmem_region, dev_info. 4117 offsetof(struct shmem_region, dev_info.
@@ -4135,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4135 val = (1<<7); 4139 val = (1<<7);
4136 } else if (phy->req_line_speed == SPEED_2500) { 4140 } else if (phy->req_line_speed == SPEED_2500) {
4137 val = (1<<5); 4141 val = (1<<5);
4138 /* Note that 2.5G works only 4142 /*
4139 when used with 1G advertisment */ 4143 * Note that 2.5G works only when used with 1G
4144 * advertisment
4145 */
4140 } else 4146 } else
4141 val = (1<<5); 4147 val = (1<<5);
4142 } else { 4148 } else {
@@ -4145,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4145 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 4151 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
4146 val |= (1<<7); 4152 val |= (1<<7);
4147 4153
4148 /* Note that 2.5G works only when 4154 /* Note that 2.5G works only when used with 1G advertisement */
4149 used with 1G advertisment */
4150 if (phy->speed_cap_mask & 4155 if (phy->speed_cap_mask &
4151 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | 4156 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
4152 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 4157 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4186,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4186 /* Add support for CL37 (passive mode) III */ 4191 /* Add support for CL37 (passive mode) III */
4187 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 4192 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
4188 4193
4189 /* The SNR will improve about 2db by changing 4194 /*
4190 BW and FEE main tap. Rest commands are executed 4195 * The SNR will improve about 2db by changing BW and FFE main
4191 after link is up*/ 4196 * tap. The rest of the commands are executed after the link is up
4197 * Change FFE main cursor to 5 in EDC register
4198 */
4192 if (bnx2x_8073_is_snr_needed(bp, phy)) 4199 if (bnx2x_8073_is_snr_needed(bp, phy))
4193 bnx2x_cl45_write(bp, phy, 4200 bnx2x_cl45_write(bp, phy,
4194 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, 4201 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4272,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4272 4279
4273 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); 4280 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
4274 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { 4281 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
4275 /* The SNR will improve about 2dbby 4282 /*
4276 changing the BW and FEE main tap.*/ 4283 * The SNR will improve about 2db by changing the BW and FFE main
4277 /* The 1st write to change FFE main 4284 * tap. The 1st write to change FFE main tap is set before
4278 tap is set before restart AN */ 4285 * restarting AN. Change PLL Bandwidth in EDC register
4279 /* Change PLL Bandwidth in EDC 4286 */
4280 register */
4281 bnx2x_cl45_write(bp, phy, 4287 bnx2x_cl45_write(bp, phy,
4282 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, 4288 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
4283 0x26BC); 4289 0x26BC);
@@ -4314,8 +4320,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4314 } 4320 }
4315 4321
4316 if (link_up) { 4322 if (link_up) {
4323 /* Swap polarity if required */
4324 if (params->lane_config &
4325 PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4326 /* Configure the 8073 to swap P and N of the KR lines */
4327 bnx2x_cl45_read(bp, phy,
4328 MDIO_XS_DEVAD,
4329 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
4330 /*
4331 * Set bit 3 to invert Rx in 1G mode and clear this bit
4332 * when it's in 10G mode.
4333 */
4334 if (vars->line_speed == SPEED_1000) {
4335 DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
4336 "the 8073\n");
4337 val1 |= (1<<3);
4338 } else
4339 val1 &= ~(1<<3);
4340
4341 bnx2x_cl45_write(bp, phy,
4342 MDIO_XS_DEVAD,
4343 MDIO_XS_REG_8073_RX_CTRL_PCIE,
4344 val1);
4345 }
4317 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 4346 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
4318 bnx2x_8073_resolve_fc(phy, params, vars); 4347 bnx2x_8073_resolve_fc(phy, params, vars);
4348 vars->duplex = DUPLEX_FULL;
4319 } 4349 }
4320 return link_up; 4350 return link_up;
4321} 4351}
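The run-time half of the polarity fix is a plain read-modify-write keyed on the resolved speed: bit 3 of the 8073 XS Rx control register inverts Rx in 1G mode and must be cleared again for 10G, whose Rx/Tx inversion is already covered by the init-time OPT_DIGITAL_CTRL write. As a pattern it reduces to a few lines, with rd_xs()/wr_xs() as hypothetical shorthands for the cl45 accessors:

/* Hedged sketch of the speed-dependent Rx polarity toggle above. */
u16 val = rd_xs(phy, MDIO_XS_REG_8073_RX_CTRL_PCIE);

if (vars->line_speed == SPEED_1000)
	val |= (1 << 3);	/* invert Rx polarity in 1G mode */
else
	val &= ~(1 << 3);	/* normal Rx polarity at 10G */
wr_xs(phy, MDIO_XS_REG_8073_RX_CTRL_PCIE, val);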
@@ -4332,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
4332 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", 4362 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
4333 gpio_port); 4363 gpio_port);
4334 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4364 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4335 MISC_REGISTERS_GPIO_OUTPUT_LOW, 4365 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4336 gpio_port); 4366 gpio_port);
4337} 4367}
4338 4368
4339/******************************************************************/ 4369/******************************************************************/
@@ -4347,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
4347 DP(NETIF_MSG_LINK, "init 8705\n"); 4377 DP(NETIF_MSG_LINK, "init 8705\n");
4348 /* Restore normal power mode*/ 4378 /* Restore normal power mode*/
4349 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4379 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4350 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 4380 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
4351 /* HW reset */ 4381 /* HW reset */
4352 bnx2x_ext_phy_hw_reset(bp, params->port); 4382 bnx2x_ext_phy_hw_reset(bp, params->port);
4353 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); 4383 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
4354 bnx2x_wait_reset_complete(bp, phy); 4384 bnx2x_wait_reset_complete(bp, phy, params);
4355 4385
4356 bnx2x_cl45_write(bp, phy, 4386 bnx2x_cl45_write(bp, phy,
4357 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); 4387 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4402,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
4402/******************************************************************/ 4432/******************************************************************/
4403/* SFP+ module Section */ 4433/* SFP+ module Section */
4404/******************************************************************/ 4434/******************************************************************/
4405static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, 4435static u8 bnx2x_get_gpio_port(struct link_params *params)
4436{
4437 u8 gpio_port;
4438 u32 swap_val, swap_override;
4439 struct bnx2x *bp = params->bp;
4440 if (CHIP_IS_E2(bp))
4441 gpio_port = BP_PATH(bp);
4442 else
4443 gpio_port = params->port;
4444 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4445 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4446 return gpio_port ^ (swap_val && swap_override);
4447}
4448static void bnx2x_sfp_set_transmitter(struct link_params *params,
4406 struct bnx2x_phy *phy, 4449 struct bnx2x_phy *phy,
4407 u8 port,
4408 u8 tx_en) 4450 u8 tx_en)
4409{ 4451{
4410 u16 val; 4452 u16 val;
4453 u8 port = params->port;
4454 struct bnx2x *bp = params->bp;
4455 u32 tx_en_mode;
4411 4456
4412 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
4413 tx_en, port);
4414 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ 4457 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
4415 bnx2x_cl45_read(bp, phy, 4458 tx_en_mode = REG_RD(bp, params->shmem_base +
4416 MDIO_PMA_DEVAD, 4459 offsetof(struct shmem_region,
4417 MDIO_PMA_REG_PHY_IDENTIFIER, 4460 dev_info.port_hw_config[port].sfp_ctrl)) &
4418 &val); 4461 PORT_HW_CFG_TX_LASER_MASK;
4462 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
4463 "mode = %x\n", tx_en, port, tx_en_mode);
4464 switch (tx_en_mode) {
4465 case PORT_HW_CFG_TX_LASER_MDIO:
4419 4466
4420 if (tx_en) 4467 bnx2x_cl45_read(bp, phy,
4421 val &= ~(1<<15); 4468 MDIO_PMA_DEVAD,
4422 else 4469 MDIO_PMA_REG_PHY_IDENTIFIER,
4423 val |= (1<<15); 4470 &val);
4424 4471
4425 bnx2x_cl45_write(bp, phy, 4472 if (tx_en)
4426 MDIO_PMA_DEVAD, 4473 val &= ~(1<<15);
4427 MDIO_PMA_REG_PHY_IDENTIFIER, 4474 else
4428 val); 4475 val |= (1<<15);
4476
4477 bnx2x_cl45_write(bp, phy,
4478 MDIO_PMA_DEVAD,
4479 MDIO_PMA_REG_PHY_IDENTIFIER,
4480 val);
4481 break;
4482 case PORT_HW_CFG_TX_LASER_GPIO0:
4483 case PORT_HW_CFG_TX_LASER_GPIO1:
4484 case PORT_HW_CFG_TX_LASER_GPIO2:
4485 case PORT_HW_CFG_TX_LASER_GPIO3:
4486 {
4487 u16 gpio_pin;
4488 u8 gpio_port, gpio_mode;
4489 if (tx_en)
4490 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
4491 else
4492 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
4493
4494 gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
4495 gpio_port = bnx2x_get_gpio_port(params);
4496 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
4497 break;
4498 }
4499 default:
4500 DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
4501 break;
4502 }
4429} 4503}
4430 4504
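bnx2x_get_gpio_port() above folds the NIG port-swap strapping into every GPIO access: on E2 parts the path (BP_PATH) rather than the port selects the GPIO bank, and the final XOR flips it only when both the swap strap and its override are set. The && (rather than &) is deliberate; the two raw 32-bit register values are collapsed to one boolean before the XOR:

/* swap_val/swap_override are raw register reads; any non-zero pair
 * flips the port, so reduce them to a single bit first (sketch,
 * base_port stands for the E2 path / port choice above). */
u8 flip = (swap_val && swap_override);	/* 0 or 1 */
u8 gpio_port = base_port ^ flip;	/* flip port 0 <-> 1 */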
4431static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, 4505static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4432 struct link_params *params, 4506 struct link_params *params,
4433 u16 addr, u8 byte_cnt, u8 *o_buf) 4507 u16 addr, u8 byte_cnt, u8 *o_buf)
4434{ 4508{
4435 struct bnx2x *bp = params->bp; 4509 struct bnx2x *bp = params->bp;
4436 u16 val = 0; 4510 u16 val = 0;
@@ -4443,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4443 /* Set the read command byte count */ 4517 /* Set the read command byte count */
4444 bnx2x_cl45_write(bp, phy, 4518 bnx2x_cl45_write(bp, phy,
4445 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 4519 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
4446 (byte_cnt | 0xa000)); 4520 (byte_cnt | 0xa000));
4447 4521
4448 /* Set the read command address */ 4522 /* Set the read command address */
4449 bnx2x_cl45_write(bp, phy, 4523 bnx2x_cl45_write(bp, phy,
4450 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 4524 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
4451 addr); 4525 addr);
4452 4526
4453 /* Activate read command */ 4527 /* Activate read command */
4454 bnx2x_cl45_write(bp, phy, 4528 bnx2x_cl45_write(bp, phy,
4455 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4529 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4456 0x2c0f); 4530 0x2c0f);
4457 4531
4458 /* Wait up to 500us for command complete status */ 4532 /* Wait up to 500us for command complete status */
4459 for (i = 0; i < 100; i++) { 4533 for (i = 0; i < 100; i++) {
4460 bnx2x_cl45_read(bp, phy, 4534 bnx2x_cl45_read(bp, phy,
4461 MDIO_PMA_DEVAD, 4535 MDIO_PMA_DEVAD,
4462 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4536 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4463 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4537 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4464 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 4538 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
4465 break; 4539 break;
@@ -4477,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4477 /* Read the buffer */ 4551 /* Read the buffer */
4478 for (i = 0; i < byte_cnt; i++) { 4552 for (i = 0; i < byte_cnt; i++) {
4479 bnx2x_cl45_read(bp, phy, 4553 bnx2x_cl45_read(bp, phy,
4480 MDIO_PMA_DEVAD, 4554 MDIO_PMA_DEVAD,
4481 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); 4555 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
4482 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); 4556 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
4483 } 4557 }
4484 4558
4485 for (i = 0; i < 100; i++) { 4559 for (i = 0; i < 100; i++) {
4486 bnx2x_cl45_read(bp, phy, 4560 bnx2x_cl45_read(bp, phy,
4487 MDIO_PMA_DEVAD, 4561 MDIO_PMA_DEVAD,
4488 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4562 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4489 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4563 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4490 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4564 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
4491 return 0; 4565 return 0;
@@ -4496,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4496 4570
4497static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, 4571static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4498 struct link_params *params, 4572 struct link_params *params,
4499 u16 addr, u8 byte_cnt, u8 *o_buf) 4573 u16 addr, u8 byte_cnt, u8 *o_buf)
4500{ 4574{
4501 struct bnx2x *bp = params->bp; 4575 struct bnx2x *bp = params->bp;
4502 u16 val, i; 4576 u16 val, i;
@@ -4509,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4509 4583
4510 /* Need to read from 1.8000 to clear it */ 4584 /* Need to read from 1.8000 to clear it */
4511 bnx2x_cl45_read(bp, phy, 4585 bnx2x_cl45_read(bp, phy,
4512 MDIO_PMA_DEVAD, 4586 MDIO_PMA_DEVAD,
4513 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4587 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4514 &val); 4588 &val);
4515 4589
4516 /* Set the read command byte count */ 4590 /* Set the read command byte count */
4517 bnx2x_cl45_write(bp, phy, 4591 bnx2x_cl45_write(bp, phy,
4518 MDIO_PMA_DEVAD, 4592 MDIO_PMA_DEVAD,
4519 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 4593 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
4520 ((byte_cnt < 2) ? 2 : byte_cnt)); 4594 ((byte_cnt < 2) ? 2 : byte_cnt));
4521 4595
4522 /* Set the read command address */ 4596 /* Set the read command address */
4523 bnx2x_cl45_write(bp, phy, 4597 bnx2x_cl45_write(bp, phy,
4524 MDIO_PMA_DEVAD, 4598 MDIO_PMA_DEVAD,
4525 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 4599 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
4526 addr); 4600 addr);
4527 /* Set the destination address */ 4601 /* Set the destination address */
4528 bnx2x_cl45_write(bp, phy, 4602 bnx2x_cl45_write(bp, phy,
4529 MDIO_PMA_DEVAD, 4603 MDIO_PMA_DEVAD,
4530 0x8004, 4604 0x8004,
4531 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); 4605 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
4532 4606
4533 /* Activate read command */ 4607 /* Activate read command */
4534 bnx2x_cl45_write(bp, phy, 4608 bnx2x_cl45_write(bp, phy,
4535 MDIO_PMA_DEVAD, 4609 MDIO_PMA_DEVAD,
4536 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4610 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4537 0x8002); 4611 0x8002);
4538 /* Wait appropriate time for two-wire command to finish before 4612 /*
4539 polling the status register */ 4613 * Wait appropriate time for two-wire command to finish before
4614 * polling the status register
4615 */
4540 msleep(1); 4616 msleep(1);
4541 4617
4542 /* Wait up to 500us for command complete status */ 4618 /* Wait up to 500us for command complete status */
4543 for (i = 0; i < 100; i++) { 4619 for (i = 0; i < 100; i++) {
4544 bnx2x_cl45_read(bp, phy, 4620 bnx2x_cl45_read(bp, phy,
4545 MDIO_PMA_DEVAD, 4621 MDIO_PMA_DEVAD,
4546 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4622 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4547 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4623 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4548 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 4624 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
4549 break; 4625 break;
@@ -4555,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4555 DP(NETIF_MSG_LINK, 4631 DP(NETIF_MSG_LINK,
4556 "Got bad status 0x%x when reading from SFP+ EEPROM\n", 4632 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
4557 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); 4633 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
4558 return -EINVAL; 4634 return -EFAULT;
4559 } 4635 }
4560 4636
4561 /* Read the buffer */ 4637 /* Read the buffer */
4562 for (i = 0; i < byte_cnt; i++) { 4638 for (i = 0; i < byte_cnt; i++) {
4563 bnx2x_cl45_read(bp, phy, 4639 bnx2x_cl45_read(bp, phy,
4564 MDIO_PMA_DEVAD, 4640 MDIO_PMA_DEVAD,
4565 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); 4641 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
4566 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); 4642 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
4567 } 4643 }
4568 4644
4569 for (i = 0; i < 100; i++) { 4645 for (i = 0; i < 100; i++) {
4570 bnx2x_cl45_read(bp, phy, 4646 bnx2x_cl45_read(bp, phy,
4571 MDIO_PMA_DEVAD, 4647 MDIO_PMA_DEVAD,
4572 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4648 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4573 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4649 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4574 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4650 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
4575 return 0; 4651 return 0;
@@ -4579,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4579 return -EINVAL; 4655 return -EINVAL;
4580} 4656}
4581 4657
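Both EEPROM readers drive the same two-wire (I2C) engine through PMA registers, and the transaction has one shape: program the byte count, program the EEPROM offset, (on the 8727) point the engine at its data buffer, kick the transfer, poll for COMPLETE within roughly 500us, then drain the data buffer and wait for IDLE. A condensed sketch with wr()/rd() as hypothetical shorthands for the cl45 accessors, register names abbreviated, and the intra-poll delay assumed:

/* Two-wire SFP+ EEPROM read, condensed from the two variants above. */
static int sfp_eeprom_read_sketch(u16 addr, u8 byte_cnt, u8 *o_buf)
{
	u16 i, val = 0;

	wr(TWO_WIRE_BYTE_CNT, byte_cnt);	/* bytes to fetch */
	wr(TWO_WIRE_MEM_ADDR, addr);		/* EEPROM offset */
	wr(TWO_WIRE_CTRL, TWO_WIRE_START);	/* kick the engine */

	for (i = 0; i < 100; i++) {		/* <= ~500us total */
		val = rd(TWO_WIRE_CTRL);
		if ((val & STATUS_MASK) == STATUS_COMPLETE)
			break;
		udelay(5);
	}
	if ((val & STATUS_MASK) != STATUS_COMPLETE)
		return -EFAULT;			/* as in the 8727 path */
	for (i = 0; i < byte_cnt; i++)
		o_buf[i] = rd(TWO_WIRE_DATA_BUF + i) & 0xff;
	return 0;
}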
4582static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, 4658u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4583 struct link_params *params, u16 addr, 4659 struct link_params *params, u16 addr,
4584 u8 byte_cnt, u8 *o_buf) 4660 u8 byte_cnt, u8 *o_buf)
4585{ 4661{
4586 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 4662 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
4587 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, 4663 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
4588 byte_cnt, o_buf); 4664 byte_cnt, o_buf);
4589 else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 4665 else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
4590 return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, 4666 return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
4591 byte_cnt, o_buf); 4667 byte_cnt, o_buf);
4592 return -EINVAL; 4668 return -EINVAL;
4593} 4669}
4594 4670
4595static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, 4671static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4596 struct link_params *params, 4672 struct link_params *params,
4597 u16 *edc_mode) 4673 u16 *edc_mode)
4598{ 4674{
4599 struct bnx2x *bp = params->bp; 4675 struct bnx2x *bp = params->bp;
4600 u8 val, check_limiting_mode = 0; 4676 u8 val, check_limiting_mode = 0;
@@ -4615,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4615 { 4691 {
4616 u8 copper_module_type; 4692 u8 copper_module_type;
4617 4693
4618 /* Check if its active cable( includes SFP+ module) 4694 /*
4619 of passive cable*/ 4695 * Check if it's an active cable (includes SFP+ module)
4696 * or a passive cable
4697 */
4620 if (bnx2x_read_sfp_module_eeprom(phy, 4698 if (bnx2x_read_sfp_module_eeprom(phy,
4621 params, 4699 params,
4622 SFP_EEPROM_FC_TX_TECH_ADDR, 4700 SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4675,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4675 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 4753 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
4676 return 0; 4754 return 0;
4677} 4755}
4678/* This function read the relevant field from the module ( SFP+ ), 4756/*
4679 and verify it is compliant with this board */ 4757 * This function reads the relevant field from the module (SFP+), and verifies it
4758 * is compliant with this board
4759 */
4680static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, 4760static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4681 struct link_params *params) 4761 struct link_params *params)
4682{ 4762{
@@ -4725,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4725 /* format the warning message */ 4805 /* format the warning message */
4726 if (bnx2x_read_sfp_module_eeprom(phy, 4806 if (bnx2x_read_sfp_module_eeprom(phy,
4727 params, 4807 params,
4728 SFP_EEPROM_VENDOR_NAME_ADDR, 4808 SFP_EEPROM_VENDOR_NAME_ADDR,
4729 SFP_EEPROM_VENDOR_NAME_SIZE, 4809 SFP_EEPROM_VENDOR_NAME_SIZE,
4730 (u8 *)vendor_name)) 4810 (u8 *)vendor_name))
4731 vendor_name[0] = '\0'; 4811 vendor_name[0] = '\0';
4732 else 4812 else
4733 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; 4813 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
4734 if (bnx2x_read_sfp_module_eeprom(phy, 4814 if (bnx2x_read_sfp_module_eeprom(phy,
4735 params, 4815 params,
4736 SFP_EEPROM_PART_NO_ADDR, 4816 SFP_EEPROM_PART_NO_ADDR,
4737 SFP_EEPROM_PART_NO_SIZE, 4817 SFP_EEPROM_PART_NO_SIZE,
4738 (u8 *)vendor_pn)) 4818 (u8 *)vendor_pn))
4739 vendor_pn[0] = '\0'; 4819 vendor_pn[0] = '\0';
4740 else 4820 else
4741 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; 4821 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
4742 4822
4743 netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected," 4823 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
4744 " Port %d from %s part number %s\n", 4824 " Port %d from %s part number %s\n",
4745 params->port, vendor_name, vendor_pn); 4825 params->port, vendor_name, vendor_pn);
4746 phy->flags |= FLAGS_SFP_NOT_APPROVED; 4826 phy->flags |= FLAGS_SFP_NOT_APPROVED;
4747 return -EINVAL; 4827 return -EINVAL;
4748} 4828}
@@ -4754,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
4754 u8 val; 4834 u8 val;
4755 struct bnx2x *bp = params->bp; 4835 struct bnx2x *bp = params->bp;
4756 u16 timeout; 4836 u16 timeout;
4757 /* Initialization time after hot-plug may take up to 300ms for some 4837 /*
4758 phys type ( e.g. JDSU ) */ 4838 * Initialization time after hot-plug may take up to 300ms for
4839 * some phys type ( e.g. JDSU )
4840 */
4841
4759 for (timeout = 0; timeout < 60; timeout++) { 4842 for (timeout = 0; timeout < 60; timeout++) {
4760 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) 4843 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
4761 == 0) { 4844 == 0) {
@@ -4774,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
4774 /* Make sure GPIOs are not used for LED mode */ 4857 /* Make sure GPIOs are not used for LED mode */
4775 u16 val; 4858 u16 val;
4776 /* 4859 /*
4777 * In the GPIO register, bit 4 is use to detemine if the GPIOs are 4860 * In the GPIO register, bit 4 is used to determine if the GPIOs are
4778 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for 4861 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
4779 * output 4862 * output
4780 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 4863 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
4781 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 4864 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
4782 * where the 1st bit is the over-current (input only), and the 2nd bit is 4865 * where the 1st bit is the over-current (input only), and the 2nd bit is
4783 * for power (output only) 4866 * for power (output only)
4784 */ 4867 *
4785
4786 /*
4787 * In case the NOC feature is disabled and power is up, set GPIO control 4868 * In case the NOC feature is disabled and power is up, set GPIO control
4788 * as input to enable listening for the over-current indication 4869 * as input to enable listening for the over-current indication
4789 */ 4870 */
@@ -4812,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
4812 u16 cur_limiting_mode; 4893 u16 cur_limiting_mode;
4813 4894
4814 bnx2x_cl45_read(bp, phy, 4895 bnx2x_cl45_read(bp, phy,
4815 MDIO_PMA_DEVAD, 4896 MDIO_PMA_DEVAD,
4816 MDIO_PMA_REG_ROM_VER2, 4897 MDIO_PMA_REG_ROM_VER2,
4817 &cur_limiting_mode); 4898 &cur_limiting_mode);
4818 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n", 4899 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
4819 cur_limiting_mode); 4900 cur_limiting_mode);
4820 4901
4821 if (edc_mode == EDC_MODE_LIMITING) { 4902 if (edc_mode == EDC_MODE_LIMITING) {
4822 DP(NETIF_MSG_LINK, 4903 DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
4823 "Setting LIMITING MODE\n");
4824 bnx2x_cl45_write(bp, phy, 4904 bnx2x_cl45_write(bp, phy,
4825 MDIO_PMA_DEVAD, 4905 MDIO_PMA_DEVAD,
4826 MDIO_PMA_REG_ROM_VER2, 4906 MDIO_PMA_REG_ROM_VER2,
@@ -4829,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
4829 4909
4830 DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); 4910 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
4831 4911
4832 /* Changing to LRM mode takes quite few seconds. 4912 /*
4833 So do it only if current mode is limiting 4913 * Changing to LRM mode takes quite a few seconds. So do it only
4834 ( default is LRM )*/ 4914 * if the current mode is limiting (the default is LRM)
4915 */
4835 if (cur_limiting_mode != EDC_MODE_LIMITING) 4916 if (cur_limiting_mode != EDC_MODE_LIMITING)
4836 return 0; 4917 return 0;
4837 4918
4838 bnx2x_cl45_write(bp, phy, 4919 bnx2x_cl45_write(bp, phy,
4839 MDIO_PMA_DEVAD, 4920 MDIO_PMA_DEVAD,
4840 MDIO_PMA_REG_LRM_MODE, 4921 MDIO_PMA_REG_LRM_MODE,
4841 0); 4922 0);
4842 bnx2x_cl45_write(bp, phy, 4923 bnx2x_cl45_write(bp, phy,
4843 MDIO_PMA_DEVAD, 4924 MDIO_PMA_DEVAD,
4844 MDIO_PMA_REG_ROM_VER2, 4925 MDIO_PMA_REG_ROM_VER2,
4845 0x128); 4926 0x128);
4846 bnx2x_cl45_write(bp, phy, 4927 bnx2x_cl45_write(bp, phy,
4847 MDIO_PMA_DEVAD, 4928 MDIO_PMA_DEVAD,
4848 MDIO_PMA_REG_MISC_CTRL0, 4929 MDIO_PMA_REG_MISC_CTRL0,
4849 0x4008); 4930 0x4008);
4850 bnx2x_cl45_write(bp, phy, 4931 bnx2x_cl45_write(bp, phy,
4851 MDIO_PMA_DEVAD, 4932 MDIO_PMA_DEVAD,
4852 MDIO_PMA_REG_LRM_MODE, 4933 MDIO_PMA_REG_LRM_MODE,
4853 0xaaaa); 4934 0xaaaa);
4854 } 4935 }
4855 return 0; 4936 return 0;
4856} 4937}
4857 4938
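The asymmetry in bnx2x_8726_set_limiting_mode() is deliberate: entering LIMITING is a single register write, while entering LRM is a four-write sequence that takes quite a few seconds, so it is gated on the current mode and skipped whenever the part is already out of LIMITING (LRM is the power-on default). The guard in isolation, names hypothetical:

/* Sketch of the cheap-vs-expensive EDC mode switch guard above. */
cur = rd(MDIO_PMA_REG_ROM_VER2);	/* current EDC mode */
if (edc_mode == EDC_MODE_LIMITING) {
	wr(MDIO_PMA_REG_ROM_VER2, EDC_MODE_LIMITING);	/* cheap path */
	return 0;
}
if (cur != EDC_MODE_LIMITING)
	return 0;	/* already LRM (the default): skip the slow switch */
do_lrm_sequence();	/* the four writes shown above */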
4858static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp, 4939static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
4859 struct bnx2x_phy *phy, 4940 struct bnx2x_phy *phy,
4860 u16 edc_mode) 4941 u16 edc_mode)
4861{ 4942{
4862 u16 phy_identifier; 4943 u16 phy_identifier;
4863 u16 rom_ver2_val; 4944 u16 rom_ver2_val;
4864 bnx2x_cl45_read(bp, phy, 4945 bnx2x_cl45_read(bp, phy,
4865 MDIO_PMA_DEVAD, 4946 MDIO_PMA_DEVAD,
4866 MDIO_PMA_REG_PHY_IDENTIFIER, 4947 MDIO_PMA_REG_PHY_IDENTIFIER,
4867 &phy_identifier); 4948 &phy_identifier);
4868 4949
4869 bnx2x_cl45_write(bp, phy, 4950 bnx2x_cl45_write(bp, phy,
4870 MDIO_PMA_DEVAD, 4951 MDIO_PMA_DEVAD,
4871 MDIO_PMA_REG_PHY_IDENTIFIER, 4952 MDIO_PMA_REG_PHY_IDENTIFIER,
4872 (phy_identifier & ~(1<<9))); 4953 (phy_identifier & ~(1<<9)));
4873 4954
4874 bnx2x_cl45_read(bp, phy, 4955 bnx2x_cl45_read(bp, phy,
4875 MDIO_PMA_DEVAD, 4956 MDIO_PMA_DEVAD,
4876 MDIO_PMA_REG_ROM_VER2, 4957 MDIO_PMA_REG_ROM_VER2,
4877 &rom_ver2_val); 4958 &rom_ver2_val);
4878 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ 4959 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
4879 bnx2x_cl45_write(bp, phy, 4960 bnx2x_cl45_write(bp, phy,
4880 MDIO_PMA_DEVAD, 4961 MDIO_PMA_DEVAD,
4881 MDIO_PMA_REG_ROM_VER2, 4962 MDIO_PMA_REG_ROM_VER2,
4882 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); 4963 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
4883 4964
4884 bnx2x_cl45_write(bp, phy, 4965 bnx2x_cl45_write(bp, phy,
4885 MDIO_PMA_DEVAD, 4966 MDIO_PMA_DEVAD,
4886 MDIO_PMA_REG_PHY_IDENTIFIER, 4967 MDIO_PMA_REG_PHY_IDENTIFIER,
4887 (phy_identifier | (1<<9))); 4968 (phy_identifier | (1<<9)));
4888 4969
4889 return 0; 4970 return 0;
4890} 4971}
@@ -4897,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
4897 4978
4898 switch (action) { 4979 switch (action) {
4899 case DISABLE_TX: 4980 case DISABLE_TX:
4900 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); 4981 bnx2x_sfp_set_transmitter(params, phy, 0);
4901 break; 4982 break;
4902 case ENABLE_TX: 4983 case ENABLE_TX:
4903 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) 4984 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
4904 bnx2x_sfp_set_transmitter(bp, phy, params->port, 1); 4985 bnx2x_sfp_set_transmitter(params, phy, 1);
4905 break; 4986 break;
4906 default: 4987 default:
4907 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", 4988 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4910,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
4910 } 4991 }
4911} 4992}
4912 4993
4994static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
4995 u8 gpio_mode)
4996{
4997 struct bnx2x *bp = params->bp;
4998
4999 u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
5000 offsetof(struct shmem_region,
5001 dev_info.port_hw_config[params->port].sfp_ctrl)) &
5002 PORT_HW_CFG_FAULT_MODULE_LED_MASK;
5003 switch (fault_led_gpio) {
5004 case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
5005 return;
5006 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
5007 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
5008 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
5009 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
5010 {
5011 u8 gpio_port = bnx2x_get_gpio_port(params);
5012 u16 gpio_pin = fault_led_gpio -
5013 PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
5014 DP(NETIF_MSG_LINK, "Set fault module-detected led "
5015 "pin %x port %x mode %x\n",
5016 gpio_pin, gpio_port, gpio_mode);
5017 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
5018 }
5019 break;
5020 default:
5021 DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
5022 fault_led_gpio);
5023 }
5024}
5025
4913static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, 5026static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4914 struct link_params *params) 5027 struct link_params *params)
4915{ 5028{
@@ -4927,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4927 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { 5040 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
4928 DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); 5041 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
4929 return -EINVAL; 5042 return -EINVAL;
4930 } else if (bnx2x_verify_sfp_module(phy, params) != 5043 } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
4931 0) {
4932 /* check SFP+ module compatibility */ 5044 /* check SFP+ module compatibility */
4933 DP(NETIF_MSG_LINK, "Module verification failed!!\n"); 5045 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
4934 rc = -EINVAL; 5046 rc = -EINVAL;
4935 /* Turn on fault module-detected led */ 5047 /* Turn on fault module-detected led */
4936 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 5048 bnx2x_set_sfp_module_fault_led(params,
4937 MISC_REGISTERS_GPIO_HIGH, 5049 MISC_REGISTERS_GPIO_HIGH);
4938 params->port); 5050
4939 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && 5051 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
4940 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5052 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
4941 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { 5053 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4946,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 		}
 	} else {
 		/* Turn off fault module-detected led */
-		DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-			       MISC_REGISTERS_GPIO_LOW,
-			       params->port);
+		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
 	}
 
 	/* power up the SFP module */
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
 		bnx2x_8727_power_module(bp, phy, 1);
 
-	/* Check and set limiting mode / LRM mode on 8726.
-	   On 8727 it is done automatically */
+	/*
+	 * Check and set limiting mode / LRM mode on 8726. On 8727 it
+	 * is done automatically
+	 */
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
 		bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
 	else
@@ -4969,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 	if (rc == 0 ||
 	    (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
 	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+		bnx2x_sfp_set_transmitter(params, phy, 1);
 	else
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		bnx2x_sfp_set_transmitter(params, phy, 0);
 
 	return rc;
 }
@@ -4984,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
 	u8 port = params->port;
 
 	/* Set valid module led off */
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-		       MISC_REGISTERS_GPIO_HIGH,
-		       params->port);
+	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
 
-	/* Get current gpio val refelecting module plugged in / out*/
+	/* Get current gpio val reflecting module plugged in / out*/
 	gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
 
 	/* Call the handling function in case module is detected */
@@ -5004,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
 		DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
 	} else {
 		u32 val = REG_RD(bp, params->shmem_base +
 				 offsetof(struct shmem_region, dev_info.
 					  port_feature_config[params->port].
 					  config));
 
 		bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
 				   port);
-		/* Module was plugged out. */
-		/* Disable transmit for this module */
+		/*
+		 * Module was plugged out.
+		 * Disable transmit for this module
+		 */
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+			bnx2x_sfp_set_transmitter(params, phy, 0);
 	}
 }
 
@@ -5051,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 
 	DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
 			   " link_status 0x%x\n", rx_sd, pcs_status, val2);
-	/* link is up if both bit 0 of pmd_rx_sd and
-	 * bit 0 of pcs_status are set, or if the autoneg bit
-	 * 1 is set
+	/*
+	 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+	 * are set, or if the autoneg bit 1 is set
 	 */
 	link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
 	if (link_up) {
@@ -5062,6 +5173,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 		else
 			vars->line_speed = SPEED_10000;
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
@@ -5073,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
 				 struct link_params *params,
 				 struct link_vars *vars)
 {
-	u16 cnt, val;
+	u32 tx_en_mode;
+	u16 cnt, val, tmp1;
 	struct bnx2x *bp = params->bp;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	/* Wait until fw is loaded */
 	for (cnt = 0; cnt < 100; cnt++) {
@@ -5147,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
 				 0x0004);
 	}
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+	/*
+	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	 * power mode, if TX Laser is disabled
+	 */
+
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+		tmp1 |= 0x1;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+	}
+
 	return 0;
 }
 
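The TXONOFF_PWRDN_DIS step added above is a plain read-modify-write on one MDIO register: read MDIO_PMA_REG_DIGITAL_CTRL, OR in bit 0, write it back. A self-contained sketch of that idiom, with a stub register array standing in for real CL45 access (the 0xC808 address is hypothetical):

/* Read-modify-write idiom used for TXONOFF_PWRDN_DIS above.
 * regs[] is a stub standing in for CL45 MDIO access.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t regs[0x10000];			/* fake register space */

static uint16_t mdio_read(uint16_t reg)		 { return regs[reg]; }
static void mdio_write(uint16_t reg, uint16_t v) { regs[reg] = v; }

static void mdio_set_bits(uint16_t reg, uint16_t bits)
{
	uint16_t val = mdio_read(reg);	/* fetch current value */
	val |= bits;			/* touch only the requested bits */
	mdio_write(reg, val);		/* write back, rest unchanged */
}

int main(void)
{
	regs[0xC808] = 0x2000;		/* hypothetical DIGITAL_CTRL address */
	mdio_set_bits(0xC808, 0x1);	/* set bit 0, keep the rest */
	printf("0x%04x\n", regs[0xC808]);	/* prints 0x2001 */
	return 0;
}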
@@ -5181,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
 
 	/* Set soft reset */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_GEN_CTRL,
 			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_GEN_CTRL,
 			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
 	/* wait for 150ms for microcode load */
 	msleep(150);
 
 	/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 
 	msleep(200);
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5235,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
 	u32 val;
 	u32 swap_val, swap_override, aeu_gpio_mask, offset;
 	DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
-	/* Restore normal power mode*/
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_8726_external_rom_boot(phy, params);
 
-	/* Need to call module detected on initialization since
-	   the module detection triggered by actual module
-	   insertion might occur before driver is loaded, and when
-	   driver is loaded, it reset all registers, including the
-	   transmitter */
+	/*
+	 * Need to call module detected on initialization since the module
+	 * detection triggered by actual module insertion might occur before
+	 * driver is loaded, and when driver is loaded, it reset all
+	 * registers, including the transmitter
+	 */
 	bnx2x_sfp_module_detection(phy, params);
 
 	if (phy->req_line_speed == SPEED_1000) {
@@ -5284,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-		/* Enable RX-ALARM control to receive
-		   interrupt for 1G speed change */
+		/*
+		 * Enable RX-ALARM control to receive interrupt for 1G speed
+		 * change
+		 */
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
 		bnx2x_cl45_write(bp, phy,
@@ -5317,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
 
 	/* Set GPIO3 to trigger SFP+ module insertion/removal */
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
 
 	/* The GPIO should be swapped if the swap register is set and active */
 	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5408,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
 				  struct link_params *params) {
 	u32 swap_val, swap_override;
 	u8 port;
-	/**
+	/*
 	 * The PHY reset is controlled by GPIO 1. Fake the port number
 	 * to cancel the swap done in set_gpio()
 	 */
@@ -5417,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
 	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 	port = (swap_val && swap_override) ^ 1;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 }
 
 static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 				 struct link_params *params,
 				 struct link_vars *vars)
 {
-	u16 tmp1, val, mod_abs;
+	u32 tx_en_mode;
+	u16 tmp1, val, mod_abs, tmp2;
 	u16 rx_alarm_ctrl_val;
 	u16 lasi_ctrl_val;
 	struct bnx2x *bp = params->bp;
 	/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 	rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
 	lasi_ctrl_val = 0x0004;
 
@@ -5443,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
 
-	/* Initially configure MOD_ABS to interrupt when
-	   module is presence( bit 8) */
+	/*
+	 * Initially configure MOD_ABS to interrupt when module is
+	 * presence( bit 8)
+	 */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-	/* Set EDC off by setting OPTXLOS signal input to low
-	   (bit 9).
-	   When the EDC is off it locks onto a reference clock and
-	   avoids becoming 'lost'.*/
+	/*
+	 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+	 * When the EDC is off it locks onto a reference clock and avoids
+	 * becoming 'lost'
+	 */
 	mod_abs &= ~(1<<8);
 	if (!(phy->flags & FLAGS_NOC))
 		mod_abs &= ~(1<<9);
@@ -5465,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 	if (phy->flags & FLAGS_NOC)
 		val |= (3<<5);
 
-	/**
+	/*
 	 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
 	 * status which reflect SFP+ module over-current
 	 */
@@ -5492,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
 	DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-	/**
+	/*
 	 * Power down the XAUI until link is up in case of dual-media
 	 * and 1G
 	 */
@@ -5518,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
 	} else {
-		/**
+		/*
 		 * Since the 8727 has only single reset pin, need to set the 10G
 		 * registers although it is default
 		 */
@@ -5534,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 				 0x0008);
 	}
 
-	/* Set 2-wire transfer rate of SFP+ module EEPROM
+	/*
+	 * Set 2-wire transfer rate of SFP+ module EEPROM
 	 * to 100Khz since some DACs(direct attached cables) do
 	 * not work at 400Khz.
 	 */
@@ -5557,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 				 phy->tx_preemphasis[1]);
 	}
 
+	/*
+	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	 * power mode, if TX Laser is disabled
+	 */
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+		tmp2 |= 0x1000;
+		tmp2 &= 0xFFEF;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+	}
+
 	return 0;
 }
 
@@ -5570,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 			      port_feature_config[params->port].
 			      config));
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
 	if (mod_abs & (1<<8)) {
 
 		/* Module is absent */
 		DP(NETIF_MSG_LINK, "MOD_ABS indication "
 			    "show module is absent\n");
 
-		/* 1. Set mod_abs to detect next module
-		      presence event
-		   2. Set EDC off by setting OPTXLOS signal input to low
-		      (bit 9).
-		      When the EDC is off it locks onto a reference clock and
-		      avoids becoming 'lost'.*/
+		/*
+		 * 1. Set mod_abs to detect next module
+		 *    presence event
+		 * 2. Set EDC off by setting OPTXLOS signal input to low
+		 *    (bit 9).
+		 *    When the EDC is off it locks onto a reference clock and
+		 *    avoids becoming 'lost'.
+		 */
 		mod_abs &= ~(1<<8);
 		if (!(phy->flags & FLAGS_NOC))
 			mod_abs &= ~(1<<9);
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/* Clear RX alarm since it stays up as long as
-		   the mod_abs wasn't changed */
+		/*
+		 * Clear RX alarm since it stays up as long as
+		 * the mod_abs wasn't changed
+		 */
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
 
 	} else {
 		/* Module is present */
 		DP(NETIF_MSG_LINK, "MOD_ABS indication "
 			    "show module is present\n");
-		/* First thing, disable transmitter,
-		   and if the module is ok, the
-		   module_detection will enable it*/
-
-		/* 1. Set mod_abs to detect next module
-		      absent event ( bit 8)
-		   2. Restore the default polarity of the OPRXLOS signal and
-		      this signal will then correctly indicate the presence or
-		      absence of the Rx signal. (bit 9) */
+		/*
+		 * First disable transmitter, and if the module is ok, the
+		 * module_detection will enable it
+		 * 1. Set mod_abs to detect next module absent event ( bit 8)
+		 * 2. Restore the default polarity of the OPRXLOS signal and
+		 *    this signal will then correctly indicate the presence or
+		 *    absence of the Rx signal. (bit 9)
+		 */
 		mod_abs |= (1<<8);
 		if (!(phy->flags & FLAGS_NOC))
 			mod_abs |= (1<<9);
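Both branches above are symmetric around two bits of MDIO_PMA_REG_PHY_IDENTIFIER: bit 8 arms which edge (insertion vs. removal) raises the next MOD_ABS interrupt, and bit 9 gates the OPTXLOS/OPRXLOS handling on boards without FLAGS_NOC. A toy model of the arming logic, for illustration only:

/* Model of the MOD_ABS arming logic above; not driver code. */
#include <stdio.h>
#include <stdint.h>

static uint16_t arm_mod_abs(uint16_t mod_abs, int module_present, int noc)
{
	if (module_present) {
		mod_abs |= (1 << 8);		/* watch for removal next */
		if (!noc)
			mod_abs |= (1 << 9);	/* restore OPRXLOS polarity */
	} else {
		mod_abs &= ~(1 << 8);		/* watch for insertion next */
		if (!noc)
			mod_abs &= ~(1 << 9);	/* force EDC off */
	}
	return mod_abs;
}

int main(void)
{
	printf("0x%04x\n", arm_mod_abs(0, 1, 0));	/* 0x0300 */
	printf("0x%04x\n", arm_mod_abs(0x0300, 0, 0));	/* 0x0000 */
	return 0;
}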
@@ -5617,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/* Clear RX alarm since it stays up as long as
-		   the mod_abs wasn't changed. This is need to be done
-		   before calling the module detection, otherwise it will clear
-		   the link update alarm */
+		/*
+		 * Clear RX alarm since it stays up as long as the mod_abs
+		 * wasn't changed. This needs to be done before calling the
+		 * module detection, otherwise it will clear the link update
+		 * alarm
+		 */
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5628,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 
 	if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		bnx2x_sfp_set_transmitter(params, phy, 0);
 
 	if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
 		bnx2x_sfp_module_detection(phy, params);
@@ -5637,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 	}
 
 	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
 		   rx_alarm_status);
-	/* No need to check link status in case of
-	   module plugged in/out */
+	/* No need to check link status in case of module plugged in/out */
 }
 
 static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5675,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-	/**
+	/*
 	 * If a module is present and there is need to check
 	 * for over current
 	 */
@@ -5695,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 				  " Please remove the SFP+ module and"
 				  " restart the system to clear this"
 				  " error.\n",
 				  params->port);
-
-			/*
-			 * Disable all RX_ALARMs except for
-			 * mod_abs
-			 */
+			/* Disable all RX_ALARMs except for mod_abs */
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5743,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 
-	/* Bits 0..2 --> speed detected,
-	   bits 13..15--> link is down */
+	/*
+	 * Bits 0..2 --> speed detected,
+	 * Bits 13..15--> link is down
+	 */
 	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
 		link_up = 1;
 		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+			   params->port);
 	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
 		link_up = 1;
 		vars->line_speed = SPEED_1000;
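The comment above is the complete decode rule for MDIO_PMA_REG_8073_SPEED_LINK_STATUS: bits 0..2 report the detected speed, bits 13..15 report per-speed link-down. A toy decoder covering the two cases the driver checks:

/* Toy decoder for the 8073 speed/link status word; pure model. */
#include <stdio.h>
#include <stdint.h>

static int decode_speed(uint16_t link_status)
{
	if ((link_status & (1 << 2)) && !(link_status & (1 << 15)))
		return 10000;	/* 10G detected and not down */
	if ((link_status & (1 << 0)) && !(link_status & (1 << 13)))
		return 1000;	/* 1G detected and not down */
	return 0;		/* link down */
}

int main(void)
{
	printf("%d\n", decode_speed(0x0004));	/* 10000 */
	printf("%d\n", decode_speed(0x8004));	/* 0: 10G flagged down */
	return 0;
}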
@@ -5758,15 +5917,18 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
 		   params->port);
 	}
-	if (link_up)
+	if (link_up) {
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+	}
 
 	if ((DUAL_MEDIA(params)) &&
 	    (phy->req_line_speed == SPEED_1000)) {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_8727_PCS_GP, &val1);
-		/**
+		/*
 		 * In case of dual-media board and 1G, power up the XAUI side,
 		 * otherwise power it down. For 10G it is done automatically
 		 */
@@ -5786,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 {
 	struct bnx2x *bp = params->bp;
 	/* Disable Transmitter */
-	bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+	bnx2x_sfp_set_transmitter(params, phy, 0);
 	/* Clear LASI */
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
 
@@ -5798,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 					    struct link_params *params)
 {
-	u16 val, fw_ver1, fw_ver2, cnt;
+	u16 val, fw_ver1, fw_ver2, cnt, adj;
 	struct bnx2x *bp = params->bp;
 
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = -1;
+
 	/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
 	/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
 
 	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
 		if (val & 1)
 			break;
 		udelay(5);
@@ -5824,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 
 
 	/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
 	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
 		if (val & 1)
 			break;
 		udelay(5);
@@ -5841,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 	}
 
 	/* lower 16 bits of the register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
 	/* upper 16 bits of register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
 
 	bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
 				  phy->ver_addr);
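All the "+ adj" arithmetic above exists because the BCM84833 places this MDIO2ARM register block one address below the BCM84823's, so computing a per-chip delta once lets one write/poll sequence serve both. A standalone sketch (the chip enum is hypothetical; 0xA817 is the command address used in the patch):

/* Model of the per-chip register offset ("adj") trick above. */
#include <stdio.h>
#include <stdint.h>

enum chip { CHIP_84823, CHIP_84833 };

static uint16_t spi_reg(enum chip c, uint16_t base_reg)
{
	int adj = (c == CHIP_84833) ? -1 : 0;	/* mirrors adj in the patch */
	return (uint16_t)(base_reg + adj);
}

int main(void)
{
	printf("84823 cmd reg: 0x%04X\n", spi_reg(CHIP_84823, 0xA817));
	printf("84833 cmd reg: 0x%04X\n", spi_reg(CHIP_84833, 0xA817));
	return 0;
}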
@@ -5852,33 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
 				struct bnx2x_phy *phy)
 {
-	u16 val;
+	u16 val, adj;
+
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = -1;
 
 	/* PHYC_CTL_LED_CTL */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+			MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
 	val &= 0xFE00;
 	val |= 0x0092;
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+			 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED1_MASK,
+			 MDIO_PMA_REG_8481_LED1_MASK + adj,
 			 0x80);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED2_MASK,
+			 MDIO_PMA_REG_8481_LED2_MASK + adj,
 			 0x18);
 
+	/* Select activity source by Tx and Rx, as suggested by PHY AE */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED3_MASK,
-			 0x0040);
+			 MDIO_PMA_REG_8481_LED3_MASK + adj,
+			 0x0006);
+
+	/* Select the closest activity blink rate to that in 10/100/1000 */
+	bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8481_LED3_BLINK + adj,
+			0);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
+	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+
+	bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
 
 	/* 'Interrupt Mask' */
 	bnx2x_cl45_write(bp, phy,
@@ -5892,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 {
 	struct bnx2x *bp = params->bp;
 	u16 autoneg_val, an_1000_val, an_10_100_val;
-
+	/*
+	 * This phy uses the NIG latch mechanism since link indication
+	 * arrives through its LED4 and not via its LASI signal, so we
+	 * get steady signal instead of clear on read
+	 */
 	bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 		      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -6017,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
 	struct bnx2x *bp = params->bp;
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6033,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 {
 	struct bnx2x *bp = params->bp;
 	u8 port, initialize = 1;
-	u16 val;
+	u16 val, adj;
 	u16 temp;
-	u32 actual_phy_selection;
+	u32 actual_phy_selection, cms_enable;
 	u8 rc = 0;
 
 	/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = 3;
 
 	msleep(1);
 	if (CHIP_IS_E2(bp))
@@ -6048,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 		       port);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
-	/* BCM84823 requires that XGXS links up first @ 10G for normal
-	   behavior */
+	/*
+	 * BCM84823 requires that XGXS links up first @ 10G for normal behavior
+	 */
 	temp = vars->line_speed;
 	vars->line_speed = SPEED_10000;
 	bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6062,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 	/* Set dual-media configuration according to configuration */
 
 	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_CTL_REG_84823_MEDIA, &val);
+			MDIO_CTL_REG_84823_MEDIA + adj, &val);
 	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6095,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 		val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
 
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			 MDIO_CTL_REG_84823_MEDIA, val);
+			 MDIO_CTL_REG_84823_MEDIA + adj, val);
 	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 	   params->multi_phy_config, val);
 
@@ -6103,29 +6297,50 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 		rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
 	else
 		bnx2x_save_848xx_spirom_version(phy, params);
+	cms_enable = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].default_cfg)) &
+		PORT_HW_CFG_ENABLE_CMS_MASK;
+
+	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+		MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+	if (cms_enable)
+		val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+	else
+		val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+		MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+
+
 	return rc;
 }
 
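The CMS block just added follows the same configure-from-shmem pattern seen elsewhere in the patch: read a mask out of default_cfg, then set or clear one user-control bit accordingly. A compact model with hypothetical bit values:

/* Model of the CMS enable/disable step; both bit values hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define CFG_ENABLE_CMS_MASK	0x00400000u	/* hypothetical */
#define USER_CTRL_CMS		(1u << 8)	/* hypothetical */

static uint16_t apply_cms(uint32_t default_cfg, uint16_t user_ctrl)
{
	if (default_cfg & CFG_ENABLE_CMS_MASK)
		user_ctrl |= USER_CTRL_CMS;	/* turn the feature on */
	else
		user_ctrl &= ~USER_CTRL_CMS;	/* or force it off */
	return user_ctrl;
}

int main(void)
{
	printf("0x%04x\n", apply_cms(CFG_ENABLE_CMS_MASK, 0));	/* 0x0100 */
	printf("0x%04x\n", apply_cms(0, USER_CTRL_CMS));	/* 0x0000 */
	return 0;
}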
 static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
 				  struct link_params *params,
 				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 val, val1, val2;
+	u16 val, val1, val2, adj;
 	u8 link_up = 0;
 
+	/* Reg offset adjustment for 84833 */
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = -1;
+
 	/* Check 10G-BaseT link status */
 	/* Check PMD signal ok */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_AN_DEVAD, 0xFFFA, &val1);
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
 			&val2);
 	DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
 
 	/* Check link 10G */
 	if (val2 & (1<<11)) {
 		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
 		link_up = 1;
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 	} else { /* Check Legacy speed link */
@@ -6203,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
 				struct link_params *params)
 {
 	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
 	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
 }
 
 static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6227,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
 	else
 		port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
 		       port);
 }
 
 static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6283,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 
 		/* Set LED masks */
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_8481_LED1_MASK,
 				 0x0);
 
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_8481_LED2_MASK,
 				 0x0);
 
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_8481_LED3_MASK,
 				 0x0);
 
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_8481_LED5_MASK,
 				 0x20);
 
 	} else {
 		bnx2x_cl45_write(bp, phy,
@@ -6324,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 			val |= 0x2492;
 
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LINK_SIGNAL,
 					 val);
 
 			/* Set LED masks */
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
 					 0x0);
 
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED2_MASK,
 					 0x20);
 
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED3_MASK,
 					 0x20);
 
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED5_MASK,
 					 0x0);
 		} else {
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
 					 0x20);
 		}
 		break;
 
@@ -6370,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 				&val);
 
 		if (!((val &
 		       MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
-		      >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
-			DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+		      >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+			DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6381,30 +6596,42 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 
 			/* Set LED masks */
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
 					 0x10);
 
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED2_MASK,
 					 0x80);
 
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED3_MASK,
 					 0x98);
 
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED5_MASK,
 					 0x40);
 
 		} else {
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
 					 0x80);
+
+			/* Tell LED3 to blink on source */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+			val &= ~(7<<6);
+			val |= (1<<6); /* A83B[8:6]= 1 */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL,
+					 val);
 		}
 		break;
 	}
@@ -6431,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
 
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6481,14 +6708,13 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
 	DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
 	   val2, val1);
 	link_up = ((val1 & 4) == 4);
-	/* if link is up
-	 * print the AN outcome of the SFX7101 PHY
-	 */
+	/* if link is up print the AN outcome of the SFX7101 PHY */
 	if (link_up) {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
 				&val2);
 		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
 		DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
 		   val2, (val2 & (1<<14)));
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -6516,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
 	u16 val, cnt;
 
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_7101_RESET, &val);
 
 	for (cnt = 0; cnt < 10; cnt++) {
 		msleep(50);
 		/* Writes a self-clearing reset */
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_7101_RESET,
 				 (val | (1<<15)));
 		/* Wait for clear */
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_7101_RESET, &val);
 
 		if ((val & (1<<15)) == 0)
 			break;
@@ -6540,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
 			      struct link_params *params) {
 	/* Low power mode is controlled by GPIO 2 */
 	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
 	/* The PHY reset is controlled by GPIO 1 */
 	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
 }
 
 static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6585,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
 	.supported	= 0,
 	.media_type	= ETH_PHY_NOT_PRESENT,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
 	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)NULL,
@@ -6622,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
 	.media_type	= ETH_PHY_UNSPECIFIED,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
 	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)bnx2x_init_serdes,
@@ -6659,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
 	.media_type	= ETH_PHY_UNSPECIFIED,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
 	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)bnx2x_init_xgxs,
@@ -6690,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
 	.media_type	= ETH_PHY_BASE_T,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
 	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)bnx2x_7101_config_init,
@@ -6721,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
 			   SUPPORTED_Asym_Pause),
 	.media_type	= ETH_PHY_UNSPECIFIED,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
 	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)bnx2x_8073_config_init,
@@ -6932,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
 
+static struct bnx2x_phy phy_84833 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+	.addr		= 0xff,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			    FLAGS_REARM_LATCH_SIGNAL,
+	.def_md_devad	= 0,
+	.reserved	= 0,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
 /*****************************************************************/
 /*                                                               */
 /* Populate the phy according. Main function: bnx2x_populate_phy */
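Wiring in the new PHY takes exactly two pieces: the filled-in bnx2x_phy descriptor above and a case in the populate switch (added in a later hunk) that copies it. A minimal standalone version of that function-pointer-table pattern, with hypothetical type codes:

/* Minimal sketch of the ops-table pattern used by the bnx2x PHY
 * layer; type codes and handlers here are hypothetical.
 */
#include <stdio.h>

struct phy_ops {
	const char *name;
	int (*config_init)(void);
};

static int init_84823(void) { puts("init 84823"); return 0; }
static int init_84833(void) { puts("init 84833"); return 0; }

static const struct phy_ops phy_84823_ops = { "BCM84823", init_84823 };
static const struct phy_ops phy_84833_ops = { "BCM84833", init_84833 };

static const struct phy_ops *populate(unsigned type)
{
	switch (type) {
	case 0x0b: return &phy_84823_ops;	/* hypothetical codes */
	case 0x0d: return &phy_84833_ops;
	default:   return NULL;
	}
}

int main(void)
{
	const struct phy_ops *ops = populate(0x0d);
	return ops ? ops->config_init() : 1;
}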
@@ -6945,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
 	/* Get the 4 lanes xgxs config rx and tx */
 	u32 rx = 0, tx = 0, i;
 	for (i = 0; i < 2; i++) {
-		/**
+		/*
 		 * INT_PHY and EXT_PHY1 share the same value location in the
 		 * shmem. When num_phys is greater than 1, than this value
 		 * applies only to EXT_PHY1
@@ -6953,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
 		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
 			rx = REG_RD(bp, shmem_base +
 				    offsetof(struct shmem_region,
 			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
 
 			tx = REG_RD(bp, shmem_base +
 				    offsetof(struct shmem_region,
 			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
 		} else {
 			rx = REG_RD(bp, shmem_base +
 				    offsetof(struct shmem_region,
 			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
 
 			tx = REG_RD(bp, shmem_base +
 				    offsetof(struct shmem_region,
 			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
 		}
 
 		phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7085,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 		*phy = phy_84823;
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		*phy = phy_84833;
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
 		*phy = phy_7101;
 		break;
@@ -7099,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
 	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
 	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 
-	/**
+	/*
 	 * The shmem address of the phy version is located on different
 	 * structures. In case this structure is too old, do not set
 	 * the address
 	 */
 	config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
 					dev_info.shared_hw_config.config2));
 	if (phy_index == EXT_PHY1) {
 		phy->ver_addr = shmem_base + offsetof(struct shmem_region,
 				port_mb[port].ext_phy_fw_version);
 
 		/* Check specific mdc mdio settings */
 		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
 			mdc_mdio_access = config2 &
 					  SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
 	} else {
 		u32 size = REG_RD(bp, shmem2_base);
 
@@ -7132,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
 	}
 	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-	/**
+	/*
 	 * In case mdc/mdio_access of the external phy is different than the
 	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
 	 * to prevent one port interfere with another port's CL45 operations.
@@ -7167,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
7167 /* Populate the default phy configuration for MF mode */ 7433 /* Populate the default phy configuration for MF mode */
7168 if (phy_index == EXT_PHY2) { 7434 if (phy_index == EXT_PHY2) {
7169 link_config = REG_RD(bp, params->shmem_base + 7435 link_config = REG_RD(bp, params->shmem_base +
7170 offsetof(struct shmem_region, dev_info. 7436 offsetof(struct shmem_region, dev_info.
7171 port_feature_config[params->port].link_config2)); 7437 port_feature_config[params->port].link_config2));
7172 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 7438 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7173 offsetof(struct shmem_region, dev_info. 7439 offsetof(struct shmem_region,
7440 dev_info.
7174 port_hw_config[params->port].speed_capability_mask2)); 7441 port_hw_config[params->port].speed_capability_mask2));
7175 } else { 7442 } else {
7176 link_config = REG_RD(bp, params->shmem_base + 7443 link_config = REG_RD(bp, params->shmem_base +
7177 offsetof(struct shmem_region, dev_info. 7444 offsetof(struct shmem_region, dev_info.
7178 port_feature_config[params->port].link_config)); 7445 port_feature_config[params->port].link_config));
7179 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 7446 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7180 offsetof(struct shmem_region, dev_info. 7447 offsetof(struct shmem_region,
7181 port_hw_config[params->port].speed_capability_mask)); 7448 dev_info.
7449 port_hw_config[params->port].speed_capability_mask));
7182 } 7450 }
7183 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask" 7451 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
7184 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask); 7452 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7325,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
7325 else if (phy_index == EXT_PHY2) 7593 else if (phy_index == EXT_PHY2)
7326 actual_phy_idx = EXT_PHY1; 7594 actual_phy_idx = EXT_PHY1;
7327 } 7595 }
7328 params->phy[actual_phy_idx].req_flow_ctrl = 7596 params->phy[actual_phy_idx].req_flow_ctrl =
7329 params->req_flow_ctrl[link_cfg_idx]; 7597 params->req_flow_ctrl[link_cfg_idx];
7330 7598
7331 params->phy[actual_phy_idx].req_line_speed = 7599 params->phy[actual_phy_idx].req_line_speed =
@@ -7378,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7378 set_phy_vars(params); 7646 set_phy_vars(params);
7379 7647
7380 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); 7648 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
7381 if (CHIP_REV_IS_FPGA(bp)) {
7382
7383 vars->link_up = 1;
7384 vars->line_speed = SPEED_10000;
7385 vars->duplex = DUPLEX_FULL;
7386 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7387 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
7388 /* enable on E1.5 FPGA */
7389 if (CHIP_IS_E1H(bp)) {
7390 vars->flow_ctrl |=
7391 (BNX2X_FLOW_CTRL_TX |
7392 BNX2X_FLOW_CTRL_RX);
7393 vars->link_status |=
7394 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
7395 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
7396 }
7397
7398 bnx2x_emac_enable(params, vars, 0);
7399 if (!(CHIP_IS_E2(bp)))
7400 bnx2x_pbf_update(params, vars->flow_ctrl,
7401 vars->line_speed);
7402 /* disable drain */
7403 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7404
7405 /* update shared memory */
7406 bnx2x_update_mng(params, vars->link_status);
7407
7408 return 0;
7409
7410 } else
7411 if (CHIP_REV_IS_EMUL(bp)) {
7412
7413 vars->link_up = 1;
7414 vars->line_speed = SPEED_10000;
7415 vars->duplex = DUPLEX_FULL;
7416 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7417 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
7418
7419 bnx2x_bmac_enable(params, vars, 0);
7420
7421 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
7422 /* Disable drain */
7423 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
7424 + params->port*4, 0);
7425
7426 /* update shared memory */
7427 bnx2x_update_mng(params, vars->link_status);
7428
7429 return 0;
7430
7431 } else
7432 if (params->loopback_mode == LOOPBACK_BMAC) { 7649 if (params->loopback_mode == LOOPBACK_BMAC) {
7433 7650
7434 vars->link_up = 1; 7651 vars->link_up = 1;
@@ -7444,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7444 /* set bmac loopback */ 7661 /* set bmac loopback */
7445 bnx2x_bmac_enable(params, vars, 1); 7662 bnx2x_bmac_enable(params, vars, 1);
7446 7663
7447 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7664 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7448 params->port*4, 0);
7449 7665
7450 } else if (params->loopback_mode == LOOPBACK_EMAC) { 7666 } else if (params->loopback_mode == LOOPBACK_EMAC) {
7451 7667
@@ -7461,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7461 /* set bmac loopback */ 7677 /* set bmac loopback */
7462 bnx2x_emac_enable(params, vars, 1); 7678 bnx2x_emac_enable(params, vars, 1);
7463 bnx2x_emac_program(params, vars); 7679 bnx2x_emac_program(params, vars);
7464 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7680 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7465 params->port*4, 0);
7466 7681
7467 } else if ((params->loopback_mode == LOOPBACK_XGXS) || 7682 } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
7468 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 7683 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7485,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7485 bnx2x_emac_program(params, vars); 7700 bnx2x_emac_program(params, vars);
7486 bnx2x_emac_enable(params, vars, 0); 7701 bnx2x_emac_enable(params, vars, 0);
7487 } else 7702 } else
7488 bnx2x_bmac_enable(params, vars, 0); 7703 bnx2x_bmac_enable(params, vars, 0);
7489
7490 if (params->loopback_mode == LOOPBACK_XGXS) { 7704 if (params->loopback_mode == LOOPBACK_XGXS) {
7491 /* set 10G XGXS loopback */ 7705 /* set 10G XGXS loopback */
7492 params->phy[INT_PHY].config_loopback( 7706 params->phy[INT_PHY].config_loopback(
@@ -7504,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7504 params); 7718 params);
7505 } 7719 }
7506 } 7720 }
7507 7721 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7508 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
7509 params->port*4, 0);
7510 7722
7511 bnx2x_set_led(params, vars, 7723 bnx2x_set_led(params, vars,
7512 LED_MODE_OPER, vars->line_speed); 7724 LED_MODE_OPER, vars->line_speed);
@@ -7525,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7525 return 0; 7737 return 0;
7526} 7738}
7527u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, 7739u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7528 u8 reset_ext_phy) 7740 u8 reset_ext_phy)
7529{ 7741{
7530 struct bnx2x *bp = params->bp; 7742 struct bnx2x *bp = params->bp;
7531 u8 phy_index, port = params->port, clear_latch_ind = 0; 7743 u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7534,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7534 vars->link_status = 0; 7746 vars->link_status = 0;
7535 bnx2x_update_mng(params, vars->link_status); 7747 bnx2x_update_mng(params, vars->link_status);
7536 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7748 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
7537 (NIG_MASK_XGXS0_LINK_STATUS | 7749 (NIG_MASK_XGXS0_LINK_STATUS |
7538 NIG_MASK_XGXS0_LINK10G | 7750 NIG_MASK_XGXS0_LINK10G |
7539 NIG_MASK_SERDES0_LINK_STATUS | 7751 NIG_MASK_SERDES0_LINK_STATUS |
7540 NIG_MASK_MI_INT)); 7752 NIG_MASK_MI_INT));
7541 7753
7542 /* activate nig drain */ 7754 /* activate nig drain */
7543 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 7755 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7605,10 +7817,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7605 struct bnx2x_phy phy[PORT_MAX]; 7817 struct bnx2x_phy phy[PORT_MAX];
7606 struct bnx2x_phy *phy_blk[PORT_MAX]; 7818 struct bnx2x_phy *phy_blk[PORT_MAX];
7607 u16 val; 7819 u16 val;
7608 s8 port; 7820 s8 port = 0;
7609 s8 port_of_path = 0; 7821 s8 port_of_path = 0;
7610 7822 u32 swap_val, swap_override;
7611 bnx2x_ext_phy_hw_reset(bp, 0); 7823 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7824 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7825 port ^= (swap_val && swap_override);
7826 bnx2x_ext_phy_hw_reset(bp, port);
7612 /* PART1 - Reset both phys */ 7827 /* PART1 - Reset both phys */
7613 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7828 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7614 u32 shmem_base, shmem2_base; 7829 u32 shmem_base, shmem2_base;
@@ -7633,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7633 /* disable attentions */ 7848 /* disable attentions */
7634 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + 7849 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7635 port_of_path*4, 7850 port_of_path*4,
7636 (NIG_MASK_XGXS0_LINK_STATUS | 7851 (NIG_MASK_XGXS0_LINK_STATUS |
7637 NIG_MASK_XGXS0_LINK10G | 7852 NIG_MASK_XGXS0_LINK10G |
7638 NIG_MASK_SERDES0_LINK_STATUS | 7853 NIG_MASK_SERDES0_LINK_STATUS |
7639 NIG_MASK_MI_INT)); 7854 NIG_MASK_MI_INT));
7640 7855
7641 /* Need to take the phy out of low power mode in order 7856 /* Need to take the phy out of low power mode in order
7642 to write to access its registers */ 7857 to write to access its registers */
7643 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7858 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7644 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 7859 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
7860 port);
7645 7861
7646 /* Reset the phy */ 7862 /* Reset the phy */
7647 bnx2x_cl45_write(bp, &phy[port], 7863 bnx2x_cl45_write(bp, &phy[port],
7648 MDIO_PMA_DEVAD, 7864 MDIO_PMA_DEVAD,
7649 MDIO_PMA_REG_CTRL, 7865 MDIO_PMA_REG_CTRL,
7650 1<<15); 7866 1<<15);
7651 } 7867 }
7652 7868
7653 /* Add delay of 150ms after reset */ 7869 /* Add delay of 150ms after reset */
@@ -7663,7 +7879,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7663 7879
7664 /* PART2 - Download firmware to both phys */ 7880 /* PART2 - Download firmware to both phys */
7665 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7881 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7666 u16 fw_ver1;
7667 if (CHIP_IS_E2(bp)) 7882 if (CHIP_IS_E2(bp))
7668 port_of_path = 0; 7883 port_of_path = 0;
7669 else 7884 else
@@ -7671,34 +7886,26 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7671 7886
7672 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", 7887 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7673 phy_blk[port]->addr); 7888 phy_blk[port]->addr);
7674 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7889 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7675 port_of_path); 7890 port_of_path))
7676
7677 bnx2x_cl45_read(bp, phy_blk[port],
7678 MDIO_PMA_DEVAD,
7679 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
7680 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
7681 DP(NETIF_MSG_LINK,
7682 "bnx2x_8073_common_init_phy port %x:"
7683 "Download failed. fw version = 0x%x\n",
7684 port, fw_ver1);
7685 return -EINVAL; 7891 return -EINVAL;
7686 }
7687 7892
7688 /* Only set bit 10 = 1 (Tx power down) */ 7893 /* Only set bit 10 = 1 (Tx power down) */
7689 bnx2x_cl45_read(bp, phy_blk[port], 7894 bnx2x_cl45_read(bp, phy_blk[port],
7690 MDIO_PMA_DEVAD, 7895 MDIO_PMA_DEVAD,
7691 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7896 MDIO_PMA_REG_TX_POWER_DOWN, &val);
7692 7897
7693 /* Phase1 of TX_POWER_DOWN reset */ 7898 /* Phase1 of TX_POWER_DOWN reset */
7694 bnx2x_cl45_write(bp, phy_blk[port], 7899 bnx2x_cl45_write(bp, phy_blk[port],
7695 MDIO_PMA_DEVAD, 7900 MDIO_PMA_DEVAD,
7696 MDIO_PMA_REG_TX_POWER_DOWN, 7901 MDIO_PMA_REG_TX_POWER_DOWN,
7697 (val | 1<<10)); 7902 (val | 1<<10));
7698 } 7903 }
7699 7904
7700 /* Toggle Transmitter: Power down and then up with 600ms 7905 /*
7701 delay between */ 7906 * Toggle Transmitter: Power down and then up with 600ms delay
7907 * between
7908 */
7702 msleep(600); 7909 msleep(600);
7703 7910
7704 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ 7911 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7706,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7706 /* Phase2 of POWER_DOWN_RESET */ 7913 /* Phase2 of POWER_DOWN_RESET */
7707 /* Release bit 10 (Release Tx power down) */ 7914 /* Release bit 10 (Release Tx power down) */
7708 bnx2x_cl45_read(bp, phy_blk[port], 7915 bnx2x_cl45_read(bp, phy_blk[port],
7709 MDIO_PMA_DEVAD, 7916 MDIO_PMA_DEVAD,
7710 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7917 MDIO_PMA_REG_TX_POWER_DOWN, &val);
7711 7918
7712 bnx2x_cl45_write(bp, phy_blk[port], 7919 bnx2x_cl45_write(bp, phy_blk[port],
7713 MDIO_PMA_DEVAD, 7920 MDIO_PMA_DEVAD,
7714 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 7921 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
7715 msleep(15); 7922 msleep(15);
7716 7923
7717 /* Read modify write the SPI-ROM version select register */ 7924 /* Read modify write the SPI-ROM version select register */
7718 bnx2x_cl45_read(bp, phy_blk[port], 7925 bnx2x_cl45_read(bp, phy_blk[port],
7719 MDIO_PMA_DEVAD, 7926 MDIO_PMA_DEVAD,
7720 MDIO_PMA_REG_EDC_FFE_MAIN, &val); 7927 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
7721 bnx2x_cl45_write(bp, phy_blk[port], 7928 bnx2x_cl45_write(bp, phy_blk[port],
7722 MDIO_PMA_DEVAD, 7929 MDIO_PMA_DEVAD,
7723 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); 7930 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
7724 7931
7725 /* set GPIO2 back to LOW */ 7932 /* set GPIO2 back to LOW */
7726 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7933 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7727 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 7934 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
7728 } 7935 }
7729 return 0; 7936 return 0;
7730} 7937}
@@ -7771,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7771 7978
7772 /* Set fault module detected LED on */ 7979 /* Set fault module detected LED on */
7773 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 7980 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
7774 MISC_REGISTERS_GPIO_HIGH, 7981 MISC_REGISTERS_GPIO_HIGH,
7775 port); 7982 port);
7776 } 7983 }
7777 7984
7778 return 0; 7985 return 0;
7779} 7986}
7987static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
7988 u8 *io_gpio, u8 *io_port)
7989{
7990
7991 u32 phy_gpio_reset = REG_RD(bp, shmem_base +
7992 offsetof(struct shmem_region,
7993 dev_info.port_hw_config[PORT_0].default_cfg));
7994 switch (phy_gpio_reset) {
7995 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
7996 *io_gpio = 0;
7997 *io_port = 0;
7998 break;
7999 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
8000 *io_gpio = 1;
8001 *io_port = 0;
8002 break;
8003 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
8004 *io_gpio = 2;
8005 *io_port = 0;
8006 break;
8007 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
8008 *io_gpio = 3;
8009 *io_port = 0;
8010 break;
8011 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
8012 *io_gpio = 0;
8013 *io_port = 1;
8014 break;
8015 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
8016 *io_gpio = 1;
8017 *io_port = 1;
8018 break;
8019 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
8020 *io_gpio = 2;
8021 *io_port = 1;
8022 break;
8023 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
8024 *io_gpio = 3;
8025 *io_port = 1;
8026 break;
8027 default:
8028 /* Don't override the io_gpio and io_port */
8029 break;
8030 }
8031}
7780static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, 8032static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7781 u32 shmem_base_path[], 8033 u32 shmem_base_path[],
7782 u32 shmem2_base_path[], u8 phy_index, 8034 u32 shmem2_base_path[], u8 phy_index,
7783 u32 chip_id) 8035 u32 chip_id)
7784{ 8036{
7785 s8 port; 8037 s8 port, reset_gpio;
7786 u32 swap_val, swap_override; 8038 u32 swap_val, swap_override;
7787 struct bnx2x_phy phy[PORT_MAX]; 8039 struct bnx2x_phy phy[PORT_MAX];
7788 struct bnx2x_phy *phy_blk[PORT_MAX]; 8040 struct bnx2x_phy *phy_blk[PORT_MAX];
7789 s8 port_of_path; 8041 s8 port_of_path;
7790 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 8042 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7791 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 8043 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7792 8044
8045 reset_gpio = MISC_REGISTERS_GPIO_1;
7793 port = 1; 8046 port = 1;
7794 8047
7795 bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override)); 8048 /*
8049 * Retrieve the reset gpio/port which control the reset.
8050 * Default is GPIO1, PORT1
8051 */
8052 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
8053 (u8 *)&reset_gpio, (u8 *)&port);
7796 8054
7797 /* Calculate the port based on port swap */ 8055 /* Calculate the port based on port swap */
7798 port ^= (swap_val && swap_override); 8056 port ^= (swap_val && swap_override);
7799 8057
8058 /* Initiate PHY reset*/
8059 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
8060 port);
8061 msleep(1);
8062 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
8063 port);
8064
7800 msleep(5); 8065 msleep(5);
7801 8066
7802 /* PART1 - Reset both phys */ 8067 /* PART1 - Reset both phys */
@@ -7832,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7832 8097
7833 /* Reset the phy */ 8098 /* Reset the phy */
7834 bnx2x_cl45_write(bp, &phy[port], 8099 bnx2x_cl45_write(bp, &phy[port],
7835 MDIO_PMA_DEVAD, 8100 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
7836 MDIO_PMA_REG_CTRL,
7837 1<<15);
7838 } 8101 }
7839 8102
7840 /* Add delay of 150ms after reset */ 8103 /* Add delay of 150ms after reset */
@@ -7848,27 +8111,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7848 } 8111 }
7849 /* PART2 - Download firmware to both phys */ 8112 /* PART2 - Download firmware to both phys */
7850 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 8113 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7851 u16 fw_ver1; 8114 if (CHIP_IS_E2(bp))
7852 if (CHIP_IS_E2(bp))
7853 port_of_path = 0; 8115 port_of_path = 0;
7854 else 8116 else
7855 port_of_path = port; 8117 port_of_path = port;
7856 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", 8118 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7857 phy_blk[port]->addr); 8119 phy_blk[port]->addr);
7858 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 8120 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7859 port_of_path); 8121 port_of_path))
7860 bnx2x_cl45_read(bp, phy_blk[port],
7861 MDIO_PMA_DEVAD,
7862 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
7863 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
7864 DP(NETIF_MSG_LINK,
7865 "bnx2x_8727_common_init_phy port %x:"
7866 "Download failed. fw version = 0x%x\n",
7867 port, fw_ver1);
7868 return -EINVAL; 8122 return -EINVAL;
7869 }
7870 }
7871 8123
8124 }
7872 return 0; 8125 return 0;
7873} 8126}
7874 8127
@@ -7893,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7893 break; 8146 break;
7894 8147
7895 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 8148 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7896 /* GPIO1 affects both ports, so there's need to pull 8149 /*
7897 it for single port alone */ 8150 * GPIO1 affects both ports, so there's need to pull
8151 * it for single port alone
8152 */
7898 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, 8153 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
7899 shmem2_base_path, 8154 shmem2_base_path,
7900 phy_index, chip_id); 8155 phy_index, chip_id);
@@ -7904,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7904 break; 8159 break;
7905 default: 8160 default:
7906 DP(NETIF_MSG_LINK, 8161 DP(NETIF_MSG_LINK,
7907 "bnx2x_common_init_phy: ext_phy 0x%x not required\n", 8162 "ext_phy 0x%x common init not required\n",
7908 ext_phy_type); 8163 ext_phy_type);
7909 break; 8164 break;
7910 } 8165 }
7911 8166
8167 if (rc != 0)
8168 netdev_err(bp->dev, "Warning: PHY was not initialized,"
8169 " Port %d\n",
8170 0);
7912 return rc; 8171 return rc;
7913} 8172}
7914 8173
@@ -7916,12 +8175,20 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7916 u32 shmem2_base_path[], u32 chip_id) 8175 u32 shmem2_base_path[], u32 chip_id)
7917{ 8176{
7918 u8 rc = 0; 8177 u8 rc = 0;
8178 u32 phy_ver;
7919 u8 phy_index; 8179 u8 phy_index;
7920 u32 ext_phy_type, ext_phy_config; 8180 u32 ext_phy_type, ext_phy_config;
7921 DP(NETIF_MSG_LINK, "Begin common phy init\n"); 8181 DP(NETIF_MSG_LINK, "Begin common phy init\n");
7922 8182
7923 if (CHIP_REV_IS_EMUL(bp)) 8183 /* Check if common init was already done */
8184 phy_ver = REG_RD(bp, shmem_base_path[0] +
8185 offsetof(struct shmem_region,
8186 port_mb[PORT_0].ext_phy_fw_version));
8187 if (phy_ver) {
8188 DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
8189 phy_ver);
7924 return 0; 8190 return 0;
8191 }
7925 8192
7926 /* Read the ext_phy_type for arbitrary port(0) */ 8193 /* Read the ext_phy_type for arbitrary port(0) */
7927 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; 8194 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index bedab1a942c..92f36b6950d 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2010 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
 #define BNX2X_FLOW_CTRL_BOTH		PORT_FEATURE_FLOW_CONTROL_BOTH
 #define BNX2X_FLOW_CTRL_NONE		PORT_FEATURE_FLOW_CONTROL_NONE
 
-#define SPEED_AUTO_NEG	    0
+#define SPEED_AUTO_NEG		0
 #define SPEED_12000		12000
 #define SPEED_12500		12500
 #define SPEED_13000		13000
@@ -44,8 +44,8 @@
 #define SFP_EEPROM_VENDOR_NAME_SIZE	16
 #define SFP_EEPROM_VENDOR_OUI_ADDR	0x25
 #define SFP_EEPROM_VENDOR_OUI_SIZE	3
-#define SFP_EEPROM_PART_NO_ADDR 	0x28
-#define SFP_EEPROM_PART_NO_SIZE 	16
+#define SFP_EEPROM_PART_NO_ADDR		0x28
+#define SFP_EEPROM_PART_NO_SIZE		16
 #define PWR_FLT_ERR_MSG_LEN		250
 
 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
 #define SINGLE_MEDIA(params)	(params->num_phys == 2)
 /* Dual Media board contains two external phy with different media */
 #define DUAL_MEDIA(params)	(params->num_phys == 3)
-#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_MDIO_CTRL_OFFSET	16
 #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
 	(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
 
@@ -201,12 +201,14 @@ struct link_params {
 
 	/* Default / User Configuration */
 	u8 loopback_mode;
-#define LOOPBACK_NONE	    0
-#define LOOPBACK_EMAC	    1
-#define LOOPBACK_BMAC	    2
-#define LOOPBACK_XGXS	    3
-#define LOOPBACK_EXT_PHY    4
-#define LOOPBACK_EXT	    5
+#define LOOPBACK_NONE		0
+#define LOOPBACK_EMAC		1
+#define LOOPBACK_BMAC		2
+#define LOOPBACK_XGXS		3
+#define LOOPBACK_EXT_PHY	4
+#define LOOPBACK_EXT		5
+#define LOOPBACK_UMAC		6
+#define LOOPBACK_XMAC		7
 
 	/* Device parameters */
 	u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
 	/* Phy register parameter */
 	u32 chip_id;
 
+	/* features */
 	u32 feature_config_flags;
-#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define FEATURE_CONFIG_PFC_ENABLED		    (1<<1)
-#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY     (1<<2)
-#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED	(1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED			(1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
 	/* Will be populated during common init */
 	struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
 /* Reset the external of SFX7101 */
 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				struct link_params *params, u16 addr,
+				u8 byte_cnt, u8 *o_buf);
+
 void bnx2x_hw_reset_phy(struct link_params *params);
 
 /* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
 
 /* Used to configure the ETS to BW limited */
 void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
-		      const u32 cos1_bw);
+			const u32 cos1_bw);
 
 /* Used to configure the ETS to strict */
 u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 8cdcf5b39d1..32e64cc85d2 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -145,13 +145,6 @@ static struct {
145 { "Broadcom NetXtreme II BCM57712E XGb" } 145 { "Broadcom NetXtreme II BCM57712E XGb" }
146}; 146};
147 147
148#ifndef PCI_DEVICE_ID_NX2_57712
149#define PCI_DEVICE_ID_NX2_57712 0x1662
150#endif
151#ifndef PCI_DEVICE_ID_NX2_57712E
152#define PCI_DEVICE_ID_NX2_57712E 0x1663
153#endif
154
155static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { 148static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
156 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 149 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
157 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 150 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -586,7 +579,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
586 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 579 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
587 580
588 /* lock the dmae channel */ 581 /* lock the dmae channel */
589 mutex_lock(&bp->dmae_mutex); 582 spin_lock_bh(&bp->dmae_lock);
590 583
591 /* reset completion */ 584 /* reset completion */
592 *wb_comp = 0; 585 *wb_comp = 0;
@@ -617,7 +610,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
617 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 610 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
618 611
619unlock: 612unlock:
620 mutex_unlock(&bp->dmae_mutex); 613 spin_unlock_bh(&bp->dmae_lock);
621 return rc; 614 return rc;
622} 615}
623 616
@@ -1397,7 +1390,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1397 } 1390 }
1398 1391
1399 smp_mb__before_atomic_inc(); 1392 smp_mb__before_atomic_inc();
1400 atomic_inc(&bp->spq_left); 1393 atomic_inc(&bp->cq_spq_left);
1401 /* push the change in fp->state and towards the memory */ 1394 /* push the change in fp->state and towards the memory */
1402 smp_wmb(); 1395 smp_wmb();
1403 1396
@@ -1974,13 +1967,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1974 vn_max_rate = 0; 1967 vn_max_rate = 0;
1975 1968
1976 } else { 1969 } else {
1970 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1971
1977 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1972 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1978 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1973 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1979 /* If min rate is zero - set it to 1 */ 1974 /* If fairness is enabled (not all min rates are zeroes) and
1975 if current min rate is zero - set it to 1.
1976 This is a requirement of the algorithm. */
1980 if (bp->vn_weight_sum && (vn_min_rate == 0)) 1977 if (bp->vn_weight_sum && (vn_min_rate == 0))
1981 vn_min_rate = DEF_MIN_RATE; 1978 vn_min_rate = DEF_MIN_RATE;
1982 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1979
1983 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1980 if (IS_MF_SI(bp))
1981 /* maxCfg in percents of linkspeed */
1982 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1983 else
1984 /* maxCfg is absolute in 100Mb units */
1985 vn_max_rate = maxCfg * 100;
1984 } 1986 }
1985 1987
1986 DP(NETIF_MSG_IFUP, 1988 DP(NETIF_MSG_IFUP,
@@ -2006,7 +2008,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
2006 m_fair_vn.vn_credit_delta = 2008 m_fair_vn.vn_credit_delta =
2007 max_t(u32, (vn_min_rate * (T_FAIR_COEF / 2009 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2008 (8 * bp->vn_weight_sum))), 2010 (8 * bp->vn_weight_sum))),
2009 (bp->cmng.fair_vars.fair_threshold * 2)); 2011 (bp->cmng.fair_vars.fair_threshold +
2012 MIN_ABOVE_THRESH));
2010 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", 2013 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2011 m_fair_vn.vn_credit_delta); 2014 m_fair_vn.vn_credit_delta);
2012 } 2015 }
@@ -2082,8 +2085,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2082 bnx2x_calc_vn_weight_sum(bp); 2085 bnx2x_calc_vn_weight_sum(bp);
2083 2086
2084 /* calculate and set min-max rate for each vn */ 2087 /* calculate and set min-max rate for each vn */
2085 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2088 if (bp->port.pmf)
2086 bnx2x_init_vn_minmax(bp, vn); 2089 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2090 bnx2x_init_vn_minmax(bp, vn);
2087 2091
2088 /* always enable rate shaping and fairness */ 2092 /* always enable rate shaping and fairness */
2089 bp->cmng.flags.cmng_enables |= 2093 bp->cmng.flags.cmng_enables |=
@@ -2152,13 +2156,6 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2152 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2156 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2153 } 2157 }
2154 2158
2155 /* indicate link status only if link status actually changed */
2156 if (prev_link_status != bp->link_vars.link_status)
2157 bnx2x_link_report(bp);
2158
2159 if (IS_MF(bp))
2160 bnx2x_link_sync_notify(bp);
2161
2162 if (bp->link_vars.link_up && bp->link_vars.line_speed) { 2159 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2163 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2160 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2164 2161
@@ -2170,6 +2167,13 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2170 DP(NETIF_MSG_IFUP, 2167 DP(NETIF_MSG_IFUP,
2171 "single function mode without fairness\n"); 2168 "single function mode without fairness\n");
2172 } 2169 }
2170
2171 if (IS_MF(bp))
2172 bnx2x_link_sync_notify(bp);
2173
2174 /* indicate link status only if link status actually changed */
2175 if (prev_link_status != bp->link_vars.link_status)
2176 bnx2x_link_report(bp);
2173} 2177}
2174 2178
2175void bnx2x__link_status_update(struct bnx2x *bp) 2179void bnx2x__link_status_update(struct bnx2x *bp)
@@ -2301,15 +2305,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2301 /* accept matched ucast */ 2305 /* accept matched ucast */
2302 drop_all_ucast = 0; 2306 drop_all_ucast = 0;
2303 } 2307 }
2304 if (filters & BNX2X_ACCEPT_MULTICAST) { 2308 if (filters & BNX2X_ACCEPT_MULTICAST)
2305 /* accept matched mcast */ 2309 /* accept matched mcast */
2306 drop_all_mcast = 0; 2310 drop_all_mcast = 0;
2307 if (IS_MF_SI(bp)) 2311
2308 /* since mcast addresses won't arrive with ovlan,
2309 * fw needs to accept all of them in
2310 * switch-independent mode */
2311 accp_all_mcast = 1;
2312 }
2313 if (filters & BNX2X_ACCEPT_ALL_UNICAST) { 2312 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2314 /* accept all mcast */ 2313 /* accept all mcast */
2315 drop_all_ucast = 0; 2314 drop_all_ucast = 0;
@@ -2478,8 +2477,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2478 rxq_init->sge_map = fp->rx_sge_mapping; 2477 rxq_init->sge_map = fp->rx_sge_mapping;
2479 rxq_init->rcq_map = fp->rx_comp_mapping; 2478 rxq_init->rcq_map = fp->rx_comp_mapping;
2480 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 2479 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2481 rxq_init->mtu = bp->dev->mtu; 2480
2482 rxq_init->buf_sz = bp->rx_buf_size; 2481 /* Always use mini-jumbo MTU for FCoE L2 ring */
2482 if (IS_FCOE_FP(fp))
2483 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2484 else
2485 rxq_init->mtu = bp->dev->mtu;
2486
2487 rxq_init->buf_sz = fp->rx_buf_size;
2483 rxq_init->cl_qzone_id = fp->cl_qzone_id; 2488 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2484 rxq_init->cl_id = fp->cl_id; 2489 rxq_init->cl_id = fp->cl_id;
2485 rxq_init->spcl_id = fp->cl_id; 2490 rxq_init->spcl_id = fp->cl_id;
@@ -2731,11 +2736,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2731 2736
2732 spin_lock_bh(&bp->spq_lock); 2737 spin_lock_bh(&bp->spq_lock);
2733 2738
2734 if (!atomic_read(&bp->spq_left)) { 2739 if (common) {
2735 BNX2X_ERR("BUG! SPQ ring full!\n"); 2740 if (!atomic_read(&bp->eq_spq_left)) {
2736 spin_unlock_bh(&bp->spq_lock); 2741 BNX2X_ERR("BUG! EQ ring full!\n");
2737 bnx2x_panic(); 2742 spin_unlock_bh(&bp->spq_lock);
2738 return -EBUSY; 2743 bnx2x_panic();
2744 return -EBUSY;
2745 }
2746 } else if (!atomic_read(&bp->cq_spq_left)) {
2747 BNX2X_ERR("BUG! SPQ ring full!\n");
2748 spin_unlock_bh(&bp->spq_lock);
2749 bnx2x_panic();
2750 return -EBUSY;
2739 } 2751 }
2740 2752
2741 spe = bnx2x_sp_get_next(bp); 2753 spe = bnx2x_sp_get_next(bp);
@@ -2766,20 +2778,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2766 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); 2778 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2767 2779
2768 /* stats ramrod has it's own slot on the spq */ 2780 /* stats ramrod has it's own slot on the spq */
2769 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) 2781 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
2770 /* It's ok if the actual decrement is issued towards the memory 2782 /* It's ok if the actual decrement is issued towards the memory
2771 * somewhere between the spin_lock and spin_unlock. Thus no 2783 * somewhere between the spin_lock and spin_unlock. Thus no
2772 * more explict memory barrier is needed. 2784 * more explict memory barrier is needed.
2773 */ 2785 */
2774 atomic_dec(&bp->spq_left); 2786 if (common)
2787 atomic_dec(&bp->eq_spq_left);
2788 else
2789 atomic_dec(&bp->cq_spq_left);
2790 }
2791
2775 2792
2776 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2793 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2777 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) " 2794 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2778 "type(0x%x) left %x\n", 2795 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
2779 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 2796 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2780 (u32)(U64_LO(bp->spq_mapping) + 2797 (u32)(U64_LO(bp->spq_mapping) +
2781 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 2798 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2782 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left)); 2799 HW_CID(bp, cid), data_hi, data_lo, type,
2800 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
2783 2801
2784 bnx2x_sp_prod_update(bp); 2802 bnx2x_sp_prod_update(bp);
2785 spin_unlock_bh(&bp->spq_lock); 2803 spin_unlock_bh(&bp->spq_lock);
@@ -3691,8 +3709,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3691 sw_cons = bp->eq_cons; 3709 sw_cons = bp->eq_cons;
3692 sw_prod = bp->eq_prod; 3710 sw_prod = bp->eq_prod;
3693 3711
3694 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n", 3712 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
3695 hw_cons, sw_cons, atomic_read(&bp->spq_left)); 3713 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3696 3714
3697 for (; sw_cons != hw_cons; 3715 for (; sw_cons != hw_cons;
3698 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 3716 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3757,13 +3775,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3757 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 3775 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3758 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 3776 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3759 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 3777 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3760 bp->set_mac_pending = 0; 3778 if (elem->message.data.set_mac_event.echo)
3779 bp->set_mac_pending = 0;
3761 break; 3780 break;
3762 3781
3763 case (EVENT_RING_OPCODE_SET_MAC | 3782 case (EVENT_RING_OPCODE_SET_MAC |
3764 BNX2X_STATE_CLOSING_WAIT4_HALT): 3783 BNX2X_STATE_CLOSING_WAIT4_HALT):
3765 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); 3784 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3766 bp->set_mac_pending = 0; 3785 if (elem->message.data.set_mac_event.echo)
3786 bp->set_mac_pending = 0;
3767 break; 3787 break;
3768 default: 3788 default:
3769 /* unknown event log error and continue */ 3789 /* unknown event log error and continue */
@@ -3775,7 +3795,7 @@ next_spqe:
3775 } /* for */ 3795 } /* for */
3776 3796
3777 smp_mb__before_atomic_inc(); 3797 smp_mb__before_atomic_inc();
3778 atomic_add(spqe_cnt, &bp->spq_left); 3798 atomic_add(spqe_cnt, &bp->eq_spq_left);
3779 3799
3780 bp->eq_cons = sw_cons; 3800 bp->eq_cons = sw_cons;
3781 bp->eq_prod = sw_prod; 3801 bp->eq_prod = sw_prod;
@@ -4202,13 +4222,13 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
4202 4222
4203 for_each_eth_queue(bp, i) 4223 for_each_eth_queue(bp, i)
4204 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, 4224 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4205 bp->rx_ticks, bp->tx_ticks); 4225 bp->tx_ticks, bp->rx_ticks);
4206} 4226}
4207 4227
4208static void bnx2x_init_sp_ring(struct bnx2x *bp) 4228static void bnx2x_init_sp_ring(struct bnx2x *bp)
4209{ 4229{
4210 spin_lock_init(&bp->spq_lock); 4230 spin_lock_init(&bp->spq_lock);
4211 atomic_set(&bp->spq_left, MAX_SPQ_PENDING); 4231 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
4212 4232
4213 bp->spq_prod_idx = 0; 4233 bp->spq_prod_idx = 0;
4214 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 4234 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4233,9 +4253,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
4233 bp->eq_cons = 0; 4253 bp->eq_cons = 0;
4234 bp->eq_prod = NUM_EQ_DESC; 4254 bp->eq_prod = NUM_EQ_DESC;
4235 bp->eq_cons_sb = BNX2X_EQ_INDEX; 4255 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4256 /* we want a warning message before it gets rought... */
4257 atomic_set(&bp->eq_spq_left,
4258 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4236} 4259}
4237 4260
4238static void bnx2x_init_ind_table(struct bnx2x *bp) 4261void bnx2x_push_indir_table(struct bnx2x *bp)
4239{ 4262{
4240 int func = BP_FUNC(bp); 4263 int func = BP_FUNC(bp);
4241 int i; 4264 int i;
@@ -4243,13 +4266,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4243 if (bp->multi_mode == ETH_RSS_MODE_DISABLED) 4266 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4244 return; 4267 return;
4245 4268
4246 DP(NETIF_MSG_IFUP,
4247 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4248 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 4269 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4249 REG_WR8(bp, BAR_TSTRORM_INTMEM + 4270 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4250 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, 4271 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4251 bp->fp->cl_id + (i % (bp->num_queues - 4272 bp->fp->cl_id + bp->rx_indir_table[i]);
4252 NONE_ETH_CONTEXT_USE))); 4273}
4274
4275static void bnx2x_init_ind_table(struct bnx2x *bp)
4276{
4277 int i;
4278
4279 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4280 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4281
4282 bnx2x_push_indir_table(bp);
4253} 4283}
4254 4284
4255void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4285void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
@@ -4281,9 +4311,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4281 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | 4311 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4282 BNX2X_ACCEPT_MULTICAST; 4312 BNX2X_ACCEPT_MULTICAST;
4283#ifdef BCM_CNIC 4313#ifdef BCM_CNIC
4284 cl_id = bnx2x_fcoe(bp, cl_id); 4314 if (!NO_FCOE(bp)) {
4285 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4315 cl_id = bnx2x_fcoe(bp, cl_id);
4286 BNX2X_ACCEPT_MULTICAST); 4316 bnx2x_rxq_set_mac_filters(bp, cl_id,
4317 BNX2X_ACCEPT_UNICAST |
4318 BNX2X_ACCEPT_MULTICAST);
4319 }
4287#endif 4320#endif
4288 break; 4321 break;
4289 4322
@@ -4291,18 +4324,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4291 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | 4324 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4292 BNX2X_ACCEPT_ALL_MULTICAST; 4325 BNX2X_ACCEPT_ALL_MULTICAST;
4293#ifdef BCM_CNIC 4326#ifdef BCM_CNIC
4294 cl_id = bnx2x_fcoe(bp, cl_id); 4327 /*
4295 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4328 * Prevent duplication of multicast packets by configuring FCoE
4296 BNX2X_ACCEPT_MULTICAST); 4329 * L2 Client to receive only matched unicast frames.
4330 */
4331 if (!NO_FCOE(bp)) {
4332 cl_id = bnx2x_fcoe(bp, cl_id);
4333 bnx2x_rxq_set_mac_filters(bp, cl_id,
4334 BNX2X_ACCEPT_UNICAST);
4335 }
4297#endif 4336#endif
4298 break; 4337 break;
4299 4338
4300 case BNX2X_RX_MODE_PROMISC: 4339 case BNX2X_RX_MODE_PROMISC:
4301 def_q_filters |= BNX2X_PROMISCUOUS_MODE; 4340 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4302#ifdef BCM_CNIC 4341#ifdef BCM_CNIC
4303 cl_id = bnx2x_fcoe(bp, cl_id); 4342 /*
4304 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4343 * Prevent packets duplication by configuring DROP_ALL for FCoE
4305 BNX2X_ACCEPT_MULTICAST); 4344 * L2 Client.
4345 */
4346 if (!NO_FCOE(bp)) {
4347 cl_id = bnx2x_fcoe(bp, cl_id);
4348 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4349 }
4306#endif 4350#endif
4307 /* pass management unicast packets as well */ 4351 /* pass management unicast packets as well */
4308 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; 4352 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -5296,10 +5340,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5296 } 5340 }
5297 } 5341 }
5298 5342
5299 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5300 bp->common.shmem_base,
5301 bp->common.shmem2_base);
5302
5303 bnx2x_setup_fan_failure_detection(bp); 5343 bnx2x_setup_fan_failure_detection(bp);
5304 5344
5305 /* clear PXP2 attentions */ 5345 /* clear PXP2 attentions */
@@ -5503,9 +5543,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
5503 5543
5504 bnx2x_init_block(bp, MCP_BLOCK, init_stage); 5544 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5505 bnx2x_init_block(bp, DMAE_BLOCK, init_stage); 5545 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5506 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5507 bp->common.shmem_base,
5508 bp->common.shmem2_base);
5509 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, 5546 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5510 bp->common.shmem2_base, port)) { 5547 bp->common.shmem2_base, port)) {
5511 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5548 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -5838,7 +5875,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5838 BP_ABS_FUNC(bp), load_code); 5875 BP_ABS_FUNC(bp), load_code);
5839 5876
5840 bp->dmae_ready = 0; 5877 bp->dmae_ready = 0;
5841 mutex_init(&bp->dmae_mutex); 5878 spin_lock_init(&bp->dmae_lock);
5842 rc = bnx2x_gunzip_init(bp); 5879 rc = bnx2x_gunzip_init(bp);
5843 if (rc) 5880 if (rc)
5844 return rc; 5881 return rc;
@@ -5990,6 +6027,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
5990 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 6027 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5991 BCM_PAGE_SIZE * NUM_EQ_PAGES); 6028 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5992 6029
6030 BNX2X_FREE(bp->rx_indir_table);
6031
5993#undef BNX2X_PCI_FREE 6032#undef BNX2X_PCI_FREE
5994#undef BNX2X_KFREE 6033#undef BNX2X_KFREE
5995} 6034}
@@ -6120,6 +6159,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
6120 /* EQ */ 6159 /* EQ */
6121 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 6160 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6122 BCM_PAGE_SIZE * NUM_EQ_PAGES); 6161 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6162
6163 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
6164 TSTORM_INDIRECTION_TABLE_SIZE);
6123 return 0; 6165 return 0;
6124 6166
6125alloc_mem_err: 6167alloc_mem_err:
@@ -6173,12 +6215,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6173 int ramrod_flags = WAIT_RAMROD_COMMON; 6215 int ramrod_flags = WAIT_RAMROD_COMMON;
6174 6216
6175 bp->set_mac_pending = 1; 6217 bp->set_mac_pending = 1;
6176 smp_wmb();
6177 6218
6178 config->hdr.length = 1; 6219 config->hdr.length = 1;
6179 config->hdr.offset = cam_offset; 6220 config->hdr.offset = cam_offset;
6180 config->hdr.client_id = 0xff; 6221 config->hdr.client_id = 0xff;
6181 config->hdr.reserved1 = 0; 6222 /* Mark the single MAC configuration ramrod as opposed to a
6223 * UC/MC list configuration).
6224 */
6225 config->hdr.echo = 1;
6182 6226
6183 /* primary MAC */ 6227 /* primary MAC */
6184 config->config_table[0].msb_mac_addr = 6228 config->config_table[0].msb_mac_addr =
@@ -6210,6 +6254,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6210 config->config_table[0].middle_mac_addr, 6254 config->config_table[0].middle_mac_addr,
6211 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); 6255 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6212 6256
6257 mb();
6258
6213 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6259 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6214 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 6260 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6215 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); 6261 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6274,20 +6320,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6274 if (CHIP_IS_E1H(bp)) 6320 if (CHIP_IS_E1H(bp))
6275 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); 6321 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6276 else if (CHIP_MODE_IS_4_PORT(bp)) 6322 else if (CHIP_MODE_IS_4_PORT(bp))
6277 return BP_FUNC(bp) * 32 + rel_offset; 6323 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
6278 else 6324 else
6279 return BP_VN(bp) * 32 + rel_offset; 6325 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
6280} 6326}
6281 6327
6282/** 6328/**
6283 * LLH CAM line allocations: currently only iSCSI and ETH macs are 6329 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6284 * relevant. In addition, current implementation is tuned for a 6330 * relevant. In addition, current implementation is tuned for a
6285 * single ETH MAC. 6331 * single ETH MAC.
6286 *
6287 * When multiple unicast ETH MACs PF configuration in switch
6288 * independent mode is required (NetQ, multiple netdev MACs,
6289 * etc.), consider better utilisation of 16 per function MAC
6290 * entries in the LLH memory.
6291 */ 6332 */
6292enum { 6333enum {
6293 LLH_CAM_ISCSI_ETH_LINE = 0, 6334 LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6362,14 +6403,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6362 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); 6403 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6363 } 6404 }
6364} 6405}
6365static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset) 6406
6407static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6408{
6409 return CHIP_REV_IS_SLOW(bp) ?
6410 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6411 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6412}
6413
6414/* set mc list, do not wait as wait implies sleep and
6415 * set_rx_mode can be invoked from non-sleepable context.
6416 *
6417 * Instead we use the same ramrod data buffer each time we need
6418 * to configure a list of addresses, and use the fact that the
6419 * list of MACs is changed in an incremental way and that the
6420 * function is called under the netif_addr_lock. A temporary
6421 * inconsistent CAM configuration (possible in case of a very fast
6422 * sequence of add/del/add on the host side) will shortly be
6423 * restored by the handler of the last ramrod.
6424 */
6425static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
6366{ 6426{
6367 int i = 0, old; 6427 int i = 0, old;
6368 struct net_device *dev = bp->dev; 6428 struct net_device *dev = bp->dev;
6429 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6369 struct netdev_hw_addr *ha; 6430 struct netdev_hw_addr *ha;
6370 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); 6431 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6371 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); 6432 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6372 6433
6434 if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6435 return -EINVAL;
6436
6373 netdev_for_each_mc_addr(ha, dev) { 6437 netdev_for_each_mc_addr(ha, dev) {
6374 /* copy mac */ 6438 /* copy mac */
6375 config_cmd->config_table[i].msb_mac_addr = 6439 config_cmd->config_table[i].msb_mac_addr =
@@ -6410,32 +6474,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6410 } 6474 }
6411 } 6475 }
6412 6476
6477 wmb();
6478
6413 config_cmd->hdr.length = i; 6479 config_cmd->hdr.length = i;
6414 config_cmd->hdr.offset = offset; 6480 config_cmd->hdr.offset = offset;
6415 config_cmd->hdr.client_id = 0xff; 6481 config_cmd->hdr.client_id = 0xff;
6416 config_cmd->hdr.reserved1 = 0; 6482 /* Mark that this ramrod doesn't use bp->set_mac_pending for
6483 * synchronization.
6484 */
6485 config_cmd->hdr.echo = 0;
6417 6486
6418 bp->set_mac_pending = 1; 6487 mb();
6419 smp_wmb();
6420 6488
6421 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6489 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6422 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); 6490 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6423} 6491}
6424static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp) 6492
6493void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6425{ 6494{
6426 int i; 6495 int i;
6427 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); 6496 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6428 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); 6497 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6429 int ramrod_flags = WAIT_RAMROD_COMMON; 6498 int ramrod_flags = WAIT_RAMROD_COMMON;
6499 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6430 6500
6431 bp->set_mac_pending = 1; 6501 for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
6432 smp_wmb();
6433
6434 for (i = 0; i < config_cmd->hdr.length; i++)
6435 SET_FLAG(config_cmd->config_table[i].flags, 6502 SET_FLAG(config_cmd->config_table[i].flags,
6436 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 6503 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6437 T_ETH_MAC_COMMAND_INVALIDATE); 6504 T_ETH_MAC_COMMAND_INVALIDATE);
6438 6505
6506 wmb();
6507
6508 config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6509 config_cmd->hdr.offset = offset;
6510 config_cmd->hdr.client_id = 0xff;
6511 /* We'll wait for a completion this time... */
6512 config_cmd->hdr.echo = 1;
6513
6514 bp->set_mac_pending = 1;
6515
6516 mb();
6517
6439 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6518 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6440 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); 6519 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6441 6520
@@ -6445,6 +6524,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6445 6524
6446} 6525}
6447 6526
6527/* Accept one or more multicasts */
6528static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6529{
6530 struct net_device *dev = bp->dev;
6531 struct netdev_hw_addr *ha;
6532 u32 mc_filter[MC_HASH_SIZE];
6533 u32 crc, bit, regidx;
6534 int i;
6535
6536 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6537
6538 netdev_for_each_mc_addr(ha, dev) {
6539 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6540 bnx2x_mc_addr(ha));
6541
6542 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6543 ETH_ALEN);
6544 bit = (crc >> 24) & 0xff;
6545 regidx = bit >> 5;
6546 bit &= 0x1f;
6547 mc_filter[regidx] |= (1 << bit);
6548 }
6549
6550 for (i = 0; i < MC_HASH_SIZE; i++)
6551 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6552 mc_filter[i]);
6553
6554 return 0;
6555}
6556
6557void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6558{
6559 int i;
6560
6561 for (i = 0; i < MC_HASH_SIZE; i++)
6562 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6563}
6564
6448#ifdef BCM_CNIC 6565#ifdef BCM_CNIC
6449/** 6566/**
6450 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH 6567 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
@@ -6463,12 +6580,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6463 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + 6580 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6464 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; 6581 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6465 u32 cl_bit_vec = (1 << iscsi_l2_cl_id); 6582 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6583 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
6466 6584
6467 /* Send a SET_MAC ramrod */ 6585 /* Send a SET_MAC ramrod */
6468 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, 6586 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
6469 cam_offset, 0); 6587 cam_offset, 0);
6470 6588
6471 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); 6589 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6472 6590
6473 return 0; 6591 return 0;
6474} 6592}
@@ -7110,20 +7228,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7110 /* Give HW time to discard old tx messages */ 7228 /* Give HW time to discard old tx messages */
7111 msleep(1); 7229 msleep(1);
7112 7230
7113 if (CHIP_IS_E1(bp)) { 7231 bnx2x_set_eth_mac(bp, 0);
7114 /* invalidate mc list,
7115 * wait and poll (interrupts are off)
7116 */
7117 bnx2x_invlidate_e1_mc_list(bp);
7118 bnx2x_set_eth_mac(bp, 0);
7119 7232
7120 } else { 7233 bnx2x_invalidate_uc_list(bp);
7121 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7122 7234
7123 bnx2x_set_eth_mac(bp, 0); 7235 if (CHIP_IS_E1(bp))
7124 7236 bnx2x_invalidate_e1_mc_list(bp);
7125 for (i = 0; i < MC_HASH_SIZE; i++) 7237 else {
7126 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 7238 bnx2x_invalidate_e1h_mc_list(bp);
7239 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7127 } 7240 }
7128 7241
7129#ifdef BCM_CNIC 7242#ifdef BCM_CNIC
@@ -8379,13 +8492,60 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8379 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 8492 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8380 bp->mdio.prtad = 8493 bp->mdio.prtad =
8381 XGXS_EXT_PHY_ADDR(ext_phy_config); 8494 XGXS_EXT_PHY_ADDR(ext_phy_config);
8495
8496 /*
8497 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
8498 * In MF mode, it is set to cover self test cases
8499 */
8500 if (IS_MF(bp))
8501 bp->port.need_hw_lock = 1;
8502 else
8503 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8504 bp->common.shmem_base,
8505 bp->common.shmem2_base);
8506}
8507
8508#ifdef BCM_CNIC
8509static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8510{
8511 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8512 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8513 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8514 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8515
8516 /* Get the number of maximum allowed iSCSI and FCoE connections */
8517 bp->cnic_eth_dev.max_iscsi_conn =
8518 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8519 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8520
8521 bp->cnic_eth_dev.max_fcoe_conn =
8522 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8523 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8524
8525 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8526 bp->cnic_eth_dev.max_iscsi_conn,
8527 bp->cnic_eth_dev.max_fcoe_conn);
8528
 8529 /* If the maximum allowed number of connections is zero -
8530 * disable the feature.
8531 */
8532 if (!bp->cnic_eth_dev.max_iscsi_conn)
8533 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8534
8535 if (!bp->cnic_eth_dev.max_fcoe_conn)
8536 bp->flags |= NO_FCOE_FLAG;
8382} 8537}
8538#endif
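bnx2x_get_cnic_info() above recovers each licence word by XORing the shmem value with FW_ENCODE_32BIT_PATTERN and extracting a masked bit-field. A hedged sketch of that decode; the pattern value, mask, and shift shown here are placeholders for the driver-header constants:

#include <stdint.h>

#define FW_ENCODE_32BIT_PATTERN	0x1e1e1e1e	/* assumed value */

static inline uint32_t lic_conn_count(uint32_t shmem_word,
				      uint32_t mask, unsigned int shift)
{
	/* de-obfuscate, then pull out the connection-count field */
	return ((FW_ENCODE_32BIT_PATTERN ^ shmem_word) & mask) >> shift;
}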
8383 8539
8384static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 8540static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8385{ 8541{
8386 u32 val, val2; 8542 u32 val, val2;
8387 int func = BP_ABS_FUNC(bp); 8543 int func = BP_ABS_FUNC(bp);
8388 int port = BP_PORT(bp); 8544 int port = BP_PORT(bp);
8545#ifdef BCM_CNIC
8546 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8547 u8 *fip_mac = bp->fip_mac;
8548#endif
8389 8549
8390 if (BP_NOMCP(bp)) { 8550 if (BP_NOMCP(bp)) {
8391 BNX2X_ERROR("warning: random MAC workaround active\n"); 8551 BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8398,7 +8558,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8398 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 8558 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8399 8559
8400#ifdef BCM_CNIC 8560#ifdef BCM_CNIC
 8401 /* iSCSI NPAR MAC */ 8561 /* iSCSI and FCoE NPAR MACs: if either the iSCSI or
 8562 * FCoE MAC is missing, the corresponding feature should be disabled.
8563 */
8402 if (IS_MF_SI(bp)) { 8564 if (IS_MF_SI(bp)) {
8403 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 8565 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8404 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 8566 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8406,8 +8568,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8406 iscsi_mac_addr_upper); 8568 iscsi_mac_addr_upper);
8407 val = MF_CFG_RD(bp, func_ext_config[func]. 8569 val = MF_CFG_RD(bp, func_ext_config[func].
8408 iscsi_mac_addr_lower); 8570 iscsi_mac_addr_lower);
8409 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8571 BNX2X_DEV_INFO("Read iSCSI MAC: "
8410 } 8572 "0x%x:0x%04x\n", val2, val);
8573 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8574
8575 /* Disable iSCSI OOO if MAC configuration is
8576 * invalid.
8577 */
8578 if (!is_valid_ether_addr(iscsi_mac)) {
8579 bp->flags |= NO_ISCSI_OOO_FLAG |
8580 NO_ISCSI_FLAG;
8581 memset(iscsi_mac, 0, ETH_ALEN);
8582 }
8583 } else
8584 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8585
8586 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8587 val2 = MF_CFG_RD(bp, func_ext_config[func].
8588 fcoe_mac_addr_upper);
8589 val = MF_CFG_RD(bp, func_ext_config[func].
8590 fcoe_mac_addr_lower);
 8591 BNX2X_DEV_INFO("Read FCoE MAC: "
8592 "0x%x:0x%04x\n", val2, val);
8593 bnx2x_set_mac_buf(fip_mac, val, val2);
8594
8595 /* Disable FCoE if MAC configuration is
8596 * invalid.
8597 */
8598 if (!is_valid_ether_addr(fip_mac)) {
8599 bp->flags |= NO_FCOE_FLAG;
8600 memset(bp->fip_mac, 0, ETH_ALEN);
8601 }
8602 } else
8603 bp->flags |= NO_FCOE_FLAG;
8411 } 8604 }
8412#endif 8605#endif
8413 } else { 8606 } else {
@@ -8421,7 +8614,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8421 iscsi_mac_upper); 8614 iscsi_mac_upper);
8422 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 8615 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8423 iscsi_mac_lower); 8616 iscsi_mac_lower);
8424 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8617 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8425#endif 8618#endif
8426 } 8619 }
8427 8620
@@ -8429,14 +8622,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8429 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8622 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8430 8623
8431#ifdef BCM_CNIC 8624#ifdef BCM_CNIC
 8432 /* Inform the upper layers about FCoE MAC */ 8625 /* Set the FCoE MAC in modes other than MF_SI */
8433 if (!CHIP_IS_E1x(bp)) { 8626 if (!CHIP_IS_E1x(bp)) {
8434 if (IS_MF_SD(bp)) 8627 if (IS_MF_SD(bp))
8435 memcpy(bp->fip_mac, bp->dev->dev_addr, 8628 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8436 sizeof(bp->fip_mac)); 8629 else if (!IS_MF(bp))
8437 else 8630 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
8438 memcpy(bp->fip_mac, bp->iscsi_mac,
8439 sizeof(bp->fip_mac));
8440 } 8631 }
8441#endif 8632#endif
8442} 8633}
@@ -8599,6 +8790,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8599 /* Get MAC addresses */ 8790 /* Get MAC addresses */
8600 bnx2x_get_mac_hwinfo(bp); 8791 bnx2x_get_mac_hwinfo(bp);
8601 8792
8793#ifdef BCM_CNIC
8794 bnx2x_get_cnic_info(bp);
8795#endif
8796
8602 return rc; 8797 return rc;
8603} 8798}
8604 8799
@@ -8813,12 +9008,197 @@ static int bnx2x_close(struct net_device *dev)
8813 return 0; 9008 return 0;
8814} 9009}
8815 9010
9011#define E1_MAX_UC_LIST 29
9012#define E1H_MAX_UC_LIST 30
9013#define E2_MAX_UC_LIST 14
9014static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
9015{
9016 if (CHIP_IS_E1(bp))
9017 return E1_MAX_UC_LIST;
9018 else if (CHIP_IS_E1H(bp))
9019 return E1H_MAX_UC_LIST;
9020 else
9021 return E2_MAX_UC_LIST;
9022}
9023
9024
9025static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
9026{
9027 if (CHIP_IS_E1(bp))
9028 /* CAM Entries for Port0:
9029 * 0 - prim ETH MAC
9030 * 1 - BCAST MAC
9031 * 2 - iSCSI L2 ring ETH MAC
9032 * 3-31 - UC MACs
9033 *
9034 * Port1 entries are allocated the same way starting from
9035 * entry 32.
9036 */
9037 return 3 + 32 * BP_PORT(bp);
9038 else if (CHIP_IS_E1H(bp)) {
9039 /* CAM Entries:
9040 * 0-7 - prim ETH MAC for each function
9041 * 8-15 - iSCSI L2 ring ETH MAC for each function
 9042 * 16-255 - UC MAC lists for each function
9043 *
9044 * Remark: There is no FCoE support for E1H, thus FCoE related
9045 * MACs are not considered.
9046 */
9047 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
9048 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
9049 } else {
9050 /* CAM Entries (there is a separate CAM per engine):
 9051 * 0-3 - prim ETH MAC for each function
9052 * 4-7 - iSCSI L2 ring ETH MAC for each function
9053 * 8-11 - FIP ucast L2 MAC for each function
9054 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
 9055 * 16-71 - UC MAC lists for each function
9056 */
9057 u8 func_idx =
9058 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
9059
9060 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
9061 bnx2x_max_uc_list(bp) * func_idx;
9062 }
9063}
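A compact restatement of the offset arithmetic above, with the function-count and CAM line constants assumed to mirror the layouts described in the comments (E1H: 8 functions, entries 0-15 reserved, UC windows from 16; E2: 4 functions per engine, UC windows from 16):

enum {
	E1H_FUNC_MAX_	= 8,	/* assumed; mirrors the comment above */
	ISCSI_LINE_	= 1,	/* stand-in for CAM_ISCSI_ETH_LINE    */
};

static unsigned int uc_cam_offset_e1h(unsigned int func,
				      unsigned int max_uc_list)
{
	/* skip the 8 primary + 8 iSCSI entries (= 16), then one
	 * max_uc_list-sized window per function */
	return E1H_FUNC_MAX_ * (ISCSI_LINE_ + 1) + max_uc_list * func;
}

For example, function 2 with the 30-entry E1H list starts its window at 8 * 2 + 30 * 2 = 76; the E2 case is analogous, with E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) = 16 as the base.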
9064
9065/* set uc list, do not wait as wait implies sleep and
9066 * set_rx_mode can be invoked from non-sleepable context.
9067 *
9068 * Instead we use the same ramrod data buffer each time we need
9069 * to configure a list of addresses, and use the fact that the
9070 * list of MACs is changed in an incremental way and that the
9071 * function is called under the netif_addr_lock. A temporary
9072 * inconsistent CAM configuration (possible in case of very fast
9073 * sequence of add/del/add on the host side) will shortly be
9074 * restored by the handler of the last ramrod.
9075 */
9076static int bnx2x_set_uc_list(struct bnx2x *bp)
9077{
9078 int i = 0, old;
9079 struct net_device *dev = bp->dev;
9080 u8 offset = bnx2x_uc_list_cam_offset(bp);
9081 struct netdev_hw_addr *ha;
9082 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9083 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9084
9085 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
9086 return -EINVAL;
9087
9088 netdev_for_each_uc_addr(ha, dev) {
9089 /* copy mac */
9090 config_cmd->config_table[i].msb_mac_addr =
9091 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
9092 config_cmd->config_table[i].middle_mac_addr =
9093 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
9094 config_cmd->config_table[i].lsb_mac_addr =
9095 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
9096
9097 config_cmd->config_table[i].vlan_id = 0;
9098 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
9099 config_cmd->config_table[i].clients_bit_vector =
9100 cpu_to_le32(1 << BP_L_ID(bp));
9101
9102 SET_FLAG(config_cmd->config_table[i].flags,
9103 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9104 T_ETH_MAC_COMMAND_SET);
9105
9106 DP(NETIF_MSG_IFUP,
9107 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
9108 config_cmd->config_table[i].msb_mac_addr,
9109 config_cmd->config_table[i].middle_mac_addr,
9110 config_cmd->config_table[i].lsb_mac_addr);
9111
9112 i++;
9113
9114 /* Set uc MAC in NIG */
9115 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
9116 LLH_CAM_ETH_LINE + i);
9117 }
9118 old = config_cmd->hdr.length;
9119 if (old > i) {
9120 for (; i < old; i++) {
9121 if (CAM_IS_INVALID(config_cmd->
9122 config_table[i])) {
9123 /* already invalidated */
9124 break;
9125 }
9126 /* invalidate */
9127 SET_FLAG(config_cmd->config_table[i].flags,
9128 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9129 T_ETH_MAC_COMMAND_INVALIDATE);
9130 }
9131 }
9132
9133 wmb();
9134
9135 config_cmd->hdr.length = i;
9136 config_cmd->hdr.offset = offset;
9137 config_cmd->hdr.client_id = 0xff;
9138 /* Mark that this ramrod doesn't use bp->set_mac_pending for
9139 * synchronization.
9140 */
9141 config_cmd->hdr.echo = 0;
9142
9143 mb();
9144
9145 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9146 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9147
9148}
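The CAM entry above stores a MAC as three byte-swapped 16-bit words; on a little-endian host the swab16(*(u16 *)&addr[n]) pattern reduces to big-endian packing of byte pairs. An endian-neutral sketch:

#include <stdint.h>

static void cam_pack_mac(const uint8_t mac[6],
			 uint16_t *msb, uint16_t *mid, uint16_t *lsb)
{
	*msb = (uint16_t)((mac[0] << 8) | mac[1]);	/* bytes 0-1 */
	*mid = (uint16_t)((mac[2] << 8) | mac[3]);	/* bytes 2-3 */
	*lsb = (uint16_t)((mac[4] << 8) | mac[5]);	/* bytes 4-5 */
}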
9149
9150void bnx2x_invalidate_uc_list(struct bnx2x *bp)
9151{
9152 int i;
9153 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9154 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9155 int ramrod_flags = WAIT_RAMROD_COMMON;
9156 u8 offset = bnx2x_uc_list_cam_offset(bp);
9157 u8 max_list_size = bnx2x_max_uc_list(bp);
9158
9159 for (i = 0; i < max_list_size; i++) {
9160 SET_FLAG(config_cmd->config_table[i].flags,
9161 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9162 T_ETH_MAC_COMMAND_INVALIDATE);
9163 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
9164 }
9165
9166 wmb();
9167
9168 config_cmd->hdr.length = max_list_size;
9169 config_cmd->hdr.offset = offset;
9170 config_cmd->hdr.client_id = 0xff;
9171 /* We'll wait for a completion this time... */
9172 config_cmd->hdr.echo = 1;
9173
9174 bp->set_mac_pending = 1;
9175
9176 mb();
9177
9178 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9179 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9180
9181 /* Wait for a completion */
9182 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
9183 ramrod_flags);
9184
9185}
9186
9187static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9188{
9189 /* some multicasts */
9190 if (CHIP_IS_E1(bp)) {
9191 return bnx2x_set_e1_mc_list(bp);
9192 } else { /* E1H and newer */
9193 return bnx2x_set_e1h_mc_list(bp);
9194 }
9195}
9196
8816/* called with netif_tx_lock from dev_mcast.c */ 9197/* called with netif_tx_lock from dev_mcast.c */
8817void bnx2x_set_rx_mode(struct net_device *dev) 9198void bnx2x_set_rx_mode(struct net_device *dev)
8818{ 9199{
8819 struct bnx2x *bp = netdev_priv(dev); 9200 struct bnx2x *bp = netdev_priv(dev);
8820 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 9201 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8821 int port = BP_PORT(bp);
8822 9202
8823 if (bp->state != BNX2X_STATE_OPEN) { 9203 if (bp->state != BNX2X_STATE_OPEN) {
8824 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 9204 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8829,47 +9209,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
8829 9209
8830 if (dev->flags & IFF_PROMISC) 9210 if (dev->flags & IFF_PROMISC)
8831 rx_mode = BNX2X_RX_MODE_PROMISC; 9211 rx_mode = BNX2X_RX_MODE_PROMISC;
8832 else if ((dev->flags & IFF_ALLMULTI) || 9212 else if (dev->flags & IFF_ALLMULTI)
8833 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8834 CHIP_IS_E1(bp)))
8835 rx_mode = BNX2X_RX_MODE_ALLMULTI; 9213 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8836 else { /* some multicasts */ 9214 else {
8837 if (CHIP_IS_E1(bp)) { 9215 /* some multicasts */
8838 /* 9216 if (bnx2x_set_mc_list(bp))
8839 * set mc list, do not wait as wait implies sleep 9217 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8840 * and set_rx_mode can be invoked from non-sleepable
8841 * context
8842 */
8843 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8844 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8845 BNX2X_MAX_MULTICAST*(1 + port));
8846
8847 bnx2x_set_e1_mc_list(bp, offset);
8848 } else { /* E1H */
8849 /* Accept one or more multicasts */
8850 struct netdev_hw_addr *ha;
8851 u32 mc_filter[MC_HASH_SIZE];
8852 u32 crc, bit, regidx;
8853 int i;
8854
8855 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8856
8857 netdev_for_each_mc_addr(ha, dev) {
8858 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8859 bnx2x_mc_addr(ha));
8860
8861 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8862 ETH_ALEN);
8863 bit = (crc >> 24) & 0xff;
8864 regidx = bit >> 5;
8865 bit &= 0x1f;
8866 mc_filter[regidx] |= (1 << bit);
8867 }
8868 9218
8869 for (i = 0; i < MC_HASH_SIZE; i++) 9219 /* some unicasts */
8870 REG_WR(bp, MC_HASH_OFFSET(bp, i), 9220 if (bnx2x_set_uc_list(bp))
8871 mc_filter[i]); 9221 rx_mode = BNX2X_RX_MODE_PROMISC;
8872 }
8873 } 9222 }
8874 9223
8875 bp->rx_mode = rx_mode; 9224 bp->rx_mode = rx_mode;
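The rewritten receive-mode logic above degrades gracefully: a failed multicast configuration widens to ALLMULTI, and a failed unicast configuration (e.g. more addresses than the CAM window holds, the -EINVAL path in bnx2x_set_uc_list()) widens to PROMISC. The decision flow, as a sketch with illustrative constants:

enum rx_mode { RX_MODE_NORMAL, RX_MODE_ALLMULTI, RX_MODE_PROMISC };

static enum rx_mode pick_rx_mode(int promisc, int allmulti,
				 int mc_failed, int uc_failed)
{
	if (promisc)
		return RX_MODE_PROMISC;
	if (allmulti)
		return RX_MODE_ALLMULTI;
	if (uc_failed)		/* UC CAM overflow: accept everything */
		return RX_MODE_PROMISC;
	if (mc_failed)		/* MC config failed: accept all mcast */
		return RX_MODE_ALLMULTI;
	return RX_MODE_NORMAL;
}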
@@ -8950,7 +9299,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
8950 .ndo_stop = bnx2x_close, 9299 .ndo_stop = bnx2x_close,
8951 .ndo_start_xmit = bnx2x_start_xmit, 9300 .ndo_start_xmit = bnx2x_start_xmit,
8952 .ndo_select_queue = bnx2x_select_queue, 9301 .ndo_select_queue = bnx2x_select_queue,
8953 .ndo_set_multicast_list = bnx2x_set_rx_mode, 9302 .ndo_set_rx_mode = bnx2x_set_rx_mode,
8954 .ndo_set_mac_address = bnx2x_change_mac_addr, 9303 .ndo_set_mac_address = bnx2x_change_mac_addr,
8955 .ndo_validate_addr = eth_validate_addr, 9304 .ndo_validate_addr = eth_validate_addr,
8956 .ndo_do_ioctl = bnx2x_ioctl, 9305 .ndo_do_ioctl = bnx2x_ioctl,
@@ -9096,7 +9445,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9096 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 9445 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9097 dev->vlan_features |= NETIF_F_TSO6; 9446 dev->vlan_features |= NETIF_F_TSO6;
9098 9447
9099#ifdef BCM_DCB 9448#ifdef BCM_DCBNL
9100 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 9449 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9101#endif 9450#endif
9102 9451
@@ -9503,6 +9852,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9503 } 9852 }
9504#endif 9853#endif
9505 9854
9855#ifdef BCM_DCBNL
9856 /* Delete app tlvs from dcbnl */
9857 bnx2x_dcbnl_update_applist(bp, true);
9858#endif
9859
9506 unregister_netdev(dev); 9860 unregister_netdev(dev);
9507 9861
9508 /* Delete all NAPI objects */ 9862 /* Delete all NAPI objects */
@@ -9776,15 +10130,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9776 HW_CID(bp, BNX2X_ISCSI_ETH_CID)); 10130 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9777 } 10131 }
9778 10132
 9779 /* There may be not more than 8 L2 and COMMON SPEs and not more 10133 /* No more than 8 L2 and no more than 8 L5 SPEs may be in flight.
 9780 * than 8 L5 SPEs in the air. 10134 * We also check that the number of outstanding
10135 * COMMON ramrods is not more than the EQ and SPQ can
10136 * accommodate.
9781 */ 10137 */
9782 if ((type == NONE_CONNECTION_TYPE) || 10138 if (type == ETH_CONNECTION_TYPE) {
9783 (type == ETH_CONNECTION_TYPE)) { 10139 if (!atomic_read(&bp->cq_spq_left))
9784 if (!atomic_read(&bp->spq_left))
9785 break; 10140 break;
9786 else 10141 else
9787 atomic_dec(&bp->spq_left); 10142 atomic_dec(&bp->cq_spq_left);
10143 } else if (type == NONE_CONNECTION_TYPE) {
10144 if (!atomic_read(&bp->eq_spq_left))
10145 break;
10146 else
10147 atomic_dec(&bp->eq_spq_left);
9788 } else if ((type == ISCSI_CONNECTION_TYPE) || 10148 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9789 (type == FCOE_CONNECTION_TYPE)) { 10149 (type == FCOE_CONNECTION_TYPE)) {
9790 if (bp->cnic_spq_pending >= 10150 if (bp->cnic_spq_pending >=
@@ -9862,7 +10222,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9862 int rc = 0; 10222 int rc = 0;
9863 10223
9864 mutex_lock(&bp->cnic_mutex); 10224 mutex_lock(&bp->cnic_mutex);
9865 c_ops = bp->cnic_ops; 10225 c_ops = rcu_dereference_protected(bp->cnic_ops,
10226 lockdep_is_held(&bp->cnic_mutex));
9866 if (c_ops) 10227 if (c_ops)
9867 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 10228 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9868 mutex_unlock(&bp->cnic_mutex); 10229 mutex_unlock(&bp->cnic_mutex);
@@ -9976,7 +10337,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9976 int count = ctl->data.credit.credit_count; 10337 int count = ctl->data.credit.credit_count;
9977 10338
9978 smp_mb__before_atomic_inc(); 10339 smp_mb__before_atomic_inc();
9979 atomic_add(count, &bp->spq_left); 10340 atomic_add(count, &bp->cq_spq_left);
9980 smp_mb__after_atomic_inc(); 10341 smp_mb__after_atomic_inc();
9981 break; 10342 break;
9982 } 10343 }
@@ -10072,6 +10433,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10072 struct bnx2x *bp = netdev_priv(dev); 10433 struct bnx2x *bp = netdev_priv(dev);
10073 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 10434 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10074 10435
10436 /* If both iSCSI and FCoE are disabled - return NULL in
 10437 * order to indicate to CNIC that it should not try to work
10438 * with this device.
10439 */
10440 if (NO_ISCSI(bp) && NO_FCOE(bp))
10441 return NULL;
10442
10075 cp->drv_owner = THIS_MODULE; 10443 cp->drv_owner = THIS_MODULE;
10076 cp->chip_id = CHIP_ID(bp); 10444 cp->chip_id = CHIP_ID(bp);
10077 cp->pdev = bp->pdev; 10445 cp->pdev = bp->pdev;
@@ -10092,6 +10460,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10092 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; 10460 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10093 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; 10461 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10094 10462
10463 if (NO_ISCSI_OOO(bp))
10464 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10465
10466 if (NO_ISCSI(bp))
10467 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10468
10469 if (NO_FCOE(bp))
10470 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10471
10095 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " 10472 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10096 "starting cid %d\n", 10473 "starting cid %d\n",
10097 cp->ctx_blk_size, 10474 cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index c939683e3d6..1c89f19a442 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero*/
6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e 6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842 6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842
6086#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
6086 6087
6087#define MDIO_AN_REG_8727_MISC_CTRL 0x8309 6088#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
6088 6089
@@ -6194,7 +6195,11 @@ The other bits are reserved and should be zero*/
6194#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 6195#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
6195#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 6196#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
6196#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 6197#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
6198#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
6199#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
6197 6200
6201#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
6202#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
6198 6203
6199#define IGU_FUNC_BASE 0x0400 6204#define IGU_FUNC_BASE 0x0400
6200 6205
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index bda60d590fa..3445ded6674 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1239 if (unlikely(bp->panic)) 1239 if (unlikely(bp->panic))
1240 return; 1240 return;
1241 1241
1242 bnx2x_stats_stm[bp->stats_state][event].action(bp);
1243
1242 /* Protect a state change flow */ 1244 /* Protect a state change flow */
1243 spin_lock_bh(&bp->stats_lock); 1245 spin_lock_bh(&bp->stats_lock);
1244 state = bp->stats_state; 1246 state = bp->stats_state;
1245 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1247 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1246 spin_unlock_bh(&bp->stats_lock); 1248 spin_unlock_bh(&bp->stats_lock);
1247 1249
1248 bnx2x_stats_stm[state][event].action(bp);
1249
1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1252 state, event, bp->stats_state); 1252 state, event, bp->stats_state);
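For orientation, bnx2x_stats_stm is a two-dimensional dispatch table indexed by [state][event]; the hunk above runs the pre-transition state's action before the stats_lock-protected transition rather than after it. The table shape implied by the .action/.next_state accesses, with assumed state names:

struct bnx2x;

enum stats_state { STATS_STATE_DISABLED, STATS_STATE_ENABLED, STATS_STATE_MAX };

struct stats_stm_entry {
	void (*action)(struct bnx2x *bp);	/* run for [state][event]   */
	enum stats_state next_state;		/* applied under stats_lock */
};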
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 0e2737eac8b..3c5c014e82b 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -6,6 +6,9 @@ obj-$(CONFIG_BONDING) += bonding.o
6 6
7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o 7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
8 8
9proc-$(CONFIG_PROC_FS) += bond_procfs.o
10bonding-objs += $(proc-y)
11
9ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o 12ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
10bonding-objs += $(ipv6-y) 13bonding-objs += $(ipv6-y)
11 14
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 171782e2bb3..494bf960442 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -246,7 +246,7 @@ static inline void __enable_port(struct port *port)
246 */ 246 */
247static inline int __port_is_enabled(struct port *port) 247static inline int __port_is_enabled(struct port *port)
248{ 248{
249 return port->slave->state == BOND_STATE_ACTIVE; 249 return bond_is_active_slave(port->slave);
250} 250}
251 251
252/** 252/**
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port)
281} 281}
282 282
283/** 283/**
284 * __get_rx_machine_lock - lock the port's RX machine 284 * __get_state_machine_lock - lock the port's state machines
285 * @port: the port we're looking at 285 * @port: the port we're looking at
286 * 286 *
287 */ 287 */
288static inline void __get_rx_machine_lock(struct port *port) 288static inline void __get_state_machine_lock(struct port *port)
289{ 289{
290 spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 290 spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
291} 291}
292 292
293/** 293/**
294 * __release_rx_machine_lock - unlock the port's RX machine 294 * __release_state_machine_lock - unlock the port's state machines
295 * @port: the port we're looking at 295 * @port: the port we're looking at
296 * 296 *
297 */ 297 */
298static inline void __release_rx_machine_lock(struct port *port) 298static inline void __release_state_machine_lock(struct port *port)
299{ 299{
300 spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 300 spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
301} 301}
302 302
303/** 303/**
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port)
388} 388}
389 389
390/** 390/**
391 * __initialize_port_locks - initialize a port's RX machine spinlock 391 * __initialize_port_locks - initialize a port's STATE machine spinlock
392 * @port: the port we're looking at 392 * @port: the port we're looking at
393 * 393 *
394 */ 394 */
395static inline void __initialize_port_locks(struct port *port) 395static inline void __initialize_port_locks(struct port *port)
396{ 396{
397 // make sure it isn't called twice 397 // make sure it isn't called twice
398 spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 398 spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
399} 399}
400 400
401//conversions 401//conversions
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1025{ 1025{
1026 rx_states_t last_state; 1026 rx_states_t last_state;
1027 1027
1028 // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
1029 __get_rx_machine_lock(port);
1030
1031 // keep current State Machine state to compare later if it was changed 1028 // keep current State Machine state to compare later if it was changed
1032 last_state = port->sm_rx_state; 1029 last_state = port->sm_rx_state;
1033 1030
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1133 pr_err("%s: An illegal loopback occurred on adapter (%s).\n" 1130 pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
1134 "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", 1131 "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
1135 port->slave->dev->master->name, port->slave->dev->name); 1132 port->slave->dev->master->name, port->slave->dev->name);
1136 __release_rx_machine_lock(port);
1137 return; 1133 return;
1138 } 1134 }
1139 __update_selected(lacpdu, port); 1135 __update_selected(lacpdu, port);
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1153 break; 1149 break;
1154 } 1150 }
1155 } 1151 }
1156 __release_rx_machine_lock(port);
1157} 1152}
1158 1153
1159/** 1154/**
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2155 goto re_arm; 2150 goto re_arm;
2156 } 2151 }
2157 2152
2153 /* Lock around state machines to protect data accessed
2154 * by all (e.g., port->sm_vars). ad_rx_machine may run
2155 * concurrently due to incoming LACPDU.
2156 */
2157 __get_state_machine_lock(port);
2158
2158 ad_rx_machine(NULL, port); 2159 ad_rx_machine(NULL, port);
2159 ad_periodic_machine(port); 2160 ad_periodic_machine(port);
2160 ad_port_selection_logic(port); 2161 ad_port_selection_logic(port);
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2164 // turn off the BEGIN bit, since we already handled it 2165 // turn off the BEGIN bit, since we already handled it
2165 if (port->sm_vars & AD_PORT_BEGIN) 2166 if (port->sm_vars & AD_PORT_BEGIN)
2166 port->sm_vars &= ~AD_PORT_BEGIN; 2167 port->sm_vars &= ~AD_PORT_BEGIN;
2168
2169 __release_state_machine_lock(port);
2167 } 2170 }
2168 2171
2169re_arm: 2172re_arm:
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2200 case AD_TYPE_LACPDU: 2203 case AD_TYPE_LACPDU:
2201 pr_debug("Received LACPDU on port %d\n", 2204 pr_debug("Received LACPDU on port %d\n",
2202 port->actor_port_number); 2205 port->actor_port_number);
2206 /* Protect against concurrent state machines */
2207 __get_state_machine_lock(port);
2203 ad_rx_machine(lacpdu, port); 2208 ad_rx_machine(lacpdu, port);
2209 __release_state_machine_lock(port);
2204 break; 2210 break;
2205 2211
2206 case AD_TYPE_MARKER: 2212 case AD_TYPE_MARKER:
@@ -2470,6 +2476,10 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2470 if (!(dev->flags & IFF_MASTER)) 2476 if (!(dev->flags & IFF_MASTER))
2471 goto out; 2477 goto out;
2472 2478
2479 skb = skb_share_check(skb, GFP_ATOMIC);
2480 if (!skb)
2481 goto out;
2482
2473 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2483 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2474 goto out; 2484 goto out;
2475 2485
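The same defensive prologue is added to each bonding receive hook in this patch (the LACPDU handler here, the RLB ARP handler, and the arp_validate path): unshare the skb before pskb_may_pull() so header adjustments never modify a clone shared with another tap. As a standalone sketch (helper name hypothetical):

static struct sk_buff *rx_hook_prologue(struct sk_buff *skb,
					unsigned int min_len)
{
	skb = skb_share_check(skb, GFP_ATOMIC);	/* unshare before edits */
	if (!skb)			/* clone failed; original freed */
		return NULL;

	if (!pskb_may_pull(skb, min_len)) {	/* need a linear header */
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}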
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 2c46a154f2c..b28baff7086 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -264,7 +264,8 @@ struct ad_bond_info {
264struct ad_slave_info { 264struct ad_slave_info {
265 struct aggregator aggregator; // 802.3ad aggregator structure 265 struct aggregator aggregator; // 802.3ad aggregator structure
266 struct port port; // 802.3ad port structure 266 struct port port; // 802.3ad port structure
267 spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt 267 spinlock_t state_machine_lock; /* mutex state machines vs.
268 incoming LACPDU */
268 u16 id; 269 u16 id;
269}; 270};
270 271
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f4e638c6512..9bc5de3e04a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -326,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
326 goto out; 326 goto out;
327 } 327 }
328 328
329 skb = skb_share_check(skb, GFP_ATOMIC);
330 if (!skb)
331 goto out;
332
329 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) 333 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
330 goto out; 334 goto out;
331 335
@@ -600,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
600 604
601 _lock_rx_hashtbl(bond); 605 _lock_rx_hashtbl(bond);
602 606
603 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src)); 607 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
604 client_info = &(bond_info->rx_hashtbl[hash_index]); 608 client_info = &(bond_info->rx_hashtbl[hash_index]);
605 609
606 if (client_info->assigned) { 610 if (client_info->assigned) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1025b85acf..16d6fe95469 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,15 +59,12 @@
59#include <linux/uaccess.h> 59#include <linux/uaccess.h>
60#include <linux/errno.h> 60#include <linux/errno.h>
61#include <linux/netdevice.h> 61#include <linux/netdevice.h>
62#include <linux/netpoll.h>
63#include <linux/inetdevice.h> 62#include <linux/inetdevice.h>
64#include <linux/igmp.h> 63#include <linux/igmp.h>
65#include <linux/etherdevice.h> 64#include <linux/etherdevice.h>
66#include <linux/skbuff.h> 65#include <linux/skbuff.h>
67#include <net/sock.h> 66#include <net/sock.h>
68#include <linux/rtnetlink.h> 67#include <linux/rtnetlink.h>
69#include <linux/proc_fs.h>
70#include <linux/seq_file.h>
71#include <linux/smp.h> 68#include <linux/smp.h>
72#include <linux/if_ether.h> 69#include <linux/if_ether.h>
73#include <net/arp.h> 70#include <net/arp.h>
@@ -174,9 +171,6 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
174atomic_t netpoll_block_tx = ATOMIC_INIT(0); 171atomic_t netpoll_block_tx = ATOMIC_INIT(0);
175#endif 172#endif
176 173
177static const char * const version =
178 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
179
180int bond_net_id __read_mostly; 174int bond_net_id __read_mostly;
181 175
182static __be32 arp_target[BOND_MAX_ARP_TARGETS]; 176static __be32 arp_target[BOND_MAX_ARP_TARGETS];
@@ -246,7 +240,7 @@ static void bond_uninit(struct net_device *bond_dev);
246 240
247/*---------------------------- General routines -----------------------------*/ 241/*---------------------------- General routines -----------------------------*/
248 242
249static const char *bond_mode_name(int mode) 243const char *bond_mode_name(int mode)
250{ 244{
251 static const char *names[] = { 245 static const char *names[] = {
252 [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", 246 [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
@@ -424,15 +418,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
424{ 418{
425 skb->dev = slave_dev; 419 skb->dev = slave_dev;
426 skb->priority = 1; 420 skb->priority = 1;
427#ifdef CONFIG_NET_POLL_CONTROLLER 421 if (unlikely(netpoll_tx_running(slave_dev)))
428 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { 422 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
429 struct netpoll *np = bond->dev->npinfo->netpoll; 423 else
430 slave_dev->npinfo = bond->dev->npinfo;
431 slave_dev->priv_flags |= IFF_IN_NETPOLL;
432 netpoll_send_skb_on_dev(np, skb, slave_dev);
433 slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
434 } else
435#endif
436 dev_queue_xmit(skb); 424 dev_queue_xmit(skb);
437 425
438 return 0; 426 return 0;
@@ -1288,63 +1276,103 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1288} 1276}
1289 1277
1290#ifdef CONFIG_NET_POLL_CONTROLLER 1278#ifdef CONFIG_NET_POLL_CONTROLLER
1291/* 1279static inline int slave_enable_netpoll(struct slave *slave)
1292 * You must hold read lock on bond->lock before calling this.
1293 */
1294static bool slaves_support_netpoll(struct net_device *bond_dev)
1295{ 1280{
1296 struct bonding *bond = netdev_priv(bond_dev); 1281 struct netpoll *np;
1297 struct slave *slave; 1282 int err = 0;
1298 int i = 0;
1299 bool ret = true;
1300 1283
1301 bond_for_each_slave(bond, slave, i) { 1284 np = kzalloc(sizeof(*np), GFP_KERNEL);
1302 if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) || 1285 err = -ENOMEM;
1303 !slave->dev->netdev_ops->ndo_poll_controller) 1286 if (!np)
1304 ret = false; 1287 goto out;
1288
1289 np->dev = slave->dev;
1290 err = __netpoll_setup(np);
1291 if (err) {
1292 kfree(np);
1293 goto out;
1305 } 1294 }
1306 return i != 0 && ret; 1295 slave->np = np;
1296out:
1297 return err;
1298}
1299static inline void slave_disable_netpoll(struct slave *slave)
1300{
1301 struct netpoll *np = slave->np;
1302
1303 if (!np)
1304 return;
1305
1306 slave->np = NULL;
1307 synchronize_rcu_bh();
1308 __netpoll_cleanup(np);
1309 kfree(np);
1310}
1311static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
1312{
1313 if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
1314 return false;
1315 if (!slave_dev->netdev_ops->ndo_poll_controller)
1316 return false;
1317 return true;
1307} 1318}
1308 1319
1309static void bond_poll_controller(struct net_device *bond_dev) 1320static void bond_poll_controller(struct net_device *bond_dev)
1310{ 1321{
1311 struct bonding *bond = netdev_priv(bond_dev); 1322}
1323
1324static void __bond_netpoll_cleanup(struct bonding *bond)
1325{
1312 struct slave *slave; 1326 struct slave *slave;
1313 int i; 1327 int i;
1314 1328
1315 bond_for_each_slave(bond, slave, i) { 1329 bond_for_each_slave(bond, slave, i)
1316 if (slave->dev && IS_UP(slave->dev)) 1330 if (IS_UP(slave->dev))
1317 netpoll_poll_dev(slave->dev); 1331 slave_disable_netpoll(slave);
1318 }
1319} 1332}
1320
1321static void bond_netpoll_cleanup(struct net_device *bond_dev) 1333static void bond_netpoll_cleanup(struct net_device *bond_dev)
1322{ 1334{
1323 struct bonding *bond = netdev_priv(bond_dev); 1335 struct bonding *bond = netdev_priv(bond_dev);
1336
1337 read_lock(&bond->lock);
1338 __bond_netpoll_cleanup(bond);
1339 read_unlock(&bond->lock);
1340}
1341
1342static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1343{
1344 struct bonding *bond = netdev_priv(dev);
1324 struct slave *slave; 1345 struct slave *slave;
1325 const struct net_device_ops *ops; 1346 int i, err = 0;
1326 int i;
1327 1347
1328 read_lock(&bond->lock); 1348 read_lock(&bond->lock);
1329 bond_dev->npinfo = NULL;
1330 bond_for_each_slave(bond, slave, i) { 1349 bond_for_each_slave(bond, slave, i) {
1331 if (slave->dev) { 1350 err = slave_enable_netpoll(slave);
1332 ops = slave->dev->netdev_ops; 1351 if (err) {
1333 if (ops->ndo_netpoll_cleanup) 1352 __bond_netpoll_cleanup(bond);
1334 ops->ndo_netpoll_cleanup(slave->dev); 1353 break;
1335 else
1336 slave->dev->npinfo = NULL;
1337 } 1354 }
1338 } 1355 }
1339 read_unlock(&bond->lock); 1356 read_unlock(&bond->lock);
1357 return err;
1340} 1358}
1341 1359
1342#else 1360static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
1361{
1362 return bond->dev->npinfo;
1363}
1343 1364
1365#else
1366static inline int slave_enable_netpoll(struct slave *slave)
1367{
1368 return 0;
1369}
1370static inline void slave_disable_netpoll(struct slave *slave)
1371{
1372}
1344static void bond_netpoll_cleanup(struct net_device *bond_dev) 1373static void bond_netpoll_cleanup(struct net_device *bond_dev)
1345{ 1374{
1346} 1375}
1347
1348#endif 1376#endif
1349 1377
1350/*---------------------------------- IOCTL ----------------------------------*/ 1378/*---------------------------------- IOCTL ----------------------------------*/
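slave_disable_netpoll() above clears slave->np before synchronize_rcu_bh(): netpoll transmit runs with bottom halves disabled, which is an RCU-bh read side, so the barrier guarantees no CPU still dereferences the old pointer when __netpoll_cleanup() runs. The same publish-then-quiesce ordering in miniature (struct foo and foo_cleanup() are hypothetical stand-ins):

static void detach_and_free(struct foo **slot)
{
	struct foo *old = *slot;

	if (!old)
		return;

	*slot = NULL;			/* unpublish for new readers  */
	synchronize_rcu_bh();		/* drain in-flight bh readers */
	foo_cleanup(old);		/* private teardown           */
	kfree(old);
}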
@@ -1372,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond)
1372{ 1400{
1373 struct slave *slave; 1401 struct slave *slave;
1374 struct net_device *bond_dev = bond->dev; 1402 struct net_device *bond_dev = bond->dev;
1375 unsigned long features = bond_dev->features; 1403 u32 features = bond_dev->features;
1376 unsigned long vlan_features = 0; 1404 u32 vlan_features = 0;
1377 unsigned short max_hard_header_len = max((u16)ETH_HLEN, 1405 unsigned short max_hard_header_len = max((u16)ETH_HLEN,
1378 bond_dev->hard_header_len); 1406 bond_dev->hard_header_len);
1379 int i; 1407 int i;
@@ -1400,8 +1428,8 @@ static int bond_compute_features(struct bonding *bond)
1400 1428
1401done: 1429done:
1402 features |= (bond_dev->features & BOND_VLAN_FEATURES); 1430 features |= (bond_dev->features & BOND_VLAN_FEATURES);
1403 bond_dev->features = netdev_fix_features(features, NULL); 1431 bond_dev->features = netdev_fix_features(bond_dev, features);
1404 bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL); 1432 bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
1405 bond_dev->hard_header_len = max_hard_header_len; 1433 bond_dev->hard_header_len = max_hard_header_len;
1406 1434
1407 return 0; 1435 return 0;
@@ -1423,6 +1451,72 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
1423 bond->setup_by_slave = 1; 1451 bond->setup_by_slave = 1;
1424} 1452}
1425 1453
1454/* On bonding slaves other than the currently active slave, suppress
1455 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
1456 * ARP on active-backup slaves with arp_validate enabled.
1457 */
1458static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1459 struct slave *slave,
1460 struct bonding *bond)
1461{
1462 if (bond_is_slave_inactive(slave)) {
1463 if (slave_do_arp_validate(bond, slave) &&
1464 skb->protocol == __cpu_to_be16(ETH_P_ARP))
1465 return false;
1466
1467 if (bond->params.mode == BOND_MODE_ALB &&
1468 skb->pkt_type != PACKET_BROADCAST &&
1469 skb->pkt_type != PACKET_MULTICAST)
1470 return false;
1471
1472 if (bond->params.mode == BOND_MODE_8023AD &&
1473 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
1474 return false;
1475
1476 return true;
1477 }
1478 return false;
1479}
1480
1481static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1482{
1483 struct sk_buff *skb = *pskb;
1484 struct slave *slave;
1485 struct bonding *bond;
1486
1487 skb = skb_share_check(skb, GFP_ATOMIC);
1488 if (unlikely(!skb))
1489 return RX_HANDLER_CONSUMED;
1490
1491 *pskb = skb;
1492
1493 slave = bond_slave_get_rcu(skb->dev);
1494 bond = slave->bond;
1495
1496 if (bond->params.arp_interval)
1497 slave->dev->last_rx = jiffies;
1498
1499 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1500 return RX_HANDLER_EXACT;
1501 }
1502
1503 skb->dev = bond->dev;
1504
1505 if (bond->params.mode == BOND_MODE_ALB &&
1506 bond->dev->priv_flags & IFF_BRIDGE_PORT &&
1507 skb->pkt_type == PACKET_HOST) {
1508
1509 if (unlikely(skb_cow_head(skb,
1510 skb->data - skb_mac_header(skb)))) {
1511 kfree_skb(skb);
1512 return RX_HANDLER_CONSUMED;
1513 }
1514 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
1515 }
1516
1517 return RX_HANDLER_ANOTHER;
1518}
1519
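bond_handle_frame() is the bond's new per-slave receive hook. Its return codes steer __netif_receive_skb(): RX_HANDLER_CONSUMED ends processing, RX_HANDLER_EXACT restricts delivery to handlers bound to the slave itself (how inactive-slave duplicates are suppressed above), and RX_HANDLER_ANOTHER reruns delivery with skb->dev already rewritten to the bond. Attach/detach shape, mirroring the enslave and release hunks below:

	/* enslave: hand the hook its per-slave context */
	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
					 new_slave);
	if (res)
		goto err_dest_symlinks;		/* unwind as below */

	/* release: detach first, then wait, so no CPU is still inside
	 * bond_handle_frame() when the slave struct is freed */
	netdev_rx_handler_unregister(slave_dev);
	synchronize_net();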
1426/* enslave device <slave> to bond device <master> */ 1520/* enslave device <slave> to bond device <master> */
1427int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1521int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1428{ 1522{
@@ -1594,11 +1688,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1594 } 1688 }
1595 } 1689 }
1596 1690
1597 res = netdev_set_master(slave_dev, bond_dev); 1691 res = netdev_set_bond_master(slave_dev, bond_dev);
1598 if (res) { 1692 if (res) {
1599 pr_debug("Error %d calling netdev_set_master\n", res); 1693 pr_debug("Error %d calling netdev_set_bond_master\n", res);
1600 goto err_restore_mac; 1694 goto err_restore_mac;
1601 } 1695 }
1696
1602 /* open the slave since the application closed it */ 1697 /* open the slave since the application closed it */
1603 res = dev_open(slave_dev); 1698 res = dev_open(slave_dev);
1604 if (res) { 1699 if (res) {
@@ -1606,6 +1701,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1606 goto err_unset_master; 1701 goto err_unset_master;
1607 } 1702 }
1608 1703
1704 new_slave->bond = bond;
1609 new_slave->dev = slave_dev; 1705 new_slave->dev = slave_dev;
1610 slave_dev->priv_flags |= IFF_BONDING; 1706 slave_dev->priv_flags |= IFF_BONDING;
1611 1707
@@ -1757,7 +1853,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1757 break; 1853 break;
1758 case BOND_MODE_TLB: 1854 case BOND_MODE_TLB:
1759 case BOND_MODE_ALB: 1855 case BOND_MODE_ALB:
1760 new_slave->state = BOND_STATE_ACTIVE; 1856 bond_set_active_slave(new_slave);
1761 bond_set_slave_inactive_flags(new_slave); 1857 bond_set_slave_inactive_flags(new_slave);
1762 bond_select_active_slave(bond); 1858 bond_select_active_slave(bond);
1763 break; 1859 break;
@@ -1765,7 +1861,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1765 pr_debug("This slave is always active in trunk mode\n"); 1861 pr_debug("This slave is always active in trunk mode\n");
1766 1862
1767 /* always active in trunk mode */ 1863 /* always active in trunk mode */
1768 new_slave->state = BOND_STATE_ACTIVE; 1864 bond_set_active_slave(new_slave);
1769 1865
1770 /* In trunking mode there is little meaning to curr_active_slave 1866 /* In trunking mode there is little meaning to curr_active_slave
1771 * anyway (it holds no special properties of the bond device), 1867 * anyway (it holds no special properties of the bond device),
@@ -1782,37 +1878,49 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1782 bond_set_carrier(bond); 1878 bond_set_carrier(bond);
1783 1879
1784#ifdef CONFIG_NET_POLL_CONTROLLER 1880#ifdef CONFIG_NET_POLL_CONTROLLER
1785 if (slaves_support_netpoll(bond_dev)) { 1881 slave_dev->npinfo = bond_netpoll_info(bond);
1786 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; 1882 if (slave_dev->npinfo) {
1787 if (bond_dev->npinfo) 1883 if (slave_enable_netpoll(new_slave)) {
1788 slave_dev->npinfo = bond_dev->npinfo; 1884 read_unlock(&bond->lock);
1789 } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { 1885 pr_info("Error, %s: master_dev is using netpoll, "
1790 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1886 "but new slave device does not support netpoll.\n",
1791 pr_info("New slave device %s does not support netpoll\n", 1887 bond_dev->name);
1792 slave_dev->name); 1888 res = -EBUSY;
1793 pr_info("Disabling netpoll support for %s\n", bond_dev->name); 1889 goto err_close;
1890 }
1794 } 1891 }
1795#endif 1892#endif
1893
1796 read_unlock(&bond->lock); 1894 read_unlock(&bond->lock);
1797 1895
1798 res = bond_create_slave_symlinks(bond_dev, slave_dev); 1896 res = bond_create_slave_symlinks(bond_dev, slave_dev);
1799 if (res) 1897 if (res)
1800 goto err_close; 1898 goto err_close;
1801 1899
1900 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
1901 new_slave);
1902 if (res) {
1903 pr_debug("Error %d calling netdev_rx_handler_register\n", res);
1904 goto err_dest_symlinks;
1905 }
1906
1802 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1907 pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
1803 bond_dev->name, slave_dev->name, 1908 bond_dev->name, slave_dev->name,
1804 new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup", 1909 bond_is_active_slave(new_slave) ? "n active" : " backup",
1805 new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); 1910 new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
1806 1911
1807 /* enslave is successful */ 1912 /* enslave is successful */
1808 return 0; 1913 return 0;
1809 1914
1810/* Undo stages on error */ 1915/* Undo stages on error */
1916err_dest_symlinks:
1917 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1918
1811err_close: 1919err_close:
1812 dev_close(slave_dev); 1920 dev_close(slave_dev);
1813 1921
1814err_unset_master: 1922err_unset_master:
1815 netdev_set_master(slave_dev, NULL); 1923 netdev_set_bond_master(slave_dev, NULL);
1816 1924
1817err_restore_mac: 1925err_restore_mac:
1818 if (!bond->params.fail_over_mac) { 1926 if (!bond->params.fail_over_mac) {
@@ -1876,6 +1984,14 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1876 return -EINVAL; 1984 return -EINVAL;
1877 } 1985 }
1878 1986
1987 /* unregister rx_handler early so bond_handle_frame wouldn't be called
1988 * for this slave anymore.
1989 */
1990 netdev_rx_handler_unregister(slave_dev);
1991 write_unlock_bh(&bond->lock);
1992 synchronize_net();
1993 write_lock_bh(&bond->lock);
1994
1879 if (!bond->params.fail_over_mac) { 1995 if (!bond->params.fail_over_mac) {
1880 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && 1996 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
1881 bond->slave_cnt > 1) 1997 bond->slave_cnt > 1)
@@ -1895,7 +2011,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1895 2011
1896 pr_info("%s: releasing %s interface %s\n", 2012 pr_info("%s: releasing %s interface %s\n",
1897 bond_dev->name, 2013 bond_dev->name,
1898 (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup", 2014 bond_is_active_slave(slave) ? "active" : "backup",
1899 slave_dev->name); 2015 slave_dev->name);
1900 2016
1901 oldcurrent = bond->curr_active_slave; 2017 oldcurrent = bond->curr_active_slave;
@@ -1992,19 +2108,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1992 netif_addr_unlock_bh(bond_dev); 2108 netif_addr_unlock_bh(bond_dev);
1993 } 2109 }
1994 2110
1995 netdev_set_master(slave_dev, NULL); 2111 netdev_set_bond_master(slave_dev, NULL);
1996
1997#ifdef CONFIG_NET_POLL_CONTROLLER
1998 read_lock_bh(&bond->lock);
1999 2112
2000 if (slaves_support_netpoll(bond_dev)) 2113 slave_disable_netpoll(slave);
2001 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
2002 read_unlock_bh(&bond->lock);
2003 if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
2004 slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
2005 else
2006 slave_dev->npinfo = NULL;
2007#endif
2008 2114
2009 /* close slave before restoring its mac address */ 2115 /* close slave before restoring its mac address */
2010 dev_close(slave_dev); 2116 dev_close(slave_dev);
@@ -2018,9 +2124,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2018 2124
2019 dev_set_mtu(slave_dev, slave->original_mtu); 2125 dev_set_mtu(slave_dev, slave->original_mtu);
2020 2126
2021 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | 2127 slave_dev->priv_flags &= ~IFF_BONDING;
2022 IFF_SLAVE_INACTIVE | IFF_BONDING |
2023 IFF_SLAVE_NEEDARP);
2024 2128
2025 kfree(slave); 2129 kfree(slave);
2026 2130
@@ -2028,7 +2132,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2028} 2132}
2029 2133
2030/* 2134/*
2031* First release a slave and than destroy the bond if no more slaves are left. 2135* First release a slave and then destroy the bond if no more slaves are left.
2032* Must be under rtnl_lock when this function is called. 2136* Must be under rtnl_lock when this function is called.
2033*/ 2137*/
2034static int bond_release_and_destroy(struct net_device *bond_dev, 2138static int bond_release_and_destroy(struct net_device *bond_dev,
@@ -2039,6 +2143,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
2039 2143
2040 ret = bond_release(bond_dev, slave_dev); 2144 ret = bond_release(bond_dev, slave_dev);
2041 if ((ret == 0) && (bond->slave_cnt == 0)) { 2145 if ((ret == 0) && (bond->slave_cnt == 0)) {
2146 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2042 pr_info("%s: destroying bond %s.\n", 2147 pr_info("%s: destroying bond %s.\n",
2043 bond_dev->name, bond_dev->name); 2148 bond_dev->name, bond_dev->name);
2044 unregister_netdevice(bond_dev); 2149 unregister_netdevice(bond_dev);
@@ -2083,6 +2188,12 @@ static int bond_release_all(struct net_device *bond_dev)
2083 */ 2188 */
2084 write_unlock_bh(&bond->lock); 2189 write_unlock_bh(&bond->lock);
2085 2190
 2191 /* unregister rx_handler early so bond_handle_frame won't
2192 * be called for this slave anymore.
2193 */
2194 netdev_rx_handler_unregister(slave_dev);
2195 synchronize_net();
2196
2086 if (bond_is_lb(bond)) { 2197 if (bond_is_lb(bond)) {
2087 /* must be called only after the slave 2198 /* must be called only after the slave
2088 * has been detached from the list 2199 * has been detached from the list
@@ -2114,7 +2225,9 @@ static int bond_release_all(struct net_device *bond_dev)
2114 netif_addr_unlock_bh(bond_dev); 2225 netif_addr_unlock_bh(bond_dev);
2115 } 2226 }
2116 2227
2117 netdev_set_master(slave_dev, NULL); 2228 netdev_set_bond_master(slave_dev, NULL);
2229
2230 slave_disable_netpoll(slave);
2118 2231
2119 /* close slave before restoring its mac address */ 2232 /* close slave before restoring its mac address */
2120 dev_close(slave_dev); 2233 dev_close(slave_dev);
@@ -2126,9 +2239,6 @@ static int bond_release_all(struct net_device *bond_dev)
2126 dev_set_mac_address(slave_dev, &addr); 2239 dev_set_mac_address(slave_dev, &addr);
2127 } 2240 }
2128 2241
2129 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
2130 IFF_SLAVE_INACTIVE);
2131
2132 kfree(slave); 2242 kfree(slave);
2133 2243
2134 /* re-acquire the lock before getting the next slave */ 2244 /* re-acquire the lock before getting the next slave */
@@ -2242,7 +2352,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2242 res = 0; 2352 res = 0;
2243 strcpy(info->slave_name, slave->dev->name); 2353 strcpy(info->slave_name, slave->dev->name);
2244 info->link = slave->link; 2354 info->link = slave->link;
2245 info->state = slave->state; 2355 info->state = bond_slave_state(slave);
2246 info->link_failure_count = slave->link_failure_count; 2356 info->link_failure_count = slave->link_failure_count;
2247 break; 2357 break;
2248 } 2358 }
@@ -2281,7 +2391,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2281 bond->dev->name, 2391 bond->dev->name,
2282 (bond->params.mode == 2392 (bond->params.mode ==
2283 BOND_MODE_ACTIVEBACKUP) ? 2393 BOND_MODE_ACTIVEBACKUP) ?
2284 ((slave->state == BOND_STATE_ACTIVE) ? 2394 (bond_is_active_slave(slave) ?
2285 "active " : "backup ") : "", 2395 "active " : "backup ") : "",
2286 slave->dev->name, 2396 slave->dev->name,
2287 bond->params.downdelay * bond->params.miimon); 2397 bond->params.downdelay * bond->params.miimon);
@@ -2372,13 +2482,13 @@ static void bond_miimon_commit(struct bonding *bond)
2372 2482
2373 if (bond->params.mode == BOND_MODE_8023AD) { 2483 if (bond->params.mode == BOND_MODE_8023AD) {
2374 /* prevent it from being the active one */ 2484 /* prevent it from being the active one */
2375 slave->state = BOND_STATE_BACKUP; 2485 bond_set_backup_slave(slave);
2376 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 2486 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2377 /* make it immediately active */ 2487 /* make it immediately active */
2378 slave->state = BOND_STATE_ACTIVE; 2488 bond_set_active_slave(slave);
2379 } else if (slave != bond->primary_slave) { 2489 } else if (slave != bond->primary_slave) {
2380 /* prevent it from being the active one */ 2490 /* prevent it from being the active one */
2381 slave->state = BOND_STATE_BACKUP; 2491 bond_set_backup_slave(slave);
2382 } 2492 }
2383 2493
2384 bond_update_speed_duplex(slave); 2494 bond_update_speed_duplex(slave);
@@ -2571,11 +2681,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2571 2681
2572static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2682static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2573{ 2683{
2574 int i, vlan_id, rv; 2684 int i, vlan_id;
2575 __be32 *targets = bond->params.arp_targets; 2685 __be32 *targets = bond->params.arp_targets;
2576 struct vlan_entry *vlan; 2686 struct vlan_entry *vlan;
2577 struct net_device *vlan_dev; 2687 struct net_device *vlan_dev;
2578 struct flowi fl;
2579 struct rtable *rt; 2688 struct rtable *rt;
2580 2689
2581 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 2690 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
@@ -2594,15 +2703,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2594 * determine which VLAN interface would be used, so we 2703 * determine which VLAN interface would be used, so we
2595 * can tag the ARP with the proper VLAN tag. 2704 * can tag the ARP with the proper VLAN tag.
2596 */ 2705 */
2597 memset(&fl, 0, sizeof(fl)); 2706 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2598 fl.fl4_dst = targets[i]; 2707 RTO_ONLINK, 0);
2599 fl.fl4_tos = RTO_ONLINK; 2708 if (IS_ERR(rt)) {
2600
2601 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
2602 if (rv) {
2603 if (net_ratelimit()) { 2709 if (net_ratelimit()) {
2604 pr_warning("%s: no route to arp_ip_target %pI4\n", 2710 pr_warning("%s: no route to arp_ip_target %pI4\n",
2605 bond->dev->name, &fl.fl4_dst); 2711 bond->dev->name, &targets[i]);
2606 } 2712 }
2607 continue; 2713 continue;
2608 } 2714 }
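The routing conversion above replaces a zeroed struct flowi plus ip_route_output_key() with ip_route_output(), which returns the rtable directly and reports failure through the pointer, hence the IS_ERR() check. Call shape, restated:

	struct rtable *rt;

	rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
			     RTO_ONLINK, 0);
	if (IS_ERR(rt))			/* no route; PTR_ERR(rt) = errno */
		continue;

	/* ... use rt->dst.dev to pick the egress / VLAN device ... */
	ip_rt_put(rt);			/* drop the lookup's reference */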
@@ -2638,7 +2744,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2638 2744
2639 if (net_ratelimit()) { 2745 if (net_ratelimit()) {
2640 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2746 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2641 bond->dev->name, &fl.fl4_dst, 2747 bond->dev->name, &targets[i],
2642 rt->dst.dev ? rt->dst.dev->name : "NULL"); 2748 rt->dst.dev ? rt->dst.dev->name : "NULL");
2643 } 2749 }
2644 ip_rt_put(rt); 2750 ip_rt_put(rt);
@@ -2733,6 +2839,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2733 if (!slave || !slave_do_arp_validate(bond, slave)) 2839 if (!slave || !slave_do_arp_validate(bond, slave))
2734 goto out_unlock; 2840 goto out_unlock;
2735 2841
2842 skb = skb_share_check(skb, GFP_ATOMIC);
2843 if (!skb)
2844 goto out_unlock;
2845
2736 if (!pskb_may_pull(skb, arp_hdr_len(dev))) 2846 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
2737 goto out_unlock; 2847 goto out_unlock;
2738 2848
@@ -2752,7 +2862,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2752 memcpy(&tip, arp_ptr, 4); 2862 memcpy(&tip, arp_ptr, 4);
2753 2863
2754 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", 2864 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
2755 bond->dev->name, slave->dev->name, slave->state, 2865 bond->dev->name, slave->dev->name, bond_slave_state(slave),
2756 bond->params.arp_validate, slave_do_arp_validate(bond, slave), 2866 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2757 &sip, &tip); 2867 &sip, &tip);
2758 2868
@@ -2764,7 +2874,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2764 * the active, through one switch, the router, then the other 2874 * the active, through one switch, the router, then the other
2765 * switch before reaching the backup. 2875 * switch before reaching the backup.
2766 */ 2876 */
2767 if (slave->state == BOND_STATE_ACTIVE) 2877 if (bond_is_active_slave(slave))
2768 bond_validate_arp(bond, slave, sip, tip); 2878 bond_validate_arp(bond, slave, sip, tip);
2769 else 2879 else
2770 bond_validate_arp(bond, slave, tip, sip); 2880 bond_validate_arp(bond, slave, tip, sip);
@@ -2826,7 +2936,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2826 slave->dev->last_rx + delta_in_ticks)) { 2936 slave->dev->last_rx + delta_in_ticks)) {
2827 2937
2828 slave->link = BOND_LINK_UP; 2938 slave->link = BOND_LINK_UP;
2829 slave->state = BOND_STATE_ACTIVE; 2939 bond_set_active_slave(slave);
2830 2940
2831 /* primary_slave has no meaning in round-robin 2941 /* primary_slave has no meaning in round-robin
2832 * mode. the window of a slave being up and 2942 * mode. the window of a slave being up and
@@ -2859,7 +2969,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2859 slave->dev->last_rx + 2 * delta_in_ticks)) { 2969 slave->dev->last_rx + 2 * delta_in_ticks)) {
2860 2970
2861 slave->link = BOND_LINK_DOWN; 2971 slave->link = BOND_LINK_DOWN;
2862 slave->state = BOND_STATE_BACKUP; 2972 bond_set_backup_slave(slave);
2863 2973
2864 if (slave->link_failure_count < UINT_MAX) 2974 if (slave->link_failure_count < UINT_MAX)
2865 slave->link_failure_count++; 2975 slave->link_failure_count++;
@@ -2953,7 +3063,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2953 * gives each slave a chance to tx/rx traffic 3063 * gives each slave a chance to tx/rx traffic
2954 * before being taken out 3064 * before being taken out
2955 */ 3065 */
2956 if (slave->state == BOND_STATE_BACKUP && 3066 if (!bond_is_active_slave(slave) &&
2957 !bond->current_arp_slave && 3067 !bond->current_arp_slave &&
2958 !time_in_range(jiffies, 3068 !time_in_range(jiffies,
2959 slave_last_rx(bond, slave) - delta_in_ticks, 3069 slave_last_rx(bond, slave) - delta_in_ticks,
@@ -2970,7 +3080,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2970 * the bond has an IP address) 3080 * the bond has an IP address)
2971 */ 3081 */
2972 trans_start = dev_trans_start(slave->dev); 3082 trans_start = dev_trans_start(slave->dev);
2973 if ((slave->state == BOND_STATE_ACTIVE) && 3083 if (bond_is_active_slave(slave) &&
2974 (!time_in_range(jiffies, 3084 (!time_in_range(jiffies,
2975 trans_start - delta_in_ticks, 3085 trans_start - delta_in_ticks,
2976 trans_start + 2 * delta_in_ticks) || 3086 trans_start + 2 * delta_in_ticks) ||
@@ -3178,299 +3288,6 @@ out:
3178 read_unlock(&bond->lock); 3288 read_unlock(&bond->lock);
3179} 3289}
3180 3290
3181/*------------------------------ proc/seq_file-------------------------------*/
3182
3183#ifdef CONFIG_PROC_FS
3184
3185static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3186 __acquires(RCU)
3187 __acquires(&bond->lock)
3188{
3189 struct bonding *bond = seq->private;
3190 loff_t off = 0;
3191 struct slave *slave;
3192 int i;
3193
3194 /* make sure the bond won't be taken away */
3195 rcu_read_lock();
3196 read_lock(&bond->lock);
3197
3198 if (*pos == 0)
3199 return SEQ_START_TOKEN;
3200
3201 bond_for_each_slave(bond, slave, i) {
3202 if (++off == *pos)
3203 return slave;
3204 }
3205
3206 return NULL;
3207}
3208
3209static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3210{
3211 struct bonding *bond = seq->private;
3212 struct slave *slave = v;
3213
3214 ++*pos;
3215 if (v == SEQ_START_TOKEN)
3216 return bond->first_slave;
3217
3218 slave = slave->next;
3219
3220 return (slave == bond->first_slave) ? NULL : slave;
3221}
3222
3223static void bond_info_seq_stop(struct seq_file *seq, void *v)
3224 __releases(&bond->lock)
3225 __releases(RCU)
3226{
3227 struct bonding *bond = seq->private;
3228
3229 read_unlock(&bond->lock);
3230 rcu_read_unlock();
3231}
3232
3233static void bond_info_show_master(struct seq_file *seq)
3234{
3235 struct bonding *bond = seq->private;
3236 struct slave *curr;
3237 int i;
3238
3239 read_lock(&bond->curr_slave_lock);
3240 curr = bond->curr_active_slave;
3241 read_unlock(&bond->curr_slave_lock);
3242
3243 seq_printf(seq, "Bonding Mode: %s",
3244 bond_mode_name(bond->params.mode));
3245
3246 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
3247 bond->params.fail_over_mac)
3248 seq_printf(seq, " (fail_over_mac %s)",
3249 fail_over_mac_tbl[bond->params.fail_over_mac].modename);
3250
3251 seq_printf(seq, "\n");
3252
3253 if (bond->params.mode == BOND_MODE_XOR ||
3254 bond->params.mode == BOND_MODE_8023AD) {
3255 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
3256 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
3257 bond->params.xmit_policy);
3258 }
3259
3260 if (USES_PRIMARY(bond->params.mode)) {
3261 seq_printf(seq, "Primary Slave: %s",
3262 (bond->primary_slave) ?
3263 bond->primary_slave->dev->name : "None");
3264 if (bond->primary_slave)
3265 seq_printf(seq, " (primary_reselect %s)",
3266 pri_reselect_tbl[bond->params.primary_reselect].modename);
3267
3268 seq_printf(seq, "\nCurrently Active Slave: %s\n",
3269 (curr) ? curr->dev->name : "None");
3270 }
3271
3272 seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
3273 "up" : "down");
3274 seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
3275 seq_printf(seq, "Up Delay (ms): %d\n",
3276 bond->params.updelay * bond->params.miimon);
3277 seq_printf(seq, "Down Delay (ms): %d\n",
3278 bond->params.downdelay * bond->params.miimon);
3279
3280
3281 /* ARP information */
3282 if (bond->params.arp_interval > 0) {
3283 int printed = 0;
3284 seq_printf(seq, "ARP Polling Interval (ms): %d\n",
3285 bond->params.arp_interval);
3286
3287 seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
3288
3289 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
3290 if (!bond->params.arp_targets[i])
3291 break;
3292 if (printed)
3293 seq_printf(seq, ",");
3294 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
3295 printed = 1;
3296 }
3297 seq_printf(seq, "\n");
3298 }
3299
3300 if (bond->params.mode == BOND_MODE_8023AD) {
3301 struct ad_info ad_info;
3302
3303 seq_puts(seq, "\n802.3ad info\n");
3304 seq_printf(seq, "LACP rate: %s\n",
3305 (bond->params.lacp_fast) ? "fast" : "slow");
3306 seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
3307 ad_select_tbl[bond->params.ad_select].modename);
3308
3309 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
3310 seq_printf(seq, "bond %s has no active aggregator\n",
3311 bond->dev->name);
3312 } else {
3313 seq_printf(seq, "Active Aggregator Info:\n");
3314
3315 seq_printf(seq, "\tAggregator ID: %d\n",
3316 ad_info.aggregator_id);
3317 seq_printf(seq, "\tNumber of ports: %d\n",
3318 ad_info.ports);
3319 seq_printf(seq, "\tActor Key: %d\n",
3320 ad_info.actor_key);
3321 seq_printf(seq, "\tPartner Key: %d\n",
3322 ad_info.partner_key);
3323 seq_printf(seq, "\tPartner Mac Address: %pM\n",
3324 ad_info.partner_system);
3325 }
3326 }
3327}
3328
3329static void bond_info_show_slave(struct seq_file *seq,
3330 const struct slave *slave)
3331{
3332 struct bonding *bond = seq->private;
3333
3334 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
3335 seq_printf(seq, "MII Status: %s\n",
3336 (slave->link == BOND_LINK_UP) ? "up" : "down");
3337 seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
3338 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
3339 seq_printf(seq, "Link Failure Count: %u\n",
3340 slave->link_failure_count);
3341
3342 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
3343
3344 if (bond->params.mode == BOND_MODE_8023AD) {
3345 const struct aggregator *agg
3346 = SLAVE_AD_INFO(slave).port.aggregator;
3347
3348 if (agg)
3349 seq_printf(seq, "Aggregator ID: %d\n",
3350 agg->aggregator_identifier);
3351 else
3352 seq_puts(seq, "Aggregator ID: N/A\n");
3353 }
3354 seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
3355}
3356
3357static int bond_info_seq_show(struct seq_file *seq, void *v)
3358{
3359 if (v == SEQ_START_TOKEN) {
3360 seq_printf(seq, "%s\n", version);
3361 bond_info_show_master(seq);
3362 } else
3363 bond_info_show_slave(seq, v);
3364
3365 return 0;
3366}
3367
3368static const struct seq_operations bond_info_seq_ops = {
3369 .start = bond_info_seq_start,
3370 .next = bond_info_seq_next,
3371 .stop = bond_info_seq_stop,
3372 .show = bond_info_seq_show,
3373};
3374
3375static int bond_info_open(struct inode *inode, struct file *file)
3376{
3377 struct seq_file *seq;
3378 struct proc_dir_entry *proc;
3379 int res;
3380
3381 res = seq_open(file, &bond_info_seq_ops);
3382 if (!res) {
3383 /* recover the pointer buried in proc_dir_entry data */
3384 seq = file->private_data;
3385 proc = PDE(inode);
3386 seq->private = proc->data;
3387 }
3388
3389 return res;
3390}
3391
3392static const struct file_operations bond_info_fops = {
3393 .owner = THIS_MODULE,
3394 .open = bond_info_open,
3395 .read = seq_read,
3396 .llseek = seq_lseek,
3397 .release = seq_release,
3398};
3399
3400static void bond_create_proc_entry(struct bonding *bond)
3401{
3402 struct net_device *bond_dev = bond->dev;
3403 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3404
3405 if (bn->proc_dir) {
3406 bond->proc_entry = proc_create_data(bond_dev->name,
3407 S_IRUGO, bn->proc_dir,
3408 &bond_info_fops, bond);
3409 if (bond->proc_entry == NULL)
3410 pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
3411 DRV_NAME, bond_dev->name);
3412 else
3413 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
3414 }
3415}
3416
3417static void bond_remove_proc_entry(struct bonding *bond)
3418{
3419 struct net_device *bond_dev = bond->dev;
3420 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3421
3422 if (bn->proc_dir && bond->proc_entry) {
3423 remove_proc_entry(bond->proc_file_name, bn->proc_dir);
3424 memset(bond->proc_file_name, 0, IFNAMSIZ);
3425 bond->proc_entry = NULL;
3426 }
3427}
3428
3429/* Create the bonding directory under /proc/net, if it doesn't exist yet.
3430 * Caller must hold rtnl_lock.
3431 */
3432static void __net_init bond_create_proc_dir(struct bond_net *bn)
3433{
3434 if (!bn->proc_dir) {
3435 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
3436 if (!bn->proc_dir)
3437 pr_warning("Warning: cannot create /proc/net/%s\n",
3438 DRV_NAME);
3439 }
3440}
3441
3442/* Destroy the bonding directory under /proc/net, if empty.
3443 * Caller must hold rtnl_lock.
3444 */
3445static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
3446{
3447 if (bn->proc_dir) {
3448 remove_proc_entry(DRV_NAME, bn->net->proc_net);
3449 bn->proc_dir = NULL;
3450 }
3451}
3452
3453#else /* !CONFIG_PROC_FS */
3454
3455static void bond_create_proc_entry(struct bonding *bond)
3456{
3457}
3458
3459static void bond_remove_proc_entry(struct bonding *bond)
3460{
3461}
3462
3463static inline void bond_create_proc_dir(struct bond_net *bn)
3464{
3465}
3466
3467static inline void bond_destroy_proc_dir(struct bond_net *bn)
3468{
3469}
3470
3471#endif /* CONFIG_PROC_FS */
3472
3473
3474/*-------------------------- netdev event handling --------------------------*/ 3291/*-------------------------- netdev event handling --------------------------*/
3475 3292
3476/* 3293/*
@@ -4327,7 +4144,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
4327 bond_for_each_slave_from(bond, slave, i, start_at) { 4144 bond_for_each_slave_from(bond, slave, i, start_at) {
4328 if (IS_UP(slave->dev) && 4145 if (IS_UP(slave->dev) &&
4329 (slave->link == BOND_LINK_UP) && 4146 (slave->link == BOND_LINK_UP) &&
4330 (slave->state == BOND_STATE_ACTIVE)) { 4147 bond_is_active_slave(slave)) {
4331 res = bond_dev_queue_xmit(bond, skb, slave->dev); 4148 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4332 break; 4149 break;
4333 } 4150 }
@@ -4404,7 +4221,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4404 bond_for_each_slave_from(bond, slave, i, start_at) { 4221 bond_for_each_slave_from(bond, slave, i, start_at) {
4405 if (IS_UP(slave->dev) && 4222 if (IS_UP(slave->dev) &&
4406 (slave->link == BOND_LINK_UP) && 4223 (slave->link == BOND_LINK_UP) &&
4407 (slave->state == BOND_STATE_ACTIVE)) { 4224 bond_is_active_slave(slave)) {
4408 res = bond_dev_queue_xmit(bond, skb, slave->dev); 4225 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4409 break; 4226 break;
4410 } 4227 }
@@ -4445,7 +4262,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4445 bond_for_each_slave_from(bond, slave, i, start_at) { 4262 bond_for_each_slave_from(bond, slave, i, start_at) {
4446 if (IS_UP(slave->dev) && 4263 if (IS_UP(slave->dev) &&
4447 (slave->link == BOND_LINK_UP) && 4264 (slave->link == BOND_LINK_UP) &&
4448 (slave->state == BOND_STATE_ACTIVE)) { 4265 bond_is_active_slave(slave)) {
4449 if (tx_dev) { 4266 if (tx_dev) {
4450 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 4267 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4451 if (!skb2) { 4268 if (!skb2) {
@@ -4533,11 +4350,18 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4533{ 4350{
4534 /* 4351 /*
4535 * This helper function exists to help dev_pick_tx get the correct 4352 * This helper function exists to help dev_pick_tx get the correct
4536 * destination queue. Using a helper function skips the a call to 4353 * destination queue. Using a helper function skips a call to
4537 * skb_tx_hash and will put the skbs in the queue we expect on their 4354 * skb_tx_hash and will put the skbs in the queue we expect on their
4538 * way down to the bonding driver. 4355 * way down to the bonding driver.
4539 */ 4356 */
4540 return skb->queue_mapping; 4357 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4358
4359 if (unlikely(txq >= dev->real_num_tx_queues)) {
4360 do
4361 txq -= dev->real_num_tx_queues;
4362 while (txq >= dev->real_num_tx_queues);
4363 }
4364 return txq;
4541} 4365}
4542 4366
4543static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 4367static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
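The new bond_select_queue() body above replays the recorded rx queue as the tx queue, folding it into range when it exceeds real_num_tx_queues. A standalone sketch of that folding (hypothetical helper name; the driver's subtraction loop is equivalent to a modulo, chosen because the overshoot is normally small):

#include <assert.h>
#include <stdio.h>

/* Hypothetical helper mirroring the new bond_select_queue() clamp. */
static unsigned short fold_queue(unsigned short txq,
				 unsigned short real_num_tx_queues)
{
	while (txq >= real_num_tx_queues)
		txq -= real_num_tx_queues;
	return txq;
}

int main(void)
{
	assert(fold_queue(0, 4) == 0);
	assert(fold_queue(5, 4) == 1);  /* 5 folded into [0, 3] */
	assert(fold_queue(9, 4) == 1);  /* equivalent to 9 % 4 */
	printf("queue folding ok\n");
	return 0;
}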
@@ -4599,11 +4423,9 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
4599 case BOND_MODE_BROADCAST: 4423 case BOND_MODE_BROADCAST:
4600 break; 4424 break;
4601 case BOND_MODE_8023AD: 4425 case BOND_MODE_8023AD:
4602 bond_set_master_3ad_flags(bond);
4603 bond_set_xmit_hash_policy(bond); 4426 bond_set_xmit_hash_policy(bond);
4604 break; 4427 break;
4605 case BOND_MODE_ALB: 4428 case BOND_MODE_ALB:
4606 bond_set_master_alb_flags(bond);
4607 /* FALLTHRU */ 4429 /* FALLTHRU */
4608 case BOND_MODE_TLB: 4430 case BOND_MODE_TLB:
4609 break; 4431 break;
@@ -4650,9 +4472,12 @@ static const struct net_device_ops bond_netdev_ops = {
4650 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4472 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4651 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4473 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4652#ifdef CONFIG_NET_POLL_CONTROLLER 4474#ifdef CONFIG_NET_POLL_CONTROLLER
4475 .ndo_netpoll_setup = bond_netpoll_setup,
4653 .ndo_netpoll_cleanup = bond_netpoll_cleanup, 4476 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
4654 .ndo_poll_controller = bond_poll_controller, 4477 .ndo_poll_controller = bond_poll_controller,
4655#endif 4478#endif
4479 .ndo_add_slave = bond_enslave,
4480 .ndo_del_slave = bond_release,
4656}; 4481};
4657 4482
4658static void bond_destructor(struct net_device *bond_dev) 4483static void bond_destructor(struct net_device *bond_dev)
@@ -4691,9 +4516,6 @@ static void bond_setup(struct net_device *bond_dev)
4691 bond_dev->priv_flags |= IFF_BONDING; 4516 bond_dev->priv_flags |= IFF_BONDING;
4692 bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 4517 bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
4693 4518
4694 if (bond->params.arp_interval)
4695 bond_dev->priv_flags |= IFF_MASTER_ARPMON;
4696
4697 /* At first, we block adding VLANs. That's the only way to 4519 /* At first, we block adding VLANs. That's the only way to
4698 * prevent problems that occur when adding VLANs over an 4520 * prevent problems that occur when adding VLANs over an
4699 * empty bond. The block will be removed once non-challenged 4521 * empty bond. The block will be removed once non-challenged
@@ -5162,8 +4984,6 @@ static int bond_init(struct net_device *bond_dev)
5162 4984
5163 bond_set_lockdep_class(bond_dev); 4985 bond_set_lockdep_class(bond_dev);
5164 4986
5165 netif_carrier_off(bond_dev);
5166
5167 bond_create_proc_entry(bond); 4987 bond_create_proc_entry(bond);
5168 list_add_tail(&bond->bond_list, &bn->dev_list); 4988 list_add_tail(&bond->bond_list, &bn->dev_list);
5169 4989
@@ -5233,6 +5053,8 @@ int bond_create(struct net *net, const char *name)
5233 5053
5234 res = register_netdevice(bond_dev); 5054 res = register_netdevice(bond_dev);
5235 5055
5056 netif_carrier_off(bond_dev);
5057
5236out: 5058out:
5237 rtnl_unlock(); 5059 rtnl_unlock();
5238 if (res < 0) 5060 if (res < 0)
@@ -5271,7 +5093,7 @@ static int __init bonding_init(void)
5271 int i; 5093 int i;
5272 int res; 5094 int res;
5273 5095
5274 pr_info("%s", version); 5096 pr_info("%s", bond_version);
5275 5097
5276 res = bond_check_params(&bonding_defaults); 5098 res = bond_check_params(&bonding_defaults);
5277 if (res) 5099 if (res)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
new file mode 100644
index 00000000000..c32ff55a34c
--- /dev/null
+++ b/drivers/net/bonding/bond_procfs.c
@@ -0,0 +1,275 @@
1#include <linux/proc_fs.h>
2#include <net/net_namespace.h>
3#include <net/netns/generic.h>
4#include "bonding.h"
5
6
7extern const char *bond_mode_name(int mode);
8
9static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
10 __acquires(RCU)
11 __acquires(&bond->lock)
12{
13 struct bonding *bond = seq->private;
14 loff_t off = 0;
15 struct slave *slave;
16 int i;
17
18 /* make sure the bond won't be taken away */
19 rcu_read_lock();
20 read_lock(&bond->lock);
21
22 if (*pos == 0)
23 return SEQ_START_TOKEN;
24
25 bond_for_each_slave(bond, slave, i) {
26 if (++off == *pos)
27 return slave;
28 }
29
30 return NULL;
31}
32
33static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
34{
35 struct bonding *bond = seq->private;
36 struct slave *slave = v;
37
38 ++*pos;
39 if (v == SEQ_START_TOKEN)
40 return bond->first_slave;
41
42 slave = slave->next;
43
44 return (slave == bond->first_slave) ? NULL : slave;
45}
46
47static void bond_info_seq_stop(struct seq_file *seq, void *v)
48 __releases(&bond->lock)
49 __releases(RCU)
50{
51 struct bonding *bond = seq->private;
52
53 read_unlock(&bond->lock);
54 rcu_read_unlock();
55}
56
57static void bond_info_show_master(struct seq_file *seq)
58{
59 struct bonding *bond = seq->private;
60 struct slave *curr;
61 int i;
62
63 read_lock(&bond->curr_slave_lock);
64 curr = bond->curr_active_slave;
65 read_unlock(&bond->curr_slave_lock);
66
67 seq_printf(seq, "Bonding Mode: %s",
68 bond_mode_name(bond->params.mode));
69
70 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
71 bond->params.fail_over_mac)
72 seq_printf(seq, " (fail_over_mac %s)",
73 fail_over_mac_tbl[bond->params.fail_over_mac].modename);
74
75 seq_printf(seq, "\n");
76
77 if (bond->params.mode == BOND_MODE_XOR ||
78 bond->params.mode == BOND_MODE_8023AD) {
79 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
80 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
81 bond->params.xmit_policy);
82 }
83
84 if (USES_PRIMARY(bond->params.mode)) {
85 seq_printf(seq, "Primary Slave: %s",
86 (bond->primary_slave) ?
87 bond->primary_slave->dev->name : "None");
88 if (bond->primary_slave)
89 seq_printf(seq, " (primary_reselect %s)",
90 pri_reselect_tbl[bond->params.primary_reselect].modename);
91
92 seq_printf(seq, "\nCurrently Active Slave: %s\n",
93 (curr) ? curr->dev->name : "None");
94 }
95
96 seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
97 "up" : "down");
98 seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
99 seq_printf(seq, "Up Delay (ms): %d\n",
100 bond->params.updelay * bond->params.miimon);
101 seq_printf(seq, "Down Delay (ms): %d\n",
102 bond->params.downdelay * bond->params.miimon);
103
104
105 /* ARP information */
106 if (bond->params.arp_interval > 0) {
107 int printed = 0;
108 seq_printf(seq, "ARP Polling Interval (ms): %d\n",
109 bond->params.arp_interval);
110
111 seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
112
113 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
114 if (!bond->params.arp_targets[i])
115 break;
116 if (printed)
117 seq_printf(seq, ",");
118 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
119 printed = 1;
120 }
121 seq_printf(seq, "\n");
122 }
123
124 if (bond->params.mode == BOND_MODE_8023AD) {
125 struct ad_info ad_info;
126
127 seq_puts(seq, "\n802.3ad info\n");
128 seq_printf(seq, "LACP rate: %s\n",
129 (bond->params.lacp_fast) ? "fast" : "slow");
130 seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
131 ad_select_tbl[bond->params.ad_select].modename);
132
133 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
134 seq_printf(seq, "bond %s has no active aggregator\n",
135 bond->dev->name);
136 } else {
137 seq_printf(seq, "Active Aggregator Info:\n");
138
139 seq_printf(seq, "\tAggregator ID: %d\n",
140 ad_info.aggregator_id);
141 seq_printf(seq, "\tNumber of ports: %d\n",
142 ad_info.ports);
143 seq_printf(seq, "\tActor Key: %d\n",
144 ad_info.actor_key);
145 seq_printf(seq, "\tPartner Key: %d\n",
146 ad_info.partner_key);
147 seq_printf(seq, "\tPartner Mac Address: %pM\n",
148 ad_info.partner_system);
149 }
150 }
151}
152
153static void bond_info_show_slave(struct seq_file *seq,
154 const struct slave *slave)
155{
156 struct bonding *bond = seq->private;
157
158 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
159 seq_printf(seq, "MII Status: %s\n",
160 (slave->link == BOND_LINK_UP) ? "up" : "down");
161 seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
162 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
163 seq_printf(seq, "Link Failure Count: %u\n",
164 slave->link_failure_count);
165
166 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
167
168 if (bond->params.mode == BOND_MODE_8023AD) {
169 const struct aggregator *agg
170 = SLAVE_AD_INFO(slave).port.aggregator;
171
172 if (agg)
173 seq_printf(seq, "Aggregator ID: %d\n",
174 agg->aggregator_identifier);
175 else
176 seq_puts(seq, "Aggregator ID: N/A\n");
177 }
178 seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
179}
180
181static int bond_info_seq_show(struct seq_file *seq, void *v)
182{
183 if (v == SEQ_START_TOKEN) {
184 seq_printf(seq, "%s\n", bond_version);
185 bond_info_show_master(seq);
186 } else
187 bond_info_show_slave(seq, v);
188
189 return 0;
190}
191
192static const struct seq_operations bond_info_seq_ops = {
193 .start = bond_info_seq_start,
194 .next = bond_info_seq_next,
195 .stop = bond_info_seq_stop,
196 .show = bond_info_seq_show,
197};
198
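The three callbacks above traverse the bond's circular slave list exactly once, emitting SEQ_START_TOKEN first so the master summary prints before any slave. A compilable userspace model of the same walk, with illustrative names and no kernel API:

#include <stdio.h>

#define START_TOKEN ((struct slave *)1)  /* stands in for SEQ_START_TOKEN */

struct slave {
	const char *name;
	struct slave *next;
};

/* Mirrors bond_info_seq_next(): the walk ends when it wraps back
 * to the first slave. */
static struct slave *walk_next(struct slave *first, struct slave *v)
{
	struct slave *s = (v == START_TOKEN) ? first : v->next;

	return (v != START_TOKEN && s == first) ? NULL : s;
}

int main(void)
{
	struct slave b = { "eth1", NULL };
	struct slave a = { "eth0", &b };

	b.next = &a;              /* circular list, as in the driver */

	for (struct slave *v = START_TOKEN; v; v = walk_next(&a, v)) {
		if (v == START_TOKEN)
			printf("== master summary ==\n");
		else
			printf("slave: %s\n", v->name);
	}
	return 0;
}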
199static int bond_info_open(struct inode *inode, struct file *file)
200{
201 struct seq_file *seq;
202 struct proc_dir_entry *proc;
203 int res;
204
205 res = seq_open(file, &bond_info_seq_ops);
206 if (!res) {
207 /* recover the pointer buried in proc_dir_entry data */
208 seq = file->private_data;
209 proc = PDE(inode);
210 seq->private = proc->data;
211 }
212
213 return res;
214}
215
216static const struct file_operations bond_info_fops = {
217 .owner = THIS_MODULE,
218 .open = bond_info_open,
219 .read = seq_read,
220 .llseek = seq_lseek,
221 .release = seq_release,
222};
223
224void bond_create_proc_entry(struct bonding *bond)
225{
226 struct net_device *bond_dev = bond->dev;
227 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
228
229 if (bn->proc_dir) {
230 bond->proc_entry = proc_create_data(bond_dev->name,
231 S_IRUGO, bn->proc_dir,
232 &bond_info_fops, bond);
233 if (bond->proc_entry == NULL)
234 pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
235 DRV_NAME, bond_dev->name);
236 else
237 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
238 }
239}
240
241void bond_remove_proc_entry(struct bonding *bond)
242{
243 struct net_device *bond_dev = bond->dev;
244 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
245
246 if (bn->proc_dir && bond->proc_entry) {
247 remove_proc_entry(bond->proc_file_name, bn->proc_dir);
248 memset(bond->proc_file_name, 0, IFNAMSIZ);
249 bond->proc_entry = NULL;
250 }
251}
252
253/* Create the bonding directory under /proc/net, if it doesn't exist yet.
254 * Caller must hold rtnl_lock.
255 */
256void __net_init bond_create_proc_dir(struct bond_net *bn)
257{
258 if (!bn->proc_dir) {
259 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
260 if (!bn->proc_dir)
261 pr_warning("Warning: cannot create /proc/net/%s\n",
262 DRV_NAME);
263 }
264}
265
266/* Destroy the bonding directory under /proc/net, if empty.
267 * Caller must hold rtnl_lock.
268 */
269void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
270{
271 if (bn->proc_dir) {
272 remove_proc_entry(DRV_NAME, bn->net->proc_net);
273 bn->proc_dir = NULL;
274 }
275}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174c538..de87aea6d01 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -118,7 +118,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
118 pr_info("%s is being created...\n", ifname); 118 pr_info("%s is being created...\n", ifname);
119 rv = bond_create(net, ifname); 119 rv = bond_create(net, ifname);
120 if (rv) { 120 if (rv) {
121 pr_info("Bond creation failed.\n"); 121 if (rv == -EEXIST)
122 pr_info("%s already exists.\n", ifname);
123 else
124 pr_info("%s creation failed.\n", ifname);
122 res = rv; 125 res = rv;
123 } 126 }
124 } else if (command[0] == '-') { 127 } else if (command[0] == '-') {
@@ -322,11 +325,6 @@ static ssize_t bonding_store_mode(struct device *d,
322 ret = -EINVAL; 325 ret = -EINVAL;
323 goto out; 326 goto out;
324 } 327 }
325 if (bond->params.mode == BOND_MODE_8023AD)
326 bond_unset_master_3ad_flags(bond);
327
328 if (bond->params.mode == BOND_MODE_ALB)
329 bond_unset_master_alb_flags(bond);
330 328
331 bond->params.mode = new_value; 329 bond->params.mode = new_value;
332 bond_set_mode_ops(bond, bond->params.mode); 330 bond_set_mode_ops(bond, bond->params.mode);
@@ -527,8 +525,6 @@ static ssize_t bonding_store_arp_interval(struct device *d,
527 pr_info("%s: Setting ARP monitoring interval to %d.\n", 525 pr_info("%s: Setting ARP monitoring interval to %d.\n",
528 bond->dev->name, new_value); 526 bond->dev->name, new_value);
529 bond->params.arp_interval = new_value; 527 bond->params.arp_interval = new_value;
530 if (bond->params.arp_interval)
531 bond->dev->priv_flags |= IFF_MASTER_ARPMON;
532 if (bond->params.miimon) { 528 if (bond->params.miimon) {
533 pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", 529 pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
534 bond->dev->name, bond->dev->name); 530 bond->dev->name, bond->dev->name);
@@ -1004,7 +1000,6 @@ static ssize_t bonding_store_miimon(struct device *d,
1004 pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", 1000 pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
1005 bond->dev->name); 1001 bond->dev->name);
1006 bond->params.arp_interval = 0; 1002 bond->params.arp_interval = 0;
1007 bond->dev->priv_flags &= ~IFF_MASTER_ARPMON;
1008 if (bond->params.arp_validate) { 1003 if (bond->params.arp_validate) {
1009 bond_unregister_arp(bond); 1004 bond_unregister_arp(bond);
1010 bond->params.arp_validate = 1005 bond->params.arp_validate =
@@ -1198,7 +1193,7 @@ static ssize_t bonding_store_carrier(struct device *d,
1198 bond->dev->name, new_value); 1193 bond->dev->name, new_value);
1199 } 1194 }
1200out: 1195out:
1201 return count; 1196 return ret;
1202} 1197}
1203static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, 1198static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
1204 bonding_show_carrier, bonding_store_carrier); 1199 bonding_show_carrier, bonding_store_carrier);
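The use_carrier fix above changes the ->store() return from count to ret, so a failed parse now surfaces as an error to the writing process instead of being silently accepted. A standalone model of that convention (hypothetical names; a sysfs store returns the consumed byte count on success and a negative errno on failure):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static long store_use_carrier(const char *buf, long count, int *use_carrier)
{
	char *end;
	long val = strtol(buf, &end, 10);

	if (end == buf || (val != 0 && val != 1))
		return -EINVAL;   /* now propagated instead of swallowed */

	*use_carrier = (int)val;
	return count;             /* bytes consumed on success */
}

int main(void)
{
	int uc = 0;

	printf("%ld\n", store_use_carrier("1\n", 2, &uc)); /* 2: accepted */
	printf("%ld\n", store_use_carrier("x\n", 2, &uc)); /* -22: -EINVAL */
	return 0;
}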
@@ -1587,15 +1582,15 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1587 } 1582 }
1588 1583
1589 bond_for_each_slave(bond, slave, i) { 1584 bond_for_each_slave(bond, slave, i) {
1590 if (slave->state == BOND_STATE_BACKUP) { 1585 if (!bond_is_active_slave(slave)) {
1591 if (new_value) 1586 if (new_value)
1592 slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE; 1587 slave->inactive = 0;
1593 else 1588 else
1594 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; 1589 slave->inactive = 1;
1595 } 1590 }
1596 } 1591 }
1597out: 1592out:
1598 return count; 1593 return ret;
1599} 1594}
1600static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, 1595static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
1601 bonding_show_slaves_active, bonding_store_slaves_active); 1596 bonding_show_slaves_active, bonding_store_slaves_active);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 31fe980e4e2..90736cb4d97 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -20,6 +20,7 @@
20#include <linux/if_bonding.h> 20#include <linux/if_bonding.h>
21#include <linux/cpumask.h> 21#include <linux/cpumask.h>
22#include <linux/in6.h> 22#include <linux/in6.h>
23#include <linux/netpoll.h>
23#include "bond_3ad.h" 24#include "bond_3ad.h"
24#include "bond_alb.h" 25#include "bond_alb.h"
25 26
@@ -28,6 +29,8 @@
28#define DRV_NAME "bonding" 29#define DRV_NAME "bonding"
29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 30#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
30 31
32#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
33
31#define BOND_MAX_ARP_TARGETS 16 34#define BOND_MAX_ARP_TARGETS 16
32 35
33#define IS_UP(dev) \ 36#define IS_UP(dev) \
@@ -52,7 +55,7 @@
52 (((slave)->dev->flags & IFF_UP) && \ 55 (((slave)->dev->flags & IFF_UP) && \
53 netif_running((slave)->dev) && \ 56 netif_running((slave)->dev) && \
54 ((slave)->link == BOND_LINK_UP) && \ 57 ((slave)->link == BOND_LINK_UP) && \
55 ((slave)->state == BOND_STATE_ACTIVE)) 58 bond_is_active_slave(slave))
56 59
57 60
58#define USES_PRIMARY(mode) \ 61#define USES_PRIMARY(mode) \
@@ -132,7 +135,7 @@ static inline void unblock_netpoll_tx(void)
132 135
133static inline int is_netpoll_tx_blocked(struct net_device *dev) 136static inline int is_netpoll_tx_blocked(struct net_device *dev)
134{ 137{
135 if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) 138 if (unlikely(netpoll_tx_running(dev)))
136 return atomic_read(&netpoll_block_tx); 139 return atomic_read(&netpoll_block_tx);
137 return 0; 140 return 0;
138} 141}
@@ -184,12 +187,15 @@ struct slave {
184 struct net_device *dev; /* first - useful for panic debug */ 187 struct net_device *dev; /* first - useful for panic debug */
185 struct slave *next; 188 struct slave *next;
186 struct slave *prev; 189 struct slave *prev;
190 struct bonding *bond; /* our master */
187 int delay; 191 int delay;
188 unsigned long jiffies; 192 unsigned long jiffies;
189 unsigned long last_arp_rx; 193 unsigned long last_arp_rx;
190 s8 link; /* one of BOND_LINK_XXXX */ 194 s8 link; /* one of BOND_LINK_XXXX */
191 s8 new_link; 195 s8 new_link;
192 s8 state; /* one of BOND_STATE_XXXX */ 196 u8 backup:1, /* indicates backup slave. Value corresponds with
197 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
198 inactive:1; /* indicates inactive slave */
193 u32 original_mtu; 199 u32 original_mtu;
194 u32 link_failure_count; 200 u32 link_failure_count;
195 u8 perm_hwaddr[ETH_ALEN]; 201 u8 perm_hwaddr[ETH_ALEN];
@@ -198,6 +204,9 @@ struct slave {
198 u16 queue_id; 204 u16 queue_id;
199 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ 205 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
200 struct tlb_slave_info tlb_info; 206 struct tlb_slave_info tlb_info;
207#ifdef CONFIG_NET_POLL_CONTROLLER
208 struct netpoll *np;
209#endif
201}; 210};
202 211
203/* 212/*
@@ -260,12 +269,16 @@ struct bonding {
260#endif /* CONFIG_DEBUG_FS */ 269#endif /* CONFIG_DEBUG_FS */
261}; 270};
262 271
272#define bond_slave_get_rcu(dev) \
273 ((struct slave *) rcu_dereference(dev->rx_handler_data))
274
263/** 275/**
264 * Returns NULL if the net_device does not belong to any of the bond's slaves 276 * Returns NULL if the net_device does not belong to any of the bond's slaves
265 * 277 *
266 * Caller must hold bond lock for read 278 * Caller must hold bond lock for read
267 */ 279 */
268static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) 280static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
281 struct net_device *slave_dev)
269{ 282{
270 struct slave *slave = NULL; 283 struct slave *slave = NULL;
271 int i; 284 int i;
@@ -276,7 +289,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
276 } 289 }
277 } 290 }
278 291
279 return 0; 292 return NULL;
280} 293}
281 294
282static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 295static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,6 +307,26 @@ static inline bool bond_is_lb(const struct bonding *bond)
294 bond->params.mode == BOND_MODE_ALB); 307 bond->params.mode == BOND_MODE_ALB);
295} 308}
296 309
310static inline void bond_set_active_slave(struct slave *slave)
311{
312 slave->backup = 0;
313}
314
315static inline void bond_set_backup_slave(struct slave *slave)
316{
317 slave->backup = 1;
318}
319
320static inline int bond_slave_state(struct slave *slave)
321{
322 return slave->backup;
323}
324
325static inline bool bond_is_active_slave(struct slave *slave)
326{
327 return !bond_slave_state(slave);
328}
329
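These helpers replace direct slave->state comparisons with accessors over a single backup bit. A compilable sketch of the same idea (field widths and names mirror the struct above, but this is a model, not the driver):

#include <assert.h>
#include <stdbool.h>

struct slave {
	unsigned char backup:1;   /* 0 = active, 1 = backup */
	unsigned char inactive:1; /* replaces the old IFF_SLAVE_INACTIVE flag */
};

static void set_active_slave(struct slave *s) { s->backup = 0; }
static void set_backup_slave(struct slave *s) { s->backup = 1; }

static bool is_active_slave(const struct slave *s)
{
	return !s->backup;
}

int main(void)
{
	struct slave s = { .backup = 1 };

	assert(!is_active_slave(&s));
	set_active_slave(&s);     /* e.g. link came back up */
	assert(is_active_slave(&s));
	set_backup_slave(&s);
	assert(!is_active_slave(&s));
	return 0;
}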
297#define BOND_PRI_RESELECT_ALWAYS 0 330#define BOND_PRI_RESELECT_ALWAYS 0
298#define BOND_PRI_RESELECT_BETTER 1 331#define BOND_PRI_RESELECT_BETTER 1
299#define BOND_PRI_RESELECT_FAILURE 2 332#define BOND_PRI_RESELECT_FAILURE 2
@@ -311,7 +344,7 @@ static inline bool bond_is_lb(const struct bonding *bond)
311static inline int slave_do_arp_validate(struct bonding *bond, 344static inline int slave_do_arp_validate(struct bonding *bond,
312 struct slave *slave) 345 struct slave *slave)
313{ 346{
314 return bond->params.arp_validate & (1 << slave->state); 347 return bond->params.arp_validate & (1 << bond_slave_state(slave));
315} 348}
316 349
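slave_do_arp_validate() now indexes the arp_validate bitmask by bond_slave_state(): with active = 0 and backup = 1, bit 0 enables validation on the active slave and bit 1 on backups. A standalone check of that arithmetic (the state values are assumed as stated):

#include <stdio.h>

enum { STATE_ACTIVE = 0, STATE_BACKUP = 1 };

static int do_arp_validate(unsigned int arp_validate, int slave_state)
{
	return arp_validate & (1u << slave_state);
}

int main(void)
{
	unsigned int mask = 1u << STATE_BACKUP;  /* validate backups only */

	printf("active slave validated: %d\n",
	       !!do_arp_validate(mask, STATE_ACTIVE)); /* 0 */
	printf("backup slave validated: %d\n",
	       !!do_arp_validate(mask, STATE_BACKUP)); /* 1 */
	return 0;
}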
317static inline unsigned long slave_last_rx(struct bonding *bond, 350static inline unsigned long slave_last_rx(struct bonding *bond,
@@ -323,41 +356,40 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
323 return slave->dev->last_rx; 356 return slave->dev->last_rx;
324} 357}
325 358
326static inline void bond_set_slave_inactive_flags(struct slave *slave) 359#ifdef CONFIG_NET_POLL_CONTROLLER
360static inline void bond_netpoll_send_skb(const struct slave *slave,
361 struct sk_buff *skb)
327{ 362{
328 struct bonding *bond = netdev_priv(slave->dev->master); 363 struct netpoll *np = slave->np;
329 if (!bond_is_lb(bond))
330 slave->state = BOND_STATE_BACKUP;
331 if (!bond->params.all_slaves_active)
332 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
333 if (slave_do_arp_validate(bond, slave))
334 slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
335}
336 364
337static inline void bond_set_slave_active_flags(struct slave *slave) 365 if (np)
338{ 366 netpoll_send_skb(np, skb);
339 slave->state = BOND_STATE_ACTIVE;
340 slave->dev->priv_flags &= ~(IFF_SLAVE_INACTIVE | IFF_SLAVE_NEEDARP);
341} 367}
342 368#else
343static inline void bond_set_master_3ad_flags(struct bonding *bond) 369static inline void bond_netpoll_send_skb(const struct slave *slave,
370 struct sk_buff *skb)
344{ 371{
345 bond->dev->priv_flags |= IFF_MASTER_8023AD;
346} 372}
373#endif
347 374
348static inline void bond_unset_master_3ad_flags(struct bonding *bond) 375static inline void bond_set_slave_inactive_flags(struct slave *slave)
349{ 376{
350 bond->dev->priv_flags &= ~IFF_MASTER_8023AD; 377 struct bonding *bond = netdev_priv(slave->dev->master);
378 if (!bond_is_lb(bond))
379 bond_set_backup_slave(slave);
380 if (!bond->params.all_slaves_active)
381 slave->inactive = 1;
351} 382}
352 383
353static inline void bond_set_master_alb_flags(struct bonding *bond) 384static inline void bond_set_slave_active_flags(struct slave *slave)
354{ 385{
355 bond->dev->priv_flags |= IFF_MASTER_ALB; 386 bond_set_active_slave(slave);
387 slave->inactive = 0;
356} 388}
357 389
358static inline void bond_unset_master_alb_flags(struct bonding *bond) 390static inline bool bond_is_slave_inactive(struct slave *slave)
359{ 391{
360 bond->dev->priv_flags &= ~IFF_MASTER_ALB; 392 return slave->inactive;
361} 393}
362 394
363struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 395struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
@@ -393,6 +425,30 @@ struct bond_net {
393#endif 425#endif
394}; 426};
395 427
428#ifdef CONFIG_PROC_FS
429void bond_create_proc_entry(struct bonding *bond);
430void bond_remove_proc_entry(struct bonding *bond);
431void bond_create_proc_dir(struct bond_net *bn);
432void bond_destroy_proc_dir(struct bond_net *bn);
433#else
434static inline void bond_create_proc_entry(struct bonding *bond)
435{
436}
437
438static inline void bond_remove_proc_entry(struct bonding *bond)
439{
440}
441
442static inline void bond_create_proc_dir(struct bond_net *bn)
443{
444}
445
446static inline void bond_destroy_proc_dir(struct bond_net *bn)
447{
448}
449#endif
450
451
396/* exported from bond_main.c */ 452/* exported from bond_main.c */
397extern int bond_net_id; 453extern int bond_net_id;
398extern const struct bond_parm_tbl bond_lacp_tbl[]; 454extern const struct bond_parm_tbl bond_lacp_tbl[];
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index b38d987da67..9560b9d624b 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,6 +1,4 @@
1ifeq ($(CONFIG_CAIF_DEBUG),y) 1ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
2EXTRA_CFLAGS += -DDEBUG
3endif
4 2
5# Serial interface 3# Serial interface
6obj-$(CONFIG_CAIF_TTY) += caif_serial.o 4obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5a9db60ade..1d699e3df54 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@ config CAN_SLCAN
23 23
24 As only the sending and receiving of CAN frames is implemented, this 24 As only the sending and receiving of CAN frames is implemented, this
25 driver should work with the (serial/USB) CAN hardware from: 25 driver should work with the (serial/USB) CAN hardware from:
26 www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de 26 www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
27 27
28 Userspace tools to attach the SLCAN line discipline (slcan_attach, 28 Userspace tools to attach the SLCAN line discipline (slcan_attach,
29 slcand) can be found in the can-utils at the SocketCAN SVN, see 29 slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -115,8 +115,12 @@ source "drivers/net/can/mscan/Kconfig"
115 115
116source "drivers/net/can/sja1000/Kconfig" 116source "drivers/net/can/sja1000/Kconfig"
117 117
118source "drivers/net/can/c_can/Kconfig"
119
118source "drivers/net/can/usb/Kconfig" 120source "drivers/net/can/usb/Kconfig"
119 121
122source "drivers/net/can/softing/Kconfig"
123
120config CAN_DEBUG_DEVICES 124config CAN_DEBUG_DEVICES
121 bool "CAN devices debugging messages" 125 bool "CAN devices debugging messages"
122 depends on CAN 126 depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 07ca159ba3f..24ebfe8d758 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -9,9 +9,11 @@ obj-$(CONFIG_CAN_DEV) += can-dev.o
9can-dev-y := dev.o 9can-dev-y := dev.o
10 10
11obj-y += usb/ 11obj-y += usb/
12obj-y += softing/
12 13
13obj-$(CONFIG_CAN_SJA1000) += sja1000/ 14obj-$(CONFIG_CAN_SJA1000) += sja1000/
14obj-$(CONFIG_CAN_MSCAN) += mscan/ 15obj-$(CONFIG_CAN_MSCAN) += mscan/
16obj-$(CONFIG_CAN_C_CAN) += c_can/
15obj-$(CONFIG_CAN_AT91) += at91_can.o 17obj-$(CONFIG_CAN_AT91) += at91_can.o
16obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 18obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
17obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 19obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d06f7e..57d2ffbbb43 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
2 * at91_can.c - CAN network driver for AT91 SoC CAN controller 2 * at91_can.c - CAN network driver for AT91 SoC CAN controller
3 * 3 *
4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> 4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
5 * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de> 5 * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
6 * 6 *
7 * This software may be distributed under the terms of the GNU General 7 * This software may be distributed under the terms of the GNU General
8 * Public License ("GPL") version 2 as distributed in the 'COPYING' 8 * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/netdevice.h> 31#include <linux/netdevice.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/rtnetlink.h>
33#include <linux/skbuff.h> 34#include <linux/skbuff.h>
34#include <linux/spinlock.h> 35#include <linux/spinlock.h>
35#include <linux/string.h> 36#include <linux/string.h>
@@ -40,22 +41,23 @@
40 41
41#include <mach/board.h> 42#include <mach/board.h>
42 43
43#define AT91_NAPI_WEIGHT 12 44#define AT91_NAPI_WEIGHT 11
44 45
45/* 46/*
46 * RX/TX Mailbox split 47 * RX/TX Mailbox split
47 * don't dare to touch 48 * don't dare to touch
48 */ 49 */
49#define AT91_MB_RX_NUM 12 50#define AT91_MB_RX_NUM 11
50#define AT91_MB_TX_SHIFT 2 51#define AT91_MB_TX_SHIFT 2
51 52
52#define AT91_MB_RX_FIRST 0 53#define AT91_MB_RX_FIRST 1
53#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) 54#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
54 55
55#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) 56#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
56#define AT91_MB_RX_SPLIT 8 57#define AT91_MB_RX_SPLIT 8
57#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) 58#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
58#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT)) 59#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
60 ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
59 61
60#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) 62#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
61#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) 63#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@ struct at91_priv {
168 170
169 struct clk *clk; 171 struct clk *clk;
170 struct at91_can_data *pdata; 172 struct at91_can_data *pdata;
173
174 canid_t mb0_id;
171}; 175};
172 176
173static struct can_bittiming_const at91_bittiming_const = { 177static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
220 set_mb_mode_prio(priv, mb, mode, 0); 224 set_mb_mode_prio(priv, mb, mode, 0);
221} 225}
222 226
227static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
228{
229 u32 reg_mid;
230
231 if (can_id & CAN_EFF_FLAG)
232 reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
233 else
234 reg_mid = (can_id & CAN_SFF_MASK) << 18;
235
236 return reg_mid;
237}
238
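The helper factored out above builds the AT91 MID register image: extended IDs keep their 29 bits plus the MIDE flag, standard IDs are shifted into bits 28..18. A standalone check of the encoding; the constant values are assumed to mirror linux/can.h and the driver's defines:

#include <assert.h>
#include <stdint.h>

#define CAN_EFF_FLAG  0x80000000u  /* extended frame format flag */
#define CAN_EFF_MASK  0x1fffffffu  /* 29-bit extended ID */
#define CAN_SFF_MASK  0x000007ffu  /* 11-bit standard ID */
#define AT91_MID_MIDE (1u << 29)   /* extended-ID bit in the MID register */

static uint32_t can_id_to_reg_mid(uint32_t can_id)
{
	if (can_id & CAN_EFF_FLAG)
		return (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
	return (can_id & CAN_SFF_MASK) << 18;
}

int main(void)
{
	/* a standard ID lands in bits 28..18 */
	assert(can_id_to_reg_mid(0x7ff) == 0x7ffu << 18);
	/* an extended ID keeps its 29 bits and gains MIDE */
	assert(can_id_to_reg_mid(CAN_EFF_FLAG | 0x1234567) ==
	       (0x1234567u | AT91_MID_MIDE));
	return 0;
}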
223/* 239/*
224 * Switch transceiver on or off 240 * Switch transceiver on or off
225 */ 241 */
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev)
233{ 249{
234 struct at91_priv *priv = netdev_priv(dev); 250 struct at91_priv *priv = netdev_priv(dev);
235 unsigned int i; 251 unsigned int i;
252 u32 reg_mid;
236 253
237 /* 254 /*
238 * The first 12 mailboxes are used as a reception FIFO. The 255 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
239 * last mailbox is configured with overwrite option. The 256 * mailbox is disabled. The next 11 mailboxes are used as a
240 * overwrite flag indicates a FIFO overflow. 257 * reception FIFO. The last mailbox is configured with
258 * overwrite option. The overwrite flag indicates a FIFO
259 * overflow.
241 */ 260 */
261 reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
262 for (i = 0; i < AT91_MB_RX_FIRST; i++) {
263 set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
264 at91_write(priv, AT91_MID(i), reg_mid);
265 at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
266 }
267
242 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) 268 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
243 set_mb_mode(priv, i, AT91_MB_MODE_RX); 269 set_mb_mode(priv, i, AT91_MB_MODE_RX);
244 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); 270 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
254 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); 280 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
255 281
256 /* Reset tx and rx helper pointers */ 282 /* Reset tx and rx helper pointers */
257 priv->tx_next = priv->tx_echo = priv->rx_next = 0; 283 priv->tx_next = priv->tx_echo = 0;
284 priv->rx_next = AT91_MB_RX_FIRST;
258} 285}
259 286
260static int at91_set_bittiming(struct net_device *dev) 287static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
372 netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); 399 netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
373 return NETDEV_TX_BUSY; 400 return NETDEV_TX_BUSY;
374 } 401 }
375 402 reg_mid = at91_can_id_to_reg_mid(cf->can_id);
376 if (cf->can_id & CAN_EFF_FLAG)
377 reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
378 else
379 reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
380
381 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | 403 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
382 (cf->can_dlc << 16) | AT91_MCR_MTCR; 404 (cf->can_dlc << 16) | AT91_MCR_MTCR;
383 405
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
539 * 561 *
540 * Theory of Operation: 562 * Theory of Operation:
541 * 563 *
542 * 12 of the 16 mailboxes on the chip are reserved for RX. we split 564 * 11 of the 16 mailboxes on the chip are reserved for RX. we split
543 * them into 2 groups. The lower group holds 8 and upper 4 mailboxes. 565 * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
544 * 566 *
545 * Like it or not, the chip always saves a received CAN message 567 * Like it or not, the chip always saves a received CAN message
546 * into the first free mailbox it finds (starting with the 568 * into the first free mailbox it finds (starting with the
547 * lowest). This makes it very difficult to read the messages in the 569 * lowest). This makes it very difficult to read the messages in the
548 * right order from the chip. This is how we work around that problem: 570 * right order from the chip. This is how we work around that problem:
549 * 571 *
550 * The first message goes into mb nr. 0 and issues an interrupt. All 572 * The first message goes into mb nr. 1 and issues an interrupt. All
551 * rx ints are disabled in the interrupt handler and a napi poll is 573 * rx ints are disabled in the interrupt handler and a napi poll is
552 * scheduled. We read the mailbox, but do _not_ reenable the mb (to 574 * scheduled. We read the mailbox, but do _not_ reenable the mb (to
553 * receive another message). 575 * receive another message).
554 * 576 *
555 * lower mbxs upper 577 * lower mbxs upper
556 * ______^______ __^__ 578 * ____^______ __^__
557 * / \ / \ 579 * / \ / \
558 * +-+-+-+-+-+-+-+-++-+-+-+-+ 580 * +-+-+-+-+-+-+-+-++-+-+-+-+
559 * |x|x|x|x|x|x|x|x|| | | | | 581 * | |x|x|x|x|x|x|x|| | | | |
560 * +-+-+-+-+-+-+-+-++-+-+-+-+ 582 * +-+-+-+-+-+-+-+-++-+-+-+-+
561 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail 583 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
562 * 0 1 2 3 4 5 6 7 8 9 0 1 / box 584 * 0 1 2 3 4 5 6 7 8 9 0 1 / box
585 * ^
586 * |
587 * \
588 * unused, due to chip bug
563 * 589 *
564 * The variable priv->rx_next points to the next mailbox to read a 590 * The variable priv->rx_next points to the next mailbox to read a
565 * message from. As long as we're in the lower mailboxes we just read the 591 * message from. As long as we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
590 "order of incoming frames cannot be guaranteed\n"); 616 "order of incoming frames cannot be guaranteed\n");
591 617
592 again: 618 again:
593 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); 619 for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
594 mb < AT91_MB_RX_NUM && quota > 0; 620 mb < AT91_MB_RX_LAST + 1 && quota > 0;
595 reg_sr = at91_read(priv, AT91_SR), 621 reg_sr = at91_read(priv, AT91_SR),
596 mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) { 622 mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
597 at91_read_msg(dev, mb); 623 at91_read_msg(dev, mb);
598 624
599 /* reactivate mailboxes */ 625 /* reactivate mailboxes */
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
610 636
611 /* upper group completed, look again in lower */ 637 /* upper group completed, look again in lower */
612 if (priv->rx_next > AT91_MB_RX_LOW_LAST && 638 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
613 quota > 0 && mb >= AT91_MB_RX_NUM) { 639 quota > 0 && mb > AT91_MB_RX_LAST) {
614 priv->rx_next = 0; 640 priv->rx_next = AT91_MB_RX_FIRST;
615 goto again; 641 goto again;
616 } 642 }
617 643
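The loop bounds above change from AT91_MB_RX_NUM to AT91_MB_RX_LAST + 1 because mailbox 0 is now skipped. A loose userspace model of the scan-and-wrap logic, with find_next_bit() replaced by a plain loop (illustrative only; the driver's rx_next bookkeeping differs in detail):

#include <stdio.h>

#define MB_RX_FIRST 1
#define MB_RX_LAST  11

/* stand-in for the kernel's find_next_bit() */
static int find_next_bit(unsigned int mask, int size, int from)
{
	int i;

	for (i = from; i < size; i++)
		if (mask & (1u << i))
			return i;
	return size;
}

int main(void)
{
	unsigned int pending = (1u << 3) | (1u << 10); /* mailboxes 3 and 10 */
	int rx_next = 5;        /* resume where the last poll stopped */
	int rescanned = 0;
	int mb;

again:
	for (mb = find_next_bit(pending, MB_RX_LAST + 1, rx_next);
	     mb <= MB_RX_LAST;
	     mb = find_next_bit(pending, MB_RX_LAST + 1, ++rx_next)) {
		printf("read mailbox %d\n", mb);
		pending &= ~(1u << mb);
		rx_next = mb;
	}
	if (!rescanned) {       /* upper group done, rescan from the first */
		rescanned = 1;
		rx_next = MB_RX_FIRST;
		goto again;
	}
	return 0;               /* prints mailbox 10, then mailbox 3 */
}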
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = {
1037 .ndo_start_xmit = at91_start_xmit, 1063 .ndo_start_xmit = at91_start_xmit,
1038}; 1064};
1039 1065
1066static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
1067 struct device_attribute *attr, char *buf)
1068{
1069 struct at91_priv *priv = netdev_priv(to_net_dev(dev));
1070
1071 if (priv->mb0_id & CAN_EFF_FLAG)
1072 return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
1073 else
1074 return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
1075}
1076
1077static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
1078 struct device_attribute *attr, const char *buf, size_t count)
1079{
1080 struct net_device *ndev = to_net_dev(dev);
1081 struct at91_priv *priv = netdev_priv(ndev);
1082 unsigned long can_id;
1083 ssize_t ret;
1084 int err;
1085
1086 rtnl_lock();
1087
1088 if (ndev->flags & IFF_UP) {
1089 ret = -EBUSY;
1090 goto out;
1091 }
1092
1093 err = strict_strtoul(buf, 0, &can_id);
1094 if (err) {
1095 ret = err;
1096 goto out;
1097 }
1098
1099 if (can_id & CAN_EFF_FLAG)
1100 can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
1101 else
1102 can_id &= CAN_SFF_MASK;
1103
1104 priv->mb0_id = can_id;
1105 ret = count;
1106
1107 out:
1108 rtnl_unlock();
1109 return ret;
1110}
1111
1112static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
1113 at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
1114
1115static struct attribute *at91_sysfs_attrs[] = {
1116 &dev_attr_mb0_id.attr,
1117 NULL,
1118};
1119
1120static struct attribute_group at91_sysfs_attr_group = {
1121 .attrs = at91_sysfs_attrs,
1122};
1123
1040static int __devinit at91_can_probe(struct platform_device *pdev) 1124static int __devinit at91_can_probe(struct platform_device *pdev)
1041{ 1125{
1042 struct net_device *dev; 1126 struct net_device *dev;
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
1082 dev->netdev_ops = &at91_netdev_ops; 1166 dev->netdev_ops = &at91_netdev_ops;
1083 dev->irq = irq; 1167 dev->irq = irq;
1084 dev->flags |= IFF_ECHO; 1168 dev->flags |= IFF_ECHO;
1169 dev->sysfs_groups[0] = &at91_sysfs_attr_group;
1085 1170
1086 priv = netdev_priv(dev); 1171 priv = netdev_priv(dev);
1087 priv->can.clock.freq = clk_get_rate(clk); 1172 priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
1093 priv->dev = dev; 1178 priv->dev = dev;
1094 priv->clk = clk; 1179 priv->clk = clk;
1095 priv->pdata = pdev->dev.platform_data; 1180 priv->pdata = pdev->dev.platform_data;
1181 priv->mb0_id = 0x7ff;
1096 1182
1097 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); 1183 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
1098 1184
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644
index 00000000000..ffb9773d102
--- /dev/null
+++ b/drivers/net/can/c_can/Kconfig
@@ -0,0 +1,15 @@
1menuconfig CAN_C_CAN
2 tristate "Bosch C_CAN devices"
3 depends on CAN_DEV && HAS_IOMEM
4
5if CAN_C_CAN
6
7config CAN_C_CAN_PLATFORM
8 tristate "Generic Platform Bus based C_CAN driver"
9 ---help---
10 This driver adds support for the C_CAN chips connected to
11 the "platform bus" (Linux abstraction for directly to the
12 processor attached devices) which can be found on various
13 boards from ST Microelectronics (http://www.st.com)
14 like the SPEAr1310 and SPEAr320 evaluation boards.
15endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644
index 00000000000..9273f6d5c4b
--- /dev/null
+++ b/drivers/net/can/c_can/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Bosch C_CAN controller drivers.
3#
4
5obj-$(CONFIG_CAN_C_CAN) += c_can.o
6obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
7
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644
index 00000000000..31552959aed
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.c
@@ -0,0 +1,1152 @@
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
13 * written by:
14 * Copyright
15 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
16 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
17 *
18 * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
19 * Bosch C_CAN user manual can be obtained from:
20 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
21 * users_manual_c_can.pdf
22 *
23 * This file is licensed under the terms of the GNU General Public
24 * License version 2. This program is licensed "as is" without any
25 * warranty of any kind, whether express or implied.
26 */
27
28#include <linux/kernel.h>
29#include <linux/version.h>
30#include <linux/module.h>
31#include <linux/interrupt.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/if_arp.h>
35#include <linux/if_ether.h>
36#include <linux/list.h>
37#include <linux/delay.h>
38#include <linux/io.h>
39
40#include <linux/can.h>
41#include <linux/can/dev.h>
42#include <linux/can/error.h>
43
44#include "c_can.h"
45
46/* control register */
47#define CONTROL_TEST BIT(7)
48#define CONTROL_CCE BIT(6)
49#define CONTROL_DISABLE_AR BIT(5)
50#define CONTROL_ENABLE_AR (0 << 5)
51#define CONTROL_EIE BIT(3)
52#define CONTROL_SIE BIT(2)
53#define CONTROL_IE BIT(1)
54#define CONTROL_INIT BIT(0)
55
56/* test register */
57#define TEST_RX BIT(7)
58#define TEST_TX1 BIT(6)
59#define TEST_TX2 BIT(5)
60#define TEST_LBACK BIT(4)
61#define TEST_SILENT BIT(3)
62#define TEST_BASIC BIT(2)
63
64/* status register */
65#define STATUS_BOFF BIT(7)
66#define STATUS_EWARN BIT(6)
67#define STATUS_EPASS BIT(5)
68#define STATUS_RXOK BIT(4)
69#define STATUS_TXOK BIT(3)
70
71/* error counter register */
72#define ERR_CNT_TEC_MASK 0xff
73#define ERR_CNT_TEC_SHIFT 0
74#define ERR_CNT_REC_SHIFT 8
75#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
76#define ERR_CNT_RP_SHIFT 15
77#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
78
79/* bit-timing register */
80#define BTR_BRP_MASK 0x3f
81#define BTR_BRP_SHIFT 0
82#define BTR_SJW_SHIFT 6
83#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
84#define BTR_TSEG1_SHIFT 8
85#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
86#define BTR_TSEG2_SHIFT 12
87#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
88
89/* brp extension register */
90#define BRP_EXT_BRPE_MASK 0x0f
91#define BRP_EXT_BRPE_SHIFT 0
92
93/* IFx command request */
94#define IF_COMR_BUSY BIT(15)
95
96/* IFx command mask */
97#define IF_COMM_WR BIT(7)
98#define IF_COMM_MASK BIT(6)
99#define IF_COMM_ARB BIT(5)
100#define IF_COMM_CONTROL BIT(4)
101#define IF_COMM_CLR_INT_PND BIT(3)
102#define IF_COMM_TXRQST BIT(2)
103#define IF_COMM_DATAA BIT(1)
104#define IF_COMM_DATAB BIT(0)
105#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
106 IF_COMM_CONTROL | IF_COMM_TXRQST | \
107 IF_COMM_DATAA | IF_COMM_DATAB)
108
109/* IFx arbitration */
110#define IF_ARB_MSGVAL BIT(15)
111#define IF_ARB_MSGXTD BIT(14)
112#define IF_ARB_TRANSMIT BIT(13)
113
114/* IFx message control */
115#define IF_MCONT_NEWDAT BIT(15)
116#define IF_MCONT_MSGLST BIT(14)
117#define IF_MCONT_CLR_MSGLST (0 << 14)
118#define IF_MCONT_INTPND BIT(13)
119#define IF_MCONT_UMASK BIT(12)
120#define IF_MCONT_TXIE BIT(11)
121#define IF_MCONT_RXIE BIT(10)
122#define IF_MCONT_RMTEN BIT(9)
123#define IF_MCONT_TXRQST BIT(8)
124#define IF_MCONT_EOB BIT(7)
125#define IF_MCONT_DLC_MASK 0xf
126
127/*
128 * IFx register masks:
129 * allow easy operation on 16-bit registers when the
130 * argument is 32-bit instead
131 */
132#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
133#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
134
135/* message object split */
136#define C_CAN_NO_OF_OBJECTS 32
137#define C_CAN_MSG_OBJ_RX_NUM 16
138#define C_CAN_MSG_OBJ_TX_NUM 16
139
140#define C_CAN_MSG_OBJ_RX_FIRST 1
141#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
142 C_CAN_MSG_OBJ_RX_NUM - 1)
143
144#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
145#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
146 C_CAN_MSG_OBJ_TX_NUM - 1)
147
148#define C_CAN_MSG_OBJ_RX_SPLIT 9
149#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
150
151#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
152#define RECEIVE_OBJECT_BITS 0x0000ffff
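/*
 * Layout that follows from the defines above: message objects 1..16
 * form the receive FIFO, split into a low group 1..8 (up to
 * C_CAN_MSG_RX_LOW_LAST) and a high group 9..16; objects 17..32 are
 * reserved for transmission.
 */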
153
154/* status interrupt */
155#define STATUS_INTERRUPT 0x8000
156
157/* global interrupt masks */
158#define ENABLE_ALL_INTERRUPTS 1
159#define DISABLE_ALL_INTERRUPTS 0
160
161/* minimum timeout for checking BUSY status */
162#define MIN_TIMEOUT_VALUE 6
163
164/* napi related */
165#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
166
167/* c_can lec values */
168enum c_can_lec_type {
169 LEC_NO_ERROR = 0,
170 LEC_STUFF_ERROR,
171 LEC_FORM_ERROR,
172 LEC_ACK_ERROR,
173 LEC_BIT1_ERROR,
174 LEC_BIT0_ERROR,
175 LEC_CRC_ERROR,
176 LEC_UNUSED,
177};
178
179/*
180 * c_can error types:
181 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
182 */
183enum c_can_bus_error_types {
184 C_CAN_NO_ERROR = 0,
185 C_CAN_BUS_OFF,
186 C_CAN_ERROR_WARNING,
187 C_CAN_ERROR_PASSIVE,
188};
189
190static struct can_bittiming_const c_can_bittiming_const = {
191 .name = KBUILD_MODNAME,
192 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
193 .tseg1_max = 16,
194 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
195 .tseg2_max = 8,
196 .sjw_max = 4,
197 .brp_min = 1,
198 .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/
199 .brp_inc = 1,
200};
201
202static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
203{
204 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
205 C_CAN_MSG_OBJ_TX_FIRST;
206}
207
208static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
209{
210 return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
211 C_CAN_MSG_OBJ_TX_FIRST;
212}
213
214static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
215{
216 u32 val = priv->read_reg(priv, reg);
217 val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
218 return val;
219}
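/*
 * Example (register values invented for illustration): if txrqst1 reads
 * 0x5555 and the adjacent txrqst2 reads 0xaaaa, c_can_read_reg32() on
 * txrqst1 returns 0xaaaa5555 -- the low half first, the high half
 * shifted up by 16.
 */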
220
221static void c_can_enable_all_interrupts(struct c_can_priv *priv,
222 int enable)
223{
224 unsigned int cntrl_save = priv->read_reg(priv,
225 &priv->regs->control);
226
227 if (enable)
228 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
229 else
230 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
231
232 priv->write_reg(priv, &priv->regs->control, cntrl_save);
233}
234
235static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
236{
237 int count = MIN_TIMEOUT_VALUE;
238
239 while (count && priv->read_reg(priv,
240 &priv->regs->ifregs[iface].com_req) &
241 IF_COMR_BUSY) {
242 count--;
243 udelay(1);
244 }
245
246 if (!count)
247 return 1;
248
249 return 0;
250}
251
252static inline void c_can_object_get(struct net_device *dev,
253 int iface, int objno, int mask)
254{
255 struct c_can_priv *priv = netdev_priv(dev);
256
257 /*
258 * As per the spec, after writing the message object number to the
259 * IF command request register, the transfer between the interface
260 * registers and message RAM must complete within 6 CAN-CLK
261 * periods.
262 */
263 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
264 IFX_WRITE_LOW_16BIT(mask));
265 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
266 IFX_WRITE_LOW_16BIT(objno));
267
268 if (c_can_msg_obj_is_busy(priv, iface))
269 netdev_err(dev, "timed out in object get\n");
270}
271
272static inline void c_can_object_put(struct net_device *dev,
273 int iface, int objno, int mask)
274{
275 struct c_can_priv *priv = netdev_priv(dev);
276
277 /*
278 * As per the spec, after writing the message object number to the
279 * IF command request register, the transfer between the interface
280 * registers and message RAM must complete within 6 CAN-CLK
281 * periods.
282 */
283 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
284 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
285 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
286 IFX_WRITE_LOW_16BIT(objno));
287
288 if (c_can_msg_obj_is_busy(priv, iface))
289 netdev_err(dev, "timed out in object put\n");
290}
291
292static void c_can_write_msg_object(struct net_device *dev,
293 int iface, struct can_frame *frame, int objno)
294{
295 int i;
296 u16 flags = 0;
297 unsigned int id;
298 struct c_can_priv *priv = netdev_priv(dev);
299
300 if (!(frame->can_id & CAN_RTR_FLAG))
301 flags |= IF_ARB_TRANSMIT;
302
303 if (frame->can_id & CAN_EFF_FLAG) {
304 id = frame->can_id & CAN_EFF_MASK;
305 flags |= IF_ARB_MSGXTD;
306 } else
307 id = ((frame->can_id & CAN_SFF_MASK) << 18);
308
309 flags |= IF_ARB_MSGVAL;
310
311 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
312 IFX_WRITE_LOW_16BIT(id));
313 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
314 IFX_WRITE_HIGH_16BIT(id));
315
316 for (i = 0; i < frame->can_dlc; i += 2) {
317 priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
318 frame->data[i] | (frame->data[i + 1] << 8));
319 }
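 /*
 * Packing example for the loop above (hypothetical payload): with
 * data[] = {0x11, 0x22}, the first 16-bit data register receives
 * 0x2211, i.e. the payload is stored little endian per register.
 */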
320
321 /* enable interrupt for this message object */
322 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
323 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
324 frame->can_dlc);
325 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
326}
327
328static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
329 int iface, int ctrl_mask,
330 int obj)
331{
332 struct c_can_priv *priv = netdev_priv(dev);
333
334 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
335 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
336 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
337
338}
339
340static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
341 int iface,
342 int ctrl_mask)
343{
344 int i;
345 struct c_can_priv *priv = netdev_priv(dev);
346
347 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
348 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
349 ctrl_mask & ~(IF_MCONT_MSGLST |
350 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
351 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
352 }
353}
354
355static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
356 int iface, int ctrl_mask,
357 int obj)
358{
359 struct c_can_priv *priv = netdev_priv(dev);
360
361 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
362 ctrl_mask & ~(IF_MCONT_MSGLST |
363 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
364 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
365}
366
367static void c_can_handle_lost_msg_obj(struct net_device *dev,
368 int iface, int objno)
369{
370 struct c_can_priv *priv = netdev_priv(dev);
371 struct net_device_stats *stats = &dev->stats;
372 struct sk_buff *skb;
373 struct can_frame *frame;
374
375 netdev_err(dev, "msg lost in buffer %d\n", objno);
376
377 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
378
379 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
380 IF_MCONT_CLR_MSGLST);
381
382 c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
383
384 /* create an error msg */
385 skb = alloc_can_err_skb(dev, &frame);
386 if (unlikely(!skb))
387 return;
388
389 frame->can_id |= CAN_ERR_CRTL;
390 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
391 stats->rx_errors++;
392 stats->rx_over_errors++;
393
394 netif_receive_skb(skb);
395}
396
397static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
398{
399 u16 flags, data;
400 int i;
401 unsigned int val;
402 struct c_can_priv *priv = netdev_priv(dev);
403 struct net_device_stats *stats = &dev->stats;
404 struct sk_buff *skb;
405 struct can_frame *frame;
406
407 skb = alloc_can_skb(dev, &frame);
408 if (!skb) {
409 stats->rx_dropped++;
410 return -ENOMEM;
411 }
412
413 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
414
415 flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
416 val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
417 (flags << 16);
418
419 if (flags & IF_ARB_MSGXTD)
420 frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
421 else
422 frame->can_id = (val >> 18) & CAN_SFF_MASK;
423
424 if (flags & IF_ARB_TRANSMIT)
425 frame->can_id |= CAN_RTR_FLAG;
426 else {
427 for (i = 0; i < frame->can_dlc; i += 2) {
428 data = priv->read_reg(priv,
429 &priv->regs->ifregs[iface].data[i / 2]);
430 frame->data[i] = data;
431 frame->data[i + 1] = data >> 8;
432 }
433 }
434
435 netif_receive_skb(skb);
436
437 stats->rx_packets++;
438 stats->rx_bytes += frame->can_dlc;
439
440 return 0;
441}
442
443static void c_can_setup_receive_object(struct net_device *dev, int iface,
444 int objno, unsigned int mask,
445 unsigned int id, unsigned int mcont)
446{
447 struct c_can_priv *priv = netdev_priv(dev);
448
449 priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
450 IFX_WRITE_LOW_16BIT(mask));
451 priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
452 IFX_WRITE_HIGH_16BIT(mask));
453
454 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
455 IFX_WRITE_LOW_16BIT(id));
456 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
457 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
458
459 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
460 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
461
462 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
463 c_can_read_reg32(priv, &priv->regs->msgval1));
464}
465
466static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
467{
468 struct c_can_priv *priv = netdev_priv(dev);
469
470 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
471 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
472 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
473
474 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
475
476 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
477 c_can_read_reg32(priv, &priv->regs->msgval1));
478}
479
480static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
481{
482 int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
483
484 /*
485 * the transmission request register's bit n-1 corresponds to
486 * message object n, so test bit (objno - 1).
487 */
488 if (val & (1 << (objno - 1)))
489 return 1;
490
491 return 0;
492}
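/*
 * Example (illustrative): object C_CAN_MSG_OBJ_TX_FIRST (17) is pending
 * exactly when bit 16 of the combined txrqst1/txrqst2 value is set,
 * hence the (objno - 1) shift above.
 */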
493
494static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
495 struct net_device *dev)
496{
497 u32 msg_obj_no;
498 struct c_can_priv *priv = netdev_priv(dev);
499 struct can_frame *frame = (struct can_frame *)skb->data;
500
501 if (can_dropped_invalid_skb(dev, skb))
502 return NETDEV_TX_OK;
503
504 msg_obj_no = get_tx_next_msg_obj(priv);
505
506 /* prepare message object for transmission */
507 c_can_write_msg_object(dev, 0, frame, msg_obj_no);
508 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
509
510 /*
511 * we have to stop the queue in case of a wrap around or
512 * if the next TX message object is still in use
513 */
514 priv->tx_next++;
515 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
516 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
517 netif_stop_queue(dev);
518
519 return NETDEV_TX_OK;
520}
521
522static int c_can_set_bittiming(struct net_device *dev)
523{
524 unsigned int reg_btr, reg_brpe, ctrl_save;
525 u8 brp, brpe, sjw, tseg1, tseg2;
526 u32 ten_bit_brp;
527 struct c_can_priv *priv = netdev_priv(dev);
528 const struct can_bittiming *bt = &priv->can.bittiming;
529
530 /* c_can provides a 6-bit brp and 4-bit brpe fields */
531 ten_bit_brp = bt->brp - 1;
532 brp = ten_bit_brp & BTR_BRP_MASK;
533 brpe = ten_bit_brp >> 6;
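 /*
 * Worked example (bittiming values invented for illustration):
 * bt->brp = 100 gives ten_bit_brp = 99 = 0b0001100011, so brp = 0x23
 * and brpe = 1; the effective prescaler is ((brpe << 6) | brp) + 1 = 100.
 */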
534
535 sjw = bt->sjw - 1;
536 tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
537 tseg2 = bt->phase_seg2 - 1;
538 reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
539 (tseg2 << BTR_TSEG2_SHIFT);
540 reg_brpe = brpe & BRP_EXT_BRPE_MASK;
541
542 netdev_info(dev,
543 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
544
545 ctrl_save = priv->read_reg(priv, &priv->regs->control);
546 priv->write_reg(priv, &priv->regs->control,
547 ctrl_save | CONTROL_CCE | CONTROL_INIT);
548 priv->write_reg(priv, &priv->regs->btr, reg_btr);
549 priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
550 priv->write_reg(priv, &priv->regs->control, ctrl_save);
551
552 return 0;
553}
554
555/*
556 * Configure C_CAN message objects for Tx and Rx purposes:
557 * C_CAN provides a total of 32 message objects that can be configured
558 * either for Tx or Rx purposes. Here the first 16 message objects are used as
559 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
560 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
561 * See user guide document for further details on configuring message
562 * objects.
563 */
564static void c_can_configure_msg_objects(struct net_device *dev)
565{
566 int i;
567
568 /* first invalidate all message objects */
569 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
570 c_can_inval_msg_object(dev, 0, i);
571
572 /* setup receive message objects */
573 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
574 c_can_setup_receive_object(dev, 0, i, 0, 0,
575 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
576
577 c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
578 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
579}
580
581/*
582 * Configure C_CAN chip:
583 * - enable/disable auto-retransmission
584 * - set operating mode
585 * - configure message objects
586 */
587static void c_can_chip_config(struct net_device *dev)
588{
589 struct c_can_priv *priv = netdev_priv(dev);
590
591 /* enable automatic retransmission */
592 priv->write_reg(priv, &priv->regs->control,
593 CONTROL_ENABLE_AR);
594
595 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
596 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
597 /* loopback + silent mode : useful for hot self-test */
598 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
599 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
600 priv->write_reg(priv, &priv->regs->test,
601 TEST_LBACK | TEST_SILENT);
602 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
603 /* loopback mode : useful for self-test function */
604 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
605 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
606 priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
607 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
608 /* silent mode : bus-monitoring mode */
609 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
610 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
611 priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
612 } else
613 /* normal mode */
614 priv->write_reg(priv, &priv->regs->control,
615 CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
616
617 /* configure message objects */
618 c_can_configure_msg_objects(dev);
619
620 /* set a `lec` value so that we can check for updates later */
621 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
622
623 /* set bittiming params */
624 c_can_set_bittiming(dev);
625}
626
627static void c_can_start(struct net_device *dev)
628{
629 struct c_can_priv *priv = netdev_priv(dev);
630
631 /* basic c_can configuration */
632 c_can_chip_config(dev);
633
634 priv->can.state = CAN_STATE_ERROR_ACTIVE;
635
636 /* reset tx helper pointers */
637 priv->tx_next = priv->tx_echo = 0;
638
639 /* enable status change, error and module interrupts */
640 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
641}
642
643static void c_can_stop(struct net_device *dev)
644{
645 struct c_can_priv *priv = netdev_priv(dev);
646
647 /* disable all interrupts */
648 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
649
650 /* set the state as STOPPED */
651 priv->can.state = CAN_STATE_STOPPED;
652}
653
654static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
655{
656 switch (mode) {
657 case CAN_MODE_START:
658 c_can_start(dev);
659 netif_wake_queue(dev);
660 break;
661 default:
662 return -EOPNOTSUPP;
663 }
664
665 return 0;
666}
667
668static int c_can_get_berr_counter(const struct net_device *dev,
669 struct can_berr_counter *bec)
670{
671 unsigned int reg_err_counter;
672 struct c_can_priv *priv = netdev_priv(dev);
673
674 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
675 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
676 ERR_CNT_REC_SHIFT;
677 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
678
679 return 0;
680}
681
682/*
683 * theory of operation:
684 *
685 * priv->tx_echo holds the number of the oldest can_frame put for
686 * transmission into the hardware, but not yet ACKed by the CAN tx
687 * complete IRQ.
688 *
689 * We iterate from priv->tx_echo to priv->tx_next; each frame that has
690 * been transmitted is echoed back to the CAN framework. On the first
691 * frame found not yet transmitted we stop looking for more.
692 */
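/*
 * Index math example (illustrative): with tx_echo = 3 and tx_next = 5,
 * message objects (3 & 15) + 17 = 20 and (4 & 15) + 17 = 21 are
 * examined; the counters run freely and wrap onto the 16 tx objects
 * via C_CAN_NEXT_MSG_OBJ_MASK.
 */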
693static void c_can_do_tx(struct net_device *dev)
694{
695 u32 val;
696 u32 msg_obj_no;
697 struct c_can_priv *priv = netdev_priv(dev);
698 struct net_device_stats *stats = &dev->stats;
699
700 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
701 msg_obj_no = get_tx_echo_msg_obj(priv);
702 val = c_can_read_reg32(priv, &priv->regs->txrqst1);
703 if (val & (1 << (msg_obj_no - 1)))
704 break; /* not transmitted yet, keep tx order */
705 can_get_echo_skb(dev,
706 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
707 stats->tx_bytes += priv->read_reg(priv,
708 &priv->regs->ifregs[0].msg_cntrl)
709 & IF_MCONT_DLC_MASK;
710 stats->tx_packets++;
711 c_can_inval_msg_object(dev, 0, msg_obj_no);
712 }
713
714 /* restart the queue on wrap-around or if it stalled on the last packet */
715 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
716 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
717 netif_wake_queue(dev);
718}
719
720/*
721 * theory of operation:
722 *
723 * The c_can core saves a received CAN message into the first free
724 * message object it finds (starting with the lowest). Bits NEWDAT and
725 * INTPND are set for that object to flag a new message. Since frames
726 * can thus be delivered out of order, we keep two groups of message
727 * objects, partitioned at C_CAN_MSG_OBJ_RX_SPLIT.
728 *
729 * To ensure in-order frame reception we use the following
730 * approach while re-activating a message object to receive further
731 * frames:
732 * - if the current message object number is lower than
733 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
734 * the INTPND bit.
735 * - if the current message object number is equal to
736 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
737 * receive message objects.
738 * - if the current message object number is greater than
739 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
740 * only this message object.
741 */
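/*
 * Example (illustrative): after reading object 5, only INTPND is
 * cleared and NEWDAT stays set; once object 8 (C_CAN_MSG_RX_LOW_LAST)
 * has been read, NEWDAT is cleared for objects 1..8 in one go,
 * reopening the whole low group for reception.
 */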
742static int c_can_do_rx_poll(struct net_device *dev, int quota)
743{
744 u32 num_rx_pkts = 0;
745 unsigned int msg_obj, msg_ctrl_save;
746 struct c_can_priv *priv = netdev_priv(dev);
747 u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
748
749 for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
750 msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
751 val = c_can_read_reg32(priv, &priv->regs->intpnd1),
752 msg_obj++) {
753 /*
754 * the interrupt pending register's bit n-1 corresponds to
755 * message object n, so test bit (msg_obj - 1).
756 */
757 if (val & (1 << (msg_obj - 1))) {
758 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
759 ~IF_COMM_TXRQST);
760 msg_ctrl_save = priv->read_reg(priv,
761 &priv->regs->ifregs[0].msg_cntrl);
762
763 if (msg_ctrl_save & IF_MCONT_EOB)
764 return num_rx_pkts;
765
766 if (msg_ctrl_save & IF_MCONT_MSGLST) {
767 c_can_handle_lost_msg_obj(dev, 0, msg_obj);
768 num_rx_pkts++;
769 quota--;
770 continue;
771 }
772
773 if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
774 continue;
775
776 /* read the data from the message object */
777 c_can_read_msg_object(dev, 0, msg_ctrl_save);
778
779 if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
780 c_can_mark_rx_msg_obj(dev, 0,
781 msg_ctrl_save, msg_obj);
782 else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
783 /* activate this msg obj */
784 c_can_activate_rx_msg_obj(dev, 0,
785 msg_ctrl_save, msg_obj);
786 else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
787 /* activate all lower message objects */
788 c_can_activate_all_lower_rx_msg_obj(dev,
789 0, msg_ctrl_save);
790
791 num_rx_pkts++;
792 quota--;
793 }
794 }
795
796 return num_rx_pkts;
797}
798
799static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
800{
801 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) ?
802 (priv->current_status & LEC_UNUSED) : 0;
803}
804
805static int c_can_handle_state_change(struct net_device *dev,
806 enum c_can_bus_error_types error_type)
807{
808 unsigned int reg_err_counter;
809 unsigned int rx_err_passive;
810 struct c_can_priv *priv = netdev_priv(dev);
811 struct net_device_stats *stats = &dev->stats;
812 struct can_frame *cf;
813 struct sk_buff *skb;
814 struct can_berr_counter bec;
815
816 /* propagate the error condition to the CAN stack */
817 skb = alloc_can_err_skb(dev, &cf);
818 if (unlikely(!skb))
819 return 0;
820
821 c_can_get_berr_counter(dev, &bec);
822 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
823 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
824 ERR_CNT_RP_SHIFT;
825
826 switch (error_type) {
827 case C_CAN_ERROR_WARNING:
828 /* error warning state */
829 priv->can.can_stats.error_warning++;
830 priv->can.state = CAN_STATE_ERROR_WARNING;
831 cf->can_id |= CAN_ERR_CRTL;
832 cf->data[1] = (bec.txerr > bec.rxerr) ?
833 CAN_ERR_CRTL_TX_WARNING :
834 CAN_ERR_CRTL_RX_WARNING;
835 cf->data[6] = bec.txerr;
836 cf->data[7] = bec.rxerr;
837
838 break;
839 case C_CAN_ERROR_PASSIVE:
840 /* error passive state */
841 priv->can.can_stats.error_passive++;
842 priv->can.state = CAN_STATE_ERROR_PASSIVE;
843 cf->can_id |= CAN_ERR_CRTL;
844 if (rx_err_passive)
845 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
846 if (bec.txerr > 127)
847 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
848
849 cf->data[6] = bec.txerr;
850 cf->data[7] = bec.rxerr;
851 break;
852 case C_CAN_BUS_OFF:
853 /* bus-off state */
854 priv->can.state = CAN_STATE_BUS_OFF;
855 cf->can_id |= CAN_ERR_BUSOFF;
856 /*
857 * disable all interrupts in bus-off mode to ensure that
858 * the CPU is not hogged down
859 */
860 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
861 can_bus_off(dev);
862 break;
863 default:
864 break;
865 }
866
867 netif_receive_skb(skb);
868 stats->rx_packets++;
869 stats->rx_bytes += cf->can_dlc;
870
871 return 1;
872}
873
874static int c_can_handle_bus_err(struct net_device *dev,
875 enum c_can_lec_type lec_type)
876{
877 struct c_can_priv *priv = netdev_priv(dev);
878 struct net_device_stats *stats = &dev->stats;
879 struct can_frame *cf;
880 struct sk_buff *skb;
881
882 /*
883 * early exit if there is no lec update or no error.
884 * no lec update means that no CAN bus event has been detected
885 * since the CPU wrote 0x7 (LEC_UNUSED) to the status reg.
886 */
887 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
888 return 0;
889
890 /* propagate the error condition to the CAN stack */
891 skb = alloc_can_err_skb(dev, &cf);
892 if (unlikely(!skb))
893 return 0;
894
895 /*
896 * check for 'last error code' which tells us the
897 * type of the last error to occur on the CAN bus
898 */
899
900 /* common for all type of bus errors */
901 priv->can.can_stats.bus_error++;
902 stats->rx_errors++;
903 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
904 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
905
906 switch (lec_type) {
907 case LEC_STUFF_ERROR:
908 netdev_dbg(dev, "stuff error\n");
909 cf->data[2] |= CAN_ERR_PROT_STUFF;
910 break;
911 case LEC_FORM_ERROR:
912 netdev_dbg(dev, "form error\n");
913 cf->data[2] |= CAN_ERR_PROT_FORM;
914 break;
915 case LEC_ACK_ERROR:
916 netdev_dbg(dev, "ack error\n");
917 cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
918 CAN_ERR_PROT_LOC_ACK_DEL);
919 break;
920 case LEC_BIT1_ERROR:
921 netdev_dbg(dev, "bit1 error\n");
922 cf->data[2] |= CAN_ERR_PROT_BIT1;
923 break;
924 case LEC_BIT0_ERROR:
925 netdev_dbg(dev, "bit0 error\n");
926 cf->data[2] |= CAN_ERR_PROT_BIT0;
927 break;
928 case LEC_CRC_ERROR:
929 netdev_dbg(dev, "CRC error\n");
930 cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
931 CAN_ERR_PROT_LOC_CRC_DEL);
932 break;
933 default:
934 break;
935 }
936
937 /* set a `lec` value so that we can check for updates later */
938 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
939
940 netif_receive_skb(skb);
941 stats->rx_packets++;
942 stats->rx_bytes += cf->can_dlc;
943
944 return 1;
945}
946
947static int c_can_poll(struct napi_struct *napi, int quota)
948{
949 u16 irqstatus;
950 int lec_type = 0;
951 int work_done = 0;
952 struct net_device *dev = napi->dev;
953 struct c_can_priv *priv = netdev_priv(dev);
954
955 irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
956 if (!irqstatus)
957 goto end;
958
959 /* status events have the highest priority */
960 if (irqstatus == STATUS_INTERRUPT) {
961 priv->current_status = priv->read_reg(priv,
962 &priv->regs->status);
963
964 /* handle Tx/Rx events */
965 if (priv->current_status & STATUS_TXOK)
966 priv->write_reg(priv, &priv->regs->status,
967 priv->current_status & ~STATUS_TXOK);
968
969 if (priv->current_status & STATUS_RXOK)
970 priv->write_reg(priv, &priv->regs->status,
971 priv->current_status & ~STATUS_RXOK);
972
973 /* handle state changes */
974 if ((priv->current_status & STATUS_EWARN) &&
975 (!(priv->last_status & STATUS_EWARN))) {
976 netdev_dbg(dev, "entered error warning state\n");
977 work_done += c_can_handle_state_change(dev,
978 C_CAN_ERROR_WARNING);
979 }
980 if ((priv->current_status & STATUS_EPASS) &&
981 (!(priv->last_status & STATUS_EPASS))) {
982 netdev_dbg(dev, "entered error passive state\n");
983 work_done += c_can_handle_state_change(dev,
984 C_CAN_ERROR_PASSIVE);
985 }
986 if ((priv->current_status & STATUS_BOFF) &&
987 (!(priv->last_status & STATUS_BOFF))) {
988 netdev_dbg(dev, "entered bus off state\n");
989 work_done += c_can_handle_state_change(dev,
990 C_CAN_BUS_OFF);
991 }
992
993 /* handle bus recovery events */
994 if ((!(priv->current_status & STATUS_BOFF)) &&
995 (priv->last_status & STATUS_BOFF)) {
996 netdev_dbg(dev, "left bus off state\n");
997 priv->can.state = CAN_STATE_ERROR_ACTIVE;
998 }
999 if ((!(priv->current_status & STATUS_EPASS)) &&
1000 (priv->last_status & STATUS_EPASS)) {
1001 netdev_dbg(dev, "left error passive state\n");
1002 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1003 }
1004
1005 priv->last_status = priv->current_status;
1006
1007 /* handle lec errors on the bus */
1008 lec_type = c_can_has_and_handle_berr(priv);
1009 if (lec_type)
1010 work_done += c_can_handle_bus_err(dev, lec_type);
1011 } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
1012 (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
1013 /* handle events corresponding to receive message objects */
1014 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1015 } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
1016 (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
1017 /* handle events corresponding to transmit message objects */
1018 c_can_do_tx(dev);
1019 }
1020
1021end:
1022 if (work_done < quota) {
1023 napi_complete(napi);
1024 /* enable all IRQs */
1025 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
1026 }
1027
1028 return work_done;
1029}
1030
1031static irqreturn_t c_can_isr(int irq, void *dev_id)
1032{
1033 u16 irqstatus;
1034 struct net_device *dev = (struct net_device *)dev_id;
1035 struct c_can_priv *priv = netdev_priv(dev);
1036
1037 irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
1038 if (!irqstatus)
1039 return IRQ_NONE;
1040
1041 /* disable all interrupts and schedule the NAPI */
1042 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1043 napi_schedule(&priv->napi);
1044
1045 return IRQ_HANDLED;
1046}
1047
1048static int c_can_open(struct net_device *dev)
1049{
1050 int err;
1051 struct c_can_priv *priv = netdev_priv(dev);
1052
1053 /* open the can device */
1054 err = open_candev(dev);
1055 if (err) {
1056 netdev_err(dev, "failed to open can device\n");
1057 return err;
1058 }
1059
1060 /* register interrupt handler */
1061 err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
1062 dev);
1063 if (err < 0) {
1064 netdev_err(dev, "failed to request interrupt\n");
1065 goto exit_irq_fail;
1066 }
1067
1068 /* start the c_can controller */
1069 c_can_start(dev);
1070
1071 napi_enable(&priv->napi);
1072 netif_start_queue(dev);
1073
1074 return 0;
1075
1076exit_irq_fail:
1077 close_candev(dev);
1078 return err;
1079}
1080
1081static int c_can_close(struct net_device *dev)
1082{
1083 struct c_can_priv *priv = netdev_priv(dev);
1084
1085 netif_stop_queue(dev);
1086 napi_disable(&priv->napi);
1087 c_can_stop(dev);
1088 free_irq(dev->irq, dev);
1089 close_candev(dev);
1090
1091 return 0;
1092}
1093
1094struct net_device *alloc_c_can_dev(void)
1095{
1096 struct net_device *dev;
1097 struct c_can_priv *priv;
1098
1099 dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
1100 if (!dev)
1101 return NULL;
1102
1103 priv = netdev_priv(dev);
1104 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1105
1106 priv->dev = dev;
1107 priv->can.bittiming_const = &c_can_bittiming_const;
1108 priv->can.do_set_mode = c_can_set_mode;
1109 priv->can.do_get_berr_counter = c_can_get_berr_counter;
1110 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1111 CAN_CTRLMODE_LISTENONLY |
1112 CAN_CTRLMODE_BERR_REPORTING;
1113
1114 return dev;
1115}
1116EXPORT_SYMBOL_GPL(alloc_c_can_dev);
1117
1118void free_c_can_dev(struct net_device *dev)
1119{
1120 free_candev(dev);
1121}
1122EXPORT_SYMBOL_GPL(free_c_can_dev);
1123
1124static const struct net_device_ops c_can_netdev_ops = {
1125 .ndo_open = c_can_open,
1126 .ndo_stop = c_can_close,
1127 .ndo_start_xmit = c_can_start_xmit,
1128};
1129
1130int register_c_can_dev(struct net_device *dev)
1131{
1132 dev->flags |= IFF_ECHO; /* we support local echo */
1133 dev->netdev_ops = &c_can_netdev_ops;
1134
1135 return register_candev(dev);
1136}
1137EXPORT_SYMBOL_GPL(register_c_can_dev);
1138
1139void unregister_c_can_dev(struct net_device *dev)
1140{
1141 struct c_can_priv *priv = netdev_priv(dev);
1142
1143 /* disable all interrupts */
1144 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1145
1146 unregister_candev(dev);
1147}
1148EXPORT_SYMBOL_GPL(unregister_c_can_dev);
1149
1150MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
1151MODULE_LICENSE("GPL v2");
1152MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644
index 00000000000..9b7fbef3d09
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.h
@@ -0,0 +1,86 @@
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
13 * Bosch C_CAN user manual can be obtained from:
14 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
15 * users_manual_c_can.pdf
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#ifndef C_CAN_H
23#define C_CAN_H
24
25/* c_can IF registers */
26struct c_can_if_regs {
27 u16 com_req;
28 u16 com_mask;
29 u16 mask1;
30 u16 mask2;
31 u16 arb1;
32 u16 arb2;
33 u16 msg_cntrl;
34 u16 data[4];
35 u16 _reserved[13];
36};
37
38/* c_can hardware registers */
39struct c_can_regs {
40 u16 control;
41 u16 status;
42 u16 err_cnt;
43 u16 btr;
44 u16 interrupt;
45 u16 test;
46 u16 brp_ext;
47 u16 _reserved1;
48 struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
49 u16 _reserved2[8];
50 u16 txrqst1;
51 u16 txrqst2;
52 u16 _reserved3[6];
53 u16 newdat1;
54 u16 newdat2;
55 u16 _reserved4[6];
56 u16 intpnd1;
57 u16 intpnd2;
58 u16 _reserved5[6];
59 u16 msgval1;
60 u16 msgval2;
61 u16 _reserved6[6];
62};
63
64/* c_can private data structure */
65struct c_can_priv {
66 struct can_priv can; /* must be the first member */
67 struct napi_struct napi;
68 struct net_device *dev;
69 int tx_object;
70 int current_status;
71 int last_status;
72 u16 (*read_reg) (struct c_can_priv *priv, void *reg);
73 void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
74 struct c_can_regs __iomem *regs;
75 unsigned long irq_flags; /* for request_irq() */
76 unsigned int tx_next;
77 unsigned int tx_echo;
78 void *priv; /* for board-specific data */
79};
80
81struct net_device *alloc_c_can_dev(void);
82void free_c_can_dev(struct net_device *dev);
83int register_c_can_dev(struct net_device *dev);
84void unregister_c_can_dev(struct net_device *dev);
85
86#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644
index 00000000000..cc90824f2c9
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -0,0 +1,216 @@
1/*
2 * Platform CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
13 * Bosch C_CAN user manual can be obtained from:
14 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
15 * users_manual_c_can.pdf
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#include <linux/kernel.h>
23#include <linux/version.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/io.h>
32
33#include <linux/platform_device.h>
34#include <linux/clk.h>
35
36#include <linux/can/dev.h>
37
38#include "c_can.h"
39
40/*
41 * 16-bit c_can registers can be arranged differently in the memory
42 * architecture of different implementations. For example: 16-bit
43 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
44 * Handle the same by providing a common read/write interface.
45 */
46static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
47 void *reg)
48{
49 return readw(reg);
50}
51
52static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
53 void *reg, u16 val)
54{
55 writew(val, reg);
56}
57
58static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
59 void *reg)
60{
61 return readw(reg + (long)reg - (long)priv->regs);
62}
63
64static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
65 void *reg, u16 val)
66{
67 writew(val, reg + (long)reg - (long)priv->regs);
68}
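/*
 * Address math in the two 32-bit helpers above: for a register at byte
 * offset n within struct c_can_regs, reg == priv->regs + n, so
 * reg + ((long)reg - (long)priv->regs) == priv->regs + 2 * n, which is
 * where the register lives when each 16-bit word occupies a 32-bit slot.
 */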
69
70static int __devinit c_can_plat_probe(struct platform_device *pdev)
71{
72 int ret;
73 void __iomem *addr;
74 struct net_device *dev;
75 struct c_can_priv *priv;
76 struct resource *mem;
77 int irq;
78#ifdef CONFIG_HAVE_CLK
79 struct clk *clk;
80
81 /* get the appropriate clk */
82 clk = clk_get(&pdev->dev, NULL);
83 if (IS_ERR(clk)) {
84 dev_err(&pdev->dev, "no clock defined\n");
85 ret = -ENODEV;
86 goto exit;
87 }
88#endif
89
90 /* get the platform data */
91 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
92 irq = platform_get_irq(pdev, 0);
93 if (!mem || irq <= 0) {
94 ret = -ENODEV;
95 goto exit_free_clk;
96 }
97
98 if (!request_mem_region(mem->start, resource_size(mem),
99 KBUILD_MODNAME)) {
100 dev_err(&pdev->dev, "resource unavailable\n");
101 ret = -ENODEV;
102 goto exit_free_clk;
103 }
104
105 addr = ioremap(mem->start, resource_size(mem));
106 if (!addr) {
107 dev_err(&pdev->dev, "failed to map can port\n");
108 ret = -ENOMEM;
109 goto exit_release_mem;
110 }
111
112 /* allocate the c_can device */
113 dev = alloc_c_can_dev();
114 if (!dev) {
115 ret = -ENOMEM;
116 goto exit_iounmap;
117 }
118
119 priv = netdev_priv(dev);
120
121 dev->irq = irq;
122 priv->regs = addr;
123#ifdef CONFIG_HAVE_CLK
124 priv->can.clock.freq = clk_get_rate(clk);
125 priv->priv = clk;
126#endif
127
128 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
129 case IORESOURCE_MEM_32BIT:
130 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
131 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
132 break;
133 case IORESOURCE_MEM_16BIT:
134 default:
135 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
136 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
137 break;
138 }
139
140 platform_set_drvdata(pdev, dev);
141 SET_NETDEV_DEV(dev, &pdev->dev);
142
143 ret = register_c_can_dev(dev);
144 if (ret) {
145 dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
146 KBUILD_MODNAME, ret);
147 goto exit_free_device;
148 }
149
150 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
151 KBUILD_MODNAME, priv->regs, dev->irq);
152 return 0;
153
154exit_free_device:
155 platform_set_drvdata(pdev, NULL);
156 free_c_can_dev(dev);
157exit_iounmap:
158 iounmap(addr);
159exit_release_mem:
160 release_mem_region(mem->start, resource_size(mem));
161exit_free_clk:
162#ifdef CONFIG_HAVE_CLK
163 clk_put(clk);
164exit:
165#endif
166 dev_err(&pdev->dev, "probe failed\n");
167
168 return ret;
169}
170
171static int __devexit c_can_plat_remove(struct platform_device *pdev)
172{
173 struct net_device *dev = platform_get_drvdata(pdev);
174 struct c_can_priv *priv = netdev_priv(dev);
175 struct resource *mem;
176
177 unregister_c_can_dev(dev);
178 platform_set_drvdata(pdev, NULL);
179
180 free_c_can_dev(dev);
181 iounmap(priv->regs);
182
183 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
184 release_mem_region(mem->start, resource_size(mem));
185
186#ifdef CONFIG_HAVE_CLK
187 clk_put(priv->priv);
188#endif
189
190 return 0;
191}
192
193static struct platform_driver c_can_plat_driver = {
194 .driver = {
195 .name = KBUILD_MODNAME,
196 .owner = THIS_MODULE,
197 },
198 .probe = c_can_plat_probe,
199 .remove = __devexit_p(c_can_plat_remove),
200};
201
202static int __init c_can_plat_init(void)
203{
204 return platform_driver_register(&c_can_plat_driver);
205}
206module_init(c_can_plat_init);
207
208static void __exit c_can_plat_exit(void)
209{
210 platform_driver_unregister(&c_can_plat_driver);
211}
212module_exit(c_can_plat_exit);
213
214MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
215MODULE_LICENSE("GPL v2");
216MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index b9a6d7a5a73..102b16c6cc9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>

#include <linux/netdevice.h>
#include <linux/can.h>
@@ -1618,7 +1619,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
 return count;
}

-static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
 ican3_sysfs_set_term);

static struct attribute *ican3_sysfs_attrs[] = {
@@ -1643,7 +1644,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
 struct device *dev;
 int ret;

- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
 if (!pdata)
 return -ENXIO;

diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534aee45..7513c4523ac 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
 goto open_unlock;
 }

- priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ priv->wq = create_freezable_workqueue("mcp251x_wq");
 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);

diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d398e25..d38706958af 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
config CAN_MSCAN
- depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
+ depends on CAN_DEV && (PPC || M68K)
 tristate "Support for Freescale MSCAN based chips"
 ---help---
 The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 312b9c8f4f3..c0a1bc5b143 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -247,10 +247,9 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
}
#endif /* CONFIG_PPC_MPC512x */

-static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
{
- struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
+ struct mpc5xxx_can_data *data;
 struct device_node *np = ofdev->dev.of_node;
 struct net_device *dev;
 struct mscan_priv *priv;
@@ -259,6 +258,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev,
 int irq, mscan_clksrc = 0;
 int err = -ENOMEM;

+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+ data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
+
 base = of_iomap(np, 0);
 if (!base) {
 dev_err(&ofdev->dev, "couldn't ioremap\n");
@@ -391,7 +394,7 @@ static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
 {},
};

-static struct of_platform_driver mpc5xxx_can_driver = {
+static struct platform_driver mpc5xxx_can_driver = {
 .driver = {
 .name = "mpc5xxx_can",
 .owner = THIS_MODULE,
@@ -407,13 +410,13 @@ static struct of_platform_driver mpc5xxx_can_driver = {

static int __init mpc5xxx_can_init(void)
{
- return of_register_platform_driver(&mpc5xxx_can_driver);
+ return platform_driver_register(&mpc5xxx_can_driver);
}
module_init(mpc5xxx_can_init);

static void __exit mpc5xxx_can_exit(void)
{
- return of_unregister_platform_driver(&mpc5xxx_can_driver);
+ platform_driver_unregister(&mpc5xxx_can_driver);
};
module_exit(mpc5xxx_can_exit);

diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e9726824..e54712b22c2 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@ struct pch_can_priv {
185 185
186static struct can_bittiming_const pch_can_bittiming_const = { 186static struct can_bittiming_const pch_can_bittiming_const = {
187 .name = KBUILD_MODNAME, 187 .name = KBUILD_MODNAME,
188 .tseg1_min = 1, 188 .tseg1_min = 2,
189 .tseg1_max = 16, 189 .tseg1_max = 16,
190 .tseg2_min = 1, 190 .tseg2_min = 1,
191 .tseg2_max = 8, 191 .tseg2_max = 8,
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
 struct pch_can_priv *priv = netdev_priv(ndev);

 unregister_candev(priv->ndev);
- pci_iounmap(pdev, priv->regs);
 if (priv->use_msi)
 pci_disable_msi(priv->dev);
 pci_release_regions(pdev);
 pci_disable_device(pdev);
 pci_set_drvdata(pdev, NULL);
 pch_can_reset(priv);
+ pci_iounmap(pdev, priv->regs);
 free_candev(priv->ndev);
}

@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
 priv->use_msi = 0;
 } else {
 netdev_err(ndev, "PCH CAN opened with MSI\n");
+ pci_set_master(pdev);
 priv->use_msi = 1;
 }

diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 09c3e9db931..9793df6e345 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -87,8 +87,7 @@ static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
87 return 0; 87 return 0;
88} 88}
89 89
90static int __devinit sja1000_ofp_probe(struct platform_device *ofdev, 90static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
91 const struct of_device_id *id)
92{ 91{
93 struct device_node *np = ofdev->dev.of_node; 92 struct device_node *np = ofdev->dev.of_node;
94 struct net_device *dev; 93 struct net_device *dev;
@@ -210,7 +209,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
210}; 209};
211MODULE_DEVICE_TABLE(of, sja1000_ofp_table); 210MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
212 211
213static struct of_platform_driver sja1000_ofp_driver = { 212static struct platform_driver sja1000_ofp_driver = {
214 .driver = { 213 .driver = {
215 .owner = THIS_MODULE, 214 .owner = THIS_MODULE,
216 .name = DRV_NAME, 215 .name = DRV_NAME,
@@ -222,12 +221,12 @@ static struct of_platform_driver sja1000_ofp_driver = {
222 221
223static int __init sja1000_ofp_init(void) 222static int __init sja1000_ofp_init(void)
224{ 223{
225 return of_register_platform_driver(&sja1000_ofp_driver); 224 return platform_driver_register(&sja1000_ofp_driver);
226} 225}
227module_init(sja1000_ofp_init); 226module_init(sja1000_ofp_init);
228 227
229static void __exit sja1000_ofp_exit(void) 228static void __exit sja1000_ofp_exit(void)
230{ 229{
231 return of_unregister_platform_driver(&sja1000_ofp_driver); 230 return platform_driver_unregister(&sja1000_ofp_driver);
232}; 231};
233module_exit(sja1000_ofp_exit); 232module_exit(sja1000_ofp_exit);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 00000000000..5de46a9a77b
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
1config CAN_SOFTING
2 tristate "Softing GmbH CAN generic support"
3 depends on CAN_DEV && HAS_IOMEM
4 ---help---
5 Support for CAN cards from Softing GmbH & some cards
6 from Vector GmbH.
7 Softing GmbH CAN cards come with 1 or 2 physical buses.
8 Those cards typically use Dual Port RAM to communicate
9 with the host CPU. The interface is then identical for PCI
10 and PCMCIA cards. This driver operates on a platform device,
11 which has been created by the softing_cs or softing_pci driver.
12 Warning:
13 The API of the card does not allow fine control per bus, but
14 controls the 2 buses on the card together.
15 As such, some actions (start/stop/busoff recovery) on 1 bus
16 must temporarily bring down the other bus too.
17
18config CAN_SOFTING_CS
19 tristate "Softing GmbH CAN PCMCIA cards"
20 depends on PCMCIA
21 depends on CAN_SOFTING
22 ---help---
23 Support for PCMCIA cards from Softing GmbH & some cards
24 from Vector GmbH.
25 You need firmware for these, which you can get at
26 http://developer.berlios.de/projects/socketcan/
27 This version of the driver is written against
28 firmware version 4.6 (softing-fw-4.6-binaries.tar.gz).
29 In order to use the card as a CAN device, you also need the
30 Softing generic support.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 00000000000..c5e5016c742
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
1
2softing-y := softing_main.o softing_fw.o
3obj-$(CONFIG_CAN_SOFTING) += softing.o
4obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
5
6ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 00000000000..7ec9f4db3d5
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
1/*
2 * softing common interfaces
3 *
4 * by Kurt Van Dijck, 2008-2010
5 */
6
7#include <linux/atomic.h>
8#include <linux/netdevice.h>
9#include <linux/ktime.h>
10#include <linux/mutex.h>
11#include <linux/spinlock.h>
12#include <linux/can.h>
13#include <linux/can/dev.h>
14
15#include "softing_platform.h"
16
17struct softing;
18
19struct softing_priv {
20 struct can_priv can; /* must be the first member! */
21 struct net_device *netdev;
22 struct softing *card;
23 struct {
24 int pending;
25 /* variables which hold the circular buffer */
26 int echo_put;
27 int echo_get;
28 } tx;
29 struct can_bittiming_const btr_const;
30 int index;
31 uint8_t output;
32 uint16_t chip;
33};
34#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev))
35
36struct softing {
37 const struct softing_platform_data *pdat;
38 struct platform_device *pdev;
39 struct net_device *net[2];
40 spinlock_t spin; /* protect this structure & DPRAM access */
41 ktime_t ts_ref;
42 ktime_t ts_overflow; /* timestamp overflow value, in ktime */
43
44 struct {
45 /* indication of firmware status */
46 int up;
47 /* protection of the 'up' variable */
48 struct mutex lock;
49 } fw;
50 struct {
51 int nr;
52 int requested;
53 int svc_count;
54 unsigned int dpram_position;
55 } irq;
56 struct {
57 int pending;
58 int last_bus;
59 /*
60 * remember the bus that last transmitted a message,
61 * so that every netdev queue gets a chance to resume
62 */
63 } tx;
64 __iomem uint8_t *dpram;
65 unsigned long dpram_phys;
66 unsigned long dpram_size;
67 struct {
68 uint16_t fw_version, hw_version, license, serial;
69 uint16_t chip[2];
70 unsigned int freq; /* remote cpu's operating frequency */
71 } id;
72};
73
74extern int softing_default_output(struct net_device *netdev);
75
76extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
77
78extern int softing_chip_poweron(struct softing *card);
79
80extern int softing_bootloader_command(struct softing *card, int16_t cmd,
81 const char *msg);
82
83/* Load firmware after reset */
84extern int softing_load_fw(const char *file, struct softing *card,
85 __iomem uint8_t *virt, unsigned int size, int offset);
86
87/* Load final application firmware after bootloader */
88extern int softing_load_app_fw(const char *file, struct softing *card);
89
90/*
91 * enable or disable irq
92 * only called with fw.lock locked
93 */
94extern int softing_enable_irq(struct softing *card, int enable);
95
96/* start/stop 1 bus on card */
97extern int softing_startstop(struct net_device *netdev, int up);
98
99/* netif_rx() */
100extern int softing_netdev_rx(struct net_device *netdev,
101 const struct can_frame *msg, ktime_t ktime);
102
103/* SOFTING DPRAM mappings */
104#define DPRAM_RX 0x0000
105 #define DPRAM_RX_SIZE 32
106 #define DPRAM_RX_CNT 16
107#define DPRAM_RX_RD 0x0201 /* uint8_t */
108#define DPRAM_RX_WR 0x0205 /* uint8_t */
109#define DPRAM_RX_LOST 0x0207 /* uint8_t */
110
111#define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */
112#define DPRAM_FCT_RESULT 0x0328 /* int16_t */
113#define DPRAM_FCT_HOST 0x032b /* uint16_t */
114
115#define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */
116#define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */
117#define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */
118#define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */
119#define DPRAM_RESET 0x0341 /* uint16_t */
120#define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */
121#define DPRAM_RESET_TIME 0x034d /* uint16_t */
122#define DPRAM_TIME 0x0350 /* uint64_t */
123#define DPRAM_WR_START 0x0358 /* uint8_t */
124#define DPRAM_WR_END 0x0359 /* uint8_t */
125#define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */
126#define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */
127#define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */
128#define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */
129#define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */
130
131#define DPRAM_TX 0x0400 /* uint16_t */
132 #define DPRAM_TX_SIZE 16
133 #define DPRAM_TX_CNT 32
134#define DPRAM_TX_RD 0x0601 /* uint8_t */
135#define DPRAM_TX_WR 0x0605 /* uint8_t */
136
137#define DPRAM_COMMAND 0x07e0 /* uint16_t */
138#define DPRAM_RECEIPT 0x07f0 /* uint16_t */
139#define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */
140#define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */
141
142#define DPRAM_V2_RESET 0x0e00 /* uint8_t */
143#define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */
144
145#define TXMAX (DPRAM_TX_CNT - 1)
146
147/* DPRAM return codes */
148#define RES_NONE 0
149#define RES_OK 1
150#define RES_NOK 2
151#define RES_UNKNOWN 3
152/* DPRAM flags */
153#define CMD_TX 0x01
154#define CMD_ACK 0x02
155#define CMD_XTD 0x04
156#define CMD_RTR 0x08
157#define CMD_ERR 0x10
158#define CMD_BUS2 0x80
159
160/* returned fifo entry bus state masks */
161#define SF_MASK_BUSOFF 0x80
162#define SF_MASK_EPASSIVE 0x60
163
164/* bus states */
165#define STATE_BUSOFF 2
166#define STATE_EPASSIVE 1
167#define STATE_EACTIVE 0
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 00000000000..c11bb4de863
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,360 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23
24#include <pcmcia/cistpl.h>
25#include <pcmcia/ds.h>
26
27#include "softing_platform.h"
28
29static int softingcs_index;
30static spinlock_t softingcs_index_lock;
31
32static int softingcs_reset(struct platform_device *pdev, int v);
33static int softingcs_enable_irq(struct platform_device *pdev, int v);
34
35/*
36 * platform_data descriptions
37 */
38#define MHZ (1000*1000)
39static const struct softing_platform_data softingcs_platform_data[] = {
40{
41 .name = "CANcard",
42 .manf = 0x0168, .prod = 0x001,
43 .generation = 1,
44 .nbus = 2,
45 .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
46 .dpram_size = 0x0800,
47 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
48 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
49 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
50 .reset = softingcs_reset,
51 .enable_irq = softingcs_enable_irq,
52}, {
53 .name = "CANcard-NEC",
54 .manf = 0x0168, .prod = 0x002,
55 .generation = 1,
56 .nbus = 2,
57 .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
58 .dpram_size = 0x0800,
59 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
60 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
61 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
62 .reset = softingcs_reset,
63 .enable_irq = softingcs_enable_irq,
64}, {
65 .name = "CANcard-SJA",
66 .manf = 0x0168, .prod = 0x004,
67 .generation = 1,
68 .nbus = 2,
69 .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
70 .dpram_size = 0x0800,
71 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
72 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
73 .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
74 .reset = softingcs_reset,
75 .enable_irq = softingcs_enable_irq,
76}, {
77 .name = "CANcard-2",
78 .manf = 0x0168, .prod = 0x005,
79 .generation = 2,
80 .nbus = 2,
81 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
82 .dpram_size = 0x1000,
83 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
84 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
85 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
86 .reset = softingcs_reset,
87 .enable_irq = NULL,
88}, {
89 .name = "Vector-CANcard",
90 .manf = 0x0168, .prod = 0x081,
91 .generation = 1,
92 .nbus = 2,
93 .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
94 .dpram_size = 0x0800,
95 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
96 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
97 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
98 .reset = softingcs_reset,
99 .enable_irq = softingcs_enable_irq,
100}, {
101 .name = "Vector-CANcard-SJA",
102 .manf = 0x0168, .prod = 0x084,
103 .generation = 1,
104 .nbus = 2,
105 .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
106 .dpram_size = 0x0800,
107 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
108 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
109 .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
110 .reset = softingcs_reset,
111 .enable_irq = softingcs_enable_irq,
112}, {
113 .name = "Vector-CANcard-2",
114 .manf = 0x0168, .prod = 0x085,
115 .generation = 2,
116 .nbus = 2,
117 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
118 .dpram_size = 0x1000,
119 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
120 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
121 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
122 .reset = softingcs_reset,
123 .enable_irq = NULL,
124}, {
125 .name = "EDICcard-NEC",
126 .manf = 0x0168, .prod = 0x102,
127 .generation = 1,
128 .nbus = 2,
129 .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
130 .dpram_size = 0x0800,
131 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
132 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
133 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
134 .reset = softingcs_reset,
135 .enable_irq = softingcs_enable_irq,
136}, {
137 .name = "EDICcard-2",
138 .manf = 0x0168, .prod = 0x105,
139 .generation = 2,
140 .nbus = 2,
141 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
142 .dpram_size = 0x1000,
143 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
144 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
145 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
146 .reset = softingcs_reset,
147 .enable_irq = NULL,
148}, {
149	0, 0, /* sentinel, terminates the list */
150},
151};
152
153MODULE_FIRMWARE(fw_dir "bcard.bin");
154MODULE_FIRMWARE(fw_dir "ldcard.bin");
155MODULE_FIRMWARE(fw_dir "cancard.bin");
156MODULE_FIRMWARE(fw_dir "cansja.bin");
157
158MODULE_FIRMWARE(fw_dir "bcard2.bin");
159MODULE_FIRMWARE(fw_dir "ldcard2.bin");
160MODULE_FIRMWARE(fw_dir "cancrd2.bin");
161
162static __devinit const struct softing_platform_data
163*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
164{
165 const struct softing_platform_data *lp;
166
167 for (lp = softingcs_platform_data; lp->manf; ++lp) {
168 if ((lp->manf == manf) && (lp->prod == prod))
169 return lp;
170 }
171 return NULL;
172}
173
174/*
175 * platform_data callbacks
176 */
177static int softingcs_reset(struct platform_device *pdev, int v)
178{
179 struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
180
181 dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
182 return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
183}
184
185static int softingcs_enable_irq(struct platform_device *pdev, int v)
186{
187 struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
188
189 dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
190 return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
191}
192
193/*
194 * pcmcia check
195 */
196static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
197 void *priv_data)
198{
199 struct softing_platform_data *pdat = priv_data;
200 struct resource *pres;
201 int memspeed = 0;
202
203 WARN_ON(!pdat);
204 pres = pcmcia->resource[PCMCIA_IOMEM_0];
205 if (resource_size(pres) < 0x1000)
206 return -ERANGE;
207
208 pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
209 if (pdat->generation < 2) {
210 pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
211 memspeed = 3;
212 } else {
213 pres->flags |= WIN_DATA_WIDTH_16;
214 }
215 return pcmcia_request_window(pcmcia, pres, memspeed);
216}
217
218static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
219{
220 struct platform_device *pdev = pcmcia->priv;
221
222	/* free the platform device */
223	platform_device_unregister(pdev);
224	/* release pcmcia resources */
225	pcmcia_disable_device(pcmcia);
226}
227
228/*
229 * platform_device wrapper
230 * pdev->resource has 2 entries: io & irq
231 */
232static void softingcs_pdev_release(struct device *dev)
233{
234 struct platform_device *pdev = to_platform_device(dev);
235 kfree(pdev);
236}
237
238static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
239{
240 int ret;
241 struct platform_device *pdev;
242 const struct softing_platform_data *pdat;
243 struct resource *pres;
244 struct dev {
245 struct platform_device pdev;
246 struct resource res[2];
247 } *dev;
248
249 /* find matching platform_data */
250 pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
251 if (!pdat)
252 return -ENOTTY;
253
254 /* setup pcmcia device */
255 pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
256 CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
257 ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
258 if (ret)
259 goto pcmcia_failed;
260
261 ret = pcmcia_enable_device(pcmcia);
262 if (ret < 0)
263 goto pcmcia_failed;
264
265 pres = pcmcia->resource[PCMCIA_IOMEM_0];
266 if (!pres) {
267 ret = -EBADF;
268 goto pcmcia_bad;
269 }
270
271 /* create softing platform device */
272 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
273 if (!dev) {
274 ret = -ENOMEM;
275 goto mem_failed;
276 }
277 dev->pdev.resource = dev->res;
278 dev->pdev.num_resources = ARRAY_SIZE(dev->res);
279 dev->pdev.dev.release = softingcs_pdev_release;
280
281 pdev = &dev->pdev;
282 pdev->dev.platform_data = (void *)pdat;
283 pdev->dev.parent = &pcmcia->dev;
284 pcmcia->priv = pdev;
285
286 /* platform device resources */
287 pdev->resource[0].flags = IORESOURCE_MEM;
288 pdev->resource[0].start = pres->start;
289 pdev->resource[0].end = pres->end;
290
291 pdev->resource[1].flags = IORESOURCE_IRQ;
292 pdev->resource[1].start = pcmcia->irq;
293 pdev->resource[1].end = pdev->resource[1].start;
294
295 /* platform device setup */
296 spin_lock(&softingcs_index_lock);
297 pdev->id = softingcs_index++;
298 spin_unlock(&softingcs_index_lock);
299 pdev->name = "softing";
300 dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
301 ret = platform_device_register(pdev);
302 if (ret < 0)
303 goto platform_failed;
304
305 dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
306 return 0;
307
308platform_failed:
309 kfree(dev);
310mem_failed:
311pcmcia_bad:
312pcmcia_failed:
313 pcmcia_disable_device(pcmcia);
314 pcmcia->priv = NULL;
315 return ret ?: -ENODEV;
316}
317
318static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
319 /* softing */
320 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
321 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
322 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
323 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
324 /* vector, manufacturer? */
325 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
326 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
327 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
328 /* EDIC */
329 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
330 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
331 PCMCIA_DEVICE_NULL,
332};
333
334MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
335
336static struct pcmcia_driver softingcs_driver = {
337 .owner = THIS_MODULE,
338 .name = "softingcs",
339 .id_table = softingcs_ids,
340 .probe = softingcs_probe,
341 .remove = __devexit_p(softingcs_remove),
342};
343
344static int __init softingcs_start(void)
345{
346 spin_lock_init(&softingcs_index_lock);
347 return pcmcia_register_driver(&softingcs_driver);
348}
349
350static void __exit softingcs_stop(void)
351{
352 pcmcia_unregister_driver(&softingcs_driver);
353}
354
355module_init(softingcs_start);
356module_exit(softingcs_stop);
357
358MODULE_DESCRIPTION("softing CANcard driver"
359 ", links PCMCIA card to softing driver");
360MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 00000000000..b520784fb19
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/firmware.h>
21#include <linux/sched.h>
22#include <asm/div64.h>
23
24#include "softing.h"
25
26/*
27 * low level DPRAM command.
28 * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
29 */
30static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
31 const char *msg)
32{
33 int ret;
34 unsigned long stamp;
35
36 iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
37 iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
38 iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
39 /* be sure to flush this to the card */
40 wmb();
41 stamp = jiffies + 1 * HZ;
42 /* wait for card */
43 do {
44 /* DPRAM_FCT_HOST is _not_ aligned */
45 ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
46 (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
47 /* don't have any cached variables */
48 rmb();
49 if (ret == RES_OK)
50 /* read return-value now */
51 return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
52
53 if ((ret != vector) || time_after(jiffies, stamp))
54 break;
55 /* process context => relax */
56 usleep_range(500, 10000);
57 } while (1);
58
59 ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
60 dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
61 return ret;
62}
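
For readers new to the DPRAM mailbox, the handshake performed above can be summarized as a sketch; the cell names come straight from the code, their offsets live in softing.h:

/*
 * Sketch of the host->card function-call handshake used above:
 *
 *   host                                  card
 *   ----                                  ----
 *   write cmd  -> DPRAM_FCT_PARAM
 *   write vec  -> DPRAM_FCT_HOST          (byte-wise, cell unaligned)
 *                                         executes the function,
 *   poll DPRAM_FCT_HOST                   overwrites vec with RES_OK
 *   read result <- DPRAM_FCT_RESULT
 *
 * After ~1s without a response, a value of RES_NONE maps to
 * -ETIMEDOUT; any other unexpected value maps to -ECANCELED.
 */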
63
64static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
65{
66 int ret;
67
68 ret = _softing_fct_cmd(card, cmd, 0, msg);
69 if (ret > 0) {
70 dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
71 ret = -EIO;
72 }
73 return ret;
74}
75
76int softing_bootloader_command(struct softing *card, int16_t cmd,
77 const char *msg)
78{
79 int ret;
80 unsigned long stamp;
81
82 iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
83 iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
84 /* be sure to flush this to the card */
85 wmb();
86 stamp = jiffies + 3 * HZ;
87 /* wait for card */
88 do {
89 ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
90 /* don't have any cached variables */
91 rmb();
92 if (ret == RES_OK)
93 return 0;
94 if (time_after(jiffies, stamp))
95 break;
96 /* process context => relax */
97 usleep_range(500, 10000);
98 } while (!signal_pending(current));
99
100 ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
101 dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
102 return ret;
103}
104
105static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
106 uint16_t *plen, const uint8_t **pdat)
107{
108 uint16_t checksum[2];
109 const uint8_t *mem;
110 const uint8_t *end;
111
112 /*
113 * firmware records are a binary, unaligned stream composed of:
114 * uint16_t type;
115 * uint32_t addr;
116 * uint16_t len;
117 * uint8_t dat[len];
118 * uint16_t checksum;
119 * all values in little endian.
120 * We could define a struct for this, with __attribute__((packed)),
121 * but would that solve the alignment in _all_ cases (cf. the
122 * struct itself may start at an odd address)?
123 *
124 * I chose to use leXX_to_cpup() since this solves both
125 * endianness & alignment.
126 */
127 mem = *pmem;
128 *ptype = le16_to_cpup((void *)&mem[0]);
129 *paddr = le32_to_cpup((void *)&mem[2]);
130 *plen = le16_to_cpup((void *)&mem[6]);
131 *pdat = &mem[8];
132 /* verify checksum */
133 end = &mem[8 + *plen];
134 checksum[0] = le16_to_cpup((void *)end);
135 for (checksum[1] = 0; mem < end; ++mem)
136 checksum[1] += *mem;
137 if (checksum[0] != checksum[1])
138 return -EINVAL;
139 /* increment */
140 *pmem += 10 + *plen;
141 return 0;
142}
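
As a minimal sketch only, the record that the comment in fw_parse() describes could also be written as a packed struct; the driver deliberately avoids this because the struct itself may land at an odd address:

/* Illustration only, not used by the driver: */
struct sbf_record {
	__le16 type;		/* 0x0000 data, 0x0001 eof,
				 * 0x0003 start address, 0xffff header */
	__le32 addr;
	__le16 len;
	uint8_t dat[];		/* len bytes, followed by a __le16
				 * additive checksum over the
				 * preceding 8 + len bytes */
} __packed;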
143
144int softing_load_fw(const char *file, struct softing *card,
145 __iomem uint8_t *dpram, unsigned int size, int offset)
146{
147 const struct firmware *fw;
148 int ret;
149 const uint8_t *mem, *end, *dat;
150 uint16_t type, len;
151 uint32_t addr;
152 uint8_t *buf = NULL;
153 int buflen = 0;
154 int8_t type_end = 0;
155
156 ret = request_firmware(&fw, file, &card->pdev->dev);
157 if (ret < 0)
158 return ret;
159 dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
160 ", offset %c0x%04x\n",
161 card->pdat->name, file, (unsigned int)fw->size,
162 (offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
163 /* parse the firmware */
164 mem = fw->data;
165 end = &mem[fw->size];
166 /* look for header record */
167 ret = fw_parse(&mem, &type, &addr, &len, &dat);
168 if (ret < 0)
169 goto failed;
170	if (type != 0xffff ||
171	    strncmp("Structured Binary Format, Softing GmbH",
172	            dat, len)) {
173		ret = -EINVAL;
174		goto failed;
175	}
176 /* ok, we had a header */
177 while (mem < end) {
178 ret = fw_parse(&mem, &type, &addr, &len, &dat);
179 if (ret < 0)
180 goto failed;
181 if (type == 3) {
182 /* start address, not used here */
183 continue;
184 } else if (type == 1) {
185 /* eof */
186 type_end = 1;
187 break;
188 } else if (type != 0) {
189 ret = -EINVAL;
190 goto failed;
191 }
192
193	if ((addr + len + offset) > size) {
194		ret = -EINVAL; goto failed; }
195 memcpy_toio(&dpram[addr + offset], dat, len);
196 /* be sure to flush caches from IO space */
197 mb();
198 if (len > buflen) {
199 /* align buflen */
200 buflen = (len + (1024-1)) & ~(1024-1);
201 buf = krealloc(buf, buflen, GFP_KERNEL);
202 if (!buf) {
203 ret = -ENOMEM;
204 goto failed;
205 }
206 }
207 /* verify record data */
208 memcpy_fromio(buf, &dpram[addr + offset], len);
209 if (memcmp(buf, dat, len)) {
210 /* is not ok */
211 dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
212 ret = -EIO;
213 goto failed;
214 }
215 }
216	if (!type_end) {	/* no end record seen */
217		ret = -EINVAL; goto failed;
218	}
219 ret = 0;
220failed:
221 kfree(buf);
222 release_firmware(fw);
223 if (ret < 0)
224 dev_info(&card->pdev->dev, "firmware %s failed\n", file);
225 return ret;
226}
227
228int softing_load_app_fw(const char *file, struct softing *card)
229{
230 const struct firmware *fw;
231 const uint8_t *mem, *end, *dat;
232 int ret, j;
233 uint16_t type, len;
234 uint32_t addr, start_addr = 0;
235 unsigned int sum, rx_sum;
236 int8_t type_end = 0, type_entrypoint = 0;
237
238 ret = request_firmware(&fw, file, &card->pdev->dev);
239 if (ret) {
240 dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
241 file, ret);
242 return ret;
243 }
244 dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
245 file, (unsigned long)fw->size);
246 /* parse the firmware */
247 mem = fw->data;
248 end = &mem[fw->size];
249 /* look for header record */
250 ret = fw_parse(&mem, &type, &addr, &len, &dat);
251 if (ret)
252 goto failed;
253 ret = -EINVAL;
254 if (type != 0xffff) {
255 dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
256 type);
257 goto failed;
258 }
259 if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
260 dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
261 len, dat);
262 goto failed;
263 }
264 /* ok, we had a header */
265 while (mem < end) {
266 ret = fw_parse(&mem, &type, &addr, &len, &dat);
267 if (ret)
268 goto failed;
269
270 if (type == 3) {
271 /* start address */
272 start_addr = addr;
273 type_entrypoint = 1;
274 continue;
275 } else if (type == 1) {
276 /* eof */
277 type_end = 1;
278 break;
279 } else if (type != 0) {
280 dev_alert(&card->pdev->dev,
281 "unknown record type 0x%04x\n", type);
282 ret = -EINVAL;
283 goto failed;
284 }
285
286		/* regular data */
287 for (sum = 0, j = 0; j < len; ++j)
288 sum += dat[j];
289 /* work in 16bit (target) */
290 sum &= 0xffff;
291
292 memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
293 iowrite32(card->pdat->app.offs + card->pdat->app.addr,
294 &card->dpram[DPRAM_COMMAND + 2]);
295 iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
296 iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
297 iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
298 ret = softing_bootloader_command(card, 1, "loading app.");
299 if (ret < 0)
300 goto failed;
301 /* verify checksum */
302 rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
303 if (rx_sum != sum) {
304 dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
305 ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
306 ret = -EIO;
307 goto failed;
308 }
309 }
310	if (!type_end || !type_entrypoint) {
311		ret = -EINVAL; goto failed; }
312 /* start application in card */
313 iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
314 iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
315 ret = softing_bootloader_command(card, 3, "start app.");
316 if (ret < 0)
317 goto failed;
318 ret = 0;
319failed:
320 release_firmware(fw);
321 if (ret < 0)
322 dev_info(&card->pdev->dev, "firmware %s failed\n", file);
323 return ret;
324}
325
326static int softing_reset_chip(struct softing *card)
327{
328 int ret;
329
330 do {
331 /* reset chip */
332 iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
333 iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
334 iowrite8(1, &card->dpram[DPRAM_RESET]);
335 iowrite8(0, &card->dpram[DPRAM_RESET+1]);
336
337 ret = softing_fct_cmd(card, 0, "reset_can");
338 if (!ret)
339 break;
340 if (signal_pending(current))
341 /* don't wait any longer */
342 break;
343 } while (1);
344 card->tx.pending = 0;
345 return ret;
346}
347
348int softing_chip_poweron(struct softing *card)
349{
350 int ret;
351 /* sync */
352 ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
353 if (ret < 0)
354 goto failed;
355
356 ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
357 if (ret < 0)
358 goto failed;
359
360 ret = softing_reset_chip(card);
361 if (ret < 0)
362 goto failed;
363 /* get_serial */
364 ret = softing_fct_cmd(card, 43, "get_serial_number");
365 if (ret < 0)
366 goto failed;
367 card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
368 /* get_version */
369 ret = softing_fct_cmd(card, 12, "get_version");
370 if (ret < 0)
371 goto failed;
372 card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
373 card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
374 card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
375 card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
376 card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
377 return 0;
378failed:
379 return ret;
380}
381
382static void softing_initialize_timestamp(struct softing *card)
383{
384 uint64_t ovf;
385
386 card->ts_ref = ktime_get();
387
388 /* 16MHz is the reference */
389 ovf = 0x100000000ULL * 16;
390 do_div(ovf, card->pdat->freq ?: 16);
391
392 card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
393}
394
395ktime_t softing_raw2ktime(struct softing *card, u32 raw)
396{
397 uint64_t rawl;
398 ktime_t now, real_offset;
399 ktime_t target;
400 ktime_t tmp;
401
402 now = ktime_get();
403 real_offset = ktime_sub(ktime_get_real(), now);
404
405 /* find nsec from card */
406 rawl = raw * 16;
407 do_div(rawl, card->pdat->freq ?: 16);
408 target = ktime_add_us(card->ts_ref, rawl);
409 /* test for overflows */
410 tmp = ktime_add(target, card->ts_overflow);
411	while (unlikely(ktime_to_ns(tmp) < ktime_to_ns(now))) {
412 card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
413 target = tmp;
414 tmp = ktime_add(target, card->ts_overflow);
415 }
416 return ktime_add(target, real_offset);
417}
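
The overflow loop above is the classic extension of a wrapping counter against a monotonic clock. A minimal integer-only sketch of the same idea (units and names are illustrative, not the card's):

static u64 extend_wrapping(u64 *ref, u64 period, u64 now, u32 raw)
{
	u64 target = *ref + raw;

	/* the raw counter must have wrapped at least once more */
	while (target + period < now) {
		*ref += period;
		target += period;
	}
	return target;
}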
418
419static inline int softing_error_reporting(struct net_device *netdev)
420{
421 struct softing_priv *priv = netdev_priv(netdev);
422
423 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
424 ? 1 : 0;
425}
426
427int softing_startstop(struct net_device *dev, int up)
428{
429 int ret;
430 struct softing *card;
431 struct softing_priv *priv;
432 struct net_device *netdev;
433 int bus_bitmask_start;
434 int j, error_reporting;
435 struct can_frame msg;
436 const struct can_bittiming *bt;
437
438 priv = netdev_priv(dev);
439 card = priv->card;
440
441 if (!card->fw.up)
442 return -EIO;
443
444 ret = mutex_lock_interruptible(&card->fw.lock);
445 if (ret)
446 return ret;
447
448 bus_bitmask_start = 0;
449 if (dev && up)
450 /* prepare to start this bus as well */
451 bus_bitmask_start |= (1 << priv->index);
452 /* bring netdevs down */
453 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
454 netdev = card->net[j];
455 if (!netdev)
456 continue;
457 priv = netdev_priv(netdev);
458
459 if (dev != netdev)
460 netif_stop_queue(netdev);
461
462 if (netif_running(netdev)) {
463 if (dev != netdev)
464 bus_bitmask_start |= (1 << j);
465 priv->tx.pending = 0;
466 priv->tx.echo_put = 0;
467 priv->tx.echo_get = 0;
468 /*
469			 * This bus may just have called open_candev(),
470			 * in which case calling close_candev() right
471			 * away looks wasteful. But we may also get here
472			 * from bus-off recovery, where the echo_skb
473			 * queue _needs_ flushing. Just be sure to call
474			 * open_candev() again afterwards.
475 */
476 close_candev(netdev);
477 }
478 priv->can.state = CAN_STATE_STOPPED;
479 }
480 card->tx.pending = 0;
481
482 softing_enable_irq(card, 0);
483 ret = softing_reset_chip(card);
484 if (ret)
485 goto failed;
486 if (!bus_bitmask_start)
487 /* no busses to be brought up */
488 goto card_done;
489
490 if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
491 && (softing_error_reporting(card->net[0])
492 != softing_error_reporting(card->net[1]))) {
493 dev_alert(&card->pdev->dev,
494 "err_reporting flag differs for busses\n");
495 goto invalid;
496 }
497 error_reporting = 0;
498 if (bus_bitmask_start & 1) {
499 netdev = card->net[0];
500 priv = netdev_priv(netdev);
501 error_reporting += softing_error_reporting(netdev);
502 /* init chip 1 */
503 bt = &priv->can.bittiming;
504 iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
505 iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
506 iowrite16(bt->phase_seg1 + bt->prop_seg,
507 &card->dpram[DPRAM_FCT_PARAM + 6]);
508 iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
509 iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
510 &card->dpram[DPRAM_FCT_PARAM + 10]);
511 ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
512 if (ret < 0)
513 goto failed;
514 /* set mode */
515 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
516 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
517 ret = softing_fct_cmd(card, 3, "set_mode[0]");
518 if (ret < 0)
519 goto failed;
520 /* set filter */
521 /* 11bit id & mask */
522 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
523 iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
524 /* 29bit id.lo & mask.lo & id.hi & mask.hi */
525 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
526 iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
527 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
528 iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
529 ret = softing_fct_cmd(card, 7, "set_filter[0]");
530 if (ret < 0)
531 goto failed;
532 /* set output control */
533 iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
534 ret = softing_fct_cmd(card, 5, "set_output[0]");
535 if (ret < 0)
536 goto failed;
537 }
538 if (bus_bitmask_start & 2) {
539 netdev = card->net[1];
540 priv = netdev_priv(netdev);
541 error_reporting += softing_error_reporting(netdev);
542 /* init chip2 */
543 bt = &priv->can.bittiming;
544 iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
545 iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
546 iowrite16(bt->phase_seg1 + bt->prop_seg,
547 &card->dpram[DPRAM_FCT_PARAM + 6]);
548 iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
549 iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
550 &card->dpram[DPRAM_FCT_PARAM + 10]);
551 ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
552 if (ret < 0)
553 goto failed;
554 /* set mode2 */
555 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
556 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
557 ret = softing_fct_cmd(card, 4, "set_mode[1]");
558 if (ret < 0)
559 goto failed;
560 /* set filter2 */
561 /* 11bit id & mask */
562 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
563 iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
564 /* 29bit id.lo & mask.lo & id.hi & mask.hi */
565 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
566 iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
567 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
568 iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
569 ret = softing_fct_cmd(card, 8, "set_filter[1]");
570 if (ret < 0)
571 goto failed;
572 /* set output control2 */
573 iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
574 ret = softing_fct_cmd(card, 6, "set_output[1]");
575 if (ret < 0)
576 goto failed;
577 }
578 /* enable_error_frame */
579 /*
580	 * Error reporting is switched off for the moment since
581	 * reception of error frames is not yet fully verified.
582	 * This should be enabled sooner or later.
583 *
584 if (error_reporting) {
585 ret = softing_fct_cmd(card, 51, "enable_error_frame");
586 if (ret < 0)
587 goto failed;
588 }
589 */
590 /* initialize interface */
591 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
592 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
593 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
594 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
595 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
596 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
597 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
598 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
599 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
600 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
601 ret = softing_fct_cmd(card, 17, "initialize_interface");
602 if (ret < 0)
603 goto failed;
604 /* enable_fifo */
605 ret = softing_fct_cmd(card, 36, "enable_fifo");
606 if (ret < 0)
607 goto failed;
608 /* enable fifo tx ack */
609 ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
610 if (ret < 0)
611 goto failed;
612 /* enable fifo tx ack2 */
613 ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
614 if (ret < 0)
615 goto failed;
616 /* start_chip */
617 ret = softing_fct_cmd(card, 11, "start_chip");
618 if (ret < 0)
619 goto failed;
620 iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
621 iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
622	if (card->pdat->generation >= 2) {
623 iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
624 /* flush the DPRAM caches */
625 wmb();
626 }
627
628 softing_initialize_timestamp(card);
629
630 /*
631	 * do the socketcan notifications/status changes.
632	 * From here on, no errors should occur, or the failed:
633	 * path must be reviewed
634 */
635 memset(&msg, 0, sizeof(msg));
636 msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
637 msg.can_dlc = CAN_ERR_DLC;
638 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
639 if (!(bus_bitmask_start & (1 << j)))
640 continue;
641 netdev = card->net[j];
642 if (!netdev)
643 continue;
644 priv = netdev_priv(netdev);
645 priv->can.state = CAN_STATE_ERROR_ACTIVE;
646 open_candev(netdev);
647 if (dev != netdev) {
648 /* notify other busses on the restart */
649 softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
650 ++priv->can.can_stats.restarts;
651 }
652 netif_wake_queue(netdev);
653 }
654
655 /* enable interrupts */
656 ret = softing_enable_irq(card, 1);
657 if (ret)
658 goto failed;
659card_done:
660 mutex_unlock(&card->fw.lock);
661 return 0;
662invalid:
663 ret = -EINVAL;
664failed:
665 softing_enable_irq(card, 0);
666 softing_reset_chip(card);
667 mutex_unlock(&card->fw.lock);
668 /* bring all other interfaces down */
669 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
670 netdev = card->net[j];
671 if (!netdev)
672 continue;
673 dev_close(netdev);
674 }
675 return ret;
676}
677
678int softing_default_output(struct net_device *netdev)
679{
680 struct softing_priv *priv = netdev_priv(netdev);
681 struct softing *card = priv->card;
682
683 switch (priv->chip) {
684 case 1000:
685 return (card->pdat->generation < 2) ? 0xfb : 0xfa;
686 case 5:
687 return 0x60;
688 default:
689 return 0x40;
690 }
691}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 00000000000..aeea9f9ff6e
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,894 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/version.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24
25#include "softing.h"
26
27#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
28
29/*
30 * test whether a specific CAN netdev
31 * is online (i.e. up and running, not sleeping, not bus-off)
32 */
33static inline int canif_is_active(struct net_device *netdev)
34{
35 struct can_priv *can = netdev_priv(netdev);
36
37 if (!netif_running(netdev))
38 return 0;
39 return (can->state <= CAN_STATE_ERROR_PASSIVE);
40}
41
42/* reset DPRAM */
43static inline void softing_set_reset_dpram(struct softing *card)
44{
45 if (card->pdat->generation >= 2) {
46 spin_lock_bh(&card->spin);
47 iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
48 &card->dpram[DPRAM_V2_RESET]);
49 spin_unlock_bh(&card->spin);
50 }
51}
52
53static inline void softing_clr_reset_dpram(struct softing *card)
54{
55 if (card->pdat->generation >= 2) {
56 spin_lock_bh(&card->spin);
57 iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
58 &card->dpram[DPRAM_V2_RESET]);
59 spin_unlock_bh(&card->spin);
60 }
61}
62
63/* trigger the tx queueing */
64static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
65 struct net_device *dev)
66{
67 struct softing_priv *priv = netdev_priv(dev);
68 struct softing *card = priv->card;
69 int ret;
70 uint8_t *ptr;
71 uint8_t fifo_wr, fifo_rd;
72 struct can_frame *cf = (struct can_frame *)skb->data;
73 uint8_t buf[DPRAM_TX_SIZE];
74
75 if (can_dropped_invalid_skb(dev, skb))
76 return NETDEV_TX_OK;
77
78 spin_lock(&card->spin);
79
80 ret = NETDEV_TX_BUSY;
81 if (!card->fw.up ||
82 (card->tx.pending >= TXMAX) ||
83 (priv->tx.pending >= TX_ECHO_SKB_MAX))
84 goto xmit_done;
85 fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
86 fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
87 if (fifo_wr == fifo_rd)
88 /* fifo full */
89 goto xmit_done;
90 memset(buf, 0, sizeof(buf));
91 ptr = buf;
92 *ptr = CMD_TX;
93 if (cf->can_id & CAN_RTR_FLAG)
94 *ptr |= CMD_RTR;
95 if (cf->can_id & CAN_EFF_FLAG)
96 *ptr |= CMD_XTD;
97 if (priv->index)
98 *ptr |= CMD_BUS2;
99 ++ptr;
100 *ptr++ = cf->can_dlc;
101 *ptr++ = (cf->can_id >> 0);
102 *ptr++ = (cf->can_id >> 8);
103 if (cf->can_id & CAN_EFF_FLAG) {
104 *ptr++ = (cf->can_id >> 16);
105 *ptr++ = (cf->can_id >> 24);
106 } else {
107 /* increment 1, not 2 as you might think */
108 ptr += 1;
109 }
110 if (!(cf->can_id & CAN_RTR_FLAG))
111 memcpy(ptr, &cf->data[0], cf->can_dlc);
112 memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
113 buf, DPRAM_TX_SIZE);
114 if (++fifo_wr >= DPRAM_TX_CNT)
115 fifo_wr = 0;
116 iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
117 card->tx.last_bus = priv->index;
118 ++card->tx.pending;
119 ++priv->tx.pending;
120 can_put_echo_skb(skb, dev, priv->tx.echo_put);
121 ++priv->tx.echo_put;
122 if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
123 priv->tx.echo_put = 0;
124 /* can_put_echo_skb() saves the skb, safe to return TX_OK */
125 ret = NETDEV_TX_OK;
126xmit_done:
127 spin_unlock(&card->spin);
128 if (card->tx.pending >= TXMAX) {
129 int j;
130 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
131 if (card->net[j])
132 netif_stop_queue(card->net[j]);
133 }
134 }
135 if (ret != NETDEV_TX_OK)
136 netif_stop_queue(dev);
137
138 return ret;
139}
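
Put together, the bytes assembled above form the following TX record (offsets inferred from the code; DPRAM_TX_SIZE is 16):

/*
 *   buf[0]     CMD_TX, or'ed with CMD_RTR/CMD_XTD/CMD_BUS2 as needed
 *   buf[1]     dlc
 *   buf[2..3]  CAN id bits 0..15, little endian
 *   buf[4..5]  id bits 16..31 for extended frames; standard frames
 *              skip only buf[4] ("increment 1, not 2")
 *   then       up to 8 data bytes, absent for RTR frames
 *   rest       zero, from the memset() above
 */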
140
141/*
142 * shortcut for skb delivery
143 */
144int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
145 ktime_t ktime)
146{
147 struct sk_buff *skb;
148 struct can_frame *cf;
149
150 skb = alloc_can_skb(netdev, &cf);
151 if (!skb)
152 return -ENOMEM;
153 memcpy(cf, msg, sizeof(*msg));
154 skb->tstamp = ktime;
155 return netif_rx(skb);
156}
157
158/*
159 * softing_handle_1
160 * pop 1 entry from the DPRAM queue, and process
161 */
162static int softing_handle_1(struct softing *card)
163{
164 struct net_device *netdev;
165 struct softing_priv *priv;
166 ktime_t ktime;
167 struct can_frame msg;
168 int cnt = 0, lost_msg;
169 uint8_t fifo_rd, fifo_wr, cmd;
170 uint8_t *ptr;
171 uint32_t tmp_u32;
172 uint8_t buf[DPRAM_RX_SIZE];
173
174 memset(&msg, 0, sizeof(msg));
175 /* test for lost msgs */
176 lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
177 if (lost_msg) {
178 int j;
179 /* reset condition */
180 iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
181 /* prepare msg */
182 msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
183 msg.can_dlc = CAN_ERR_DLC;
184 msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
185 /*
186		 * service all busses; we don't know to which one it applied,
187		 * but only service busses that are online
188 */
189 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
190 netdev = card->net[j];
191 if (!netdev)
192 continue;
193 if (!canif_is_active(netdev))
194 /* a dead bus has no overflows */
195 continue;
196 ++netdev->stats.rx_over_errors;
197 softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
198 }
199 /* prepare for other use */
200 memset(&msg, 0, sizeof(msg));
201 ++cnt;
202 }
203
204 fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
205 fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
206
207 if (++fifo_rd >= DPRAM_RX_CNT)
208 fifo_rd = 0;
209 if (fifo_wr == fifo_rd)
210 return cnt;
211
212 memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
213 DPRAM_RX_SIZE);
214 mb();
215 /* trigger dual port RAM */
216 iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
217
218 ptr = buf;
219 cmd = *ptr++;
220 if (cmd == 0xff)
221		/* not quite useful; the card has probably gone out of sync */
222 return 0;
223 netdev = card->net[0];
224 if (cmd & CMD_BUS2)
225 netdev = card->net[1];
226 priv = netdev_priv(netdev);
227
228 if (cmd & CMD_ERR) {
229 uint8_t can_state, state;
230
231 state = *ptr++;
232
233 msg.can_id = CAN_ERR_FLAG;
234 msg.can_dlc = CAN_ERR_DLC;
235
236 if (state & SF_MASK_BUSOFF) {
237 can_state = CAN_STATE_BUS_OFF;
238 msg.can_id |= CAN_ERR_BUSOFF;
239 state = STATE_BUSOFF;
240 } else if (state & SF_MASK_EPASSIVE) {
241 can_state = CAN_STATE_ERROR_PASSIVE;
242 msg.can_id |= CAN_ERR_CRTL;
243 msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
244 state = STATE_EPASSIVE;
245 } else {
246 can_state = CAN_STATE_ERROR_ACTIVE;
247 msg.can_id |= CAN_ERR_CRTL;
248 state = STATE_EACTIVE;
249 }
250 /* update DPRAM */
251 iowrite8(state, &card->dpram[priv->index ?
252 DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
253 /* timestamp */
254 tmp_u32 = le32_to_cpup((void *)ptr);
255 ptr += 4;
256 ktime = softing_raw2ktime(card, tmp_u32);
257
258 ++netdev->stats.rx_errors;
259 /* update internal status */
260 if (can_state != priv->can.state) {
261 priv->can.state = can_state;
262 if (can_state == CAN_STATE_ERROR_PASSIVE)
263 ++priv->can.can_stats.error_passive;
264 else if (can_state == CAN_STATE_BUS_OFF) {
265 /* this calls can_close_cleanup() */
266 can_bus_off(netdev);
267 netif_stop_queue(netdev);
268 }
269 /* trigger socketcan */
270 softing_netdev_rx(netdev, &msg, ktime);
271 }
272
273 } else {
274 if (cmd & CMD_RTR)
275 msg.can_id |= CAN_RTR_FLAG;
276 msg.can_dlc = get_can_dlc(*ptr++);
277 if (cmd & CMD_XTD) {
278 msg.can_id |= CAN_EFF_FLAG;
279 msg.can_id |= le32_to_cpup((void *)ptr);
280 ptr += 4;
281 } else {
282 msg.can_id |= le16_to_cpup((void *)ptr);
283 ptr += 2;
284 }
285 /* timestamp */
286 tmp_u32 = le32_to_cpup((void *)ptr);
287 ptr += 4;
288 ktime = softing_raw2ktime(card, tmp_u32);
289 if (!(msg.can_id & CAN_RTR_FLAG))
290 memcpy(&msg.data[0], ptr, 8);
291 ptr += 8;
292 /* update socket */
293 if (cmd & CMD_ACK) {
294 /* acknowledge, was tx msg */
295 struct sk_buff *skb;
296 skb = priv->can.echo_skb[priv->tx.echo_get];
297 if (skb)
298 skb->tstamp = ktime;
299 can_get_echo_skb(netdev, priv->tx.echo_get);
300 ++priv->tx.echo_get;
301 if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
302 priv->tx.echo_get = 0;
303 if (priv->tx.pending)
304 --priv->tx.pending;
305 if (card->tx.pending)
306 --card->tx.pending;
307 ++netdev->stats.tx_packets;
308 if (!(msg.can_id & CAN_RTR_FLAG))
309 netdev->stats.tx_bytes += msg.can_dlc;
310 } else {
311 int ret;
312
313 ret = softing_netdev_rx(netdev, &msg, ktime);
314 if (ret == NET_RX_SUCCESS) {
315 ++netdev->stats.rx_packets;
316 if (!(msg.can_id & CAN_RTR_FLAG))
317 netdev->stats.rx_bytes += msg.can_dlc;
318 } else {
319 ++netdev->stats.rx_dropped;
320 }
321 }
322 }
323 ++cnt;
324 return cnt;
325}
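
The matching RX record decoded above looks like this (again inferred from the code, not a documented structure):

/*
 *   buf[0]     cmd; 0xff marks an invalid/empty entry
 * error records (cmd & CMD_ERR):
 *   buf[1]     bus state, tested against SF_MASK_BUSOFF/EPASSIVE
 *   buf[2..5]  raw 32-bit little-endian timestamp
 * data records:
 *   buf[1]     dlc
 *   buf[2..3]  11-bit id (buf[2..5] for extended frames), LE
 *   next 4     raw 32-bit little-endian timestamp
 *   next 8     data slot; copied to the frame only for non-RTR
 */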
326
327/*
328 * real interrupt handler
329 */
330static irqreturn_t softing_irq_thread(int irq, void *dev_id)
331{
332 struct softing *card = (struct softing *)dev_id;
333 struct net_device *netdev;
334 struct softing_priv *priv;
335 int j, offset, work_done;
336
337 work_done = 0;
338 spin_lock_bh(&card->spin);
339 while (softing_handle_1(card) > 0) {
340 ++card->irq.svc_count;
341 ++work_done;
342 }
343 spin_unlock_bh(&card->spin);
344	/* resume tx queues */
345 offset = card->tx.last_bus;
346 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
347 if (card->tx.pending >= TXMAX)
348 break;
349 netdev = card->net[(j + offset + 1) % card->pdat->nbus];
350 if (!netdev)
351 continue;
352 priv = netdev_priv(netdev);
353 if (!canif_is_active(netdev))
354 /* it makes no sense to wake dead busses */
355 continue;
356 if (priv->tx.pending >= TX_ECHO_SKB_MAX)
357 continue;
358 ++work_done;
359 netif_wake_queue(netdev);
360 }
361 return work_done ? IRQ_HANDLED : IRQ_NONE;
362}
363
364/*
365 * interrupt routines:
366 * schedule the 'real interrupt handler'
367 */
368static irqreturn_t softing_irq_v2(int irq, void *dev_id)
369{
370 struct softing *card = (struct softing *)dev_id;
371 uint8_t ir;
372
373 ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
374 iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
375	return (ir == 1) ? IRQ_WAKE_THREAD : IRQ_NONE;
376}
377
378static irqreturn_t softing_irq_v1(int irq, void *dev_id)
379{
380 struct softing *card = (struct softing *)dev_id;
381 uint8_t ir;
382
383 ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
384 iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
385 return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
386}
387
388/*
389 * netdev/candev inter-operability
390 */
391static int softing_netdev_open(struct net_device *ndev)
392{
393 int ret;
394
395 /* check or determine and set bittime */
396 ret = open_candev(ndev);
397 if (!ret)
398 ret = softing_startstop(ndev, 1);
399 return ret;
400}
401
402static int softing_netdev_stop(struct net_device *ndev)
403{
404 int ret;
405
406 netif_stop_queue(ndev);
407
408 /* softing cycle does close_candev() */
409 ret = softing_startstop(ndev, 0);
410 return ret;
411}
412
413static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
414{
415 int ret;
416
417 switch (mode) {
418 case CAN_MODE_START:
419 /* softing_startstop does close_candev() */
420 ret = softing_startstop(ndev, 1);
421 return ret;
422 case CAN_MODE_STOP:
423 case CAN_MODE_SLEEP:
424 return -EOPNOTSUPP;
425 }
426 return 0;
427}
428
429/*
430 * Softing device management helpers
431 */
432int softing_enable_irq(struct softing *card, int enable)
433{
434 int ret;
435
436 if (!card->irq.nr) {
437 return 0;
438 } else if (card->irq.requested && !enable) {
439 free_irq(card->irq.nr, card);
440 card->irq.requested = 0;
441 } else if (!card->irq.requested && enable) {
442 ret = request_threaded_irq(card->irq.nr,
443 (card->pdat->generation >= 2) ?
444 softing_irq_v2 : softing_irq_v1,
445 softing_irq_thread, IRQF_SHARED,
446 dev_name(&card->pdev->dev), card);
447 if (ret) {
448 dev_alert(&card->pdev->dev,
449 "request_threaded_irq(%u) failed\n",
450 card->irq.nr);
451 return ret;
452 }
453 card->irq.requested = 1;
454 }
455 return 0;
456}
457
458static void softing_card_shutdown(struct softing *card)
459{
460 int fw_up = 0;
461
462 if (mutex_lock_interruptible(&card->fw.lock))
463		/* no way to return -ERESTARTSYS from here; carry on */;
464 fw_up = card->fw.up;
465 card->fw.up = 0;
466
467 if (card->irq.requested && card->irq.nr) {
468 free_irq(card->irq.nr, card);
469 card->irq.requested = 0;
470 }
471 if (fw_up) {
472 if (card->pdat->enable_irq)
473 card->pdat->enable_irq(card->pdev, 0);
474 softing_set_reset_dpram(card);
475 if (card->pdat->reset)
476 card->pdat->reset(card->pdev, 1);
477 }
478 mutex_unlock(&card->fw.lock);
479}
480
481static __devinit int softing_card_boot(struct softing *card)
482{
483 int ret, j;
484 static const uint8_t stream[] = {
485 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
486 unsigned char back[sizeof(stream)];
487
488 if (mutex_lock_interruptible(&card->fw.lock))
489 return -ERESTARTSYS;
490 if (card->fw.up) {
491 mutex_unlock(&card->fw.lock);
492 return 0;
493 }
494 /* reset board */
495 if (card->pdat->enable_irq)
496 card->pdat->enable_irq(card->pdev, 1);
497 /* boot card */
498 softing_set_reset_dpram(card);
499 if (card->pdat->reset)
500 card->pdat->reset(card->pdev, 1);
501 for (j = 0; (j + sizeof(stream)) < card->dpram_size;
502 j += sizeof(stream)) {
503
504 memcpy_toio(&card->dpram[j], stream, sizeof(stream));
505 /* flush IO cache */
506 mb();
507 memcpy_fromio(back, &card->dpram[j], sizeof(stream));
508
509 if (!memcmp(back, stream, sizeof(stream)))
510 continue;
511 /* memory is not equal */
512 dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
513 ret = -EIO;
514 goto failed;
515 }
516 wmb();
517 /* load boot firmware */
518 ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
519 card->dpram_size,
520 card->pdat->boot.offs - card->pdat->boot.addr);
521 if (ret < 0)
522 goto failed;
523 /* load loader firmware */
524 ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
525 card->dpram_size,
526 card->pdat->load.offs - card->pdat->load.addr);
527 if (ret < 0)
528 goto failed;
529
530 if (card->pdat->reset)
531 card->pdat->reset(card->pdev, 0);
532 softing_clr_reset_dpram(card);
533 ret = softing_bootloader_command(card, 0, "card boot");
534 if (ret < 0)
535 goto failed;
536 ret = softing_load_app_fw(card->pdat->app.fw, card);
537 if (ret < 0)
538 goto failed;
539
540 ret = softing_chip_poweron(card);
541 if (ret < 0)
542 goto failed;
543
544 card->fw.up = 1;
545 mutex_unlock(&card->fw.lock);
546 return 0;
547failed:
548 card->fw.up = 0;
549 if (card->pdat->enable_irq)
550 card->pdat->enable_irq(card->pdev, 0);
551 softing_set_reset_dpram(card);
552 if (card->pdat->reset)
553 card->pdat->reset(card->pdev, 1);
554 mutex_unlock(&card->fw.lock);
555 return ret;
556}
557
558/*
559 * netdev sysfs
560 */
561static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
562 char *buf)
563{
564 struct net_device *ndev = to_net_dev(dev);
565 struct softing_priv *priv = netdev2softing(ndev);
566
567 return sprintf(buf, "%i\n", priv->index);
568}
569
570static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
571 char *buf)
572{
573 struct net_device *ndev = to_net_dev(dev);
574 struct softing_priv *priv = netdev2softing(ndev);
575
576 return sprintf(buf, "%i\n", priv->chip);
577}
578
579static ssize_t show_output(struct device *dev, struct device_attribute *attr,
580 char *buf)
581{
582 struct net_device *ndev = to_net_dev(dev);
583 struct softing_priv *priv = netdev2softing(ndev);
584
585 return sprintf(buf, "0x%02x\n", priv->output);
586}
587
588static ssize_t store_output(struct device *dev, struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 struct net_device *ndev = to_net_dev(dev);
592 struct softing_priv *priv = netdev2softing(ndev);
593 struct softing *card = priv->card;
594 unsigned long val;
595 int ret;
596
597 ret = strict_strtoul(buf, 0, &val);
598 if (ret < 0)
599 return ret;
600 val &= 0xFF;
601
602 ret = mutex_lock_interruptible(&card->fw.lock);
603 if (ret)
604 return -ERESTARTSYS;
605 if (netif_running(ndev)) {
606 mutex_unlock(&card->fw.lock);
607 return -EBUSY;
608 }
609 priv->output = val;
610 mutex_unlock(&card->fw.lock);
611 return count;
612}
613
614static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
615static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
616static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
617
618static const struct attribute *const netdev_sysfs_attrs[] = {
619 &dev_attr_channel.attr,
620 &dev_attr_chip.attr,
621 &dev_attr_output.attr,
622 NULL,
623};
624static const struct attribute_group netdev_sysfs_group = {
625 .name = NULL,
626 .attrs = (struct attribute **)netdev_sysfs_attrs,
627};
628
629static const struct net_device_ops softing_netdev_ops = {
630 .ndo_open = softing_netdev_open,
631 .ndo_stop = softing_netdev_stop,
632 .ndo_start_xmit = softing_netdev_start_xmit,
633};
634
635static const struct can_bittiming_const softing_btr_const = {
636 .name = "softing",
637 .tseg1_min = 1,
638 .tseg1_max = 16,
639 .tseg2_min = 1,
640 .tseg2_max = 8,
641 .sjw_max = 4, /* overruled */
642 .brp_min = 1,
643 .brp_max = 32, /* overruled */
644 .brp_inc = 1,
645};
646
647
648static __devinit struct net_device *softing_netdev_create(struct softing *card,
649 uint16_t chip_id)
650{
651 struct net_device *netdev;
652 struct softing_priv *priv;
653
654 netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
655 if (!netdev) {
656 dev_alert(&card->pdev->dev, "alloc_candev failed\n");
657 return NULL;
658 }
659 priv = netdev_priv(netdev);
660 priv->netdev = netdev;
661 priv->card = card;
662 memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
663 priv->btr_const.brp_max = card->pdat->max_brp;
664 priv->btr_const.sjw_max = card->pdat->max_sjw;
665 priv->can.bittiming_const = &priv->btr_const;
666 priv->can.clock.freq = 8000000;
667 priv->chip = chip_id;
668 priv->output = softing_default_output(netdev);
669 SET_NETDEV_DEV(netdev, &card->pdev->dev);
670
671 netdev->flags |= IFF_ECHO;
672 netdev->netdev_ops = &softing_netdev_ops;
673 priv->can.do_set_mode = softing_candev_set_mode;
674 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
675
676 return netdev;
677}
678
679static __devinit int softing_netdev_register(struct net_device *netdev)
680{
681 int ret;
682
683 netdev->sysfs_groups[0] = &netdev_sysfs_group;
684 ret = register_candev(netdev);
685 if (ret) {
686 dev_alert(&netdev->dev, "register failed\n");
687 return ret;
688 }
689 return 0;
690}
691
692static void softing_netdev_cleanup(struct net_device *netdev)
693{
694 unregister_candev(netdev);
695 free_candev(netdev);
696}
697
698/*
699 * sysfs for Platform device
700 */
701#define DEV_ATTR_RO(name, member) \
702static ssize_t show_##name(struct device *dev, \
703 struct device_attribute *attr, char *buf) \
704{ \
705 struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
706 return sprintf(buf, "%u\n", card->member); \
707} \
708static DEVICE_ATTR(name, 0444, show_##name, NULL)
709
710#define DEV_ATTR_RO_STR(name, member) \
711static ssize_t show_##name(struct device *dev, \
712 struct device_attribute *attr, char *buf) \
713{ \
714 struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
715 return sprintf(buf, "%s\n", card->member); \
716} \
717static DEVICE_ATTR(name, 0444, show_##name, NULL)
718
719DEV_ATTR_RO(serial, id.serial);
720DEV_ATTR_RO_STR(firmware, pdat->app.fw);
721DEV_ATTR_RO(firmware_version, id.fw_version);
722DEV_ATTR_RO_STR(hardware, pdat->name);
723DEV_ATTR_RO(hardware_version, id.hw_version);
724DEV_ATTR_RO(license, id.license);
725DEV_ATTR_RO(frequency, id.freq);
726DEV_ATTR_RO(txpending, tx.pending);
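
For reference, the first invocation above, DEV_ATTR_RO(serial, id.serial), expands to roughly:

static ssize_t show_serial(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct softing *card = platform_get_drvdata(to_platform_device(dev));
	return sprintf(buf, "%u\n", card->id.serial);
}
static DEVICE_ATTR(serial, 0444, show_serial, NULL);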
727
728static struct attribute *softing_pdev_attrs[] = {
729 &dev_attr_serial.attr,
730 &dev_attr_firmware.attr,
731 &dev_attr_firmware_version.attr,
732 &dev_attr_hardware.attr,
733 &dev_attr_hardware_version.attr,
734 &dev_attr_license.attr,
735 &dev_attr_frequency.attr,
736 &dev_attr_txpending.attr,
737 NULL,
738};
739
740static const struct attribute_group softing_pdev_group = {
741 .name = NULL,
742 .attrs = softing_pdev_attrs,
743};
744
745/*
746 * platform driver
747 */
748static __devexit int softing_pdev_remove(struct platform_device *pdev)
749{
750 struct softing *card = platform_get_drvdata(pdev);
751 int j;
752
753	/* first, disable the card */
754 softing_card_shutdown(card);
755
756 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
757 if (!card->net[j])
758 continue;
759 softing_netdev_cleanup(card->net[j]);
760 card->net[j] = NULL;
761 }
762 sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
763
764 iounmap(card->dpram);
765 kfree(card);
766 return 0;
767}
768
769static __devinit int softing_pdev_probe(struct platform_device *pdev)
770{
771 const struct softing_platform_data *pdat = pdev->dev.platform_data;
772 struct softing *card;
773 struct net_device *netdev;
774 struct softing_priv *priv;
775 struct resource *pres;
776 int ret;
777 int j;
778
779 if (!pdat) {
780 dev_warn(&pdev->dev, "no platform data\n");
781 return -EINVAL;
782 }
783 if (pdat->nbus > ARRAY_SIZE(card->net)) {
784		dev_warn(&pdev->dev, "unsupported number of busses (%u)\n", pdat->nbus);
785 return -EINVAL;
786 }
787
788 card = kzalloc(sizeof(*card), GFP_KERNEL);
789 if (!card)
790 return -ENOMEM;
791 card->pdat = pdat;
792 card->pdev = pdev;
793 platform_set_drvdata(pdev, card);
794 mutex_init(&card->fw.lock);
795 spin_lock_init(&card->spin);
796
797 ret = -EINVAL;
798 pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
799 if (!pres)
800		goto platform_resource_failed;
801 card->dpram_phys = pres->start;
802	card->dpram_size = resource_size(pres);
803 card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
804 if (!card->dpram) {
805 dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
806 goto ioremap_failed;
807 }
808
809 pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
810 if (pres)
811 card->irq.nr = pres->start;
812
813 /* reset card */
814 ret = softing_card_boot(card);
815 if (ret < 0) {
816 dev_alert(&pdev->dev, "failed to boot\n");
817 goto boot_failed;
818 }
819
820 /* only now, the chip's are known */
821 card->id.freq = card->pdat->freq;
822
823 ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
824 if (ret < 0) {
825 dev_alert(&card->pdev->dev, "sysfs failed\n");
826 goto sysfs_failed;
827 }
828
829 ret = -ENOMEM;
830 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
831 card->net[j] = netdev =
832 softing_netdev_create(card, card->id.chip[j]);
833 if (!netdev) {
834			dev_alert(&pdev->dev, "failed to make can[%i]\n", j);
835 goto netdev_failed;
836 }
837 priv = netdev_priv(card->net[j]);
838 priv->index = j;
839 ret = softing_netdev_register(netdev);
840 if (ret) {
841 free_candev(netdev);
842 card->net[j] = NULL;
843 dev_alert(&card->pdev->dev,
844 "failed to register can[%i]\n", j);
845 goto netdev_failed;
846 }
847 }
848 dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
849 return 0;
850
851netdev_failed:
852 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
853 if (!card->net[j])
854 continue;
855 softing_netdev_cleanup(card->net[j]);
856 }
857 sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
858sysfs_failed:
859 softing_card_shutdown(card);
860boot_failed:
861 iounmap(card->dpram);
862ioremap_failed:
863platform_resource_failed:
864 kfree(card);
865 return ret;
866}
867
868static struct platform_driver softing_driver = {
869 .driver = {
870 .name = "softing",
871 .owner = THIS_MODULE,
872 },
873 .probe = softing_pdev_probe,
874 .remove = __devexit_p(softing_pdev_remove),
875};
876
877MODULE_ALIAS("platform:softing");
878
879static int __init softing_start(void)
880{
881 return platform_driver_register(&softing_driver);
882}
883
884static void __exit softing_stop(void)
885{
886 platform_driver_unregister(&softing_driver);
887}
888
889module_init(softing_start);
890module_exit(softing_stop);
891
892MODULE_DESCRIPTION("Softing DPRAM CAN driver");
893MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
894MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 00000000000..ebbf6981562
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
1#ifndef _SOFTING_DEVICE_H_
2#define _SOFTING_DEVICE_H_
3
4#include <linux/platform_device.h>
5
6
7/* softing firmware directory prefix */
8#define fw_dir "softing-4.6/"
9
10struct softing_platform_data {
11 unsigned int manf;
12 unsigned int prod;
13 /*
14 * generation
15 * 1st with NEC or SJA1000
16 * 8bit, exclusive interrupt, ...
17 * 2nd only SJA1000
18 * 16bit, shared interrupt
19 */
20 int generation;
21 int nbus; /* # busses on device */
22 unsigned int freq; /* operating frequency in Hz */
23 unsigned int max_brp;
24 unsigned int max_sjw;
25 unsigned long dpram_size;
26 const char *name;
27 struct {
28 unsigned long offs;
29 unsigned long addr;
30 const char *fw;
31 } boot, load, app;
32 /*
33 * reset() function
34 * bring pdev in or out of reset, depending on value
35 */
36 int (*reset)(struct platform_device *pdev, int value);
37 int (*enable_irq)(struct platform_device *pdev, int value);
38};
39
40#endif
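
As a hypothetical sketch, a carrier/bus driver other than softing_cs.c could describe its card like this; every value and the example_reset() callback below are made up for illustration:

static int example_reset(struct platform_device *pdev, int value);

static const struct softing_platform_data example_pdat = {
	.name = "example-card",
	.manf = 0x0168, .prod = 0x005,
	.generation = 2,
	.nbus = 2,
	.freq = 24 * 1000 * 1000,	/* Hz */
	.max_brp = 64, .max_sjw = 4,
	.dpram_size = 0x1000,
	.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
	.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
	.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
	.reset = example_reset,
	.enable_irq = NULL,	/* generation 2: shared interrupt */
};

The "softing" platform driver then expects this table as pdev->dev.platform_data, plus one IORESOURCE_MEM window and one IORESOURCE_IRQ, as softingcs_probe() demonstrates.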
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 05a52754f48..dc53c831ea9 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -659,7 +659,7 @@ failed:
 static void unlink_all_urbs(struct esd_usb2 *dev)
 {
 	struct esd_usb2_net_priv *priv;
-	int i;
+	int i, j;
 
 	usb_kill_anchored_urbs(&dev->rx_submitted);
 	for (i = 0; i < dev->net_count; i++) {
@@ -668,8 +668,8 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
 		usb_kill_anchored_urbs(&priv->tx_submitted);
 		atomic_set(&priv->active_tx_jobs, 0);
 
-		for (i = 0; i < MAX_TX_URBS; i++)
-			priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+		for (j = 0; j < MAX_TX_URBS; j++)
+			priv->tx_contexts[j].echo_index = MAX_TX_URBS;
 	}
 }
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a2944566..8cca60e4344 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
 static DEFINE_RWLOCK(cnic_dev_lock);
 static DEFINE_MUTEX(cnic_lock);
 
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+	return rcu_dereference_protected(cnic_ulp_tbl[type],
+					 lockdep_is_held(&cnic_lock));
+}
 
 static int cnic_service_bnx2(void *, void *);
 static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
435 return -EINVAL; 442 return -EINVAL;
436 } 443 }
437 mutex_lock(&cnic_lock); 444 mutex_lock(&cnic_lock);
438 if (cnic_ulp_tbl[ulp_type]) { 445 if (cnic_ulp_tbl_prot(ulp_type)) {
439 pr_err("%s: Type %d has already been registered\n", 446 pr_err("%s: Type %d has already been registered\n",
440 __func__, ulp_type); 447 __func__, ulp_type);
441 mutex_unlock(&cnic_lock); 448 mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
478 return -EINVAL; 485 return -EINVAL;
479 } 486 }
480 mutex_lock(&cnic_lock); 487 mutex_lock(&cnic_lock);
481 ulp_ops = cnic_ulp_tbl[ulp_type]; 488 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
482 if (!ulp_ops) { 489 if (!ulp_ops) {
483 pr_err("%s: Type %d has not been registered\n", 490 pr_err("%s: Type %d has not been registered\n",
484 __func__, ulp_type); 491 __func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
529 return -EINVAL; 536 return -EINVAL;
530 } 537 }
531 mutex_lock(&cnic_lock); 538 mutex_lock(&cnic_lock);
532 if (cnic_ulp_tbl[ulp_type] == NULL) { 539 if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
533 pr_err("%s: Driver with type %d has not been registered\n", 540 pr_err("%s: Driver with type %d has not been registered\n",
534 __func__, ulp_type); 541 __func__, ulp_type);
535 mutex_unlock(&cnic_lock); 542 mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
544 551
545 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); 552 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
546 cp->ulp_handle[ulp_type] = ulp_ctx; 553 cp->ulp_handle[ulp_type] = ulp_ctx;
547 ulp_ops = cnic_ulp_tbl[ulp_type]; 554 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
548 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); 555 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
549 cnic_hold(dev); 556 cnic_hold(dev);
550 557
@@ -699,13 +706,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
699static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 706static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
700{ 707{
701 int i; 708 int i;
702 u32 *page_table = dma->pgtbl; 709 __le32 *page_table = (__le32 *) dma->pgtbl;
703 710
704 for (i = 0; i < dma->num_pages; i++) { 711 for (i = 0; i < dma->num_pages; i++) {
705 /* Each entry needs to be in big endian format. */ 712 /* Each entry needs to be in big endian format. */
706 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 713 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
707 page_table++; 714 page_table++;
708 *page_table = (u32) dma->pg_map_arr[i]; 715 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
709 page_table++; 716 page_table++;
710 } 717 }
711} 718}
@@ -713,13 +720,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
713static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 720static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
714{ 721{
715 int i; 722 int i;
716 u32 *page_table = dma->pgtbl; 723 __le32 *page_table = (__le32 *) dma->pgtbl;
717 724
718 for (i = 0; i < dma->num_pages; i++) { 725 for (i = 0; i < dma->num_pages; i++) {
719 /* Each entry needs to be in little endian format. */ 726 /* Each entry needs to be in little endian format. */
720 *page_table = dma->pg_map_arr[i] & 0xffffffff; 727 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
721 page_table++; 728 page_table++;
722 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 729 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
723 page_table++; 730 page_table++;
724 } 731 }
725} 732}
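
Switching page_table to __le32 and wrapping each store in cpu_to_le32() makes the byte order explicit and sparse-checkable on any host; the two functions then differ only in which 32-bit half of the 64-bit DMA address is written first. A sketch of the little-endian variant, with a hypothetical helper name:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* store a 64-bit bus address as two explicitly little-endian words */
    static void store_addr_le(__le32 *slot, u64 addr)
    {
            slot[0] = cpu_to_le32(addr & 0xffffffff);       /* low word first */
            slot[1] = cpu_to_le32((u32)(addr >> 32));       /* then high word */
    }
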
@@ -2760,6 +2767,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2760 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2767 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2761 int kcqe_cnt; 2768 int kcqe_cnt;
2762 2769
2770 /* status block index must be read before reading other fields */
2771 rmb();
2763 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2772 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2764 2773
2765 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2774 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2779,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2770 barrier(); 2779 barrier();
2771 if (status_idx != *cp->kcq1.status_idx_ptr) { 2780 if (status_idx != *cp->kcq1.status_idx_ptr) {
2772 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2781 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2782 /* status block index must be read first */
2783 rmb();
2773 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2784 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2774 } else 2785 } else
2775 break; 2786 break;
@@ -2888,6 +2899,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2888 u32 last_status = *info->status_idx_ptr; 2899 u32 last_status = *info->status_idx_ptr;
2889 int kcqe_cnt; 2900 int kcqe_cnt;
2890 2901
2902 /* status block index must be read before reading the KCQ */
2903 rmb();
2891 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2904 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2892 2905
2893 service_kcqes(dev, kcqe_cnt); 2906 service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2911,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2898 break; 2911 break;
2899 2912
2900 last_status = *info->status_idx_ptr; 2913 last_status = *info->status_idx_ptr;
2914 /* status block index must be read before reading the KCQ */
2915 rmb();
2901 } 2916 }
2902 return last_status; 2917 return last_status;
2903} 2918}
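
All four rmb() additions enforce the same DMA-coherence rule: the CPU must load the status-block index before it loads any queue entries that index claims are valid, otherwise an out-of-order load (or compiler reordering) can pair a fresh index with stale entries. The consumer-side shape, sketched with hypothetical types:

    #include <linux/types.h>
    #include <asm/system.h>     /* rmb(); <asm/barrier.h> on later kernels */

    struct entry { u32 data; };

    struct queue {
            u32 *status_idx_ptr;    /* updated by the device via DMA */
            u32 sw_cons, size;
            struct entry *ring;
    };

    static void process_entry(struct entry *e) { }

    static u32 service_queue(struct queue *q)
    {
            u32 idx = *q->status_idx_ptr;   /* 1: load the index... */

            rmb();                          /* 2: ...strictly before... */

            while (q->sw_cons != idx)       /* 3: ...the entries it covers */
                    process_entry(&q->ring[q->sw_cons++ % q->size]);
            return idx;
    }
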
@@ -2906,26 +2921,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2906{ 2921{
2907 struct cnic_dev *dev = (struct cnic_dev *) data; 2922 struct cnic_dev *dev = (struct cnic_dev *) data;
2908 struct cnic_local *cp = dev->cnic_priv; 2923 struct cnic_local *cp = dev->cnic_priv;
2909 u32 status_idx; 2924 u32 status_idx, new_status_idx;
2910 2925
2911 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2926 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2912 return; 2927 return;
2913 2928
2914 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2929 while (1) {
2930 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2915 2931
2916 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2932 CNIC_WR16(dev, cp->kcq1.io_addr,
2933 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2917 2934
2918 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 2935 if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
2919 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2936 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2937 status_idx, IGU_INT_ENABLE, 1);
2938 break;
2939 }
2940
2941 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2942
2943 if (new_status_idx != status_idx)
2944 continue;
2920 2945
2921 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2946 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2922 MAX_KCQ_IDX); 2947 MAX_KCQ_IDX);
2923 2948
2924 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2949 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2925 status_idx, IGU_INT_ENABLE, 1); 2950 status_idx, IGU_INT_ENABLE, 1);
2926 } else { 2951
2927 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2952 break;
2928 status_idx, IGU_INT_ENABLE, 1);
2929 } 2953 }
2930} 2954}
2931 2955
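
On E2 parts the bottom half now services kcq1, then kcq2, and only writes the final IGU ack once the status index seen by both passes agrees; if kcq2's pass observes a newer index, work arrived in between and the loop rescans rather than acking early and leaving completions stranded until the next interrupt. The control flow reduced to a sketch (all helpers are stand-ins):

    static unsigned int service(int q)          { return 0; }
    static void advance_producer(int q)         { }
    static void ack_interrupt(unsigned int idx) { }

    static void bh_poll(int has_second_queue)
    {
            unsigned int idx;

            for (;;) {
                    idx = service(0);
                    advance_producer(0);

                    if (!has_second_queue) {
                            ack_interrupt(idx);     /* single-queue path */
                            return;
                    }
                    if (service(1) != idx)
                            continue;       /* index moved under us; rescan */

                    advance_producer(1);
                    ack_interrupt(idx);
                    return;
            }
    }
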
@@ -2953,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
2953 struct cnic_ulp_ops *ulp_ops; 2977 struct cnic_ulp_ops *ulp_ops;
2954 2978
2955 mutex_lock(&cnic_lock); 2979 mutex_lock(&cnic_lock);
2956 ulp_ops = cp->ulp_ops[if_type]; 2980 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2981 lockdep_is_held(&cnic_lock));
2957 if (!ulp_ops) { 2982 if (!ulp_ops) {
2958 mutex_unlock(&cnic_lock); 2983 mutex_unlock(&cnic_lock);
2959 continue; 2984 continue;
@@ -2977,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
2977 struct cnic_ulp_ops *ulp_ops; 3002 struct cnic_ulp_ops *ulp_ops;
2978 3003
2979 mutex_lock(&cnic_lock); 3004 mutex_lock(&cnic_lock);
2980 ulp_ops = cp->ulp_ops[if_type]; 3005 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3006 lockdep_is_held(&cnic_lock));
2981 if (!ulp_ops || !ulp_ops->cnic_start) { 3007 if (!ulp_ops || !ulp_ops->cnic_start) {
2982 mutex_unlock(&cnic_lock); 3008 mutex_unlock(&cnic_lock);
2983 continue; 3009 continue;
@@ -3041,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
3041 struct cnic_ulp_ops *ulp_ops; 3067 struct cnic_ulp_ops *ulp_ops;
3042 3068
3043 mutex_lock(&cnic_lock); 3069 mutex_lock(&cnic_lock);
3044 ulp_ops = cnic_ulp_tbl[i]; 3070 ulp_ops = cnic_ulp_tbl_prot(i);
3045 if (!ulp_ops || !ulp_ops->cnic_init) { 3071 if (!ulp_ops || !ulp_ops->cnic_init) {
3046 mutex_unlock(&cnic_lock); 3072 mutex_unlock(&cnic_lock);
3047 continue; 3073 continue;
@@ -3065,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
3065 struct cnic_ulp_ops *ulp_ops; 3091 struct cnic_ulp_ops *ulp_ops;
3066 3092
3067 mutex_lock(&cnic_lock); 3093 mutex_lock(&cnic_lock);
3068 ulp_ops = cnic_ulp_tbl[i]; 3094 ulp_ops = cnic_ulp_tbl_prot(i);
3069 if (!ulp_ops || !ulp_ops->cnic_exit) { 3095 if (!ulp_ops || !ulp_ops->cnic_exit) {
3070 mutex_unlock(&cnic_lock); 3096 mutex_unlock(&cnic_lock);
3071 continue; 3097 continue;
@@ -3381,17 +3407,14 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3381 struct dst_entry **dst) 3407 struct dst_entry **dst)
3382{ 3408{
3383#if defined(CONFIG_INET) 3409#if defined(CONFIG_INET)
3384 struct flowi fl;
3385 int err;
3386 struct rtable *rt; 3410 struct rtable *rt;
3387 3411
3388 memset(&fl, 0, sizeof(fl)); 3412 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3389 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; 3413 if (!IS_ERR(rt)) {
3390
3391 err = ip_route_output_key(&init_net, &rt, &fl);
3392 if (!err)
3393 *dst = &rt->dst; 3414 *dst = &rt->dst;
3394 return err; 3415 return 0;
3416 }
3417 return PTR_ERR(rt);
3395#else 3418#else
3396 return -ENETUNREACH; 3419 return -ENETUNREACH;
3397#endif 3420#endif
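
The IPv4 lookup moves from ip_route_output_key(), which reported errors through a separate int while filling a struct rtable * out-parameter, to ip_route_output(), which returns the rtable itself with failures encoded as ERR_PTR() values, hence the IS_ERR()/PTR_ERR() pair. The kernel's pointer-or-errno idiom in miniature (the foo names are placeholders):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int key; };

    static struct foo *foo_lookup(int key)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return ERR_PTR(-ENOMEM);  /* errno folded into the pointer */
            f->key = key;
            return f;
    }

    static int foo_get(int key, struct foo **out)
    {
            struct foo *f = foo_lookup(key);

            if (IS_ERR(f))
                    return PTR_ERR(f);      /* recover the errno */
            *out = f;
            return 0;
    }
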
@@ -3401,14 +3424,14 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3401 struct dst_entry **dst) 3424 struct dst_entry **dst)
3402{ 3425{
3403#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 3426#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3404 struct flowi fl; 3427 struct flowi6 fl6;
3405 3428
3406 memset(&fl, 0, sizeof(fl)); 3429 memset(&fl6, 0, sizeof(fl6));
3407 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr); 3430 ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
3408 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL) 3431 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3409 fl.oif = dst_addr->sin6_scope_id; 3432 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3410 3433
3411 *dst = ip6_route_output(&init_net, NULL, &fl); 3434 *dst = ip6_route_output(&init_net, NULL, &fl6);
3412 if (*dst) 3435 if (*dst)
3413 return 0; 3436 return 0;
3414#endif 3437#endif
@@ -4170,6 +4193,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4170 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4193 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4171} 4194}
4172 4195
4196static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
4197{
4198 u32 max_conn;
4199
4200 max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
4201 dev->max_iscsi_conn = max_conn;
4202}
4203
4173static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4204static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4174{ 4205{
4175 struct cnic_local *cp = dev->cnic_priv; 4206 struct cnic_local *cp = dev->cnic_priv;
@@ -4494,6 +4525,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4494 return err; 4525 return err;
4495 } 4526 }
4496 4527
4528 cnic_get_bnx2_iscsi_info(dev);
4529
4497 return 0; 4530 return 0;
4498} 4531}
4499 4532
@@ -4705,129 +4738,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4705 cp->rx_cons = *cp->rx_cons_ptr; 4738 cp->rx_cons = *cp->rx_cons_ptr;
4706} 4739}
4707 4740
4708static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
4709 u32 lower_addr)
4710{
4711 u32 val;
4712 u8 mac[6];
4713
4714 val = CNIC_RD(dev, upper_addr);
4715
4716 mac[0] = (u8) (val >> 8);
4717 mac[1] = (u8) val;
4718
4719 val = CNIC_RD(dev, lower_addr);
4720
4721 mac[2] = (u8) (val >> 24);
4722 mac[3] = (u8) (val >> 16);
4723 mac[4] = (u8) (val >> 8);
4724 mac[5] = (u8) val;
4725
4726 if (is_valid_ether_addr(mac)) {
4727 memcpy(dev->mac_addr, mac, 6);
4728 return 0;
4729 } else {
4730 return -EINVAL;
4731 }
4732}
4733
4734static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4735{
4736 struct cnic_local *cp = dev->cnic_priv;
4737 u32 base, base2, addr, addr1, val;
4738 int port = CNIC_PORT(cp);
4739
4740 dev->max_iscsi_conn = 0;
4741 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
4742 if (base == 0)
4743 return;
4744
4745 base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
4746 MISC_REG_GENERIC_CR_0));
4747 addr = BNX2X_SHMEM_ADDR(base,
4748 dev_info.port_hw_config[port].iscsi_mac_upper);
4749
4750 addr1 = BNX2X_SHMEM_ADDR(base,
4751 dev_info.port_hw_config[port].iscsi_mac_lower);
4752
4753 cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
4754
4755 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
4756 val = CNIC_RD(dev, addr);
4757
4758 if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
4759 u16 val16;
4760
4761 addr = BNX2X_SHMEM_ADDR(base,
4762 drv_lic_key[port].max_iscsi_init_conn);
4763 val16 = CNIC_RD16(dev, addr);
4764
4765 if (val16)
4766 val16 ^= 0x1e1e;
4767 dev->max_iscsi_conn = val16;
4768 }
4769
4770 if (BNX2X_CHIP_IS_E2(cp->chip_id))
4771 dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
4772
4773 if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
4774 int func = CNIC_FUNC(cp);
4775 u32 mf_cfg_addr;
4776
4777 if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
4778 mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
4779 mf_cfg_addr));
4780 else
4781 mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
4782
4783 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4784 /* Must determine if the MF is SD vs SI mode */
4785 addr = BNX2X_SHMEM_ADDR(base,
4786 dev_info.shared_feature_config.config);
4787 val = CNIC_RD(dev, addr);
4788 if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
4789 SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
4790 int rc;
4791
4792 /* MULTI_FUNCTION_SI mode */
4793 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4794 func_ext_config[func].func_cfg);
4795 val = CNIC_RD(dev, addr);
4796 if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
4797 dev->max_iscsi_conn = 0;
4798
4799 if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
4800 dev->max_fcoe_conn = 0;
4801
4802 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4803 func_ext_config[func].
4804 iscsi_mac_addr_upper);
4805 addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4806 func_ext_config[func].
4807 iscsi_mac_addr_lower);
4808 rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
4809 addr1);
4810 if (rc && func > 1)
4811 dev->max_iscsi_conn = 0;
4812
4813 return;
4814 }
4815 }
4816
4817 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4818 func_mf_config[func].e1hov_tag);
4819
4820 val = CNIC_RD(dev, addr);
4821 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4822 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4823 dev->max_fcoe_conn = 0;
4824 dev->max_iscsi_conn = 0;
4825 }
4826 }
4827 if (!is_valid_ether_addr(dev->mac_addr))
4828 dev->max_iscsi_conn = 0;
4829}
4830
4831static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 4741static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4832{ 4742{
4833 struct cnic_local *cp = dev->cnic_priv; 4743 struct cnic_local *cp = dev->cnic_priv;
@@ -4909,8 +4819,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4909 4819
4910 cnic_init_bnx2x_kcq(dev); 4820 cnic_init_bnx2x_kcq(dev);
4911 4821
4912 cnic_get_bnx2x_iscsi_info(dev);
4913
4914 /* Only 1 EQ */ 4822 /* Only 1 EQ */
4915 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 4823 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4916 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4824 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5264,15 +5172,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5264 5172
5265 dev_hold(dev); 5173 dev_hold(dev);
5266 pci_dev_get(pdev); 5174 pci_dev_get(pdev);
5267 if (pdev->device == PCI_DEVICE_ID_NX2_5709 || 5175 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5268 pdev->device == PCI_DEVICE_ID_NX2_5709S) { 5176 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5269 u8 rev; 5177 (pdev->revision < 0x10)) {
5270 5178 pci_dev_put(pdev);
5271 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 5179 goto cnic_err;
5272 if (rev < 0x10) {
5273 pci_dev_put(pdev);
5274 goto cnic_err;
5275 }
5276 } 5180 }
5277 pci_dev_put(pdev); 5181 pci_dev_put(pdev);
5278 5182
@@ -5343,6 +5247,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5343 cdev->pcidev = pdev; 5247 cdev->pcidev = pdev;
5344 cp->chip_id = ethdev->chip_id; 5248 cp->chip_id = ethdev->chip_id;
5345 5249
5250 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5251 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5252 if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
5253 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5254 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5255
5256 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5257
5346 cp->cnic_ops = &cnic_bnx2x_ops; 5258 cp->cnic_ops = &cnic_bnx2x_ops;
5347 cp->start_hw = cnic_start_bnx2x_hw; 5259 cp->start_hw = cnic_start_bnx2x_hw;
5348 cp->stop_hw = cnic_stop_bnx2x_hw; 5260 cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index b328f6c924c..4456260c653 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -220,7 +220,7 @@ struct cnic_local {
220#define ULP_F_INIT 0 220#define ULP_F_INIT 0
221#define ULP_F_START 1 221#define ULP_F_START 1
222#define ULP_F_CALL_PENDING 2 222#define ULP_F_CALL_PENDING 2
223 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; 223 struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
224 224
225 unsigned long cnic_local_flags; 225 unsigned long cnic_local_flags;
226#define CNIC_LCL_FL_KWQ_INIT 0x0 226#define CNIC_LCL_FL_KWQ_INIT 0x0
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 9f44e0ffe00..e01b49ee359 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.2.12" 15#define CNIC_MODULE_VERSION "2.2.13"
16#define CNIC_MODULE_RELDATE "Jan 03, 2011" 16#define CNIC_MODULE_RELDATE "Jan 31, 2011"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
159 u32 drv_state; 159 u32 drv_state;
160#define CNIC_DRV_STATE_REGD 0x00000001 160#define CNIC_DRV_STATE_REGD 0x00000001
161#define CNIC_DRV_STATE_USING_MSIX 0x00000002 161#define CNIC_DRV_STATE_USING_MSIX 0x00000002
162#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
163#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
164#define CNIC_DRV_STATE_NO_FCOE 0x00000010
162 u32 chip_id; 165 u32 chip_id;
163 u32 max_kwqe_pending; 166 u32 max_kwqe_pending;
164 struct pci_dev *pdev; 167 struct pci_dev *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
176 u32 fcoe_init_cid; 179 u32 fcoe_init_cid;
177 u16 iscsi_l2_client_id; 180 u16 iscsi_l2_client_id;
178 u16 iscsi_l2_cid; 181 u16 iscsi_l2_cid;
182 u8 iscsi_mac[ETH_ALEN];
179 183
180 int num_irq; 184 int num_irq;
181 struct cnic_irq irq_arr[MAX_CNIC_VEC]; 185 struct cnic_irq irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index d325e01a53e..537a4b2e202 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -95,6 +95,9 @@
95 Dmitry Pervushin : dpervushin@ru.mvista.com 95 Dmitry Pervushin : dpervushin@ru.mvista.com
96 : PNX010X platform support 96 : PNX010X platform support
97 97
98 Domenico Andreoli : cavokz@gmail.com
99 : QQ2440 platform support
100
98*/ 101*/
99 102
100/* Always include 'config.h' first in case the user wants to turn on 103/* Always include 'config.h' first in case the user wants to turn on
@@ -176,6 +179,10 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
176#elif defined(CONFIG_ARCH_IXDP2X01) 179#elif defined(CONFIG_ARCH_IXDP2X01)
177static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; 180static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
178static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; 181static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
182#elif defined(CONFIG_MACH_QQ2440)
183#include <mach/qq2440.h>
184static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 };
185static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 };
179#elif defined(CONFIG_MACH_MX31ADS) 186#elif defined(CONFIG_MACH_MX31ADS)
180#include <mach/board-mx31ads.h> 187#include <mach/board-mx31ads.h>
181static unsigned int netcard_portlist[] __used __initdata = { 188static unsigned int netcard_portlist[] __used __initdata = {
@@ -521,6 +528,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
521#endif 528#endif
522 lp->force = g_cs89x0_media__force; 529 lp->force = g_cs89x0_media__force;
523#endif 530#endif
531
532#if defined(CONFIG_MACH_QQ2440)
533 lp->force |= FORCE_RJ45 | FORCE_FULL;
534#endif
524 } 535 }
525 536
526 /* Grab the region so we can find another board if autoIRQ fails. */ 537 /* Grab the region so we can find another board if autoIRQ fails. */
@@ -943,10 +954,10 @@ skip_this_frame:
943static void __init reset_chip(struct net_device *dev) 954static void __init reset_chip(struct net_device *dev)
944{ 955{
945#if !defined(CONFIG_MACH_MX31ADS) 956#if !defined(CONFIG_MACH_MX31ADS)
946#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01) 957#if !defined(CS89x0_NONISA_IRQ)
947 struct net_local *lp = netdev_priv(dev); 958 struct net_local *lp = netdev_priv(dev);
948 int ioaddr = dev->base_addr; 959 int ioaddr = dev->base_addr;
949#endif 960#endif /* CS89x0_NONISA_IRQ */
950 int reset_start_time; 961 int reset_start_time;
951 962
952 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); 963 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -954,7 +965,7 @@ static void __init reset_chip(struct net_device *dev)
954 /* wait 30 ms */ 965 /* wait 30 ms */
955 msleep(30); 966 msleep(30);
956 967
957#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01) 968#if !defined(CS89x0_NONISA_IRQ)
958 if (lp->chip_type != CS8900) { 969 if (lp->chip_type != CS8900) {
959 /* Hardware problem requires PNP registers to be reconfigured after a reset */ 970 /* Hardware problem requires PNP registers to be reconfigured after a reset */
960 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT); 971 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
@@ -965,7 +976,7 @@ static void __init reset_chip(struct net_device *dev)
965 outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT); 976 outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
966 outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1); 977 outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
967 } 978 }
968#endif /* IXDP2x01 */ 979#endif /* CS89x0_NONISA_IRQ */
969 980
970 /* Wait until the chip is reset */ 981 /* Wait until the chip is reset */
971 reset_start_time = jiffies; 982 reset_start_time = jiffies;
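
reset_chip() previously spelled out every non-ISA platform by name in its #if guards; keying the conditionals off a single CS89x0_NONISA_IRQ symbol turns a growing board list into a capability test, so a new board such as QQ2440 defines the symbol once where its port/irq tables live. Roughly like the following (a sketch; the actual definition site is in an earlier, unshown part of cs89x0.c):

    /* defined once per non-ISA platform, alongside its port/irq tables */
    #if defined(CONFIG_MACH_IXDP2351) || defined(CONFIG_ARCH_IXDP2X01) || \
        defined(CONFIG_MACH_QQ2440)
    #define CS89x0_NONISA_IRQ
    #endif

    /* later code then tests the capability, not the board list */
    #if !defined(CS89x0_NONISA_IRQ)
            /* ISA-only PNP reconfiguration after reset */
    #endif
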
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 4d538a4e9d5..91089314329 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1983,14 +1983,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1983{ 1983{
1984 struct port_info *pi = netdev_priv(dev); 1984 struct port_info *pi = netdev_priv(dev);
1985 struct adapter *adapter = pi->adapter; 1985 struct adapter *adapter = pi->adapter;
1986 struct qset_params *qsp = &adapter->params.sge.qset[0]; 1986 struct qset_params *qsp;
1987 struct sge_qset *qs = &adapter->sge.qs[0]; 1987 struct sge_qset *qs;
1988 int i;
1988 1989
1989 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) 1990 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1990 return -EINVAL; 1991 return -EINVAL;
1991 1992
1992 qsp->coalesce_usecs = c->rx_coalesce_usecs; 1993 for (i = 0; i < pi->nqsets; i++) {
1993 t3_update_qset_coalesce(qs, qsp); 1994 qsp = &adapter->params.sge.qset[i];
1995 qs = &adapter->sge.qs[i];
1996 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1997 t3_update_qset_coalesce(qs, qsp);
1998 }
1999
1994 return 0; 2000 return 0;
1995} 2001}
1996 2002
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ef02aa68c92..862804f32b6 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
186 dev = NULL; 186 dev = NULL;
187 if (grp) 187 if (grp)
188 dev = vlan_group_get_device(grp, vlan); 188 dev = vlan_group_get_device(grp, vlan);
189 } else 189 } else if (netif_is_bond_slave(dev)) {
190 while (dev->master) 190 while (dev->master)
191 dev = dev->master; 191 dev = dev->master;
192 }
192 return dev; 193 return dev;
193 } 194 }
194 } 195 }
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
967 cxgb_neigh_update((struct neighbour *)ctx); 968 cxgb_neigh_update((struct neighbour *)ctx);
968 break; 969 break;
969 } 970 }
970 case (NETEVENT_PMTU_UPDATE):
971 break;
972 case (NETEVENT_REDIRECT):{ 971 case (NETEVENT_REDIRECT):{
973 struct netevent_redirect *nr = ctx; 972 struct netevent_redirect *nr = ctx;
974 cxgb_redirect(nr->old, nr->new); 973 cxgb_redirect(nr->old, nr->new);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 059c1eec8c3..5352c8a23f4 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
2471 case NETEVENT_NEIGH_UPDATE: 2471 case NETEVENT_NEIGH_UPDATE:
2472 check_neigh_update(data); 2472 check_neigh_update(data);
2473 break; 2473 break;
2474 case NETEVENT_PMTU_UPDATE:
2475 case NETEVENT_REDIRECT: 2474 case NETEVENT_REDIRECT:
2476 default: 2475 default:
2477 break; 2476 break;
@@ -2710,6 +2709,8 @@ static int cxgb_open(struct net_device *dev)
2710 struct port_info *pi = netdev_priv(dev); 2709 struct port_info *pi = netdev_priv(dev);
2711 struct adapter *adapter = pi->adapter; 2710 struct adapter *adapter = pi->adapter;
2712 2711
2712 netif_carrier_off(dev);
2713
2713 if (!(adapter->flags & FULL_INIT_DONE)) { 2714 if (!(adapter->flags & FULL_INIT_DONE)) {
2714 err = cxgb_up(adapter); 2715 err = cxgb_up(adapter);
2715 if (err < 0) 2716 if (err < 0)
@@ -3661,7 +3662,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3661 pi->xact_addr_filt = -1; 3662 pi->xact_addr_filt = -1;
3662 pi->rx_offload = RX_CSO; 3663 pi->rx_offload = RX_CSO;
3663 pi->port_id = i; 3664 pi->port_id = i;
3664 netif_carrier_off(netdev);
3665 netdev->irq = pdev->irq; 3665 netdev->irq = pdev->irq;
3666 3666
3667 netdev->features |= NETIF_F_SG | TSO_FLAGS; 3667 netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index a550d0c706f..eb71b8250b9 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -123,6 +123,7 @@ enum {
123 ULP_MODE_NONE = 0, 123 ULP_MODE_NONE = 0,
124 ULP_MODE_ISCSI = 2, 124 ULP_MODE_ISCSI = 2,
125 ULP_MODE_RDMA = 4, 125 ULP_MODE_RDMA = 4,
126 ULP_MODE_TCPDDP = 5,
126 ULP_MODE_FCOE = 6, 127 ULP_MODE_FCOE = 6,
127}; 128};
128 129
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae2059..6aad64df4dc 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2040{ 2040{
2041 int i; 2041 int i;
2042 2042
2043 BUG_ON(adapter->debugfs_root == NULL); 2043 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2044 2044
2045 /* 2045 /*
2046 * Debugfs support is best effort. 2046 * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2061 */ 2061 */
2062static void cleanup_debugfs(struct adapter *adapter) 2062static void cleanup_debugfs(struct adapter *adapter)
2063{ 2063{
2064 BUG_ON(adapter->debugfs_root == NULL); 2064 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2065 2065
2066 /* 2066 /*
2067 * Unlike our sister routine cleanup_proc(), we don't need to remove 2067 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2489 struct net_device *netdev; 2489 struct net_device *netdev;
2490 2490
2491 /* 2491 /*
2492 * Vet our module parameters.
2493 */
2494 if (msi != MSI_MSIX && msi != MSI_MSI) {
2495 dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
2496 " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
2497 MSI_MSI);
2498 err = -EINVAL;
2499 goto err_out;
2500 }
2501
2502 /*
2503 * Print our driver banner the first time we're called to initialize a 2492 * Print our driver banner the first time we're called to initialize a
2504 * device. 2493 * device.
2505 */ 2494 */
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2711 /* 2700 /*
2712 * Set up our debugfs entries. 2701 * Set up our debugfs entries.
2713 */ 2702 */
2714 if (cxgb4vf_debugfs_root) { 2703 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2715 adapter->debugfs_root = 2704 adapter->debugfs_root =
2716 debugfs_create_dir(pci_name(pdev), 2705 debugfs_create_dir(pci_name(pdev),
2717 cxgb4vf_debugfs_root); 2706 cxgb4vf_debugfs_root);
2718 if (adapter->debugfs_root == NULL) 2707 if (IS_ERR_OR_NULL(adapter->debugfs_root))
2719 dev_warn(&pdev->dev, "could not create debugfs" 2708 dev_warn(&pdev->dev, "could not create debugfs"
2720 " directory"); 2709 " directory");
2721 else 2710 else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2770 */ 2759 */
2771 2760
2772err_free_debugfs: 2761err_free_debugfs:
2773 if (adapter->debugfs_root) { 2762 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2774 cleanup_debugfs(adapter); 2763 cleanup_debugfs(adapter);
2775 debugfs_remove_recursive(adapter->debugfs_root); 2764 debugfs_remove_recursive(adapter->debugfs_root);
2776 } 2765 }
@@ -2802,7 +2791,6 @@ err_release_regions:
2802err_disable_device: 2791err_disable_device:
2803 pci_disable_device(pdev); 2792 pci_disable_device(pdev);
2804 2793
2805err_out:
2806 return err; 2794 return err;
2807} 2795}
2808 2796
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2840 /* 2828 /*
2841 * Tear down our debugfs entries. 2829 * Tear down our debugfs entries.
2842 */ 2830 */
2843 if (adapter->debugfs_root) { 2831 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2844 cleanup_debugfs(adapter); 2832 cleanup_debugfs(adapter);
2845 debugfs_remove_recursive(adapter->debugfs_root); 2833 debugfs_remove_recursive(adapter->debugfs_root);
2846 } 2834 }
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2874} 2862}
2875 2863
2876/* 2864/*
 2865 * "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
2866 * delivery.
2867 */
2868static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
2869{
2870 struct adapter *adapter;
2871 int pidx;
2872
2873 adapter = pci_get_drvdata(pdev);
2874 if (!adapter)
2875 return;
2876
2877 /*
2878 * Disable all Virtual Interfaces. This will shut down the
2879 * delivery of all ingress packets into the chip for these
2880 * Virtual Interfaces.
2881 */
2882 for_each_port(adapter, pidx) {
2883 struct net_device *netdev;
2884 struct port_info *pi;
2885
2886 if (!test_bit(pidx, &adapter->registered_device_map))
2887 continue;
2888
2889 netdev = adapter->port[pidx];
2890 if (!netdev)
2891 continue;
2892
2893 pi = netdev_priv(netdev);
2894 t4vf_enable_vi(adapter, pi->viid, false, false);
2895 }
2896
2897 /*
2898 * Free up all Queues which will prevent further DMA and
2899 * Interrupts allowing various internal pathways to drain.
2900 */
2901 t4vf_free_sge_resources(adapter);
2902}
2903
2904/*
2877 * PCI Device registration data structures. 2905 * PCI Device registration data structures.
2878 */ 2906 */
2879#define CH_DEVICE(devid, idx) \ 2907#define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
2906 .id_table = cxgb4vf_pci_tbl, 2934 .id_table = cxgb4vf_pci_tbl,
2907 .probe = cxgb4vf_pci_probe, 2935 .probe = cxgb4vf_pci_probe,
2908 .remove = __devexit_p(cxgb4vf_pci_remove), 2936 .remove = __devexit_p(cxgb4vf_pci_remove),
2937 .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
2909}; 2938};
2910 2939
2911/* 2940/*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
2915{ 2944{
2916 int ret; 2945 int ret;
2917 2946
2947 /*
2948 * Vet our module parameters.
2949 */
2950 if (msi != MSI_MSIX && msi != MSI_MSI) {
2951 printk(KERN_WARNING KBUILD_MODNAME
2952 ": bad module parameter msi=%d; must be %d"
2953 " (MSI-X or MSI) or %d (MSI)\n",
2954 msi, MSI_MSIX, MSI_MSI);
2955 return -EINVAL;
2956 }
2957
2918 /* Debugfs support is optional, just warn if this fails */ 2958 /* Debugfs support is optional, just warn if this fails */
2919 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); 2959 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
2920 if (!cxgb4vf_debugfs_root) 2960 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2921 printk(KERN_WARNING KBUILD_MODNAME ": could not create" 2961 printk(KERN_WARNING KBUILD_MODNAME ": could not create"
2922 " debugfs entry, continuing\n"); 2962 " debugfs entry, continuing\n");
2923 2963
2924 ret = pci_register_driver(&cxgb4vf_driver); 2964 ret = pci_register_driver(&cxgb4vf_driver);
2925 if (ret < 0) 2965 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2926 debugfs_remove(cxgb4vf_debugfs_root); 2966 debugfs_remove(cxgb4vf_debugfs_root);
2927 return ret; 2967 return ret;
2928} 2968}
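
debugfs_create_dir() does not only return NULL on failure: with CONFIG_DEBUG_FS disabled it returns an ERR_PTR-encoded error, so testing the result (and the cached root) with IS_ERR_OR_NULL() is the safe form, and since debugfs is best effort the driver merely warns. A sketch of the pattern with a hypothetical module name:

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *example_debugfs_root;

    static void example_debugfs_init(void)
    {
            example_debugfs_root = debugfs_create_dir("example", NULL);
            if (IS_ERR_OR_NULL(example_debugfs_root))
                    pr_warn("example: no debugfs support, continuing\n");
    }

    static void example_debugfs_exit(void)
    {
            if (!IS_ERR_OR_NULL(example_debugfs_root))
                    debugfs_remove_recursive(example_debugfs_root);
    }
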
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80475c..192db226ec7 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
171 delay_idx = 0; 171 delay_idx = 0;
172 ms = delay[0]; 172 ms = delay[0];
173 173
174 for (i = 0; i < 500; i += ms) { 174 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
175 if (sleep_ok) { 175 if (sleep_ok) {
176 ms = delay[delay_idx]; 176 ms = delay[delay_idx];
177 if (delay_idx < ARRAY_SIZE(delay) - 1) 177 if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
index e92b2b6cd8c..ae47f23ba93 100644
--- a/drivers/net/davinci_cpdma.c
+++ b/drivers/net/davinci_cpdma.c
@@ -76,6 +76,7 @@ struct cpdma_desc {
76 76
77struct cpdma_desc_pool { 77struct cpdma_desc_pool {
78 u32 phys; 78 u32 phys;
79 u32 hw_addr;
79 void __iomem *iomap; /* ioremap map */ 80 void __iomem *iomap; /* ioremap map */
80 void *cpumap; /* dma_alloc map */ 81 void *cpumap; /* dma_alloc map */
81 int desc_size, mem_size; 82 int desc_size, mem_size;
@@ -137,7 +138,8 @@ struct cpdma_chan {
137 * abstract out these details 138 * abstract out these details
138 */ 139 */
139static struct cpdma_desc_pool * 140static struct cpdma_desc_pool *
140cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align) 141cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
142 int size, int align)
141{ 143{
142 int bitmap_size; 144 int bitmap_size;
143 struct cpdma_desc_pool *pool; 145 struct cpdma_desc_pool *pool;
@@ -161,10 +163,12 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
161 if (phys) { 163 if (phys) {
162 pool->phys = phys; 164 pool->phys = phys;
163 pool->iomap = ioremap(phys, size); 165 pool->iomap = ioremap(phys, size);
166 pool->hw_addr = hw_addr;
164 } else { 167 } else {
165 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, 168 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
166 GFP_KERNEL); 169 GFP_KERNEL);
167 pool->iomap = (void __force __iomem *)pool->cpumap; 170 pool->iomap = (void __force __iomem *)pool->cpumap;
171 pool->hw_addr = pool->phys;
168 } 172 }
169 173
170 if (pool->iomap) 174 if (pool->iomap)
@@ -201,14 +205,14 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
201{ 205{
202 if (!desc) 206 if (!desc)
203 return 0; 207 return 0;
204 return pool->phys + (__force dma_addr_t)desc - 208 return pool->hw_addr + (__force dma_addr_t)desc -
205 (__force dma_addr_t)pool->iomap; 209 (__force dma_addr_t)pool->iomap;
206} 210}
207 211
208static inline struct cpdma_desc __iomem * 212static inline struct cpdma_desc __iomem *
209desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) 213desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
210{ 214{
211 return dma ? pool->iomap + dma - pool->phys : NULL; 215 return dma ? pool->iomap + dma - pool->hw_addr : NULL;
212} 216}
213 217
214static struct cpdma_desc __iomem * 218static struct cpdma_desc __iomem *
@@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
260 264
261 ctlr->pool = cpdma_desc_pool_create(ctlr->dev, 265 ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
262 ctlr->params.desc_mem_phys, 266 ctlr->params.desc_mem_phys,
267 ctlr->params.desc_hw_addr,
263 ctlr->params.desc_mem_size, 268 ctlr->params.desc_mem_size,
264 ctlr->params.desc_align); 269 ctlr->params.desc_align);
265 if (!ctlr->pool) { 270 if (!ctlr->pool) {
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
index 868e50ebde4..afa19a0c0d8 100644
--- a/drivers/net/davinci_cpdma.h
+++ b/drivers/net/davinci_cpdma.h
@@ -33,6 +33,7 @@ struct cpdma_params {
33 bool has_soft_reset; 33 bool has_soft_reset;
34 int min_packet_size; 34 int min_packet_size;
35 u32 desc_mem_phys; 35 u32 desc_mem_phys;
36 u32 desc_hw_addr;
36 int desc_mem_size; 37 int desc_mem_size;
37 int desc_align; 38 int desc_align;
38 39
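
The pool previously assumed the address the CPU ioremaps (phys) is also the address the DMA engine decodes; on parts where the descriptor RAM sits at a different bus address from the EMAC's point of view the two diverge, hence the separate hw_addr and the symmetric desc_phys()/desc_from_phys() translations. The two views in sketch form (hypothetical struct and helpers):

    #include <linux/io.h>
    #include <linux/types.h>

    struct pool {
            void __iomem *iomap;    /* CPU view, from ioremap(phys) */
            dma_addr_t hw_addr;     /* device view of the same RAM  */
    };

    static dma_addr_t desc_to_dma(struct pool *p, void __iomem *desc)
    {
            return p->hw_addr +
                   ((u8 __iomem *)desc - (u8 __iomem *)p->iomap);
    }

    static void __iomem *dma_to_desc(struct pool *p, dma_addr_t dma)
    {
            return (u8 __iomem *)p->iomap + (dma - p->hw_addr);
    }
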
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d17d17..baca6bfcb08 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
1008 int ret; 1008 int ret;
1009 1009
1010 /* free and bail if we are shutting down */ 1010 /* free and bail if we are shutting down */
1011 if (unlikely(!netif_running(ndev))) { 1011 if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
1012 dev_kfree_skb_any(skb); 1012 dev_kfree_skb_any(skb);
1013 return; 1013 return;
1014 } 1014 }
@@ -1730,7 +1730,7 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
1730 emac_read(EMAC_TXCARRIERSENSE); 1730 emac_read(EMAC_TXCARRIERSENSE);
1731 emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); 1731 emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
1732 1732
1733 ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); 1733 ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
1734 emac_write(EMAC_TXUNDERRUN, stats_clear_mask); 1734 emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
1735 1735
1736 return &ndev->stats; 1736 return &ndev->stats;
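
EMAC_TXUNDERRUN is a read-then-clear statistics register (the function writes a clear mask right after reading it), so plain assignment discarded everything accumulated before the previous clear; += folds each snapshot into the long-running netdev counter. The general shape for such counters (the EMAC writes a clear mask; writing back the snapshot shown here is one common variant):

    #include <linux/io.h>
    #include <linux/types.h>

    /* fold a hardware counter that clears on acknowledge into a total */
    static void accumulate_stat(unsigned long *total, void __iomem *reg)
    {
            u32 snapshot = readl(reg);

            *total += snapshot;             /* accumulate, never overwrite */
            writel(snapshot, reg);          /* clear what was just counted */
    }
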
@@ -1854,10 +1854,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1854 dma_params.rxcp = priv->emac_base + 0x660; 1854 dma_params.rxcp = priv->emac_base + 0x660;
1855 dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS; 1855 dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
1856 dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE; 1856 dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
1857 dma_params.desc_mem_phys = hw_ram_addr; 1857 dma_params.desc_hw_addr = hw_ram_addr;
1858 dma_params.desc_mem_size = pdata->ctrl_ram_size; 1858 dma_params.desc_mem_size = pdata->ctrl_ram_size;
1859 dma_params.desc_align = 16; 1859 dma_params.desc_align = 16;
1860 1860
1861 dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
1862 (u32 __force)res->start + pdata->ctrl_ram_offset;
1863
1861 priv->dma = cpdma_ctlr_create(&dma_params); 1864 priv->dma = cpdma_ctlr_create(&dma_params);
1862 if (!priv->dma) { 1865 if (!priv->dma) {
1863 dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n"); 1866 dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1b48b68ad4f..8b0084d17c8 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
1094 } 1094 }
1095 } 1095 }
1096 /* Change buffer ownership for this last frame, back to the adapter */ 1096 /* Change buffer ownership for this last frame, back to the adapter */
1097 for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) { 1097 for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); 1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
1099 } 1099 }
1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); 1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
1103 /* 1103 /*
1104 ** Update entry information 1104 ** Update entry information
1105 */ 1105 */
1106 lp->rx_new = (++lp->rx_new) & lp->rxRingMask; 1106 lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
1107 } 1107 }
1108 1108
1109 return 0; 1109 return 0;
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
1148 } 1148 }
1149 1149
1150 /* Update all the pointers */ 1150 /* Update all the pointers */
1151 lp->tx_old = (++lp->tx_old) & lp->txRingMask; 1151 lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
1152 } 1152 }
1153 1153
1154 return 0; 1154 return 0;
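
The depca hunks above remove genuine undefined behavior: lp->rx_old = (++lp->rx_old) & lp->rxRingMask modifies lp->rx_old twice without a sequence point, which gcc 4.x flags with -Wsequence-point and may evaluate in either order; (lp->rx_old + 1) & mask computes the same ring advance with a single well-defined store. A compilable illustration:

    #include <stdio.h>

    #define RING_MASK 7     /* ring size 8 */

    int main(void)
    {
            unsigned int idx = 6;

            /* undefined: idx = (++idx) & RING_MASK;  two unsequenced writes */

            /* well defined single assignment, same intent: */
            idx = (idx + 1) & RING_MASK;
            printf("%u\n", idx);    /* 7 */
            idx = (idx + 1) & RING_MASK;
            printf("%u\n", idx);    /* wraps to 0 */
            return 0;
    }
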
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff69..c05db604605 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
1753 1753
1754 /* Free all the skbuffs in the queue. */ 1754 /* Free all the skbuffs in the queue. */
1755 for (i = 0; i < RX_RING_SIZE; i++) { 1755 for (i = 0; i < RX_RING_SIZE; i++) {
1756 np->rx_ring[i].status = 0;
1757 np->rx_ring[i].fraginfo = 0;
1758 skb = np->rx_skbuff[i]; 1756 skb = np->rx_skbuff[i];
1759 if (skb) { 1757 if (skb) {
1760 pci_unmap_single(np->pdev, 1758 pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
1763 dev_kfree_skb (skb); 1761 dev_kfree_skb (skb);
1764 np->rx_skbuff[i] = NULL; 1762 np->rx_skbuff[i] = NULL;
1765 } 1763 }
1764 np->rx_ring[i].status = 0;
1765 np->rx_ring[i].fraginfo = 0;
1766 } 1766 }
1767 for (i = 0; i < TX_RING_SIZE; i++) { 1767 for (i = 0; i < TX_RING_SIZE; i++) {
1768 skb = np->tx_skbuff[i]; 1768 skb = np->tx_skbuff[i];
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc1d90..b7af5bab993 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -621,9 +621,9 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
621 /* change in wol state, update IRQ state */ 621 /* change in wol state, update IRQ state */
622 622
623 if (!dm->wake_state) 623 if (!dm->wake_state)
624 set_irq_wake(dm->irq_wake, 1); 624 irq_set_irq_wake(dm->irq_wake, 1);
625 else if (dm->wake_state & !opts) 625 else if (dm->wake_state & !opts)
626 set_irq_wake(dm->irq_wake, 0); 626 irq_set_irq_wake(dm->irq_wake, 0);
627 } 627 }
628 628
629 dm->wake_state = opts; 629 dm->wake_state = opts;
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
802 /* Checksum mode */ 802 /* Checksum mode */
803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum); 803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
804 804
805 /* GPIO0 on pre-activate PHY */
806 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
807 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 805 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
808 iow(db, DM9000_GPR, 0); /* Enable PHY */
809 806
810 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 807 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
811 808
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
852 unsigned long flags; 849 unsigned long flags;
853 850
854 /* Save previous register address */ 851 /* Save previous register address */
855 reg_save = readb(db->io_addr);
856 spin_lock_irqsave(&db->lock, flags); 852 spin_lock_irqsave(&db->lock, flags);
853 reg_save = readb(db->io_addr);
857 854
858 netif_stop_queue(dev); 855 netif_stop_queue(dev);
859 dm9000_reset(db); 856 dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
1194 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1191 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1195 return -EAGAIN; 1192 return -EAGAIN;
1196 1193
 1194 /* Turn GPIO0 on to pre-activate the PHY; Reg 1F is not set by reset */
 1195 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activates the phyxcer */
 1196 mdelay(1); /* delay needed by the DM9000B */
1197
1197 /* Initialize DM9000 board */ 1198 /* Initialize DM9000 board */
1198 dm9000_reset(db); 1199 dm9000_reset(db);
1199 dm9000_init_dm9000(dev); 1200 dm9000_init_dm9000(dev);
@@ -1423,13 +1424,13 @@ dm9000_probe(struct platform_device *pdev)
1423 } else { 1424 } else {
1424 1425
1425 /* test to see if irq is really wakeup capable */ 1426 /* test to see if irq is really wakeup capable */
1426 ret = set_irq_wake(db->irq_wake, 1); 1427 ret = irq_set_irq_wake(db->irq_wake, 1);
1427 if (ret) { 1428 if (ret) {
1428 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", 1429 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1429 db->irq_wake, ret); 1430 db->irq_wake, ret);
1430 ret = 0; 1431 ret = 0;
1431 } else { 1432 } else {
1432 set_irq_wake(db->irq_wake, 0); 1433 irq_set_irq_wake(db->irq_wake, 0);
1433 db->wake_supported = 1; 1434 db->wake_supported = 1;
1434 } 1435 }
1435 } 1436 }
@@ -1592,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
1592 ndev->dev_addr[i] = ior(db, i+DM9000_PAR); 1593 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1593 } 1594 }
1594 1595
1595 if (!is_valid_ether_addr(ndev->dev_addr)) 1596 if (!is_valid_ether_addr(ndev->dev_addr)) {
1596 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " 1597 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1597 "set using ifconfig\n", ndev->name); 1598 "set using ifconfig\n", ndev->name);
1598 1599
1600 random_ether_addr(ndev->dev_addr);
1601 mac_src = "random";
1602 }
1603
1604
1599 platform_set_drvdata(pdev, ndev); 1605 platform_set_drvdata(pdev, ndev);
1600 ret = register_netdev(ndev); 1606 ret = register_netdev(ndev);
1601 1607
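
Rather than registering with an all-zero (invalid) MAC when the EEPROM is blank, the probe now falls back to random_ether_addr(), which generates a locally administered unicast address, and records the source so the warning stays meaningful. The fallback in outline (a sketch; only random_ether_addr()/is_valid_ether_addr() are the kernel's own names):

    #include <linux/etherdevice.h>

    static void example_init_mac(struct net_device *ndev)
    {
            /* try platform data, then EEPROM, then chip registers ... */

            if (!is_valid_ether_addr(ndev->dev_addr)) {
                    dev_warn(&ndev->dev, "invalid MAC, using a random one\n");
                    random_ether_addr(ndev->dev_addr); /* locally administered */
            }
    }
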
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b72fa..8318ea06cb6 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
337 for (i = 0; i < PHY_MAX_ADDR; i++) 337 for (i = 0; i < PHY_MAX_ADDR; i++)
338 bp->mii_bus->irq[i] = PHY_POLL; 338 bp->mii_bus->irq[i] = PHY_POLL;
339 339
340 platform_set_drvdata(bp->dev, bp->mii_bus);
341
342 if (mdiobus_register(bp->mii_bus)) { 340 if (mdiobus_register(bp->mii_bus)) {
343 err = -ENXIO; 341 err = -ENXIO;
344 goto err_out_free_mdio_irq; 342 goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
863 bp = netdev_priv(dev); 861 bp = netdev_priv(dev);
864 bp->dev = dev; 862 bp->dev = dev;
865 863
864 platform_set_drvdata(pdev, dev);
866 SET_NETDEV_DEV(dev, &pdev->dev); 865 SET_NETDEV_DEV(dev, &pdev->dev);
867 866
868 spin_lock_init(&bp->lock); 867 spin_lock_init(&bp->lock);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index aed223b1b89..7501d977d99 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
124 case M88E1000_I_PHY_ID: 124 case M88E1000_I_PHY_ID:
125 case M88E1011_I_PHY_ID: 125 case M88E1011_I_PHY_ID:
126 case M88E1111_I_PHY_ID: 126 case M88E1111_I_PHY_ID:
127 case M88E1118_E_PHY_ID:
127 hw->phy_type = e1000_phy_m88; 128 hw->phy_type = e1000_phy_m88;
128 break; 129 break;
129 case IGP01E1000_I_PHY_ID: 130 case IGP01E1000_I_PHY_ID:
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3222 break; 3223 break;
3223 case e1000_ce4100: 3224 case e1000_ce4100:
3224 if ((hw->phy_id == RTL8211B_PHY_ID) || 3225 if ((hw->phy_id == RTL8211B_PHY_ID) ||
3225 (hw->phy_id == RTL8201N_PHY_ID)) 3226 (hw->phy_id == RTL8201N_PHY_ID) ||
3227 (hw->phy_id == M88E1118_E_PHY_ID))
3226 match = true; 3228 match = true;
3227 break; 3229 break;
3228 case e1000_82541: 3230 case e1000_82541:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 196eeda2dd6..c70b23d5228 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info {
2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
2918#define M88E1011_I_REV_4 0x04 2918#define M88E1011_I_REV_4 0x04
2919#define M88E1111_I_PHY_ID 0x01410CC0 2919#define M88E1111_I_PHY_ID 0x01410CC0
2920#define M88E1118_E_PHY_ID 0x01410E40
2920#define L1LXT971A_PHY_ID 0x001378E0 2921#define L1LXT971A_PHY_ID 0x001378E0
2921 2922
2922#define RTL8211B_PHY_ID 0x001CC910 2923#define RTL8211B_PHY_ID 0x001CC910
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 55c1711f168..33e7c45a4fe 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -42,7 +42,8 @@
42#define GBE_CONFIG_RAM_BASE \ 42#define GBE_CONFIG_RAM_BASE \
43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) 43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
44 44
45#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) 45#define GBE_CONFIG_BASE_VIRT \
46 ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
46 47
47#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ 48#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
48 (iowrite16_rep(base + offset, data, count)) 49 (iowrite16_rep(base + offset, data, count))
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 13149983d07..c516a7440be 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -86,6 +86,7 @@
86#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 86#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
87#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 87#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
88#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 88#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
89#define E1000_CTRL_EXT_LSECCK 0x00001000
89#define E1000_CTRL_EXT_PHYPDEN 0x00100000 90#define E1000_CTRL_EXT_PHYPDEN 0x00100000
90 91
91/* Receive Descriptor bit definitions */ 92/* Receive Descriptor bit definitions */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index e610e136905..00bf595ebd6 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -364,6 +364,7 @@ struct e1000_adapter {
364 /* structs defined in e1000_hw.h */ 364 /* structs defined in e1000_hw.h */
365 struct e1000_hw hw; 365 struct e1000_hw hw;
366 366
367 spinlock_t stats64_lock;
367 struct e1000_hw_stats stats; 368 struct e1000_hw_stats stats;
368 struct e1000_phy_info phy_info; 369 struct e1000_phy_info phy_info;
369 struct e1000_phy_stats phy_stats; 370 struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
494extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); 495extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
495extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 496extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
496extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 497extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
497extern void e1000e_update_stats(struct e1000_adapter *adapter); 498extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
499 struct rtnl_link_stats64
500 *stats);
498extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 501extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
499extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 502extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
500extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 503extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index fa08b6336cf..07f09e96e45 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,15 +46,15 @@ struct e1000_stats {
46}; 46};
47 47
48#define E1000_STAT(str, m) { \ 48#define E1000_STAT(str, m) { \
49 .stat_string = str, \ 49 .stat_string = str, \
50 .type = E1000_STATS, \ 50 .type = E1000_STATS, \
51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ 51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
52 .stat_offset = offsetof(struct e1000_adapter, m) } 52 .stat_offset = offsetof(struct e1000_adapter, m) }
53#define E1000_NETDEV_STAT(str, m) { \ 53#define E1000_NETDEV_STAT(str, m) { \
54 .stat_string = str, \ 54 .stat_string = str, \
55 .type = NETDEV_STATS, \ 55 .type = NETDEV_STATS, \
56 .sizeof_stat = sizeof(((struct net_device *)0)->m), \ 56 .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
57 .stat_offset = offsetof(struct net_device, m) } 57 .stat_offset = offsetof(struct rtnl_link_stats64, m) }
58 58
59static const struct e1000_stats e1000_gstrings_stats[] = { 59static const struct e1000_stats e1000_gstrings_stats[] = {
60 E1000_STAT("rx_packets", stats.gprc), 60 E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
65 E1000_STAT("tx_broadcast", stats.bptc), 65 E1000_STAT("tx_broadcast", stats.bptc),
66 E1000_STAT("rx_multicast", stats.mprc), 66 E1000_STAT("rx_multicast", stats.mprc),
67 E1000_STAT("tx_multicast", stats.mptc), 67 E1000_STAT("tx_multicast", stats.mptc),
68 E1000_NETDEV_STAT("rx_errors", stats.rx_errors), 68 E1000_NETDEV_STAT("rx_errors", rx_errors),
69 E1000_NETDEV_STAT("tx_errors", stats.tx_errors), 69 E1000_NETDEV_STAT("tx_errors", tx_errors),
70 E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped), 70 E1000_NETDEV_STAT("tx_dropped", tx_dropped),
71 E1000_STAT("multicast", stats.mprc), 71 E1000_STAT("multicast", stats.mprc),
72 E1000_STAT("collisions", stats.colc), 72 E1000_STAT("collisions", stats.colc),
73 E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors), 73 E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
74 E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors), 74 E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
75 E1000_STAT("rx_crc_errors", stats.crcerrs), 75 E1000_STAT("rx_crc_errors", stats.crcerrs),
76 E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors), 76 E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
77 E1000_STAT("rx_no_buffer_count", stats.rnbc), 77 E1000_STAT("rx_no_buffer_count", stats.rnbc),
78 E1000_STAT("rx_missed_errors", stats.mpc), 78 E1000_STAT("rx_missed_errors", stats.mpc),
79 E1000_STAT("tx_aborted_errors", stats.ecol), 79 E1000_STAT("tx_aborted_errors", stats.ecol),
80 E1000_STAT("tx_carrier_errors", stats.tncrs), 80 E1000_STAT("tx_carrier_errors", stats.tncrs),
81 E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors), 81 E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
82 E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors), 82 E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
83 E1000_STAT("tx_window_errors", stats.latecol), 83 E1000_STAT("tx_window_errors", stats.latecol),
84 E1000_STAT("tx_abort_late_coll", stats.latecol), 84 E1000_STAT("tx_abort_late_coll", stats.latecol),
85 E1000_STAT("tx_deferred_ok", stats.dc), 85 E1000_STAT("tx_deferred_ok", stats.dc),
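
Retargeting E1000_NETDEV_STAT from struct net_device to struct rtnl_link_stats64 keeps the same table-driven trick: each row pairs a name with an offsetof() into the stats structure, and the dump loop reads the field through a byte pointer. Since every rtnl_link_stats64 member is a __u64, a sketch can read at one fixed width (the driver keeps sizeof_stat because its own adapter stats are mixed widths):

    #include <linux/if_link.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    struct stat_desc {
            const char *name;
            size_t offset;          /* into struct rtnl_link_stats64 */
    };

    #define NETDEV_STAT(str, m) \
            { .name = str, .offset = offsetof(struct rtnl_link_stats64, m) }

    static const struct stat_desc stat_tbl[] = {
            NETDEV_STAT("rx_errors", rx_errors),
            NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
    };

    static u64 read_stat(const struct rtnl_link_stats64 *s,
                         const struct stat_desc *d)
    {
            return *(const u64 *)((const char *)s + d->offset);
    }
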
@@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
433 struct e1000_hw *hw = &adapter->hw; 433 struct e1000_hw *hw = &adapter->hw;
434 u32 *regs_buff = p; 434 u32 *regs_buff = p;
435 u16 phy_data; 435 u16 phy_data;
436 u8 revision_id;
437 436
438 memset(p, 0, E1000_REGS_LEN * sizeof(u32)); 437 memset(p, 0, E1000_REGS_LEN * sizeof(u32));
439 438
440 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); 439 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
441 440 adapter->pdev->device;
442 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
443 441
444 regs_buff[0] = er32(CTRL); 442 regs_buff[0] = er32(CTRL);
445 regs_buff[1] = er32(STATUS); 443 regs_buff[1] = er32(STATUS);
@@ -684,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
684 rx_old = adapter->rx_ring; 682 rx_old = adapter->rx_ring;
685 683
686 err = -ENOMEM; 684 err = -ENOMEM;
687 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 685 tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
688 if (!tx_ring) 686 if (!tx_ring)
689 goto err_alloc_tx; 687 goto err_alloc_tx;
690 /*
691 * use a memcpy to save any previously configured
692 * items like napi structs from having to be
693 * reinitialized
694 */
695 memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
696 688
697 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 689 rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
698 if (!rx_ring) 690 if (!rx_ring)
699 goto err_alloc_rx; 691 goto err_alloc_rx;
700 memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
701 692
702 adapter->tx_ring = tx_ring; 693 adapter->tx_ring = tx_ring;
703 adapter->rx_ring = rx_ring; 694 adapter->rx_ring = rx_ring;
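The set_ringparam hunk above swaps a kzalloc()+memcpy() pair for kmemdup(), which allocates and copies in one call; the deleted comment about preserving previously configured items (napi structs and the like) becomes implicit in the copy. A hedged sketch of the conversion, with old_ring as a hypothetical source pointer:

    struct e1000_ring *new_ring;

    /* duplicate the old ring so already-initialized fields carry over */
    new_ring = kmemdup(old_ring, sizeof(*old_ring), GFP_KERNEL);
    if (!new_ring)
            return -ENOMEM;     /* nothing allocated yet, nothing to unwind */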
@@ -1255,7 +1246,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1255{ 1246{
1256 struct e1000_hw *hw = &adapter->hw; 1247 struct e1000_hw *hw = &adapter->hw;
1257 u32 ctrl_reg = 0; 1248 u32 ctrl_reg = 0;
1258 u32 stat_reg = 0;
1259 u16 phy_reg = 0; 1249 u16 phy_reg = 0;
1260 s32 ret_val = 0; 1250 s32 ret_val = 0;
1261 1251
@@ -1363,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1363 * Set the ILOS bit on the fiber NIC if half-duplex link is 1353 * Set the ILOS bit on the fiber NIC if half-duplex link is
1364 * detected. 1354 * detected.
1365 */ 1355 */
1366 stat_reg = er32(STATUS); 1356 if ((er32(STATUS) & E1000_STATUS_FD) == 0)
1367 if ((stat_reg & E1000_STATUS_FD) == 0)
1368 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1357 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1369 } 1358 }
1370 1359
@@ -1677,10 +1666,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1677 } else { 1666 } else {
1678 hw->mac.ops.check_for_link(hw); 1667 hw->mac.ops.check_for_link(hw);
1679 if (hw->mac.autoneg) 1668 if (hw->mac.autoneg)
1680 msleep(4000); 1669 /*
1670 * On some Phy/switch combinations, link establishment
1671 * can take a few seconds more than expected.
1672 */
1673 msleep(5000);
1681 1674
1682 if (!(er32(STATUS) & 1675 if (!(er32(STATUS) & E1000_STATUS_LU))
1683 E1000_STATUS_LU))
1684 *data = 1; 1676 *data = 1;
1685 } 1677 }
1686 return *data; 1678 return *data;
@@ -1807,8 +1799,7 @@ static void e1000_get_wol(struct net_device *netdev,
1807 return; 1799 return;
1808 1800
1809 wol->supported = WAKE_UCAST | WAKE_MCAST | 1801 wol->supported = WAKE_UCAST | WAKE_MCAST |
1810 WAKE_BCAST | WAKE_MAGIC | 1802 WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
1811 WAKE_PHY | WAKE_ARP;
1812 1803
1813 /* apply any specific unsupported masks here */ 1804 /* apply any specific unsupported masks here */
1814 if (adapter->flags & FLAG_NO_WAKE_UCAST) { 1805 if (adapter->flags & FLAG_NO_WAKE_UCAST) {
@@ -1829,19 +1820,16 @@ static void e1000_get_wol(struct net_device *netdev,
1829 wol->wolopts |= WAKE_MAGIC; 1820 wol->wolopts |= WAKE_MAGIC;
1830 if (adapter->wol & E1000_WUFC_LNKC) 1821 if (adapter->wol & E1000_WUFC_LNKC)
1831 wol->wolopts |= WAKE_PHY; 1822 wol->wolopts |= WAKE_PHY;
1832 if (adapter->wol & E1000_WUFC_ARP)
1833 wol->wolopts |= WAKE_ARP;
1834} 1823}
1835 1824
1836static int e1000_set_wol(struct net_device *netdev, 1825static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1837 struct ethtool_wolinfo *wol)
1838{ 1826{
1839 struct e1000_adapter *adapter = netdev_priv(netdev); 1827 struct e1000_adapter *adapter = netdev_priv(netdev);
1840 1828
1841 if (!(adapter->flags & FLAG_HAS_WOL) || 1829 if (!(adapter->flags & FLAG_HAS_WOL) ||
1842 !device_can_wakeup(&adapter->pdev->dev) || 1830 !device_can_wakeup(&adapter->pdev->dev) ||
1843 (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | 1831 (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1844 WAKE_MAGIC | WAKE_PHY | WAKE_ARP))) 1832 WAKE_MAGIC | WAKE_PHY)))
1845 return -EOPNOTSUPP; 1833 return -EOPNOTSUPP;
1846 1834
1847 /* these settings will always override what we currently have */ 1835 /* these settings will always override what we currently have */
@@ -1857,8 +1845,6 @@ static int e1000_set_wol(struct net_device *netdev,
1857 adapter->wol |= E1000_WUFC_MAG; 1845 adapter->wol |= E1000_WUFC_MAG;
1858 if (wol->wolopts & WAKE_PHY) 1846 if (wol->wolopts & WAKE_PHY)
1859 adapter->wol |= E1000_WUFC_LNKC; 1847 adapter->wol |= E1000_WUFC_LNKC;
1860 if (wol->wolopts & WAKE_ARP)
1861 adapter->wol |= E1000_WUFC_ARP;
1862 1848
1863 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1849 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1864 1850
@@ -1972,8 +1958,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
1972static int e1000_nway_reset(struct net_device *netdev) 1958static int e1000_nway_reset(struct net_device *netdev)
1973{ 1959{
1974 struct e1000_adapter *adapter = netdev_priv(netdev); 1960 struct e1000_adapter *adapter = netdev_priv(netdev);
1975 if (netif_running(netdev)) 1961
1976 e1000e_reinit_locked(adapter); 1962 if (!netif_running(netdev))
1963 return -EAGAIN;
1964
1965 if (!adapter->hw.mac.autoneg)
1966 return -EINVAL;
1967
1968 e1000e_reinit_locked(adapter);
1969
1977 return 0; 1970 return 0;
1978} 1971}
1979 1972
@@ -1982,14 +1975,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1982 u64 *data) 1975 u64 *data)
1983{ 1976{
1984 struct e1000_adapter *adapter = netdev_priv(netdev); 1977 struct e1000_adapter *adapter = netdev_priv(netdev);
1978 struct rtnl_link_stats64 net_stats;
1985 int i; 1979 int i;
1986 char *p = NULL; 1980 char *p = NULL;
1987 1981
1988 e1000e_update_stats(adapter); 1982 e1000e_get_stats64(netdev, &net_stats);
1989 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1983 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1990 switch (e1000_gstrings_stats[i].type) { 1984 switch (e1000_gstrings_stats[i].type) {
1991 case NETDEV_STATS: 1985 case NETDEV_STATS:
1992 p = (char *) netdev + 1986 p = (char *) &net_stats +
1993 e1000_gstrings_stats[i].stat_offset; 1987 e1000_gstrings_stats[i].stat_offset;
1994 break; 1988 break;
1995 case E1000_STATS: 1989 case E1000_STATS:
@@ -2014,7 +2008,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
2014 2008
2015 switch (stringset) { 2009 switch (stringset) {
2016 case ETH_SS_TEST: 2010 case ETH_SS_TEST:
2017 memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test)); 2011 memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
2018 break; 2012 break;
2019 case ETH_SS_STATS: 2013 case ETH_SS_STATS:
2020 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 2014 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
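With the NETDEV_STATS table entries rebased from struct net_device onto struct rtnl_link_stats64, e1000_get_ethtool_stats() now reads those counters out of a private snapshot instead of chasing netdev->stats. An illustrative sketch of that consumption loop (the names are the driver's own; the surrounding locals netdev, adapter and the ethtool output buffer data[] are assumed from context):

    struct rtnl_link_stats64 snap;
    int i;

    e1000e_get_stats64(netdev, &snap);      /* one consistent snapshot */
    for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
            char *p;

            if (e1000_gstrings_stats[i].type == NETDEV_STATS)
                    p = (char *)&snap + e1000_gstrings_stats[i].stat_offset;
            else    /* E1000_STATS: offset is into the adapter struct */
                    p = (char *)adapter + e1000_gstrings_stats[i].stat_offset;
            data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
                      *(u64 *)p : *(u32 *)p;
    }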
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index bc0860a598c..307e1ec2241 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -812,9 +812,8 @@ struct e1000_nvm_operations {
812 812
813struct e1000_mac_info { 813struct e1000_mac_info {
814 struct e1000_mac_operations ops; 814 struct e1000_mac_operations ops;
815 815 u8 addr[ETH_ALEN];
816 u8 addr[6]; 816 u8 perm_addr[ETH_ALEN];
817 u8 perm_addr[6];
818 817
819 enum e1000_mac_type type; 818 enum e1000_mac_type type;
820 819
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index fb46974cfec..ce1dbfdca11 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -140,6 +140,11 @@
140#define I82579_LPI_CTRL PHY_REG(772, 20) 140#define I82579_LPI_CTRL PHY_REG(772, 20)
141#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 141#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
142 142
143/* EMI Registers */
144#define I82579_EMI_ADDR 0x10
145#define I82579_EMI_DATA 0x11
146#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
147
143/* Strapping Option Register - RO */ 148/* Strapping Option Register - RO */
144#define E1000_STRAP 0x0000C 149#define E1000_STRAP 0x0000C
145#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 150#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
@@ -302,9 +307,9 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
302 * the interconnect to PCIe mode. 307 * the interconnect to PCIe mode.
303 */ 308 */
304 fwsm = er32(FWSM); 309 fwsm = er32(FWSM);
305 if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) { 310 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
306 ctrl = er32(CTRL); 311 ctrl = er32(CTRL);
307 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 312 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
308 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 313 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
309 ew32(CTRL, ctrl); 314 ew32(CTRL, ctrl);
310 udelay(10); 315 udelay(10);
@@ -331,7 +336,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
331 goto out; 336 goto out;
332 337
333 /* Ungate automatic PHY configuration on non-managed 82579 */ 338 /* Ungate automatic PHY configuration on non-managed 82579 */
334 if ((hw->mac.type == e1000_pch2lan) && 339 if ((hw->mac.type == e1000_pch2lan) &&
335 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 340 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
336 msleep(10); 341 msleep(10);
337 e1000_gate_hw_phy_config_ich8lan(hw, false); 342 e1000_gate_hw_phy_config_ich8lan(hw, false);
@@ -366,7 +371,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
366 case e1000_phy_82579: 371 case e1000_phy_82579:
367 phy->ops.check_polarity = e1000_check_polarity_82577; 372 phy->ops.check_polarity = e1000_check_polarity_82577;
368 phy->ops.force_speed_duplex = 373 phy->ops.force_speed_duplex =
369 e1000_phy_force_speed_duplex_82577; 374 e1000_phy_force_speed_duplex_82577;
370 phy->ops.get_cable_length = e1000_get_cable_length_82577; 375 phy->ops.get_cable_length = e1000_get_cable_length_82577;
371 phy->ops.get_info = e1000_get_phy_info_82577; 376 phy->ops.get_info = e1000_get_phy_info_82577;
372 phy->ops.commit = e1000e_phy_sw_reset; 377 phy->ops.commit = e1000e_phy_sw_reset;
@@ -753,7 +758,13 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
753 if (rc) 758 if (rc)
754 return rc; 759 return rc;
755 760
756 if (adapter->hw.phy.type == e1000_phy_ife) { 761 /*
762 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
763 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
764 */
765 if ((adapter->hw.phy.type == e1000_phy_ife) ||
766 ((adapter->hw.mac.type >= e1000_pch2lan) &&
767 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
757 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; 768 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
758 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; 769 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
759 } 770 }
@@ -1723,11 +1734,25 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1723 /* Configure the LCD with the OEM bits in NVM */ 1734 /* Configure the LCD with the OEM bits in NVM */
1724 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1735 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1725 1736
1726 /* Ungate automatic PHY configuration on non-managed 82579 */ 1737 if (hw->mac.type == e1000_pch2lan) {
1727 if ((hw->mac.type == e1000_pch2lan) && 1738 /* Ungate automatic PHY configuration on non-managed 82579 */
1728 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 1739 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1729 msleep(10); 1740 msleep(10);
1730 e1000_gate_hw_phy_config_ich8lan(hw, false); 1741 e1000_gate_hw_phy_config_ich8lan(hw, false);
1742 }
1743
1744 /* Set EEE LPI Update Timer to 200usec */
1745 ret_val = hw->phy.ops.acquire(hw);
1746 if (ret_val)
1747 goto out;
1748 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1749 I82579_LPI_UPDATE_TIMER);
1750 if (ret_val)
1751 goto release;
1752 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1753 0x1387);
1754release:
1755 hw->phy.ops.release(hw);
1731 } 1756 }
1732 1757
1733out: 1758out:
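The EEE LPI timer write above goes through the newly defined EMI registers, an address/data indirection: the target EMI offset is written to I82579_EMI_ADDR and the payload then moves through I82579_EMI_DATA, with the PHY semaphore held across both accesses. A sketch of the pattern factored into a helper, demo_write_emi_reg being a hypothetical name:

    static s32 demo_write_emi_reg(struct e1000_hw *hw, u16 emi_reg, u16 val)
    {
            s32 ret_val;

            ret_val = hw->phy.ops.acquire(hw);      /* PHY semaphore */
            if (ret_val)
                    return ret_val;

            /* select the EMI offset, then move the payload through DATA */
            ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, emi_reg);
            if (!ret_val)
                    ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
                                                           val);

            hw->phy.ops.release(hw);
            return ret_val;
    }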
@@ -2104,7 +2129,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2104{ 2129{
2105 union ich8_hws_flash_status hsfsts; 2130 union ich8_hws_flash_status hsfsts;
2106 s32 ret_val = -E1000_ERR_NVM; 2131 s32 ret_val = -E1000_ERR_NVM;
2107 s32 i = 0;
2108 2132
2109 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2133 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2110 2134
@@ -2140,6 +2164,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2140 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 2164 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2141 ret_val = 0; 2165 ret_val = 0;
2142 } else { 2166 } else {
2167 s32 i = 0;
2168
2143 /* 2169 /*
2144 * Otherwise poll for some time so the current 2170 * Otherwise poll for some time so the current
2145 * cycle has a chance to end before giving up. 2171 * cycle has a chance to end before giving up.
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 68aa1749bf6..96921de5df2 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1978{ 1978{
1979 struct e1000_nvm_info *nvm = &hw->nvm; 1979 struct e1000_nvm_info *nvm = &hw->nvm;
1980 u32 eecd = er32(EECD); 1980 u32 eecd = er32(EECD);
1981 u16 timeout = 0;
1982 u8 spi_stat_reg; 1981 u8 spi_stat_reg;
1983 1982
1984 if (nvm->type == e1000_nvm_eeprom_spi) { 1983 if (nvm->type == e1000_nvm_eeprom_spi) {
1984 u16 timeout = NVM_MAX_RETRY_SPI;
1985
1985 /* Clear SK and CS */ 1986 /* Clear SK and CS */
1986 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 1987 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1987 ew32(EECD, eecd); 1988 ew32(EECD, eecd);
1988 udelay(1); 1989 udelay(1);
1989 timeout = NVM_MAX_RETRY_SPI;
1990 1990
1991 /* 1991 /*
1992 * Read "Status Register" repeatedly until the LSB is cleared. 1992 * Read "Status Register" repeatedly until the LSB is cleared.
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 1c18f26b081..a39d4a4d871 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -54,7 +54,7 @@
54 54
55#define DRV_EXTRAVERSION "-k2" 55#define DRV_EXTRAVERSION "-k2"
56 56
57#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION 57#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
58char e1000e_driver_name[] = "e1000e"; 58char e1000e_driver_name[] = "e1000e";
59const char e1000e_driver_version[] = DRV_VERSION; 59const char e1000e_driver_version[] = DRV_VERSION;
60 60
@@ -900,8 +900,6 @@ next_desc:
900 900
901 adapter->total_rx_bytes += total_rx_bytes; 901 adapter->total_rx_bytes += total_rx_bytes;
902 adapter->total_rx_packets += total_rx_packets; 902 adapter->total_rx_packets += total_rx_packets;
903 netdev->stats.rx_bytes += total_rx_bytes;
904 netdev->stats.rx_packets += total_rx_packets;
905 return cleaned; 903 return cleaned;
906} 904}
907 905
@@ -937,6 +935,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
937 u16 phy_status, phy_1000t_status, phy_ext_status; 935 u16 phy_status, phy_1000t_status, phy_ext_status;
938 u16 pci_status; 936 u16 pci_status;
939 937
938 if (test_bit(__E1000_DOWN, &adapter->state))
939 return;
940
940 e1e_rphy(hw, PHY_STATUS, &phy_status); 941 e1e_rphy(hw, PHY_STATUS, &phy_status);
941 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); 942 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
942 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); 943 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1057,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1057 } 1058 }
1058 adapter->total_tx_bytes += total_tx_bytes; 1059 adapter->total_tx_bytes += total_tx_bytes;
1059 adapter->total_tx_packets += total_tx_packets; 1060 adapter->total_tx_packets += total_tx_packets;
1060 netdev->stats.tx_bytes += total_tx_bytes;
1061 netdev->stats.tx_packets += total_tx_packets;
1062 return count < tx_ring->count; 1061 return count < tx_ring->count;
1063} 1062}
1064 1063
@@ -1245,8 +1244,6 @@ next_desc:
1245 1244
1246 adapter->total_rx_bytes += total_rx_bytes; 1245 adapter->total_rx_bytes += total_rx_bytes;
1247 adapter->total_rx_packets += total_rx_packets; 1246 adapter->total_rx_packets += total_rx_packets;
1248 netdev->stats.rx_bytes += total_rx_bytes;
1249 netdev->stats.rx_packets += total_rx_packets;
1250 return cleaned; 1247 return cleaned;
1251} 1248}
1252 1249
@@ -1325,7 +1322,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1325 /* an error means any chain goes out the window 1322 /* an error means any chain goes out the window
1326 * too */ 1323 * too */
1327 if (rx_ring->rx_skb_top) 1324 if (rx_ring->rx_skb_top)
1328 dev_kfree_skb(rx_ring->rx_skb_top); 1325 dev_kfree_skb_irq(rx_ring->rx_skb_top);
1329 rx_ring->rx_skb_top = NULL; 1326 rx_ring->rx_skb_top = NULL;
1330 goto next_desc; 1327 goto next_desc;
1331 } 1328 }
@@ -1398,7 +1395,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1398 /* eth type trans needs skb->data to point to something */ 1395 /* eth type trans needs skb->data to point to something */
1399 if (!pskb_may_pull(skb, ETH_HLEN)) { 1396 if (!pskb_may_pull(skb, ETH_HLEN)) {
1400 e_err("pskb_may_pull failed.\n"); 1397 e_err("pskb_may_pull failed.\n");
1401 dev_kfree_skb(skb); 1398 dev_kfree_skb_irq(skb);
1402 goto next_desc; 1399 goto next_desc;
1403 } 1400 }
1404 1401
@@ -1426,8 +1423,6 @@ next_desc:
1426 1423
1427 adapter->total_rx_bytes += total_rx_bytes; 1424 adapter->total_rx_bytes += total_rx_bytes;
1428 adapter->total_rx_packets += total_rx_packets; 1425 adapter->total_rx_packets += total_rx_packets;
1429 netdev->stats.rx_bytes += total_rx_bytes;
1430 netdev->stats.rx_packets += total_rx_packets;
1431 return cleaned; 1426 return cleaned;
1432} 1427}
1433 1428
@@ -1506,6 +1501,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
1506 struct e1000_adapter *adapter = container_of(work, 1501 struct e1000_adapter *adapter = container_of(work,
1507 struct e1000_adapter, downshift_task); 1502 struct e1000_adapter, downshift_task);
1508 1503
1504 if (test_bit(__E1000_DOWN, &adapter->state))
1505 return;
1506
1509 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); 1507 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1510} 1508}
1511 1509
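This is the first of several identical guards this patch adds to deferred work (the hang printer above, the downshift workaround here, the PHY-update task, the watchdog, the reset task): anything that can fire during teardown checks __E1000_DOWN first, so it never touches hardware the down path is in the middle of resetting. The shape, with demo_task as an illustrative name:

    static void demo_task(struct work_struct *work)
    {
            struct e1000_adapter *adapter = container_of(work,
                            struct e1000_adapter, update_phy_task);

            if (test_bit(__E1000_DOWN, &adapter->state))
                    return;         /* interface is tearing down; bail out */

            /* ... safe to touch adapter->hw from here on ... */
    }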
@@ -1851,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1851 int err = 0, vector = 0; 1849 int err = 0, vector = 0;
1852 1850
1853 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1851 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1854 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); 1852 snprintf(adapter->rx_ring->name,
1853 sizeof(adapter->rx_ring->name) - 1,
1854 "%s-rx-0", netdev->name);
1855 else 1855 else
1856 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1856 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1857 err = request_irq(adapter->msix_entries[vector].vector, 1857 err = request_irq(adapter->msix_entries[vector].vector,
@@ -1864,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1864 vector++; 1864 vector++;
1865 1865
1866 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1866 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1867 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); 1867 snprintf(adapter->tx_ring->name,
1868 sizeof(adapter->tx_ring->name) - 1,
1869 "%s-tx-0", netdev->name);
1868 else 1870 else
1869 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1871 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1870 err = request_irq(adapter->msix_entries[vector].vector, 1872 err = request_irq(adapter->msix_entries[vector].vector,
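The sprintf()-to-snprintf() conversions above bound the MSI-X IRQ-name formatting: snprintf() writes at most size-1 characters plus a terminating NUL, so an unexpectedly long netdev->name can no longer overrun the ring's name buffer (the extra -1 the driver passes leaves one more guard byte on top of that). Schematically, with a buffer size mirroring the strlen() check already in place:

    char name[IFNAMSIZ + 5];        /* room for an "-rx-0" style suffix */

    /* truncates instead of overrunning if netdev->name is too long */
    snprintf(name, sizeof(name) - 1, "%s-rx-0", netdev->name);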
@@ -2728,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2728{ 2730{
2729 struct e1000_hw *hw = &adapter->hw; 2731 struct e1000_hw *hw = &adapter->hw;
2730 u32 rctl, rfctl; 2732 u32 rctl, rfctl;
2731 u32 psrctl = 0;
2732 u32 pages = 0; 2733 u32 pages = 0;
2733 2734
2734 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2735 /* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2827,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2827 adapter->rx_ps_pages = 0; 2828 adapter->rx_ps_pages = 0;
2828 2829
2829 if (adapter->rx_ps_pages) { 2830 if (adapter->rx_ps_pages) {
2831 u32 psrctl = 0;
2832
2830 /* Configure extra packet-split registers */ 2833 /* Configure extra packet-split registers */
2831 rfctl = er32(RFCTL); 2834 rfctl = er32(RFCTL);
2832 rfctl |= E1000_RFCTL_EXTEN; 2835 rfctl |= E1000_RFCTL_EXTEN;
@@ -3028,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
3028 struct netdev_hw_addr *ha; 3031 struct netdev_hw_addr *ha;
3029 u8 *mta_list; 3032 u8 *mta_list;
3030 u32 rctl; 3033 u32 rctl;
3031 int i;
3032 3034
3033 /* Check for Promiscuous and All Multicast modes */ 3035 /* Check for Promiscuous and All Multicast modes */
3034 3036
@@ -3051,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
3051 ew32(RCTL, rctl); 3053 ew32(RCTL, rctl);
3052 3054
3053 if (!netdev_mc_empty(netdev)) { 3055 if (!netdev_mc_empty(netdev)) {
3056 int i = 0;
3057
3054 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); 3058 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
3055 if (!mta_list) 3059 if (!mta_list)
3056 return; 3060 return;
3057 3061
3058 /* prepare a packed array of only addresses. */ 3062 /* prepare a packed array of only addresses. */
3059 i = 0;
3060 netdev_for_each_mc_addr(ha, netdev) 3063 netdev_for_each_mc_addr(ha, netdev)
3061 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3064 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3062 3065
@@ -3338,6 +3341,23 @@ int e1000e_up(struct e1000_adapter *adapter)
3338 return 0; 3341 return 0;
3339} 3342}
3340 3343
3344static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3345{
3346 struct e1000_hw *hw = &adapter->hw;
3347
3348 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3349 return;
3350
3351 /* flush pending descriptor writebacks to memory */
3352 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3353 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3354
3355 /* execute the writes immediately */
3356 e1e_flush();
3357}
3358
3359static void e1000e_update_stats(struct e1000_adapter *adapter);
3360
3341void e1000e_down(struct e1000_adapter *adapter) 3361void e1000e_down(struct e1000_adapter *adapter)
3342{ 3362{
3343 struct net_device *netdev = adapter->netdev; 3363 struct net_device *netdev = adapter->netdev;
@@ -3372,11 +3392,19 @@ void e1000e_down(struct e1000_adapter *adapter)
3372 del_timer_sync(&adapter->phy_info_timer); 3392 del_timer_sync(&adapter->phy_info_timer);
3373 3393
3374 netif_carrier_off(netdev); 3394 netif_carrier_off(netdev);
3395
3396 spin_lock(&adapter->stats64_lock);
3397 e1000e_update_stats(adapter);
3398 spin_unlock(&adapter->stats64_lock);
3399
3375 adapter->link_speed = 0; 3400 adapter->link_speed = 0;
3376 adapter->link_duplex = 0; 3401 adapter->link_duplex = 0;
3377 3402
3378 if (!pci_channel_offline(adapter->pdev)) 3403 if (!pci_channel_offline(adapter->pdev))
3379 e1000e_reset(adapter); 3404 e1000e_reset(adapter);
3405
3406 e1000e_flush_descriptors(adapter);
3407
3380 e1000_clean_tx_ring(adapter); 3408 e1000_clean_tx_ring(adapter);
3381 e1000_clean_rx_ring(adapter); 3409 e1000_clean_rx_ring(adapter);
3382 3410
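Read as a sequence, the reworked down path above latches statistics before the reset can clear the hardware counters, then flushes pending descriptor write-backs so the ring cleanup that follows sees settled memory. The ordering, condensed (a pseudo-order of the calls already shown, not new code):

    netif_carrier_off(netdev);
    spin_lock(&adapter->stats64_lock);
    e1000e_update_stats(adapter);   /* latch counters before reset clears them */
    spin_unlock(&adapter->stats64_lock);
    e1000e_reset(adapter);          /* HW stats registers are reset here */
    e1000e_flush_descriptors(adapter); /* settle DMA before ring cleanup */
    e1000_clean_tx_ring(adapter);
    e1000_clean_rx_ring(adapter);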
@@ -3413,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3413 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3441 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3414 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3442 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3415 3443
3444 spin_lock_init(&adapter->stats64_lock);
3445
3416 e1000e_set_interrupt_capability(adapter); 3446 e1000e_set_interrupt_capability(adapter);
3417 3447
3418 if (e1000_alloc_queues(adapter)) 3448 if (e1000_alloc_queues(adapter))
@@ -3765,6 +3795,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
3765{ 3795{
3766 struct e1000_adapter *adapter = container_of(work, 3796 struct e1000_adapter *adapter = container_of(work,
3767 struct e1000_adapter, update_phy_task); 3797 struct e1000_adapter, update_phy_task);
3798
3799 if (test_bit(__E1000_DOWN, &adapter->state))
3800 return;
3801
3768 e1000_get_phy_info(&adapter->hw); 3802 e1000_get_phy_info(&adapter->hw);
3769} 3803}
3770 3804
@@ -3775,6 +3809,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
3775static void e1000_update_phy_info(unsigned long data) 3809static void e1000_update_phy_info(unsigned long data)
3776{ 3810{
3777 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 3811 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3812
3813 if (test_bit(__E1000_DOWN, &adapter->state))
3814 return;
3815
3778 schedule_work(&adapter->update_phy_task); 3816 schedule_work(&adapter->update_phy_task);
3779} 3817}
3780 3818
@@ -3886,7 +3924,7 @@ release:
3886 * e1000e_update_stats - Update the board statistics counters 3924 * e1000e_update_stats - Update the board statistics counters
3887 * @adapter: board private structure 3925 * @adapter: board private structure
3888 **/ 3926 **/
3889void e1000e_update_stats(struct e1000_adapter *adapter) 3927static void e1000e_update_stats(struct e1000_adapter *adapter)
3890{ 3928{
3891 struct net_device *netdev = adapter->netdev; 3929 struct net_device *netdev = adapter->netdev;
3892 struct e1000_hw *hw = &adapter->hw; 3930 struct e1000_hw *hw = &adapter->hw;
@@ -3998,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
3998{ 4036{
3999 struct e1000_hw *hw = &adapter->hw; 4037 struct e1000_hw *hw = &adapter->hw;
4000 struct e1000_phy_regs *phy = &adapter->phy_regs; 4038 struct e1000_phy_regs *phy = &adapter->phy_regs;
4001 int ret_val;
4002 4039
4003 if ((er32(STATUS) & E1000_STATUS_LU) && 4040 if ((er32(STATUS) & E1000_STATUS_LU) &&
4004 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4041 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4042 int ret_val;
4043
4005 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); 4044 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4006 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); 4045 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4007 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); 4046 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4147,7 +4186,9 @@ static void e1000_watchdog_task(struct work_struct *work)
4147 struct e1000_ring *tx_ring = adapter->tx_ring; 4186 struct e1000_ring *tx_ring = adapter->tx_ring;
4148 struct e1000_hw *hw = &adapter->hw; 4187 struct e1000_hw *hw = &adapter->hw;
4149 u32 link, tctl; 4188 u32 link, tctl;
4150 int tx_pending = 0; 4189
4190 if (test_bit(__E1000_DOWN, &adapter->state))
4191 return;
4151 4192
4152 link = e1000e_has_link(adapter); 4193 link = e1000e_has_link(adapter);
4153 if ((netif_carrier_ok(netdev)) && link) { 4194 if ((netif_carrier_ok(netdev)) && link) {
@@ -4285,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
4285 } 4326 }
4286 4327
4287link_up: 4328link_up:
4329 spin_lock(&adapter->stats64_lock);
4288 e1000e_update_stats(adapter); 4330 e1000e_update_stats(adapter);
4331 spin_unlock(&adapter->stats64_lock);
4289 4332
4290 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4333 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4291 adapter->tpt_old = adapter->stats.tpt; 4334 adapter->tpt_old = adapter->stats.tpt;
@@ -4299,21 +4342,17 @@ link_up:
4299 4342
4300 e1000e_update_adaptive(&adapter->hw); 4343 e1000e_update_adaptive(&adapter->hw);
4301 4344
4302 if (!netif_carrier_ok(netdev)) { 4345 if (!netif_carrier_ok(netdev) &&
4303 tx_pending = (e1000_desc_unused(tx_ring) + 1 < 4346 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4304 tx_ring->count); 4347 /*
4305 if (tx_pending) { 4348 * We've lost link, so the controller stops DMA,
4306 /* 4349 * but we've got queued Tx work that's never going
4307 * We've lost link, so the controller stops DMA, 4350 * to get done, so reset controller to flush Tx.
4308 * but we've got queued Tx work that's never going 4351 * (Do the reset outside of interrupt context).
4309 * to get done, so reset controller to flush Tx. 4352 */
4310 * (Do the reset outside of interrupt context). 4353 schedule_work(&adapter->reset_task);
4311 */ 4354 /* return immediately since reset is imminent */
4312 adapter->tx_timeout_count++; 4355 return;
4313 schedule_work(&adapter->reset_task);
4314 /* return immediately since reset is imminent */
4315 return;
4316 }
4317 } 4356 }
4318 4357
4319 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4358 /* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4338,19 +4377,12 @@ link_up:
4338 else 4377 else
4339 ew32(ICS, E1000_ICS_RXDMT0); 4378 ew32(ICS, E1000_ICS_RXDMT0);
4340 4379
4380 /* flush pending descriptors to memory before detecting Tx hang */
4381 e1000e_flush_descriptors(adapter);
4382
4341 /* Force detection of hung controller every watchdog period */ 4383 /* Force detection of hung controller every watchdog period */
4342 adapter->detect_tx_hung = 1; 4384 adapter->detect_tx_hung = 1;
4343 4385
4344 /* flush partial descriptors to memory before detecting Tx hang */
4345 if (adapter->flags2 & FLAG2_DMA_BURST) {
4346 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4347 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4348 /*
4349 * no need to flush the writes because the timeout code does
4350 * an er32 first thing
4351 */
4352 }
4353
4354 /* 4386 /*
4355 * With 82571 controllers, LAA may be overwritten due to controller 4387 * With 82571 controllers, LAA may be overwritten due to controller
4356 * reset from the other port. Set the appropriate LAA in RAR[0] 4388 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4384,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
4384 u32 cmd_length = 0; 4416 u32 cmd_length = 0;
4385 u16 ipcse = 0, tucse, mss; 4417 u16 ipcse = 0, tucse, mss;
4386 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4418 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4387 int err;
4388 4419
4389 if (!skb_is_gso(skb)) 4420 if (!skb_is_gso(skb))
4390 return 0; 4421 return 0;
4391 4422
4392 if (skb_header_cloned(skb)) { 4423 if (skb_header_cloned(skb)) {
4393 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4424 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4425
4394 if (err) 4426 if (err)
4395 return err; 4427 return err;
4396 } 4428 }
@@ -4888,6 +4920,10 @@ static void e1000_reset_task(struct work_struct *work)
4888 struct e1000_adapter *adapter; 4920 struct e1000_adapter *adapter;
4889 adapter = container_of(work, struct e1000_adapter, reset_task); 4921 adapter = container_of(work, struct e1000_adapter, reset_task);
4890 4922
4923 /* don't run the task if already down */
4924 if (test_bit(__E1000_DOWN, &adapter->state))
4925 return;
4926
4891 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && 4927 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4892 (adapter->flags & FLAG_RX_RESTART_NOW))) { 4928 (adapter->flags & FLAG_RX_RESTART_NOW))) {
4893 e1000e_dump(adapter); 4929 e1000e_dump(adapter);
@@ -4897,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
4897} 4933}
4898 4934
4899/** 4935/**
4900 * e1000_get_stats - Get System Network Statistics 4936 * e1000_get_stats64 - Get System Network Statistics
4901 * @netdev: network interface device structure 4937 * @netdev: network interface device structure
4938 * @stats: rtnl_link_stats64 pointer
4902 * 4939 *
4903 * Returns the address of the device statistics structure. 4940 * Returns the address of the device statistics structure.
4904 * The statistics are actually updated from the timer callback.
4905 **/ 4941 **/
4906static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 4942struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
4943 struct rtnl_link_stats64 *stats)
4907{ 4944{
4908 /* only return the current stats */ 4945 struct e1000_adapter *adapter = netdev_priv(netdev);
4909 return &netdev->stats; 4946
4947 memset(stats, 0, sizeof(struct rtnl_link_stats64));
4948 spin_lock(&adapter->stats64_lock);
4949 e1000e_update_stats(adapter);
4950 /* Fill out the OS statistics structure */
4951 stats->rx_bytes = adapter->stats.gorc;
4952 stats->rx_packets = adapter->stats.gprc;
4953 stats->tx_bytes = adapter->stats.gotc;
4954 stats->tx_packets = adapter->stats.gptc;
4955 stats->multicast = adapter->stats.mprc;
4956 stats->collisions = adapter->stats.colc;
4957
4958 /* Rx Errors */
4959
4960 /*
4961 * RLEC on some newer hardware can be incorrect so build
4962 * our own version based on RUC and ROC
4963 */
4964 stats->rx_errors = adapter->stats.rxerrc +
4965 adapter->stats.crcerrs + adapter->stats.algnerrc +
4966 adapter->stats.ruc + adapter->stats.roc +
4967 adapter->stats.cexterr;
4968 stats->rx_length_errors = adapter->stats.ruc +
4969 adapter->stats.roc;
4970 stats->rx_crc_errors = adapter->stats.crcerrs;
4971 stats->rx_frame_errors = adapter->stats.algnerrc;
4972 stats->rx_missed_errors = adapter->stats.mpc;
4973
4974 /* Tx Errors */
4975 stats->tx_errors = adapter->stats.ecol +
4976 adapter->stats.latecol;
4977 stats->tx_aborted_errors = adapter->stats.ecol;
4978 stats->tx_window_errors = adapter->stats.latecol;
4979 stats->tx_carrier_errors = adapter->stats.tncrs;
4980
4981 /* Tx Dropped needs to be maintained elsewhere */
4982
4983 spin_unlock(&adapter->stats64_lock);
4984 return stats;
4910} 4985}
4911 4986
4912/** 4987/**
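The new stats64_lock establishes a simple contract used throughout the rest of the patch: every writer (the watchdog task, the down path) and every reader (this function, the ethtool stats path) serializes around e1000e_update_stats(), so no snapshot can observe half-updated counters. The critical section, schematically:

    spin_lock(&adapter->stats64_lock);
    e1000e_update_stats(adapter);            /* latch HW counters into SW copy */
    stats->rx_packets = adapter->stats.gprc; /* ... then read them consistently */
    spin_unlock(&adapter->stats64_lock);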
@@ -5307,7 +5382,7 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5307 __e1000e_disable_aspm(pdev, state); 5382 __e1000e_disable_aspm(pdev, state);
5308} 5383}
5309 5384
5310#ifdef CONFIG_PM_OPS 5385#ifdef CONFIG_PM
5311static bool e1000e_pm_ready(struct e1000_adapter *adapter) 5386static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5312{ 5387{
5313 return !!adapter->tx_ring->buffer_info; 5388 return !!adapter->tx_ring->buffer_info;
@@ -5458,7 +5533,7 @@ static int e1000_runtime_resume(struct device *dev)
5458 return __e1000_resume(pdev); 5533 return __e1000_resume(pdev);
5459} 5534}
5460#endif /* CONFIG_PM_RUNTIME */ 5535#endif /* CONFIG_PM_RUNTIME */
5461#endif /* CONFIG_PM_OPS */ 5536#endif /* CONFIG_PM */
5462 5537
5463static void e1000_shutdown(struct pci_dev *pdev) 5538static void e1000_shutdown(struct pci_dev *pdev)
5464{ 5539{
@@ -5476,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
5476{ 5551{
5477 struct net_device *netdev = data; 5552 struct net_device *netdev = data;
5478 struct e1000_adapter *adapter = netdev_priv(netdev); 5553 struct e1000_adapter *adapter = netdev_priv(netdev);
5479 int vector, msix_irq;
5480 5554
5481 if (adapter->msix_entries) { 5555 if (adapter->msix_entries) {
5556 int vector, msix_irq;
5557
5482 vector = 0; 5558 vector = 0;
5483 msix_irq = adapter->msix_entries[vector].vector; 5559 msix_irq = adapter->msix_entries[vector].vector;
5484 disable_irq(msix_irq); 5560 disable_irq(msix_irq);
@@ -5675,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
5675 .ndo_open = e1000_open, 5751 .ndo_open = e1000_open,
5676 .ndo_stop = e1000_close, 5752 .ndo_stop = e1000_close,
5677 .ndo_start_xmit = e1000_xmit_frame, 5753 .ndo_start_xmit = e1000_xmit_frame,
5678 .ndo_get_stats = e1000_get_stats, 5754 .ndo_get_stats64 = e1000e_get_stats64,
5679 .ndo_set_multicast_list = e1000_set_multi, 5755 .ndo_set_multicast_list = e1000_set_multi,
5680 .ndo_set_mac_address = e1000_set_mac, 5756 .ndo_set_mac_address = e1000_set_mac,
5681 .ndo_change_mtu = e1000_change_mtu, 5757 .ndo_change_mtu = e1000_change_mtu,
@@ -5936,7 +6012,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5936 /* APME bit in EEPROM is mapped to WUC.APME */ 6012 /* APME bit in EEPROM is mapped to WUC.APME */
5937 eeprom_data = er32(WUC); 6013 eeprom_data = er32(WUC);
5938 eeprom_apme_mask = E1000_WUC_APME; 6014 eeprom_apme_mask = E1000_WUC_APME;
5939 if (eeprom_data & E1000_WUC_PHY_WAKE) 6015 if ((hw->mac.type > e1000_ich10lan) &&
6016 (eeprom_data & E1000_WUC_PHY_WAKE))
5940 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 6017 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
5941 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 6018 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
5942 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 6019 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
@@ -6164,7 +6241,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6164}; 6241};
6165MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 6242MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6166 6243
6167#ifdef CONFIG_PM_OPS 6244#ifdef CONFIG_PM
6168static const struct dev_pm_ops e1000_pm_ops = { 6245static const struct dev_pm_ops e1000_pm_ops = {
6169 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) 6246 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6170 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, 6247 SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
@@ -6178,7 +6255,7 @@ static struct pci_driver e1000_driver = {
6178 .id_table = e1000_pci_tbl, 6255 .id_table = e1000_pci_tbl,
6179 .probe = e1000_probe, 6256 .probe = e1000_probe,
6180 .remove = __devexit_p(e1000_remove), 6257 .remove = __devexit_p(e1000_remove),
6181#ifdef CONFIG_PM_OPS 6258#ifdef CONFIG_PM
6182 .driver.pm = &e1000_pm_ops, 6259 .driver.pm = &e1000_pm_ops,
6183#endif 6260#endif
6184 .shutdown = e1000_shutdown, 6261 .shutdown = e1000_shutdown,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6bea051b134..6ae31fcfb62 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
2409s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) 2409s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2410{ 2410{
2411 s32 ret_val; 2411 s32 ret_val;
2412 u32 page_select = 0;
2413 u32 page = offset >> IGP_PAGE_SHIFT; 2412 u32 page = offset >> IGP_PAGE_SHIFT;
2414 u32 page_shift = 0;
2415 2413
2416 ret_val = hw->phy.ops.acquire(hw); 2414 ret_val = hw->phy.ops.acquire(hw);
2417 if (ret_val) 2415 if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2427 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2425 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2428 2426
2429 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2427 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2428 u32 page_shift, page_select;
2429
2430 /* 2430 /*
2431 * Page select is register 31 for phy address 1 and 22 for 2431 * Page select is register 31 for phy address 1 and 22 for
2432 * phy address 2 and 3. Page select is shifted only for 2432 * phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
2468s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) 2468s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2469{ 2469{
2470 s32 ret_val; 2470 s32 ret_val;
2471 u32 page_select = 0;
2472 u32 page = offset >> IGP_PAGE_SHIFT; 2471 u32 page = offset >> IGP_PAGE_SHIFT;
2473 u32 page_shift = 0;
2474 2472
2475 ret_val = hw->phy.ops.acquire(hw); 2473 ret_val = hw->phy.ops.acquire(hw);
2476 if (ret_val) 2474 if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2486 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2484 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2487 2485
2488 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2486 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2487 u32 page_shift, page_select;
2488
2489 /* 2489 /*
2490 * Page select is register 31 for phy address 1 and 22 for 2490 * Page select is register 31 for phy address 1 and 22 for
2491 * phy address 2 and 3. Page select is shifted only for 2491 * phy address 2 and 3. Page select is shifted only for
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa9af7..907b05a1c65 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
812 if (netif_msg_hw(priv)) 812 if (netif_msg_hw(priv))
813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", 813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
814 endptr + 1); 814 endptr + 1);
815 enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv); 815 enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
816} 816}
817 817
818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, 818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
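The one-character-looking enc28j60 fix above is the classic array-parameter pitfall: a u8 tsv[TSV_SIZE] parameter decays to u8 *, so sizeof(tsv) inside the function is the size of a pointer (4 or 8), not TSV_SIZE, and the old code read only a handful of TSV bytes. A standalone demonstration with hypothetical names; this compiles and runs as plain C:

    #include <stdio.h>

    #define TSV_SIZE 100

    static void show(unsigned char tsv[TSV_SIZE])
    {
            /* prints the pointer size, e.g. 8 -- NOT 100 */
            printf("sizeof in callee: %zu\n", sizeof(tsv));
    }

    int main(void)
    {
            unsigned char tsv[TSV_SIZE];

            printf("sizeof in caller: %zu\n", sizeof(tsv)); /* 100 */
            show(tsv);
            return 0;
    }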
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index e7b6c31880b..2e573be16c1 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o 4 enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
5 5
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a937f49d9db..3a3c3c8a3a9 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,13 +32,13 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "1.4.1.10" 35#define DRV_VERSION "2.1.1.12"
36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
39 39
40#define ENIC_WQ_MAX 8 40#define ENIC_WQ_MAX 1
41#define ENIC_RQ_MAX 8 41#define ENIC_RQ_MAX 1
42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
44 44
@@ -49,7 +49,7 @@ struct enic_msix_entry {
49 void *devid; 49 void *devid;
50}; 50};
51 51
52#define ENIC_SET_APPLIED (1 << 0) 52#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
53#define ENIC_SET_REQUEST (1 << 1) 53#define ENIC_SET_REQUEST (1 << 1)
54#define ENIC_SET_NAME (1 << 2) 54#define ENIC_SET_NAME (1 << 2)
55#define ENIC_SET_INSTANCE (1 << 3) 55#define ENIC_SET_INSTANCE (1 << 3)
@@ -101,7 +101,6 @@ struct enic {
101 /* receive queue cache line section */ 101 /* receive queue cache line section */
102 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; 102 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
103 unsigned int rq_count; 103 unsigned int rq_count;
104 int (*rq_alloc_buf)(struct vnic_rq *rq);
105 u64 rq_truncated_pkts; 104 u64 rq_truncated_pkts;
106 u64 rq_bad_fcs; 105 u64 rq_bad_fcs;
107 struct napi_struct napi[ENIC_RQ_MAX]; 106 struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644
index 00000000000..37ad3a1c82e
--- /dev/null
+++ b/drivers/net/enic/enic_dev.c
@@ -0,0 +1,221 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/pci.h>
20#include <linux/etherdevice.h>
21
22#include "vnic_dev.h"
23#include "vnic_vic.h"
24#include "enic_res.h"
25#include "enic.h"
26#include "enic_dev.h"
27
28int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
29{
30 int err;
31
32 spin_lock(&enic->devcmd_lock);
33 err = vnic_dev_fw_info(enic->vdev, fw_info);
34 spin_unlock(&enic->devcmd_lock);
35
36 return err;
37}
38
39int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
40{
41 int err;
42
43 spin_lock(&enic->devcmd_lock);
44 err = vnic_dev_stats_dump(enic->vdev, vstats);
45 spin_unlock(&enic->devcmd_lock);
46
47 return err;
48}
49
50int enic_dev_add_station_addr(struct enic *enic)
51{
52 int err;
53
54 if (!is_valid_ether_addr(enic->netdev->dev_addr))
55 return -EADDRNOTAVAIL;
56
57 spin_lock(&enic->devcmd_lock);
58 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
59 spin_unlock(&enic->devcmd_lock);
60
61 return err;
62}
63
64int enic_dev_del_station_addr(struct enic *enic)
65{
66 int err;
67
68 if (!is_valid_ether_addr(enic->netdev->dev_addr))
69 return -EADDRNOTAVAIL;
70
71 spin_lock(&enic->devcmd_lock);
72 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
73 spin_unlock(&enic->devcmd_lock);
74
75 return err;
76}
77
78int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
79 int broadcast, int promisc, int allmulti)
80{
81 int err;
82
83 spin_lock(&enic->devcmd_lock);
84 err = vnic_dev_packet_filter(enic->vdev, directed,
85 multicast, broadcast, promisc, allmulti);
86 spin_unlock(&enic->devcmd_lock);
87
88 return err;
89}
90
91int enic_dev_add_addr(struct enic *enic, u8 *addr)
92{
93 int err;
94
95 spin_lock(&enic->devcmd_lock);
96 err = vnic_dev_add_addr(enic->vdev, addr);
97 spin_unlock(&enic->devcmd_lock);
98
99 return err;
100}
101
102int enic_dev_del_addr(struct enic *enic, u8 *addr)
103{
104 int err;
105
106 spin_lock(&enic->devcmd_lock);
107 err = vnic_dev_del_addr(enic->vdev, addr);
108 spin_unlock(&enic->devcmd_lock);
109
110 return err;
111}
112
113int enic_dev_notify_unset(struct enic *enic)
114{
115 int err;
116
117 spin_lock(&enic->devcmd_lock);
118 err = vnic_dev_notify_unset(enic->vdev);
119 spin_unlock(&enic->devcmd_lock);
120
121 return err;
122}
123
124int enic_dev_hang_notify(struct enic *enic)
125{
126 int err;
127
128 spin_lock(&enic->devcmd_lock);
129 err = vnic_dev_hang_notify(enic->vdev);
130 spin_unlock(&enic->devcmd_lock);
131
132 return err;
133}
134
135int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
136{
137 int err;
138
139 spin_lock(&enic->devcmd_lock);
140 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
141 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
142 spin_unlock(&enic->devcmd_lock);
143
144 return err;
145}
146
147int enic_dev_enable(struct enic *enic)
148{
149 int err;
150
151 spin_lock(&enic->devcmd_lock);
152 err = vnic_dev_enable_wait(enic->vdev);
153 spin_unlock(&enic->devcmd_lock);
154
155 return err;
156}
157
158int enic_dev_disable(struct enic *enic)
159{
160 int err;
161
162 spin_lock(&enic->devcmd_lock);
163 err = vnic_dev_disable(enic->vdev);
164 spin_unlock(&enic->devcmd_lock);
165
166 return err;
167}
168
169int enic_vnic_dev_deinit(struct enic *enic)
170{
171 int err;
172
173 spin_lock(&enic->devcmd_lock);
174 err = vnic_dev_deinit(enic->vdev);
175 spin_unlock(&enic->devcmd_lock);
176
177 return err;
178}
179
180int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
181{
182 int err;
183
184 spin_lock(&enic->devcmd_lock);
185 err = vnic_dev_init_prov(enic->vdev,
186 (u8 *)vp, vic_provinfo_size(vp));
187 spin_unlock(&enic->devcmd_lock);
188
189 return err;
190}
191
192int enic_dev_init_done(struct enic *enic, int *done, int *error)
193{
194 int err;
195
196 spin_lock(&enic->devcmd_lock);
197 err = vnic_dev_init_done(enic->vdev, done, error);
198 spin_unlock(&enic->devcmd_lock);
199
200 return err;
201}
202
203/* rtnl lock is held */
204void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
205{
206 struct enic *enic = netdev_priv(netdev);
207
208 spin_lock(&enic->devcmd_lock);
209 enic_add_vlan(enic, vid);
210 spin_unlock(&enic->devcmd_lock);
211}
212
213/* rtnl lock is held */
214void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
215{
216 struct enic *enic = netdev_priv(netdev);
217
218 spin_lock(&enic->devcmd_lock);
219 enic_del_vlan(enic, vid);
220 spin_unlock(&enic->devcmd_lock);
221}
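The new enic_dev.c collects one wrapper per firmware devcmd, all with the same shape: take devcmd_lock, issue the vnic_dev_*() call, drop the lock, pass the error through unchanged. Centralizing this keeps every devcmd serialized without each call site repeating the locking. The template, with vnic_dev_demo_cmd() standing in for any real command:

    int enic_dev_demo_cmd(struct enic *enic)
    {
            int err;

            spin_lock(&enic->devcmd_lock);
            err = vnic_dev_demo_cmd(enic->vdev);    /* hypothetical devcmd */
            spin_unlock(&enic->devcmd_lock);

            return err;
    }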
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644
index 00000000000..495f57fcb88
--- /dev/null
+++ b/drivers/net/enic/enic_dev.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef _ENIC_DEV_H_
20#define _ENIC_DEV_H_
21
22int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
23int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
24int enic_dev_add_station_addr(struct enic *enic);
25int enic_dev_del_station_addr(struct enic *enic);
26int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
27 int broadcast, int promisc, int allmulti);
28int enic_dev_add_addr(struct enic *enic, u8 *addr);
29int enic_dev_del_addr(struct enic *enic, u8 *addr);
30void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
31void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
32int enic_dev_notify_unset(struct enic *enic);
33int enic_dev_hang_notify(struct enic *enic);
34int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
35int enic_dev_enable(struct enic *enic);
36int enic_dev_disable(struct enic *enic);
37int enic_vnic_dev_deinit(struct enic *enic);
38int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
39int enic_dev_init_done(struct enic *enic, int *done, int *error);
40
41#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a0af48c51fb..8b9cad5e971 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,6 +44,7 @@
44#include "vnic_vic.h" 44#include "vnic_vic.h"
45#include "enic_res.h" 45#include "enic_res.h"
46#include "enic.h" 46#include "enic.h"
47#include "enic_dev.h"
47 48
48#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) 49#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
49#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) 50#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
190 return 0; 191 return 0;
191} 192}
192 193
193static int enic_dev_fw_info(struct enic *enic,
194 struct vnic_devcmd_fw_info **fw_info)
195{
196 int err;
197
198 spin_lock(&enic->devcmd_lock);
199 err = vnic_dev_fw_info(enic->vdev, fw_info);
200 spin_unlock(&enic->devcmd_lock);
201
202 return err;
203}
204
205static void enic_get_drvinfo(struct net_device *netdev, 194static void enic_get_drvinfo(struct net_device *netdev,
206 struct ethtool_drvinfo *drvinfo) 195 struct ethtool_drvinfo *drvinfo)
207{ 196{
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
246 } 235 }
247} 236}
248 237
249static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
250{
251 int err;
252
253 spin_lock(&enic->devcmd_lock);
254 err = vnic_dev_stats_dump(enic->vdev, vstats);
255 spin_unlock(&enic->devcmd_lock);
256
257 return err;
258}
259
260static void enic_get_ethtool_stats(struct net_device *netdev, 238static void enic_get_ethtool_stats(struct net_device *netdev,
261 struct ethtool_stats *stats, u64 *data) 239 struct ethtool_stats *stats, u64 *data)
262{ 240{
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
896 return net_stats; 874 return net_stats;
897} 875}
898 876
899static void enic_reset_multicast_list(struct enic *enic) 877static void enic_reset_addr_lists(struct enic *enic)
900{ 878{
901 enic->mc_count = 0; 879 enic->mc_count = 0;
880 enic->uc_count = 0;
902 enic->flags = 0; 881 enic->flags = 0;
903} 882}
904 883
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
919 return 0; 898 return 0;
920} 899}
921 900
922static int enic_dev_add_station_addr(struct enic *enic)
923{
924 int err = 0;
925
926 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
927 spin_lock(&enic->devcmd_lock);
928 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
929 spin_unlock(&enic->devcmd_lock);
930 }
931
932 return err;
933}
934
935static int enic_dev_del_station_addr(struct enic *enic)
936{
937 int err = 0;
938
939 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
940 spin_lock(&enic->devcmd_lock);
941 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
942 spin_unlock(&enic->devcmd_lock);
943 }
944
945 return err;
946}
947
948static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) 901static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
949{ 902{
950 struct enic *enic = netdev_priv(netdev); 903 struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
989 return enic_dev_add_station_addr(enic); 942 return enic_dev_add_station_addr(enic);
990} 943}
991 944
992static int enic_dev_packet_filter(struct enic *enic, int directed, 945static void enic_update_multicast_addr_list(struct enic *enic)
993 int multicast, int broadcast, int promisc, int allmulti)
994{
995 int err;
996
997 spin_lock(&enic->devcmd_lock);
998 err = vnic_dev_packet_filter(enic->vdev, directed,
999 multicast, broadcast, promisc, allmulti);
1000 spin_unlock(&enic->devcmd_lock);
1001
1002 return err;
1003}
1004
1005static int enic_dev_add_addr(struct enic *enic, u8 *addr)
1006{
1007 int err;
1008
1009 spin_lock(&enic->devcmd_lock);
1010 err = vnic_dev_add_addr(enic->vdev, addr);
1011 spin_unlock(&enic->devcmd_lock);
1012
1013 return err;
1014}
1015
1016static int enic_dev_del_addr(struct enic *enic, u8 *addr)
1017{
1018 int err;
1019
1020 spin_lock(&enic->devcmd_lock);
1021 err = vnic_dev_del_addr(enic->vdev, addr);
1022 spin_unlock(&enic->devcmd_lock);
1023
1024 return err;
1025}
1026
1027static void enic_add_multicast_addr_list(struct enic *enic)
1028{ 946{
1029 struct net_device *netdev = enic->netdev; 947 struct net_device *netdev = enic->netdev;
1030 struct netdev_hw_addr *ha; 948 struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
1079 enic->mc_count = mc_count; 997 enic->mc_count = mc_count;
1080} 998}
1081 999
1082static void enic_add_unicast_addr_list(struct enic *enic) 1000static void enic_update_unicast_addr_list(struct enic *enic)
1083{ 1001{
1084 struct net_device *netdev = enic->netdev; 1002 struct net_device *netdev = enic->netdev;
1085 struct netdev_hw_addr *ha; 1003 struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
1156 } 1074 }
1157 1075
1158 if (!promisc) { 1076 if (!promisc) {
1159 enic_add_unicast_addr_list(enic); 1077 enic_update_unicast_addr_list(enic);
1160 if (!allmulti) 1078 if (!allmulti)
1161 enic_add_multicast_addr_list(enic); 1079 enic_update_multicast_addr_list(enic);
1162 } 1080 }
1163} 1081}
1164 1082
@@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
1170 enic->vlan_group = vlan_group; 1088 enic->vlan_group = vlan_group;
1171} 1089}
1172 1090
1173/* rtnl lock is held */
1174static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1175{
1176 struct enic *enic = netdev_priv(netdev);
1177
1178 spin_lock(&enic->devcmd_lock);
1179 enic_add_vlan(enic, vid);
1180 spin_unlock(&enic->devcmd_lock);
1181}
1182
1183/* rtnl lock is held */
1184static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1185{
1186 struct enic *enic = netdev_priv(netdev);
1187
1188 spin_lock(&enic->devcmd_lock);
1189 enic_del_vlan(enic, vid);
1190 spin_unlock(&enic->devcmd_lock);
1191}
1192
1193/* netif_tx_lock held, BHs disabled */ 1091/* netif_tx_lock held, BHs disabled */
1194static void enic_tx_timeout(struct net_device *netdev) 1092static void enic_tx_timeout(struct net_device *netdev)
1195{ 1093{
@@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
1197 schedule_work(&enic->reset); 1095 schedule_work(&enic->reset);
1198} 1096}
1199 1097
1200static int enic_vnic_dev_deinit(struct enic *enic)
1201{
1202 int err;
1203
1204 spin_lock(&enic->devcmd_lock);
1205 err = vnic_dev_deinit(enic->vdev);
1206 spin_unlock(&enic->devcmd_lock);
1207
1208 return err;
1209}
1210
1211static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
1212{
1213 int err;
1214
1215 spin_lock(&enic->devcmd_lock);
1216 err = vnic_dev_init_prov(enic->vdev,
1217 (u8 *)vp, vic_provinfo_size(vp));
1218 spin_unlock(&enic->devcmd_lock);
1219
1220 return err;
1221}
1222
1223static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1224{
1225 int err;
1226
1227 spin_lock(&enic->devcmd_lock);
1228 err = vnic_dev_init_done(enic->vdev, done, error);
1229 spin_unlock(&enic->devcmd_lock);
1230
1231 return err;
1232}
1233
1234static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 1098static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1235{ 1099{
1236 struct enic *enic = netdev_priv(netdev); 1100 struct enic *enic = netdev_priv(netdev);
@@ -1262,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1262 if (err) 1126 if (err)
1263 return err; 1127 return err;
1264 1128
1129 enic_reset_addr_lists(enic);
1130
1265 switch (enic->pp.request) { 1131 switch (enic->pp.request) {
1266 1132
1267 case PORT_REQUEST_ASSOCIATE: 1133 case PORT_REQUEST_ASSOCIATE:
@@ -1318,18 +1184,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1318 vic_provinfo_free(vp); 1184 vic_provinfo_free(vp);
1319 if (err) 1185 if (err)
1320 return err; 1186 return err;
1321
1322 enic->pp.set |= ENIC_SET_APPLIED;
1323 break; 1187 break;
1324 1188
1325 case PORT_REQUEST_DISASSOCIATE: 1189 case PORT_REQUEST_DISASSOCIATE:
1326 enic->pp.set &= ~ENIC_SET_APPLIED;
1327 break; 1190 break;
1328 1191
1329 default: 1192 default:
1330 return -EINVAL; 1193 return -EINVAL;
1331 } 1194 }
1332 1195
1196 /* Set flag to indicate that the port assoc/disassoc
1197 * request has been sent out to fw
1198 */
1199 enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
1200
1333 return 0; 1201 return 0;
1334} 1202}
1335 1203
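The two ENIC_SET_APPLIED updates above collapse into a single ENIC_PORT_REQUEST_APPLIED assignment after the switch, so the flag now records that either an associate or a disassociate request reached firmware. A minimal sketch of how a query path can gate on it (illustrative only; the real check is the enic_get_vf_port change a few hunks below):

	/* Illustrative sketch, not part of the patch: callers that report
	 * port profile state should bail out unless a request was sent.
	 */
	static int enic_pp_request_sent(struct enic *enic)
	{
		return !!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED);
	}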
@@ -1379,9 +1247,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1379 1247
1380 if (is_zero_ether_addr(netdev->dev_addr)) 1248 if (is_zero_ether_addr(netdev->dev_addr))
1381 random_ether_addr(netdev->dev_addr); 1249 random_ether_addr(netdev->dev_addr);
1382 } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
1383 if (!is_zero_ether_addr(enic->pp.mac_addr))
1384 enic_dev_del_addr(enic, enic->pp.mac_addr);
1385 } 1250 }
1386 1251
1387 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile)); 1252 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1255,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1390 if (err) 1255 if (err)
1391 goto set_port_profile_cleanup; 1256 goto set_port_profile_cleanup;
1392 1257
1393 if (!is_zero_ether_addr(enic->pp.mac_addr))
1394 enic_dev_add_addr(enic, enic->pp.mac_addr);
1395
1396set_port_profile_cleanup: 1258set_port_profile_cleanup:
1397 memset(enic->pp.vf_mac, 0, ETH_ALEN); 1259 memset(enic->pp.vf_mac, 0, ETH_ALEN);
1398 1260
@@ -1411,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1411 int err, error, done; 1273 int err, error, done;
1412 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1274 u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1413 1275
1414 if (!(enic->pp.set & ENIC_SET_APPLIED)) 1276 if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
1415 return -ENODATA; 1277 return -ENODATA;
1416 1278
1417 err = enic_dev_init_done(enic, &done, &error); 1279 err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
1489 return 0; 1351 return 0;
1490} 1352}
1491 1353
1492static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
1493{
1494 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
1495
1496 if (vnic_rq_posting_soon(rq)) {
1497
1498 /* SW workaround for A0 HW erratum: if we're just about
1499 * to write posted_index, insert a dummy desc
1500 * of type resvd
1501 */
1502
1503 rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
1504 vnic_rq_post(rq, 0, 0, 0, 0);
1505 } else {
1506 return enic_rq_alloc_buf(rq);
1507 }
1508
1509 return 0;
1510}
1511
1512static int enic_dev_hw_version(struct enic *enic,
1513 enum vnic_dev_hw_version *hw_ver)
1514{
1515 int err;
1516
1517 spin_lock(&enic->devcmd_lock);
1518 err = vnic_dev_hw_version(enic->vdev, hw_ver);
1519 spin_unlock(&enic->devcmd_lock);
1520
1521 return err;
1522}
1523
1524static int enic_set_rq_alloc_buf(struct enic *enic)
1525{
1526 enum vnic_dev_hw_version hw_ver;
1527 int err;
1528
1529 err = enic_dev_hw_version(enic, &hw_ver);
1530 if (err)
1531 return err;
1532
1533 switch (hw_ver) {
1534 case VNIC_DEV_HW_VER_A1:
1535 enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
1536 break;
1537 case VNIC_DEV_HW_VER_A2:
1538 case VNIC_DEV_HW_VER_UNKNOWN:
1539 enic->rq_alloc_buf = enic_rq_alloc_buf;
1540 break;
1541 default:
1542 return -ENODEV;
1543 }
1544
1545 return 0;
1546}
1547
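The deleted enic_rq_alloc_buf_a1() and its hw-version dispatch existed only to work around an A0 erratum: whenever posted_index was about to be written, a reserved dummy descriptor had to be queued first. With the workaround retired, every caller uses enic_rq_alloc_buf() directly (see the enic_poll and enic_open hunks below) and vnic_rq_posting_soon() disappears from vnic_rq.h. A standalone sketch of the retired predicate, for reference (VNIC_RQ_RETURN_RATE is assumed to be a power-of-two mask, matching the vnic_rq.h hunk below):

	/* posted_index is only written once every (rate_mask + 1) descriptors;
	 * the A1 workaround inserted a reserved descriptor at exactly those
	 * points instead of a real buffer.
	 */
	static inline int would_post_now(unsigned int desc_index,
		unsigned int rate_mask)
	{
		return (desc_index & rate_mask) == 0;
	}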
1548static void enic_rq_indicate_buf(struct vnic_rq *rq, 1354static void enic_rq_indicate_buf(struct vnic_rq *rq,
1549 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 1355 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1550 int skipped, void *opaque) 1356 int skipped, void *opaque)
@@ -1681,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1681 0 /* don't unmask intr */, 1487 0 /* don't unmask intr */,
1682 0 /* don't reset intr timer */); 1488 0 /* don't reset intr timer */);
1683 1489
1684 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); 1490 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1685 1491
1686 /* Buffer allocation failed. Stay in polling 1492 /* Buffer allocation failed. Stay in polling
1687 * mode so we can try to fill the ring again. 1493 * mode so we can try to fill the ring again.
@@ -1731,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1731 0 /* don't unmask intr */, 1537 0 /* don't unmask intr */,
1732 0 /* don't reset intr timer */); 1538 0 /* don't reset intr timer */);
1733 1539
1734 err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf); 1540 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
1735 1541
1736 /* Buffer allocation failed. Stay in polling mode 1542 /* Buffer allocation failed. Stay in polling mode
1737 * so we can try to fill the ring again. 1543 * so we can try to fill the ring again.
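Both poll routines above keep NAPI in polling mode when the ring refill fails, so the allocation is retried on the next softirq pass instead of being lost. A generic sketch of that pattern (do_rx_work() and my_refill() are placeholders, not enic functions):

	/* Generic refill-retry sketch, illustrative only. */
	static int my_poll(struct napi_struct *napi, int budget)
	{
		int work_done = do_rx_work(napi, budget);
		int err = my_refill(napi);

		/* On refill failure, report the full budget so NAPI keeps
		 * polling and we get another chance to fill the ring.
		 */
		if (err)
			work_done = budget;

		if (work_done < budget)
			napi_complete(napi);	/* quiet: re-enable interrupts */

		return work_done;
	}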
@@ -1901,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic)
1901 return err; 1707 return err;
1902} 1708}
1903 1709
1904static int enic_dev_notify_unset(struct enic *enic)
1905{
1906 int err;
1907
1908 spin_lock(&enic->devcmd_lock);
1909 err = vnic_dev_notify_unset(enic->vdev);
1910 spin_unlock(&enic->devcmd_lock);
1911
1912 return err;
1913}
1914
1915static int enic_dev_enable(struct enic *enic)
1916{
1917 int err;
1918
1919 spin_lock(&enic->devcmd_lock);
1920 err = vnic_dev_enable_wait(enic->vdev);
1921 spin_unlock(&enic->devcmd_lock);
1922
1923 return err;
1924}
1925
1926static int enic_dev_disable(struct enic *enic)
1927{
1928 int err;
1929
1930 spin_lock(&enic->devcmd_lock);
1931 err = vnic_dev_disable(enic->vdev);
1932 spin_unlock(&enic->devcmd_lock);
1933
1934 return err;
1935}
1936
1937static void enic_notify_timer_start(struct enic *enic) 1710static void enic_notify_timer_start(struct enic *enic)
1938{ 1711{
1939 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1712 switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1740,7 @@ static int enic_open(struct net_device *netdev)
1967 } 1740 }
1968 1741
1969 for (i = 0; i < enic->rq_count; i++) { 1742 for (i = 0; i < enic->rq_count; i++) {
1970 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); 1743 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1971 /* Need at least one buffer on ring to get going */ 1744 /* Need at least one buffer on ring to get going */
1972 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1745 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1973 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1746 netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
2285 rss_hash_bits, rss_base_cpu, rss_enable); 2058 rss_hash_bits, rss_base_cpu, rss_enable);
2286} 2059}
2287 2060
2288static int enic_dev_hang_notify(struct enic *enic)
2289{
2290 int err;
2291
2292 spin_lock(&enic->devcmd_lock);
2293 err = vnic_dev_hang_notify(enic->vdev);
2294 spin_unlock(&enic->devcmd_lock);
2295
2296 return err;
2297}
2298
2299static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
2300{
2301 int err;
2302
2303 spin_lock(&enic->devcmd_lock);
2304 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
2305 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
2306 spin_unlock(&enic->devcmd_lock);
2307
2308 return err;
2309}
2310
2311static void enic_reset(struct work_struct *work) 2061static void enic_reset(struct work_struct *work)
2312{ 2062{
2313 struct enic *enic = container_of(work, struct enic, reset); 2063 struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2070,7 @@ static void enic_reset(struct work_struct *work)
2320 enic_dev_hang_notify(enic); 2070 enic_dev_hang_notify(enic);
2321 enic_stop(enic->netdev); 2071 enic_stop(enic->netdev);
2322 enic_dev_hang_reset(enic); 2072 enic_dev_hang_reset(enic);
2323 enic_reset_multicast_list(enic); 2073 enic_reset_addr_lists(enic);
2324 enic_init_vnic_resources(enic); 2074 enic_init_vnic_resources(enic);
2325 enic_set_rss_nic_cfg(enic); 2075 enic_set_rss_nic_cfg(enic);
2326 enic_dev_set_ig_vlan_rewrite_mode(enic); 2076 enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2082,7 @@ static void enic_reset(struct work_struct *work)
2332static int enic_set_intr_mode(struct enic *enic) 2082static int enic_set_intr_mode(struct enic *enic)
2333{ 2083{
2334 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); 2084 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2335 unsigned int m = 1; 2085 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
2336 unsigned int i; 2086 unsigned int i;
2337 2087
2338 /* Set interrupt mode (INTx, MSI, MSI-X) depending 2088 /* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
2475 .ndo_tx_timeout = enic_tx_timeout, 2225 .ndo_tx_timeout = enic_tx_timeout,
2476 .ndo_set_vf_port = enic_set_vf_port, 2226 .ndo_set_vf_port = enic_set_vf_port,
2477 .ndo_get_vf_port = enic_get_vf_port, 2227 .ndo_get_vf_port = enic_get_vf_port,
2478#ifdef IFLA_VF_MAX
2479 .ndo_set_vf_mac = enic_set_vf_mac, 2228 .ndo_set_vf_mac = enic_set_vf_mac,
2480#endif
2481#ifdef CONFIG_NET_POLL_CONTROLLER 2229#ifdef CONFIG_NET_POLL_CONTROLLER
2482 .ndo_poll_controller = enic_poll_controller, 2230 .ndo_poll_controller = enic_poll_controller,
2483#endif 2231#endif
@@ -2556,25 +2304,12 @@ static int enic_dev_init(struct enic *enic)
2556 2304
2557 enic_init_vnic_resources(enic); 2305 enic_init_vnic_resources(enic);
2558 2306
2559 err = enic_set_rq_alloc_buf(enic);
2560 if (err) {
2561 dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
2562 goto err_out_free_vnic_resources;
2563 }
2564
2565 err = enic_set_rss_nic_cfg(enic); 2307 err = enic_set_rss_nic_cfg(enic);
2566 if (err) { 2308 if (err) {
2567 dev_err(dev, "Failed to config nic, aborting\n"); 2309 dev_err(dev, "Failed to config nic, aborting\n");
2568 goto err_out_free_vnic_resources; 2310 goto err_out_free_vnic_resources;
2569 } 2311 }
2570 2312
2571 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2572 if (err) {
2573 dev_err(dev,
2574 "Failed to set ingress vlan rewrite mode, aborting.\n");
2575 goto err_out_free_vnic_resources;
2576 }
2577
2578 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2313 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2579 default: 2314 default:
2580 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); 2315 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2448,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2713 goto err_out_vnic_unregister; 2448 goto err_out_vnic_unregister;
2714 } 2449 }
2715 2450
2451 /* Setup devcmd lock
2452 */
2453
2454 spin_lock_init(&enic->devcmd_lock);
2455
2456 /*
2457 * Set ingress vlan rewrite mode before vnic initialization
2458 */
2459
2460 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2461 if (err) {
2462 dev_err(dev,
2463 "Failed to set ingress vlan rewrite mode, aborting.\n");
2464 goto err_out_dev_close;
2465 }
2466
2716 /* Issue device init to initialize the vnic-to-switch link. 2467 /* Issue device init to initialize the vnic-to-switch link.
2717 * We'll start with carrier off and wait for link UP 2468 * We'll start with carrier off and wait for link UP
2718 * notification later to turn on carrier. We don't need 2469 * notification later to turn on carrier. We don't need
@@ -2736,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2736 } 2487 }
2737 } 2488 }
2738 2489
2739 /* Setup devcmd lock
2740 */
2741
2742 spin_lock_init(&enic->devcmd_lock);
2743
2744 err = enic_dev_init(enic); 2490 err = enic_dev_init(enic);
2745 if (err) { 2491 if (err) {
2746 dev_err(dev, "Device initialization failed, aborting\n"); 2492 dev_err(dev, "Device initialization failed, aborting\n");
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index fb35d8b1766..c089b362a36 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -408,10 +408,17 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
408 if (!vdev->fw_info) 408 if (!vdev->fw_info)
409 return -ENOMEM; 409 return -ENOMEM;
410 410
411 memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));
412
411 a0 = vdev->fw_info_pa; 413 a0 = vdev->fw_info_pa;
414 a1 = sizeof(struct vnic_devcmd_fw_info);
412 415
413 /* only get fw_info once and cache it */ 416 /* only get fw_info once and cache it */
414 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); 417 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
418 if (err == ERR_ECMDUNKNOWN) {
419 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
420 &a0, &a1, wait);
421 }
415 } 422 }
416 423
417 *fw_info = vdev->fw_info; 424 *fw_info = vdev->fw_info;
@@ -419,25 +426,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
419 return err; 426 return err;
420} 427}
421 428
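The fallback above keeps new drivers working on old firmware: the read-write variant of CMD_MCPU_FW_INFO is tried first, and ERR_ECMDUNKNOWN triggers a retry with the legacy write-only command (both share devcmd number 1, as the vnic_devcmd.h hunk below documents). The shape of the pattern, as a minimal sketch:

	/* Sketch of the new-then-old devcmd fallback used above; assumes
	 * vnic_dev_cmd() returns ERR_ECMDUNKNOWN when firmware does not
	 * recognize the command, as in the hunk above.
	 */
	static int fw_info_cmd_with_fallback(struct vnic_dev *vdev,
		u64 *a0, u64 *a1, int wait)
	{
		int err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, a0, a1, wait);

		if (err == ERR_ECMDUNKNOWN)	/* old firmware: 128-byte variant */
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
				a0, a1, wait);

		return err;
	}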
422int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
423{
424 struct vnic_devcmd_fw_info *fw_info;
425 int err;
426
427 err = vnic_dev_fw_info(vdev, &fw_info);
428 if (err)
429 return err;
430
431 if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
432 *hw_ver = VNIC_DEV_HW_VER_A1;
433 else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
434 *hw_ver = VNIC_DEV_HW_VER_A2;
435 else
436 *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
437
438 return 0;
439}
440
441int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 429int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
442 void *value) 430 void *value)
443{ 431{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 05f9a24cd45..e837546213a 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
44#undef pr_fmt 44#undef pr_fmt
45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46 46
47enum vnic_dev_hw_version {
48 VNIC_DEV_HW_VER_UNKNOWN,
49 VNIC_DEV_HW_VER_A1,
50 VNIC_DEV_HW_VER_A2,
51};
52
53enum vnic_dev_intr_mode { 47enum vnic_dev_intr_mode {
54 VNIC_DEV_INTR_MODE_UNKNOWN, 48 VNIC_DEV_INTR_MODE_UNKNOWN,
55 VNIC_DEV_INTR_MODE_INTX, 49 VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
93 u64 *a0, u64 *a1, int wait); 87 u64 *a0, u64 *a1, int wait);
94int vnic_dev_fw_info(struct vnic_dev *vdev, 88int vnic_dev_fw_info(struct vnic_dev *vdev,
95 struct vnic_devcmd_fw_info **fw_info); 89 struct vnic_devcmd_fw_info **fw_info);
96int vnic_dev_hw_version(struct vnic_dev *vdev,
97 enum vnic_dev_hw_version *hw_ver);
98int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 90int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
99 void *value); 91 void *value);
100int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); 92int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index 9abb3d51dea..d833a071bac 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -80,8 +80,34 @@
80enum vnic_devcmd_cmd { 80enum vnic_devcmd_cmd {
81 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), 81 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
82 82
83 /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ 83 /*
84 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), 84 * mcpu fw info in mem:
85 * in:
86 * (u64)a0=paddr to struct vnic_devcmd_fw_info
87 * action:
88 * Fills in struct vnic_devcmd_fw_info (128 bytes)
89 * note:
90 * An old definition of CMD_MCPU_FW_INFO
91 */
92 CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
93
94 /*
95 * mcpu fw info in mem:
96 * in:
97 * (u64)a0=paddr to struct vnic_devcmd_fw_info
98 * (u16)a1=size of the structure
99 * out:
100 * (u16)a1=0 for in:a1 = 0,
101 * data size actually written for other values.
102 * action:
103 * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
104 * first in:a1 bytes for 0 < in:a1 <= 132,
105 * 132 bytes for other values of in:a1.
106 * note:
107 * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1
108 * for source compatibility.
109 */
110 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
85 111
86 /* dev-specific block member: 112 /* dev-specific block member:
87 * in: (u16)a0=offset,(u8)a1=size 113 * in: (u16)a0=offset,(u8)a1=size
@@ -291,11 +317,19 @@ enum vnic_devcmd_error {
291 ERR_EMAXRES = 10, 317 ERR_EMAXRES = 10,
292}; 318};
293 319
320/*
321 * note: hw_version and asic_rev refer to the same thing,
322 * but have different formats. hw_version is
323 * a 32-byte string (e.g. "A2") and asic_rev is
324 * a 16-bit integer (e.g. 0xA2).
325 */
294struct vnic_devcmd_fw_info { 326struct vnic_devcmd_fw_info {
295 char fw_version[32]; 327 char fw_version[32];
296 char fw_build[32]; 328 char fw_build[32];
297 char hw_version[32]; 329 char hw_version[32];
298 char hw_serial_number[32]; 330 char hw_serial_number[32];
331 u16 asic_type;
332 u16 asic_rev;
299}; 333};
300 334
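Per the note above, firmware now reports the ASIC revision both as a string ("A2") and as a 16-bit integer (0xA2), which is what makes the string-parsing vnic_dev_hw_version() removable. A hypothetical helper relating the two formats (not part of the patch):

	/* Each hex character of hw_version contributes 4 bits:
	 * "A2" -> 0xA2. Illustrative only.
	 */
	static u16 hw_version_to_asic_rev(const char *hw_version)
	{
		unsigned int rev = 0;

		sscanf(hw_version, "%x", &rev);

		return (u16)rev;
	}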
301struct vnic_devcmd_notify { 335struct vnic_devcmd_notify {
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 37f08de2454..2056586f4d4 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
141 } 141 }
142} 142}
143 143
144static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
145{
146 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
147}
148
149static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) 144static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
150{ 145{
151 rq->ring.desc_avail += count; 146 rq->ring.desc_avail += count;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 0cb1cf9cf4b..a59cf961a43 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -111,6 +111,8 @@
111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM 111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
112 */ 112 */
113 113
114#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
115
114#include <linux/capability.h> 116#include <linux/capability.h>
115#include <linux/module.h> 117#include <linux/module.h>
116#include <linux/kernel.h> 118#include <linux/kernel.h>
@@ -162,7 +164,7 @@ static void eql_timer(unsigned long param)
162} 164}
163 165
164static const char version[] __initconst = 166static const char version[] __initconst =
165 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n"; 167 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
166 168
167static const struct net_device_ops eql_netdev_ops = { 169static const struct net_device_ops eql_netdev_ops = {
168 .ndo_open = eql_open, 170 .ndo_open = eql_open,
@@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev)
204 equalizer_t *eql = netdev_priv(dev); 206 equalizer_t *eql = netdev_priv(dev);
205 207
206 /* XXX We should force this off automatically for the user. */ 208 /* XXX We should force this off automatically for the user. */
207 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " 209 netdev_info(dev,
208 "your slave devices.\n", dev->name); 210 "remember to turn off Van-Jacobson compression on your slave devices\n");
209 211
210 BUG_ON(!list_empty(&eql->queue.all_slaves)); 212 BUG_ON(!list_empty(&eql->queue.all_slaves));
211 213
@@ -591,7 +593,7 @@ static int __init eql_init_module(void)
591{ 593{
592 int err; 594 int err;
593 595
594 printk(version); 596 pr_info("%s\n", version);
595 597
596 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup); 598 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
597 if (!dev_eql) 599 if (!dev_eql)
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b79d7e1555d..db0290f05bd 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -1163,15 +1163,11 @@ static int ethoc_resume(struct platform_device *pdev)
1163# define ethoc_resume NULL 1163# define ethoc_resume NULL
1164#endif 1164#endif
1165 1165
1166#ifdef CONFIG_OF
1167static struct of_device_id ethoc_match[] = { 1166static struct of_device_id ethoc_match[] = {
1168 { 1167 { .compatible = "opencores,ethoc", },
1169 .compatible = "opencores,ethoc",
1170 },
1171 {}, 1168 {},
1172}; 1169};
1173MODULE_DEVICE_TABLE(of, ethoc_match); 1170MODULE_DEVICE_TABLE(of, ethoc_match);
1174#endif
1175 1171
1176static struct platform_driver ethoc_driver = { 1172static struct platform_driver ethoc_driver = {
1177 .probe = ethoc_probe, 1173 .probe = ethoc_probe,
@@ -1181,9 +1177,7 @@ static struct platform_driver ethoc_driver = {
1181 .driver = { 1177 .driver = {
1182 .name = "ethoc", 1178 .name = "ethoc",
1183 .owner = THIS_MODULE, 1179 .owner = THIS_MODULE,
1184#ifdef CONFIG_OF
1185 .of_match_table = ethoc_match, 1180 .of_match_table = ethoc_match,
1186#endif
1187 }, 1181 },
1188}; 1182};
1189 1183
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373719a..885d8baff7d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -54,7 +54,7 @@
54 54
55#include "fec.h" 55#include "fec.h"
56 56
57#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 57#if defined(CONFIG_ARM)
58#define FEC_ALIGNMENT 0xf 58#define FEC_ALIGNMENT 0xf
59#else 59#else
60#define FEC_ALIGNMENT 0x3 60#define FEC_ALIGNMENT 0x3
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
74 }, { 74 }, {
75 .name = "imx28-fec", 75 .name = "imx28-fec",
76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
77 } 77 },
78 { }
78}; 79};
79 80
80static unsigned char macaddr[ETH_ALEN]; 81static unsigned char macaddr[ETH_ALEN];
@@ -147,8 +148,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
147 * account when setting it. 148 * account when setting it.
148 */ 149 */
149#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 150#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
150 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 151 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
151 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
152#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 152#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
153#else 153#else
154#define OPT_FRAME_SIZE 0 154#define OPT_FRAME_SIZE 0
@@ -183,7 +183,7 @@ struct fec_enet_private {
183 struct bufdesc *rx_bd_base; 183 struct bufdesc *rx_bd_base;
184 struct bufdesc *tx_bd_base; 184 struct bufdesc *tx_bd_base;
185 /* The next free ring entry */ 185 /* The next free ring entry */
186 struct bufdesc *cur_rx, *cur_tx; 186 struct bufdesc *cur_rx, *cur_tx;
187 /* The ring entries to be free()ed */ 187 /* The ring entries to be free()ed */
188 struct bufdesc *dirty_tx; 188 struct bufdesc *dirty_tx;
189 189
@@ -191,28 +191,21 @@ struct fec_enet_private {
191 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ 191 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
192 spinlock_t hw_lock; 192 spinlock_t hw_lock;
193 193
194 struct platform_device *pdev; 194 struct platform_device *pdev;
195 195
196 int opened; 196 int opened;
197 197
198 /* Phylib and MDIO interface */ 198 /* Phylib and MDIO interface */
199 struct mii_bus *mii_bus; 199 struct mii_bus *mii_bus;
200 struct phy_device *phy_dev; 200 struct phy_device *phy_dev;
201 int mii_timeout; 201 int mii_timeout;
202 uint phy_speed; 202 uint phy_speed;
203 phy_interface_t phy_interface; 203 phy_interface_t phy_interface;
204 int link; 204 int link;
205 int full_duplex; 205 int full_duplex;
206 struct completion mdio_done; 206 struct completion mdio_done;
207}; 207};
208 208
209static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
210static void fec_enet_tx(struct net_device *dev);
211static void fec_enet_rx(struct net_device *dev);
212static int fec_enet_close(struct net_device *dev);
213static void fec_restart(struct net_device *dev, int duplex);
214static void fec_stop(struct net_device *dev);
215
216/* FEC MII MMFR bits definition */ 209/* FEC MII MMFR bits definition */
217#define FEC_MMFR_ST (1 << 30) 210#define FEC_MMFR_ST (1 << 30)
218#define FEC_MMFR_OP_READ (2 << 28) 211#define FEC_MMFR_OP_READ (2 << 28)
@@ -239,9 +232,9 @@ static void *swap_buffer(void *bufaddr, int len)
239} 232}
240 233
241static netdev_tx_t 234static netdev_tx_t
242fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 235fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
243{ 236{
244 struct fec_enet_private *fep = netdev_priv(dev); 237 struct fec_enet_private *fep = netdev_priv(ndev);
245 const struct platform_device_id *id_entry = 238 const struct platform_device_id *id_entry =
246 platform_get_device_id(fep->pdev); 239 platform_get_device_id(fep->pdev);
247 struct bufdesc *bdp; 240 struct bufdesc *bdp;
@@ -262,9 +255,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
262 255
263 if (status & BD_ENET_TX_READY) { 256 if (status & BD_ENET_TX_READY) {
264 /* Oops. All transmit buffers are full. Bail out. 257
265 * This should not happen, since dev->tbusy should be set. 258 * This should not happen, since ndev->tbusy should be set.
266 */ 259 */
267 printk("%s: tx queue full!.\n", dev->name); 260 printk("%s: tx queue full!.\n", ndev->name);
268 spin_unlock_irqrestore(&fep->hw_lock, flags); 261 spin_unlock_irqrestore(&fep->hw_lock, flags);
269 return NETDEV_TX_BUSY; 262 return NETDEV_TX_BUSY;
270 } 263 }
@@ -284,7 +277,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
284 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 277 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
285 unsigned int index; 278 unsigned int index;
286 index = bdp - fep->tx_bd_base; 279 index = bdp - fep->tx_bd_base;
287 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); 280 memcpy(fep->tx_bounce[index], skb->data, skb->len);
288 bufaddr = fep->tx_bounce[index]; 281 bufaddr = fep->tx_bounce[index];
289 } 282 }
290 283
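The memcpy above is the bounce-buffer path: on ARM, FEC_ALIGNMENT is 0xf, so transmit data must start on a 16-byte boundary, and unaligned skb data is first copied into fep->tx_bounce[index]. The test itself, as a self-contained sketch:

	/* FEC_ALIGNMENT is a mask (0xf on ARM, 0x3 elsewhere): a buffer is
	 * usable in place only when its low address bits are all zero.
	 */
	static inline int fec_needs_bounce(const void *buf,
		unsigned long align_mask)
	{
		return ((unsigned long)buf & align_mask) != 0;
	}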
@@ -299,13 +292,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
299 /* Save skb pointer */ 292 /* Save skb pointer */
300 fep->tx_skbuff[fep->skb_cur] = skb; 293 fep->tx_skbuff[fep->skb_cur] = skb;
301 294
302 dev->stats.tx_bytes += skb->len; 295 ndev->stats.tx_bytes += skb->len;
303 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; 296 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
304 297
305 /* Push the data cache so the CPM does not get stale memory 298 /* Push the data cache so the CPM does not get stale memory
306 * data. 299 * data.
307 */ 300 */
308 bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr, 301 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
309 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 302 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
310 303
311 /* Send it on its way. Tell FEC it's ready, interrupt when done, 304 /* Send it on its way. Tell FEC it's ready, interrupt when done,
@@ -326,7 +319,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
326 319
327 if (bdp == fep->dirty_tx) { 320 if (bdp == fep->dirty_tx) {
328 fep->tx_full = 1; 321 fep->tx_full = 1;
329 netif_stop_queue(dev); 322 netif_stop_queue(ndev);
330 } 323 }
331 324
332 fep->cur_tx = bdp; 325 fep->cur_tx = bdp;
@@ -336,62 +329,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
336 return NETDEV_TX_OK; 329 return NETDEV_TX_OK;
337} 330}
338 331
332/* This function is called to start or restart the FEC during a link
333 * change. This only happens when switching between half and full
334 * duplex.
335 */
339static void 336static void
340fec_timeout(struct net_device *dev) 337fec_restart(struct net_device *ndev, int duplex)
341{ 338{
342 struct fec_enet_private *fep = netdev_priv(dev); 339 struct fec_enet_private *fep = netdev_priv(ndev);
340 const struct platform_device_id *id_entry =
341 platform_get_device_id(fep->pdev);
342 int i;
343 u32 temp_mac[2];
344 u32 rcntl = OPT_FRAME_SIZE | 0x04;
343 345
344 dev->stats.tx_errors++; 346 /* Whack a reset. We should wait for this. */
347 writel(1, fep->hwp + FEC_ECNTRL);
348 udelay(10);
345 349
346 fec_restart(dev, fep->full_duplex); 350 /*
347 netif_wake_queue(dev); 351 * enet-mac reset will reset mac address registers too,
348} 352 * so we need to reconfigure them.
353 */
354 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
355 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
356 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
357 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
358 }
349 359
350static irqreturn_t 360 /* Clear any outstanding interrupt. */
351fec_enet_interrupt(int irq, void * dev_id) 361 writel(0xffc00000, fep->hwp + FEC_IEVENT);
352{
353 struct net_device *dev = dev_id;
354 struct fec_enet_private *fep = netdev_priv(dev);
355 uint int_events;
356 irqreturn_t ret = IRQ_NONE;
357 362
358 do { 363 /* Reset all multicast. */
359 int_events = readl(fep->hwp + FEC_IEVENT); 364 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
360 writel(int_events, fep->hwp + FEC_IEVENT); 365 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
366#ifndef CONFIG_M5272
367 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
368 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
369#endif
361 370
362 if (int_events & FEC_ENET_RXF) { 371 /* Set maximum receive buffer size. */
363 ret = IRQ_HANDLED; 372 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
364 fec_enet_rx(dev);
365 }
366 373
367 /* Transmit OK, or non-fatal error. Update the buffer 374 /* Set receive and transmit descriptor base. */
368 * descriptors. FEC handles all errors, we just discover 375 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
369 * them as part of the transmit process. 376 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
370 */ 377 fep->hwp + FEC_X_DES_START);
371 if (int_events & FEC_ENET_TXF) { 378
372 ret = IRQ_HANDLED; 379 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
373 fec_enet_tx(dev); 380 fep->cur_rx = fep->rx_bd_base;
381
382 /* Reset SKB transmit buffers. */
383 fep->skb_cur = fep->skb_dirty = 0;
384 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
385 if (fep->tx_skbuff[i]) {
386 dev_kfree_skb_any(fep->tx_skbuff[i]);
387 fep->tx_skbuff[i] = NULL;
374 } 388 }
389 }
375 390
376 if (int_events & FEC_ENET_MII) { 391 /* Enable MII mode */
377 ret = IRQ_HANDLED; 392 if (duplex) {
378 complete(&fep->mdio_done); 393 /* FD enable */
394 writel(0x04, fep->hwp + FEC_X_CNTRL);
395 } else {
396 /* No Rcv on Xmit */
397 rcntl |= 0x02;
398 writel(0x0, fep->hwp + FEC_X_CNTRL);
399 }
400
401 fep->full_duplex = duplex;
402
403 /* Set MII speed */
404 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
405
406 /*
407 * The phy interface and speed need to be configured
408 * differently on enet-mac.
409 */
410 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
411 /* Enable flow control and length check */
412 rcntl |= 0x40000000 | 0x00000020;
413
414 /* MII or RMII */
415 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
416 rcntl |= (1 << 8);
417 else
418 rcntl &= ~(1 << 8);
419
420 /* 10M or 100M */
421 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
422 rcntl &= ~(1 << 9);
423 else
424 rcntl |= (1 << 9);
425
426 } else {
427#ifdef FEC_MIIGSK_ENR
428 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
429 /* disable the gasket and wait */
430 writel(0, fep->hwp + FEC_MIIGSK_ENR);
431 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
432 udelay(1);
433
434 /*
435 * configure the gasket:
436 * RMII, 50 MHz, no loopback, no echo
437 */
438 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
439
440 /* re-enable the gasket */
441 writel(2, fep->hwp + FEC_MIIGSK_ENR);
379 } 442 }
380 } while (int_events); 443#endif
444 }
445 writel(rcntl, fep->hwp + FEC_R_CNTRL);
381 446
382 return ret; 447 /* And last, enable the transmit and receive processing */
448 writel(2, fep->hwp + FEC_ECNTRL);
449 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
450
451 /* Enable interrupts we wish to service */
452 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
383} 453}
384 454
455static void
456fec_stop(struct net_device *ndev)
457{
458 struct fec_enet_private *fep = netdev_priv(ndev);
459
460 /* We cannot expect a graceful transmit stop without link! */
461 if (fep->link) {
462 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
463 udelay(10);
464 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
465 printk("fec_stop: Graceful transmit stop did not complete!\n");
466 }
467
468 /* Whack a reset. We should wait for this. */
469 writel(1, fep->hwp + FEC_ECNTRL);
470 udelay(10);
471 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
472 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
473}
474
475
476static void
477fec_timeout(struct net_device *ndev)
478{
479 struct fec_enet_private *fep = netdev_priv(ndev);
480
481 ndev->stats.tx_errors++;
482
483 fec_restart(ndev, fep->full_duplex);
484 netif_wake_queue(ndev);
485}
385 486
386static void 487static void
387fec_enet_tx(struct net_device *dev) 488fec_enet_tx(struct net_device *ndev)
388{ 489{
389 struct fec_enet_private *fep; 490 struct fec_enet_private *fep;
390 struct bufdesc *bdp; 491 struct bufdesc *bdp;
391 unsigned short status; 492 unsigned short status;
392 struct sk_buff *skb; 493 struct sk_buff *skb;
393 494
394 fep = netdev_priv(dev); 495 fep = netdev_priv(ndev);
395 spin_lock(&fep->hw_lock); 496 spin_lock(&fep->hw_lock);
396 bdp = fep->dirty_tx; 497 bdp = fep->dirty_tx;
397 498
@@ -399,7 +500,8 @@ fec_enet_tx(struct net_device *dev)
399 if (bdp == fep->cur_tx && fep->tx_full == 0) 500 if (bdp == fep->cur_tx && fep->tx_full == 0)
400 break; 501 break;
401 502
402 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 503 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
504 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
403 bdp->cbd_bufaddr = 0; 505 bdp->cbd_bufaddr = 0;
404 506
405 skb = fep->tx_skbuff[fep->skb_dirty]; 507 skb = fep->tx_skbuff[fep->skb_dirty];
@@ -407,19 +509,19 @@ fec_enet_tx(struct net_device *dev)
407 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 509 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
408 BD_ENET_TX_RL | BD_ENET_TX_UN | 510 BD_ENET_TX_RL | BD_ENET_TX_UN |
409 BD_ENET_TX_CSL)) { 511 BD_ENET_TX_CSL)) {
410 dev->stats.tx_errors++; 512 ndev->stats.tx_errors++;
411 if (status & BD_ENET_TX_HB) /* No heartbeat */ 513 if (status & BD_ENET_TX_HB) /* No heartbeat */
412 dev->stats.tx_heartbeat_errors++; 514 ndev->stats.tx_heartbeat_errors++;
413 if (status & BD_ENET_TX_LC) /* Late collision */ 515 if (status & BD_ENET_TX_LC) /* Late collision */
414 dev->stats.tx_window_errors++; 516 ndev->stats.tx_window_errors++;
415 if (status & BD_ENET_TX_RL) /* Retrans limit */ 517 if (status & BD_ENET_TX_RL) /* Retrans limit */
416 dev->stats.tx_aborted_errors++; 518 ndev->stats.tx_aborted_errors++;
417 if (status & BD_ENET_TX_UN) /* Underrun */ 519 if (status & BD_ENET_TX_UN) /* Underrun */
418 dev->stats.tx_fifo_errors++; 520 ndev->stats.tx_fifo_errors++;
419 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 521 if (status & BD_ENET_TX_CSL) /* Carrier lost */
420 dev->stats.tx_carrier_errors++; 522 ndev->stats.tx_carrier_errors++;
421 } else { 523 } else {
422 dev->stats.tx_packets++; 524 ndev->stats.tx_packets++;
423 } 525 }
424 526
425 if (status & BD_ENET_TX_READY) 527 if (status & BD_ENET_TX_READY)
@@ -429,7 +531,7 @@ fec_enet_tx(struct net_device *dev)
429 * but we eventually sent the packet OK. 531 * but we eventually sent the packet OK.
430 */ 532 */
431 if (status & BD_ENET_TX_DEF) 533 if (status & BD_ENET_TX_DEF)
432 dev->stats.collisions++; 534 ndev->stats.collisions++;
433 535
434 /* Free the sk buffer associated with this last transmit */ 536 /* Free the sk buffer associated with this last transmit */
435 dev_kfree_skb_any(skb); 537 dev_kfree_skb_any(skb);
@@ -446,8 +548,8 @@ fec_enet_tx(struct net_device *dev)
446 */ 548 */
447 if (fep->tx_full) { 549 if (fep->tx_full) {
448 fep->tx_full = 0; 550 fep->tx_full = 0;
449 if (netif_queue_stopped(dev)) 551 if (netif_queue_stopped(ndev))
450 netif_wake_queue(dev); 552 netif_wake_queue(ndev);
451 } 553 }
452 } 554 }
453 fep->dirty_tx = bdp; 555 fep->dirty_tx = bdp;
@@ -461,9 +563,9 @@ fec_enet_tx(struct net_device *dev)
461 * effectively tossing the packet. 563 * effectively tossing the packet.
462 */ 564 */
463static void 565static void
464fec_enet_rx(struct net_device *dev) 566fec_enet_rx(struct net_device *ndev)
465{ 567{
466 struct fec_enet_private *fep = netdev_priv(dev); 568 struct fec_enet_private *fep = netdev_priv(ndev);
467 const struct platform_device_id *id_entry = 569 const struct platform_device_id *id_entry =
468 platform_get_device_id(fep->pdev); 570 platform_get_device_id(fep->pdev);
469 struct bufdesc *bdp; 571 struct bufdesc *bdp;
@@ -497,17 +599,17 @@ fec_enet_rx(struct net_device *dev)
497 /* Check for errors. */ 599 /* Check for errors. */
498 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 600 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
499 BD_ENET_RX_CR | BD_ENET_RX_OV)) { 601 BD_ENET_RX_CR | BD_ENET_RX_OV)) {
500 dev->stats.rx_errors++; 602 ndev->stats.rx_errors++;
501 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { 603 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
502 /* Frame too long or too short. */ 604 /* Frame too long or too short. */
503 dev->stats.rx_length_errors++; 605 ndev->stats.rx_length_errors++;
504 } 606 }
505 if (status & BD_ENET_RX_NO) /* Frame alignment */ 607 if (status & BD_ENET_RX_NO) /* Frame alignment */
506 dev->stats.rx_frame_errors++; 608 ndev->stats.rx_frame_errors++;
507 if (status & BD_ENET_RX_CR) /* CRC Error */ 609 if (status & BD_ENET_RX_CR) /* CRC Error */
508 dev->stats.rx_crc_errors++; 610 ndev->stats.rx_crc_errors++;
509 if (status & BD_ENET_RX_OV) /* FIFO overrun */ 611 if (status & BD_ENET_RX_OV) /* FIFO overrun */
510 dev->stats.rx_fifo_errors++; 612 ndev->stats.rx_fifo_errors++;
511 } 613 }
512 614
513 /* Report late collisions as a frame error. 615 /* Report late collisions as a frame error.
@@ -515,19 +617,19 @@ fec_enet_rx(struct net_device *dev)
515 * have in the buffer. So, just drop this frame on the floor. 617 * have in the buffer. So, just drop this frame on the floor.
516 */ 618 */
517 if (status & BD_ENET_RX_CL) { 619 if (status & BD_ENET_RX_CL) {
518 dev->stats.rx_errors++; 620 ndev->stats.rx_errors++;
519 dev->stats.rx_frame_errors++; 621 ndev->stats.rx_frame_errors++;
520 goto rx_processing_done; 622 goto rx_processing_done;
521 } 623 }
522 624
523 /* Process the incoming frame. */ 625 /* Process the incoming frame. */
524 dev->stats.rx_packets++; 626 ndev->stats.rx_packets++;
525 pkt_len = bdp->cbd_datlen; 627 pkt_len = bdp->cbd_datlen;
526 dev->stats.rx_bytes += pkt_len; 628 ndev->stats.rx_bytes += pkt_len;
527 data = (__u8*)__va(bdp->cbd_bufaddr); 629 data = (__u8*)__va(bdp->cbd_bufaddr);
528 630
529 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, 631 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
530 DMA_FROM_DEVICE); 632 FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
531 633
532 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 634 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
533 swap_buffer(data, pkt_len); 635 swap_buffer(data, pkt_len);
@@ -541,18 +643,18 @@ fec_enet_rx(struct net_device *dev)
541 643
542 if (unlikely(!skb)) { 644 if (unlikely(!skb)) {
543 printk("%s: Memory squeeze, dropping packet.\n", 645 printk("%s: Memory squeeze, dropping packet.\n",
544 dev->name); 646 ndev->name);
545 dev->stats.rx_dropped++; 647 ndev->stats.rx_dropped++;
546 } else { 648 } else {
547 skb_reserve(skb, NET_IP_ALIGN); 649 skb_reserve(skb, NET_IP_ALIGN);
548 skb_put(skb, pkt_len - 4); /* Make room */ 650 skb_put(skb, pkt_len - 4); /* Make room */
549 skb_copy_to_linear_data(skb, data, pkt_len - 4); 651 skb_copy_to_linear_data(skb, data, pkt_len - 4);
550 skb->protocol = eth_type_trans(skb, dev); 652 skb->protocol = eth_type_trans(skb, ndev);
551 netif_rx(skb); 653 netif_rx(skb);
552 } 654 }
553 655
554 bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen, 656 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
555 DMA_FROM_DEVICE); 657 FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
556rx_processing_done: 658rx_processing_done:
557 /* Clear the status flags for this buffer */ 659 /* Clear the status flags for this buffer */
558 status &= ~BD_ENET_RX_STATS; 660 status &= ~BD_ENET_RX_STATS;
@@ -577,10 +679,47 @@ rx_processing_done:
577 spin_unlock(&fep->hw_lock); 679 spin_unlock(&fep->hw_lock);
578} 680}
579 681
682static irqreturn_t
683fec_enet_interrupt(int irq, void *dev_id)
684{
685 struct net_device *ndev = dev_id;
686 struct fec_enet_private *fep = netdev_priv(ndev);
687 uint int_events;
688 irqreturn_t ret = IRQ_NONE;
689
690 do {
691 int_events = readl(fep->hwp + FEC_IEVENT);
692 writel(int_events, fep->hwp + FEC_IEVENT);
693
694 if (int_events & FEC_ENET_RXF) {
695 ret = IRQ_HANDLED;
696 fec_enet_rx(ndev);
697 }
698
699 /* Transmit OK, or non-fatal error. Update the buffer
700 * descriptors. FEC handles all errors, we just discover
701 * them as part of the transmit process.
702 */
703 if (int_events & FEC_ENET_TXF) {
704 ret = IRQ_HANDLED;
705 fec_enet_tx(ndev);
706 }
707
708 if (int_events & FEC_ENET_MII) {
709 ret = IRQ_HANDLED;
710 complete(&fep->mdio_done);
711 }
712 } while (int_events);
713
714 return ret;
715}
716
717
718
580/* ------------------------------------------------------------------------- */ 719/* ------------------------------------------------------------------------- */
581static void __inline__ fec_get_mac(struct net_device *dev) 720static void __inline__ fec_get_mac(struct net_device *ndev)
582{ 721{
583 struct fec_enet_private *fep = netdev_priv(dev); 722 struct fec_enet_private *fep = netdev_priv(ndev);
584 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 723 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
585 unsigned char *iap, tmpaddr[ETH_ALEN]; 724 unsigned char *iap, tmpaddr[ETH_ALEN];
586 725
@@ -616,11 +755,11 @@ static void __inline__ fec_get_mac(struct net_device *dev)
616 iap = &tmpaddr[0]; 755 iap = &tmpaddr[0];
617 } 756 }
618 757
619 memcpy(dev->dev_addr, iap, ETH_ALEN); 758 memcpy(ndev->dev_addr, iap, ETH_ALEN);
620 759
621 /* Adjust MAC if using macaddr */ 760 /* Adjust MAC if using macaddr */
622 if (iap == macaddr) 761 if (iap == macaddr)
623 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; 762 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
624} 763}
625 764
626/* ------------------------------------------------------------------------- */ 765/* ------------------------------------------------------------------------- */
@@ -628,9 +767,9 @@ static void __inline__ fec_get_mac(struct net_device *dev)
628/* 767/*
629 * Phy section 768 * Phy section
630 */ 769 */
631static void fec_enet_adjust_link(struct net_device *dev) 770static void fec_enet_adjust_link(struct net_device *ndev)
632{ 771{
633 struct fec_enet_private *fep = netdev_priv(dev); 772 struct fec_enet_private *fep = netdev_priv(ndev);
634 struct phy_device *phy_dev = fep->phy_dev; 773 struct phy_device *phy_dev = fep->phy_dev;
635 unsigned long flags; 774 unsigned long flags;
636 775
@@ -647,7 +786,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
647 /* Duplex link change */ 786 /* Duplex link change */
648 if (phy_dev->link) { 787 if (phy_dev->link) {
649 if (fep->full_duplex != phy_dev->duplex) { 788 if (fep->full_duplex != phy_dev->duplex) {
650 fec_restart(dev, phy_dev->duplex); 789 fec_restart(ndev, phy_dev->duplex);
651 status_change = 1; 790 status_change = 1;
652 } 791 }
653 } 792 }
@@ -656,9 +795,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
656 if (phy_dev->link != fep->link) { 795 if (phy_dev->link != fep->link) {
657 fep->link = phy_dev->link; 796 fep->link = phy_dev->link;
658 if (phy_dev->link) 797 if (phy_dev->link)
659 fec_restart(dev, phy_dev->duplex); 798 fec_restart(ndev, phy_dev->duplex);
660 else 799 else
661 fec_stop(dev); 800 fec_stop(ndev);
662 status_change = 1; 801 status_change = 1;
663 } 802 }
664 803
@@ -727,9 +866,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
727 return 0; 866 return 0;
728} 867}
729 868
730static int fec_enet_mii_probe(struct net_device *dev) 869static int fec_enet_mii_probe(struct net_device *ndev)
731{ 870{
732 struct fec_enet_private *fep = netdev_priv(dev); 871 struct fec_enet_private *fep = netdev_priv(ndev);
733 struct phy_device *phy_dev = NULL; 872 struct phy_device *phy_dev = NULL;
734 char mdio_bus_id[MII_BUS_ID_SIZE]; 873 char mdio_bus_id[MII_BUS_ID_SIZE];
735 char phy_name[MII_BUS_ID_SIZE + 3]; 874 char phy_name[MII_BUS_ID_SIZE + 3];
@@ -754,16 +893,16 @@ static int fec_enet_mii_probe(struct net_device *dev)
754 893
755 if (phy_id >= PHY_MAX_ADDR) { 894 if (phy_id >= PHY_MAX_ADDR) {
756 printk(KERN_INFO "%s: no PHY, assuming direct connection " 895 printk(KERN_INFO "%s: no PHY, assuming direct connection "
757 "to switch\n", dev->name); 896 "to switch\n", ndev->name);
758 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); 897 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
759 phy_id = 0; 898 phy_id = 0;
760 } 899 }
761 900
762 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 901 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
763 phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0, 902 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
764 PHY_INTERFACE_MODE_MII); 903 PHY_INTERFACE_MODE_MII);
765 if (IS_ERR(phy_dev)) { 904 if (IS_ERR(phy_dev)) {
766 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); 905 printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
767 return PTR_ERR(phy_dev); 906 return PTR_ERR(phy_dev);
768 } 907 }
769 908
@@ -776,7 +915,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
776 fep->full_duplex = 0; 915 fep->full_duplex = 0;
777 916
778 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " 917 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
779 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, 918 "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
780 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), 919 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
781 fep->phy_dev->irq); 920 fep->phy_dev->irq);
782 921
@@ -786,8 +925,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
786static int fec_enet_mii_init(struct platform_device *pdev) 925static int fec_enet_mii_init(struct platform_device *pdev)
787{ 926{
788 static struct mii_bus *fec0_mii_bus; 927 static struct mii_bus *fec0_mii_bus;
789 struct net_device *dev = platform_get_drvdata(pdev); 928 struct net_device *ndev = platform_get_drvdata(pdev);
790 struct fec_enet_private *fep = netdev_priv(dev); 929 struct fec_enet_private *fep = netdev_priv(ndev);
791 const struct platform_device_id *id_entry = 930 const struct platform_device_id *id_entry =
792 platform_get_device_id(fep->pdev); 931 platform_get_device_id(fep->pdev);
793 int err = -ENXIO, i; 932 int err = -ENXIO, i;
@@ -845,8 +984,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
845 for (i = 0; i < PHY_MAX_ADDR; i++) 984 for (i = 0; i < PHY_MAX_ADDR; i++)
846 fep->mii_bus->irq[i] = PHY_POLL; 985 fep->mii_bus->irq[i] = PHY_POLL;
847 986
848 platform_set_drvdata(dev, fep->mii_bus);
849
850 if (mdiobus_register(fep->mii_bus)) 987 if (mdiobus_register(fep->mii_bus))
851 goto err_out_free_mdio_irq; 988 goto err_out_free_mdio_irq;
852 989
@@ -873,10 +1010,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
873 mdiobus_free(fep->mii_bus); 1010 mdiobus_free(fep->mii_bus);
874} 1011}
875 1012
876static int fec_enet_get_settings(struct net_device *dev, 1013static int fec_enet_get_settings(struct net_device *ndev,
877 struct ethtool_cmd *cmd) 1014 struct ethtool_cmd *cmd)
878{ 1015{
879 struct fec_enet_private *fep = netdev_priv(dev); 1016 struct fec_enet_private *fep = netdev_priv(ndev);
880 struct phy_device *phydev = fep->phy_dev; 1017 struct phy_device *phydev = fep->phy_dev;
881 1018
882 if (!phydev) 1019 if (!phydev)
@@ -885,10 +1022,10 @@ static int fec_enet_get_settings(struct net_device *dev,
885 return phy_ethtool_gset(phydev, cmd); 1022 return phy_ethtool_gset(phydev, cmd);
886} 1023}
887 1024
888static int fec_enet_set_settings(struct net_device *dev, 1025static int fec_enet_set_settings(struct net_device *ndev,
889 struct ethtool_cmd *cmd) 1026 struct ethtool_cmd *cmd)
890{ 1027{
891 struct fec_enet_private *fep = netdev_priv(dev); 1028 struct fec_enet_private *fep = netdev_priv(ndev);
892 struct phy_device *phydev = fep->phy_dev; 1029 struct phy_device *phydev = fep->phy_dev;
893 1030
894 if (!phydev) 1031 if (!phydev)
@@ -897,14 +1034,14 @@ static int fec_enet_set_settings(struct net_device *dev,
897 return phy_ethtool_sset(phydev, cmd); 1034 return phy_ethtool_sset(phydev, cmd);
898} 1035}
899 1036
900static void fec_enet_get_drvinfo(struct net_device *dev, 1037static void fec_enet_get_drvinfo(struct net_device *ndev,
901 struct ethtool_drvinfo *info) 1038 struct ethtool_drvinfo *info)
902{ 1039{
903 struct fec_enet_private *fep = netdev_priv(dev); 1040 struct fec_enet_private *fep = netdev_priv(ndev);
904 1041
905 strcpy(info->driver, fep->pdev->dev.driver->name); 1042 strcpy(info->driver, fep->pdev->dev.driver->name);
906 strcpy(info->version, "Revision: 1.0"); 1043 strcpy(info->version, "Revision: 1.0");
907 strcpy(info->bus_info, dev_name(&dev->dev)); 1044 strcpy(info->bus_info, dev_name(&ndev->dev));
908} 1045}
909 1046
910static struct ethtool_ops fec_enet_ethtool_ops = { 1047static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -914,12 +1051,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
914 .get_link = ethtool_op_get_link, 1051 .get_link = ethtool_op_get_link,
915}; 1052};
916 1053
917static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1054static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
918{ 1055{
919 struct fec_enet_private *fep = netdev_priv(dev); 1056 struct fec_enet_private *fep = netdev_priv(ndev);
920 struct phy_device *phydev = fep->phy_dev; 1057 struct phy_device *phydev = fep->phy_dev;
921 1058
922 if (!netif_running(dev)) 1059 if (!netif_running(ndev))
923 return -EINVAL; 1060 return -EINVAL;
924 1061
925 if (!phydev) 1062 if (!phydev)
@@ -928,9 +1065,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
928 return phy_mii_ioctl(phydev, rq, cmd); 1065 return phy_mii_ioctl(phydev, rq, cmd);
929} 1066}
930 1067
931static void fec_enet_free_buffers(struct net_device *dev) 1068static void fec_enet_free_buffers(struct net_device *ndev)
932{ 1069{
933 struct fec_enet_private *fep = netdev_priv(dev); 1070 struct fec_enet_private *fep = netdev_priv(ndev);
934 int i; 1071 int i;
935 struct sk_buff *skb; 1072 struct sk_buff *skb;
936 struct bufdesc *bdp; 1073 struct bufdesc *bdp;
@@ -940,7 +1077,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
940 skb = fep->rx_skbuff[i]; 1077 skb = fep->rx_skbuff[i];
941 1078
942 if (bdp->cbd_bufaddr) 1079 if (bdp->cbd_bufaddr)
943 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, 1080 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
944 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1081 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
945 if (skb) 1082 if (skb)
946 dev_kfree_skb(skb); 1083 dev_kfree_skb(skb);
@@ -952,9 +1089,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
952 kfree(fep->tx_bounce[i]); 1089 kfree(fep->tx_bounce[i]);
953} 1090}
954 1091
955static int fec_enet_alloc_buffers(struct net_device *dev) 1092static int fec_enet_alloc_buffers(struct net_device *ndev)
956{ 1093{
957 struct fec_enet_private *fep = netdev_priv(dev); 1094 struct fec_enet_private *fep = netdev_priv(ndev);
958 int i; 1095 int i;
959 struct sk_buff *skb; 1096 struct sk_buff *skb;
960 struct bufdesc *bdp; 1097 struct bufdesc *bdp;
@@ -963,12 +1100,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
963 for (i = 0; i < RX_RING_SIZE; i++) { 1100 for (i = 0; i < RX_RING_SIZE; i++) {
964 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); 1101 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
965 if (!skb) { 1102 if (!skb) {
966 fec_enet_free_buffers(dev); 1103 fec_enet_free_buffers(ndev);
967 return -ENOMEM; 1104 return -ENOMEM;
968 } 1105 }
969 fep->rx_skbuff[i] = skb; 1106 fep->rx_skbuff[i] = skb;
970 1107
971 bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, 1108 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
972 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1109 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
973 bdp->cbd_sc = BD_ENET_RX_EMPTY; 1110 bdp->cbd_sc = BD_ENET_RX_EMPTY;
974 bdp++; 1111 bdp++;
@@ -995,45 +1132,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
995} 1132}
996 1133
997static int 1134static int
998fec_enet_open(struct net_device *dev) 1135fec_enet_open(struct net_device *ndev)
999{ 1136{
1000 struct fec_enet_private *fep = netdev_priv(dev); 1137 struct fec_enet_private *fep = netdev_priv(ndev);
1001 int ret; 1138 int ret;
1002 1139
1003 /* I should reset the ring buffers here, but I don't yet know 1140 /* I should reset the ring buffers here, but I don't yet know
1004 * a simple way to do that. 1141 * a simple way to do that.
1005 */ 1142 */
1006 1143
1007 ret = fec_enet_alloc_buffers(dev); 1144 ret = fec_enet_alloc_buffers(ndev);
1008 if (ret) 1145 if (ret)
1009 return ret; 1146 return ret;
1010 1147
1011 /* Probe and connect to PHY when open the interface */ 1148 /* Probe and connect to PHY when open the interface */
1012 ret = fec_enet_mii_probe(dev); 1149 ret = fec_enet_mii_probe(ndev);
1013 if (ret) { 1150 if (ret) {
1014 fec_enet_free_buffers(dev); 1151 fec_enet_free_buffers(ndev);
1015 return ret; 1152 return ret;
1016 } 1153 }
1017 phy_start(fep->phy_dev); 1154 phy_start(fep->phy_dev);
1018 netif_start_queue(dev); 1155 netif_start_queue(ndev);
1019 fep->opened = 1; 1156 fep->opened = 1;
1020 return 0; 1157 return 0;
1021} 1158}
1022 1159
1023static int 1160static int
1024fec_enet_close(struct net_device *dev) 1161fec_enet_close(struct net_device *ndev)
1025{ 1162{
1026 struct fec_enet_private *fep = netdev_priv(dev); 1163 struct fec_enet_private *fep = netdev_priv(ndev);
1027 1164
1028 /* Don't know what to do yet. */ 1165 /* Don't know what to do yet. */
1029 fep->opened = 0; 1166 fep->opened = 0;
1030 netif_stop_queue(dev); 1167 netif_stop_queue(ndev);
1031 fec_stop(dev); 1168 fec_stop(ndev);
1032 1169
1033 if (fep->phy_dev) 1170 if (fep->phy_dev) {
1171 phy_stop(fep->phy_dev);
1034 phy_disconnect(fep->phy_dev); 1172 phy_disconnect(fep->phy_dev);
1173 }
1035 1174
1036 fec_enet_free_buffers(dev); 1175 fec_enet_free_buffers(ndev);
1037 1176
1038 return 0; 1177 return 0;
1039} 1178}
@@ -1051,14 +1190,14 @@ fec_enet_close(struct net_device *dev)
1051#define HASH_BITS 6 /* #bits in hash */ 1190#define HASH_BITS 6 /* #bits in hash */
1052#define CRC32_POLY 0xEDB88320 1191#define CRC32_POLY 0xEDB88320
1053 1192
1054static void set_multicast_list(struct net_device *dev) 1193static void set_multicast_list(struct net_device *ndev)
1055{ 1194{
1056 struct fec_enet_private *fep = netdev_priv(dev); 1195 struct fec_enet_private *fep = netdev_priv(ndev);
1057 struct netdev_hw_addr *ha; 1196 struct netdev_hw_addr *ha;
1058 unsigned int i, bit, data, crc, tmp; 1197 unsigned int i, bit, data, crc, tmp;
1059 unsigned char hash; 1198 unsigned char hash;
1060 1199
1061 if (dev->flags & IFF_PROMISC) { 1200 if (ndev->flags & IFF_PROMISC) {
1062 tmp = readl(fep->hwp + FEC_R_CNTRL); 1201 tmp = readl(fep->hwp + FEC_R_CNTRL);
1063 tmp |= 0x8; 1202 tmp |= 0x8;
1064 writel(tmp, fep->hwp + FEC_R_CNTRL); 1203 writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1069,7 +1208,7 @@ static void set_multicast_list(struct net_device *dev)
1069 tmp &= ~0x8; 1208 tmp &= ~0x8;
1070 writel(tmp, fep->hwp + FEC_R_CNTRL); 1209 writel(tmp, fep->hwp + FEC_R_CNTRL);
1071 1210
1072 if (dev->flags & IFF_ALLMULTI) { 1211 if (ndev->flags & IFF_ALLMULTI) {
1073 /* Catch all multicast addresses, so set the 1212 /* Catch all multicast addresses, so set the
1074 * filter to all 1's 1213 * filter to all 1's
1075 */ 1214 */
@@ -1084,7 +1223,7 @@ static void set_multicast_list(struct net_device *dev)
1084 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 1223 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1085 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 1224 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1086 1225
1087 netdev_for_each_mc_addr(ha, dev) { 1226 netdev_for_each_mc_addr(ha, ndev) {
1088 /* Only support group multicast for now */ 1227 /* Only support group multicast for now */
1089 if (!(ha->addr[0] & 1)) 1228 if (!(ha->addr[0] & 1))
1090 continue; 1229 continue;
@@ -1092,7 +1231,7 @@ static void set_multicast_list(struct net_device *dev)
1092 /* calculate crc32 value of mac address */ 1231 /* calculate crc32 value of mac address */
1093 crc = 0xffffffff; 1232 crc = 0xffffffff;
1094 1233
1095 for (i = 0; i < dev->addr_len; i++) { 1234 for (i = 0; i < ndev->addr_len; i++) {
1096 data = ha->addr[i]; 1235 data = ha->addr[i];
1097 for (bit = 0; bit < 8; bit++, data >>= 1) { 1236 for (bit = 0; bit < 8; bit++, data >>= 1) {
1098 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0); 1237 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0);
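The loop above implements a bit-serial, reflected CRC-32 over the station address; the top HASH_BITS bits of the result select the filter bit. A standalone sketch of that hash (fec_hash and the sample address are illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

#define HASH_BITS  6
#define CRC32_POLY 0xEDB88320u

/* Bit-serial, reflected CRC-32 over the address, LSB first,
 * mirroring the loop in set_multicast_list() above.
 */
static unsigned int fec_hash(const uint8_t *addr, int len)
{
        uint32_t crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                uint32_t data = addr[i];

                for (bit = 0; bit < 8; bit++, data >>= 1)
                        crc = (crc >> 1) ^
                              (((crc ^ data) & 1) ? CRC32_POLY : 0);
        }

        /* top HASH_BITS bits of the CRC select the filter bit */
        return (crc >> (32 - HASH_BITS)) & ((1 << HASH_BITS) - 1);
}

int main(void)
{
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("hash index = %u\n", fec_hash(mac, 6));
        return 0;
}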
@@ -1119,20 +1258,20 @@ static void set_multicast_list(struct net_device *dev)
1119 1258
1120/* Set a MAC change in hardware. */ 1259/* Set a MAC change in hardware. */
1121static int 1260static int
1122fec_set_mac_address(struct net_device *dev, void *p) 1261fec_set_mac_address(struct net_device *ndev, void *p)
1123{ 1262{
1124 struct fec_enet_private *fep = netdev_priv(dev); 1263 struct fec_enet_private *fep = netdev_priv(ndev);
1125 struct sockaddr *addr = p; 1264 struct sockaddr *addr = p;
1126 1265
1127 if (!is_valid_ether_addr(addr->sa_data)) 1266 if (!is_valid_ether_addr(addr->sa_data))
1128 return -EADDRNOTAVAIL; 1267 return -EADDRNOTAVAIL;
1129 1268
1130 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1269 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1131 1270
1132 writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) | 1271 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
1133 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), 1272 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
1134 fep->hwp + FEC_ADDR_LOW); 1273 fep->hwp + FEC_ADDR_LOW);
1135 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), 1274 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
1136 fep->hwp + FEC_ADDR_HIGH); 1275 fep->hwp + FEC_ADDR_HIGH);
1137 return 0; 1276 return 0;
1138} 1277}
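The register packing above puts dev_addr[0] in the top octet of FEC_ADDR_LOW and dev_addr[4..5] in the top half of FEC_ADDR_HIGH. A minimal userspace check of that byte layout (fec_pack_mac and the sample address are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Pack a MAC address the way fec_set_mac_address() writes it:
 * bytes 0..3 into the low register (byte 0 in the top octet),
 * bytes 4..5 into the top half of the high register.
 */
static void fec_pack_mac(const uint8_t *a, uint32_t *lo, uint32_t *hi)
{
        *lo = a[3] | (a[2] << 8) | (a[1] << 16) | ((uint32_t)a[0] << 24);
        *hi = (a[5] << 16) | ((uint32_t)a[4] << 24);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
        uint32_t lo, hi;

        fec_pack_mac(mac, &lo, &hi);
        printf("ADDR_LOW = 0x%08x, ADDR_HIGH = 0x%08x\n", lo, hi);
        return 0;
}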
@@ -1146,16 +1285,16 @@ static const struct net_device_ops fec_netdev_ops = {
1146 .ndo_validate_addr = eth_validate_addr, 1285 .ndo_validate_addr = eth_validate_addr,
1147 .ndo_tx_timeout = fec_timeout, 1286 .ndo_tx_timeout = fec_timeout,
1148 .ndo_set_mac_address = fec_set_mac_address, 1287 .ndo_set_mac_address = fec_set_mac_address,
1149 .ndo_do_ioctl = fec_enet_ioctl, 1288 .ndo_do_ioctl = fec_enet_ioctl,
1150}; 1289};
1151 1290
1152 /* 1291 /*
1153 * XXX: We need to clean up on failure exits here. 1292 * XXX: We need to clean up on failure exits here.
1154 * 1293 *
1155 */ 1294 */
1156static int fec_enet_init(struct net_device *dev) 1295static int fec_enet_init(struct net_device *ndev)
1157{ 1296{
1158 struct fec_enet_private *fep = netdev_priv(dev); 1297 struct fec_enet_private *fep = netdev_priv(ndev);
1159 struct bufdesc *cbd_base; 1298 struct bufdesc *cbd_base;
1160 struct bufdesc *bdp; 1299 struct bufdesc *bdp;
1161 int i; 1300 int i;
@@ -1170,20 +1309,19 @@ static int fec_enet_init(struct net_device *dev)
1170 1309
1171 spin_lock_init(&fep->hw_lock); 1310 spin_lock_init(&fep->hw_lock);
1172 1311
1173 fep->hwp = (void __iomem *)dev->base_addr; 1312 fep->netdev = ndev;
1174 fep->netdev = dev;
1175 1313
1176 /* Get the Ethernet address */ 1314 /* Get the Ethernet address */
1177 fec_get_mac(dev); 1315 fec_get_mac(ndev);
1178 1316
1179 /* Set receive and transmit descriptor base. */ 1317 /* Set receive and transmit descriptor base. */
1180 fep->rx_bd_base = cbd_base; 1318 fep->rx_bd_base = cbd_base;
1181 fep->tx_bd_base = cbd_base + RX_RING_SIZE; 1319 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1182 1320
1183 /* The FEC Ethernet specific entries in the device structure */ 1321 /* The FEC Ethernet specific entries in the device structure */
1184 dev->watchdog_timeo = TX_TIMEOUT; 1322 ndev->watchdog_timeo = TX_TIMEOUT;
1185 dev->netdev_ops = &fec_netdev_ops; 1323 ndev->netdev_ops = &fec_netdev_ops;
1186 dev->ethtool_ops = &fec_enet_ethtool_ops; 1324 ndev->ethtool_ops = &fec_enet_ethtool_ops;
1187 1325
1188 /* Initialize the receive buffer descriptors. */ 1326 /* Initialize the receive buffer descriptors. */
1189 bdp = fep->rx_bd_base; 1327 bdp = fep->rx_bd_base;
@@ -1212,152 +1350,11 @@ static int fec_enet_init(struct net_device *dev)
1212 bdp--; 1350 bdp--;
1213 bdp->cbd_sc |= BD_SC_WRAP; 1351 bdp->cbd_sc |= BD_SC_WRAP;
1214 1352
1215 fec_restart(dev, 0); 1353 fec_restart(ndev, 0);
1216 1354
1217 return 0; 1355 return 0;
1218} 1356}
1219 1357
1220/* This function is called to start or restart the FEC during a link
1221 * change. This only happens when switching between half and full
1222 * duplex.
1223 */
1224static void
1225fec_restart(struct net_device *dev, int duplex)
1226{
1227 struct fec_enet_private *fep = netdev_priv(dev);
1228 const struct platform_device_id *id_entry =
1229 platform_get_device_id(fep->pdev);
1230 int i;
1231 u32 val, temp_mac[2];
1232
1233 /* Whack a reset. We should wait for this. */
1234 writel(1, fep->hwp + FEC_ECNTRL);
1235 udelay(10);
1236
1237 /*
1238 * enet-mac reset will reset mac address registers too,
1239 * so need to reconfigure it.
1240 */
1241 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1242 memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
1243 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
1244 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
1245 }
1246
1247 /* Clear any outstanding interrupt. */
1248 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1249
1250 /* Reset all multicast. */
1251 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1252 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1253#ifndef CONFIG_M5272
1254 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1255 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1256#endif
1257
1258 /* Set maximum receive buffer size. */
1259 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
1260
1261 /* Set receive and transmit descriptor base. */
1262 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1263 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
1264 fep->hwp + FEC_X_DES_START);
1265
1266 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1267 fep->cur_rx = fep->rx_bd_base;
1268
1269 /* Reset SKB transmit buffers. */
1270 fep->skb_cur = fep->skb_dirty = 0;
1271 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
1272 if (fep->tx_skbuff[i]) {
1273 dev_kfree_skb_any(fep->tx_skbuff[i]);
1274 fep->tx_skbuff[i] = NULL;
1275 }
1276 }
1277
1278 /* Enable MII mode */
1279 if (duplex) {
1280 /* MII enable / FD enable */
1281 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1282 writel(0x04, fep->hwp + FEC_X_CNTRL);
1283 } else {
1284 /* MII enable / No Rcv on Xmit */
1285 writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
1286 writel(0x0, fep->hwp + FEC_X_CNTRL);
1287 }
1288 fep->full_duplex = duplex;
1289
1290 /* Set MII speed */
1291 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1292
1293 /*
1294 * The phy interface and speed need to get configured
1295 * differently on enet-mac.
1296 */
1297 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1298 val = readl(fep->hwp + FEC_R_CNTRL);
1299
1300 /* MII or RMII */
1301 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1302 val |= (1 << 8);
1303 else
1304 val &= ~(1 << 8);
1305
1306 /* 10M or 100M */
1307 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
1308 val &= ~(1 << 9);
1309 else
1310 val |= (1 << 9);
1311
1312 writel(val, fep->hwp + FEC_R_CNTRL);
1313 } else {
1314#ifdef FEC_MIIGSK_ENR
1315 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
1316 /* disable the gasket and wait */
1317 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1318 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1319 udelay(1);
1320
1321 /*
1322 * configure the gasket:
1323 * RMII, 50 MHz, no loopback, no echo
1324 */
1325 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
1326
1327 /* re-enable the gasket */
1328 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1329 }
1330#endif
1331 }
1332
1333 /* And last, enable the transmit and receive processing */
1334 writel(2, fep->hwp + FEC_ECNTRL);
1335 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1336
1337 /* Enable interrupts we wish to service */
1338 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1339}
1340
1341static void
1342fec_stop(struct net_device *dev)
1343{
1344 struct fec_enet_private *fep = netdev_priv(dev);
1345
1346 /* We cannot expect a graceful transmit stop without link !!! */
1347 if (fep->link) {
1348 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1349 udelay(10);
1350 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1351 printk("fec_stop : Graceful transmit stop did not complete !\n");
1352 }
1353
1354 /* Whack a reset. We should wait for this. */
1355 writel(1, fep->hwp + FEC_ECNTRL);
1356 udelay(10);
1357 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1358 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1359}
1360
1361static int __devinit 1358static int __devinit
1362fec_probe(struct platform_device *pdev) 1359fec_probe(struct platform_device *pdev)
1363{ 1360{
@@ -1377,19 +1374,20 @@ fec_probe(struct platform_device *pdev)
1377 1374
1378 /* Init network device */ 1375 /* Init network device */
1379 ndev = alloc_etherdev(sizeof(struct fec_enet_private)); 1376 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
1380 if (!ndev) 1377 if (!ndev) {
1381 return -ENOMEM; 1378 ret = -ENOMEM;
1379 goto failed_alloc_etherdev;
1380 }
1382 1381
1383 SET_NETDEV_DEV(ndev, &pdev->dev); 1382 SET_NETDEV_DEV(ndev, &pdev->dev);
1384 1383
1385 /* setup board info structure */ 1384 /* setup board info structure */
1386 fep = netdev_priv(ndev); 1385 fep = netdev_priv(ndev);
1387 memset(fep, 0, sizeof(*fep));
1388 1386
1389 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); 1387 fep->hwp = ioremap(r->start, resource_size(r));
1390 fep->pdev = pdev; 1388 fep->pdev = pdev;
1391 1389
1392 if (!ndev->base_addr) { 1390 if (!fep->hwp) {
1393 ret = -ENOMEM; 1391 ret = -ENOMEM;
1394 goto failed_ioremap; 1392 goto failed_ioremap;
1395 } 1393 }
@@ -1407,10 +1405,9 @@ fec_probe(struct platform_device *pdev)
1407 break; 1405 break;
1408 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); 1406 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
1409 if (ret) { 1407 if (ret) {
1410 while (i >= 0) { 1408 while (--i >= 0) {
1411 irq = platform_get_irq(pdev, i); 1409 irq = platform_get_irq(pdev, i);
1412 free_irq(irq, ndev); 1410 free_irq(irq, ndev);
1413 i--;
1414 } 1411 }
1415 goto failed_irq; 1412 goto failed_irq;
1416 } 1413 }
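The rewritten loop fixes an off-by-one in the error path: when request_irq() fails for index i, only indices below i were successfully requested, so the failed index itself must not be freed. A standalone sketch of the unwind pattern (stub functions, not the platform API):

#include <stdio.h>

#define NIRQ 3

static int request_stub(int i) { return i == 2 ? -1 : 0; } /* 3rd one fails */
static void free_stub(int i) { printf("free %d\n", i); }

int main(void)
{
        int i;

        for (i = 0; i < NIRQ; i++) {
                if (request_stub(i)) {
                        /* unwind only the indices that succeeded;
                         * --i skips the one that just failed */
                        while (--i >= 0)
                                free_stub(i);
                        return 1;
                }
        }
        return 0;
}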
@@ -1453,9 +1450,11 @@ failed_clk:
1453 free_irq(irq, ndev); 1450 free_irq(irq, ndev);
1454 } 1451 }
1455failed_irq: 1452failed_irq:
1456 iounmap((void __iomem *)ndev->base_addr); 1453 iounmap(fep->hwp);
1457failed_ioremap: 1454failed_ioremap:
1458 free_netdev(ndev); 1455 free_netdev(ndev);
1456failed_alloc_etherdev:
1457 release_mem_region(r->start, resource_size(r));
1459 1458
1460 return ret; 1459 return ret;
1461} 1460}
@@ -1465,16 +1464,22 @@ fec_drv_remove(struct platform_device *pdev)
1465{ 1464{
1466 struct net_device *ndev = platform_get_drvdata(pdev); 1465 struct net_device *ndev = platform_get_drvdata(pdev);
1467 struct fec_enet_private *fep = netdev_priv(ndev); 1466 struct fec_enet_private *fep = netdev_priv(ndev);
1468 1467 struct resource *r;
1469 platform_set_drvdata(pdev, NULL);
1470 1468
1471 fec_stop(ndev); 1469 fec_stop(ndev);
1472 fec_enet_mii_remove(fep); 1470 fec_enet_mii_remove(fep);
1473 clk_disable(fep->clk); 1471 clk_disable(fep->clk);
1474 clk_put(fep->clk); 1472 clk_put(fep->clk);
1475 iounmap((void __iomem *)ndev->base_addr); 1473 iounmap(fep->hwp);
1476 unregister_netdev(ndev); 1474 unregister_netdev(ndev);
1477 free_netdev(ndev); 1475 free_netdev(ndev);
1476
1477 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1478 BUG_ON(!r);
1479 release_mem_region(r->start, resource_size(r));
1480
1481 platform_set_drvdata(pdev, NULL);
1482
1478 return 0; 1483 return 0;
1479} 1484}
1480 1485
@@ -1483,16 +1488,14 @@ static int
1483fec_suspend(struct device *dev) 1488fec_suspend(struct device *dev)
1484{ 1489{
1485 struct net_device *ndev = dev_get_drvdata(dev); 1490 struct net_device *ndev = dev_get_drvdata(dev);
1486 struct fec_enet_private *fep; 1491 struct fec_enet_private *fep = netdev_priv(ndev);
1487 1492
1488 if (ndev) { 1493 if (netif_running(ndev)) {
1489 fep = netdev_priv(ndev); 1494 fec_stop(ndev);
1490 if (netif_running(ndev)) { 1495 netif_device_detach(ndev);
1491 fec_stop(ndev);
1492 netif_device_detach(ndev);
1493 }
1494 clk_disable(fep->clk);
1495 } 1496 }
1497 clk_disable(fep->clk);
1498
1496 return 0; 1499 return 0;
1497} 1500}
1498 1501
@@ -1500,16 +1503,14 @@ static int
1500fec_resume(struct device *dev) 1503fec_resume(struct device *dev)
1501{ 1504{
1502 struct net_device *ndev = dev_get_drvdata(dev); 1505 struct net_device *ndev = dev_get_drvdata(dev);
1503 struct fec_enet_private *fep; 1506 struct fec_enet_private *fep = netdev_priv(ndev);
1504 1507
1505 if (ndev) { 1508 clk_enable(fep->clk);
1506 fep = netdev_priv(ndev); 1509 if (netif_running(ndev)) {
1507 clk_enable(fep->clk); 1510 fec_restart(ndev, fep->full_duplex);
1508 if (netif_running(ndev)) { 1511 netif_device_attach(ndev);
1509 fec_restart(ndev, fep->full_duplex);
1510 netif_device_attach(ndev);
1511 }
1512 } 1512 }
1513
1513 return 0; 1514 return 0;
1514} 1515}
1515 1516
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 50c1213f61f..9f81b1ac130 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -840,8 +840,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
840/* OF Driver */ 840/* OF Driver */
841/* ======================================================================== */ 841/* ======================================================================== */
842 842
843static int __devinit 843static int __devinit mpc52xx_fec_probe(struct platform_device *op)
844mpc52xx_fec_probe(struct platform_device *op, const struct of_device_id *match)
845{ 844{
846 int rv; 845 int rv;
847 struct net_device *ndev; 846 struct net_device *ndev;
@@ -1049,7 +1048,7 @@ static struct of_device_id mpc52xx_fec_match[] = {
1049 1048
1050MODULE_DEVICE_TABLE(of, mpc52xx_fec_match); 1049MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
1051 1050
1052static struct of_platform_driver mpc52xx_fec_driver = { 1051static struct platform_driver mpc52xx_fec_driver = {
1053 .driver = { 1052 .driver = {
1054 .name = DRIVER_NAME, 1053 .name = DRIVER_NAME,
1055 .owner = THIS_MODULE, 1054 .owner = THIS_MODULE,
@@ -1073,21 +1072,21 @@ mpc52xx_fec_init(void)
1073{ 1072{
1074#ifdef CONFIG_FEC_MPC52xx_MDIO 1073#ifdef CONFIG_FEC_MPC52xx_MDIO
1075 int ret; 1074 int ret;
1076 ret = of_register_platform_driver(&mpc52xx_fec_mdio_driver); 1075 ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
1077 if (ret) { 1076 if (ret) {
1078 printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n"); 1077 printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n");
1079 return ret; 1078 return ret;
1080 } 1079 }
1081#endif 1080#endif
1082 return of_register_platform_driver(&mpc52xx_fec_driver); 1081 return platform_driver_register(&mpc52xx_fec_driver);
1083} 1082}
1084 1083
1085static void __exit 1084static void __exit
1086mpc52xx_fec_exit(void) 1085mpc52xx_fec_exit(void)
1087{ 1086{
1088 of_unregister_platform_driver(&mpc52xx_fec_driver); 1087 platform_driver_unregister(&mpc52xx_fec_driver);
1089#ifdef CONFIG_FEC_MPC52xx_MDIO 1088#ifdef CONFIG_FEC_MPC52xx_MDIO
1090 of_unregister_platform_driver(&mpc52xx_fec_mdio_driver); 1089 platform_driver_unregister(&mpc52xx_fec_mdio_driver);
1091#endif 1090#endif
1092} 1091}
1093 1092
diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/fec_mpc52xx.h
index a227a525bdb..41d2dffde55 100644
--- a/drivers/net/fec_mpc52xx.h
+++ b/drivers/net/fec_mpc52xx.h
@@ -289,6 +289,6 @@ struct mpc52xx_fec {
289#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000 289#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000
290 290
291 291
292extern struct of_platform_driver mpc52xx_fec_mdio_driver; 292extern struct platform_driver mpc52xx_fec_mdio_driver;
293 293
294#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */ 294#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 0b4cb6f1598..360a578c2bb 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -61,8 +61,7 @@ static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
61 data | FEC_MII_WRITE_FRAME); 61 data | FEC_MII_WRITE_FRAME);
62} 62}
63 63
64static int mpc52xx_fec_mdio_probe(struct platform_device *of, 64static int mpc52xx_fec_mdio_probe(struct platform_device *of)
65 const struct of_device_id *match)
66{ 65{
67 struct device *dev = &of->dev; 66 struct device *dev = &of->dev;
68 struct device_node *np = of->dev.of_node; 67 struct device_node *np = of->dev.of_node;
@@ -145,7 +144,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
145}; 144};
146MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); 145MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
147 146
148struct of_platform_driver mpc52xx_fec_mdio_driver = { 147struct platform_driver mpc52xx_fec_mdio_driver = {
149 .driver = { 148 .driver = {
150 .name = "mpc5200b-fec-phy", 149 .name = "mpc5200b-fec-phy",
151 .owner = THIS_MODULE, 150 .owner = THIS_MODULE,
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296ef0d..7b92897ca66 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5645 goto out_error; 5645 goto out_error;
5646 } 5646 }
5647 5647
5648 netif_carrier_off(dev);
5649
5648 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", 5650 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5649 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); 5651 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5650 5652
@@ -5742,7 +5744,7 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
5742 pci_set_drvdata(pci_dev, NULL); 5744 pci_set_drvdata(pci_dev, NULL);
5743} 5745}
5744 5746
5745#ifdef CONFIG_PM 5747#ifdef CONFIG_PM_SLEEP
5746static int nv_suspend(struct device *device) 5748static int nv_suspend(struct device *device)
5747{ 5749{
5748 struct pci_dev *pdev = to_pci_dev(device); 5750 struct pci_dev *pdev = to_pci_dev(device);
@@ -5793,6 +5795,11 @@ static int nv_resume(struct device *device)
5793static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); 5795static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
5794#define NV_PM_OPS (&nv_pm_ops) 5796#define NV_PM_OPS (&nv_pm_ops)
5795 5797
5798#else
5799#define NV_PM_OPS NULL
5800#endif /* CONFIG_PM_SLEEP */
5801
5802#ifdef CONFIG_PM
5796static void nv_shutdown(struct pci_dev *pdev) 5803static void nv_shutdown(struct pci_dev *pdev)
5797{ 5804{
5798 struct net_device *dev = pci_get_drvdata(pdev); 5805 struct net_device *dev = pci_get_drvdata(pdev);
@@ -5820,7 +5827,6 @@ static void nv_shutdown(struct pci_dev *pdev)
5820 } 5827 }
5821} 5828}
5822#else 5829#else
5823#define NV_PM_OPS NULL
5824#define nv_shutdown NULL 5830#define nv_shutdown NULL
5825#endif /* CONFIG_PM */ 5831#endif /* CONFIG_PM */
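The restructuring above decouples the two configuration symbols: the dev_pm_ops depend on CONFIG_PM_SLEEP while the shutdown hook depends on CONFIG_PM, so each now gets its own guard and fallback instead of sharing one #else. A compilable sketch of that shape (stub names only; toggle the defines to exercise all four combinations):

#include <stdio.h>

#define CONFIG_PM
#define CONFIG_PM_SLEEP

#ifdef CONFIG_PM_SLEEP
static void suspend_stub(void) { puts("suspend/resume ops present"); }
#define NV_PM_OPS suspend_stub
#else
#define NV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static void shutdown_stub(void) { puts("shutdown hook present"); }
#define NV_SHUTDOWN shutdown_stub
#else
#define NV_SHUTDOWN NULL
#endif /* CONFIG_PM */

int main(void)
{
        void (*pm)(void) = NV_PM_OPS;
        void (*shutdown)(void) = NV_SHUTDOWN;

        if (pm)
                pm();
        if (shutdown)
                shutdown();
        return 0;
}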
5826 5832
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 7a1f3d0ffa7..24cb953900d 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -998,8 +998,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
998#endif 998#endif
999}; 999};
1000 1000
1001static int __devinit fs_enet_probe(struct platform_device *ofdev, 1001static int __devinit fs_enet_probe(struct platform_device *ofdev)
1002 const struct of_device_id *match)
1003{ 1002{
1004 struct net_device *ndev; 1003 struct net_device *ndev;
1005 struct fs_enet_private *fep; 1004 struct fs_enet_private *fep;
@@ -1008,11 +1007,14 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev,
1008 const u8 *mac_addr; 1007 const u8 *mac_addr;
1009 int privsize, len, ret = -ENODEV; 1008 int privsize, len, ret = -ENODEV;
1010 1009
1010 if (!ofdev->dev.of_match)
1011 return -EINVAL;
1012
1011 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); 1013 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
1012 if (!fpi) 1014 if (!fpi)
1013 return -ENOMEM; 1015 return -ENOMEM;
1014 1016
1015 if (!IS_FEC(match)) { 1017 if (!IS_FEC(ofdev->dev.of_match)) {
1016 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); 1018 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
1017 if (!data || len != 4) 1019 if (!data || len != 4)
1018 goto out_free_fpi; 1020 goto out_free_fpi;
@@ -1047,7 +1049,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev,
1047 fep->dev = &ofdev->dev; 1049 fep->dev = &ofdev->dev;
1048 fep->ndev = ndev; 1050 fep->ndev = ndev;
1049 fep->fpi = fpi; 1051 fep->fpi = fpi;
1050 fep->ops = match->data; 1052 fep->ops = ofdev->dev.of_match->data;
1051 1053
1052 ret = fep->ops->setup_data(ndev); 1054 ret = fep->ops->setup_data(ndev);
1053 if (ret) 1055 if (ret)
@@ -1156,7 +1158,7 @@ static struct of_device_id fs_enet_match[] = {
1156}; 1158};
1157MODULE_DEVICE_TABLE(of, fs_enet_match); 1159MODULE_DEVICE_TABLE(of, fs_enet_match);
1158 1160
1159static struct of_platform_driver fs_enet_driver = { 1161static struct platform_driver fs_enet_driver = {
1160 .driver = { 1162 .driver = {
1161 .owner = THIS_MODULE, 1163 .owner = THIS_MODULE,
1162 .name = "fs_enet", 1164 .name = "fs_enet",
@@ -1168,12 +1170,12 @@ static struct of_platform_driver fs_enet_driver = {
1168 1170
1169static int __init fs_init(void) 1171static int __init fs_init(void)
1170{ 1172{
1171 return of_register_platform_driver(&fs_enet_driver); 1173 return platform_driver_register(&fs_enet_driver);
1172} 1174}
1173 1175
1174static void __exit fs_cleanup(void) 1176static void __exit fs_cleanup(void)
1175{ 1177{
1176 of_unregister_platform_driver(&fs_enet_driver); 1178 platform_driver_unregister(&fs_enet_driver);
1177} 1179}
1178 1180
1179#ifdef CONFIG_NET_POLL_CONTROLLER 1181#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 3cda2b51547..ad297544071 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -150,8 +150,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
150 return 0; 150 return 0;
151} 151}
152 152
153static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev, 153static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
154 const struct of_device_id *match)
155{ 154{
156 struct mii_bus *new_bus; 155 struct mii_bus *new_bus;
157 struct bb_info *bitbang; 156 struct bb_info *bitbang;
@@ -223,7 +222,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
223}; 222};
224MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); 223MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
225 224
226static struct of_platform_driver fs_enet_bb_mdio_driver = { 225static struct platform_driver fs_enet_bb_mdio_driver = {
227 .driver = { 226 .driver = {
228 .name = "fsl-bb-mdio", 227 .name = "fsl-bb-mdio",
229 .owner = THIS_MODULE, 228 .owner = THIS_MODULE,
@@ -235,12 +234,12 @@ static struct of_platform_driver fs_enet_bb_mdio_driver = {
235 234
236static int fs_enet_mdio_bb_init(void) 235static int fs_enet_mdio_bb_init(void)
237{ 236{
238 return of_register_platform_driver(&fs_enet_bb_mdio_driver); 237 return platform_driver_register(&fs_enet_bb_mdio_driver);
239} 238}
240 239
241static void fs_enet_mdio_bb_exit(void) 240static void fs_enet_mdio_bb_exit(void)
242{ 241{
243 of_unregister_platform_driver(&fs_enet_bb_mdio_driver); 242 platform_driver_unregister(&fs_enet_bb_mdio_driver);
244} 243}
245 244
246module_init(fs_enet_mdio_bb_init); 245module_init(fs_enet_mdio_bb_init);
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index dbb9c48623d..7e840d373ab 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -101,15 +101,18 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
101 return 0; 101 return 0;
102} 102}
103 103
104static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev, 104static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
105 const struct of_device_id *match)
106{ 105{
107 struct resource res; 106 struct resource res;
108 struct mii_bus *new_bus; 107 struct mii_bus *new_bus;
109 struct fec_info *fec; 108 struct fec_info *fec;
110 int (*get_bus_freq)(struct device_node *) = match->data; 109 int (*get_bus_freq)(struct device_node *);
111 int ret = -ENOMEM, clock, speed; 110 int ret = -ENOMEM, clock, speed;
112 111
112 if (!ofdev->dev.of_match)
113 return -EINVAL;
114 get_bus_freq = ofdev->dev.of_match->data;
115
113 new_bus = mdiobus_alloc(); 116 new_bus = mdiobus_alloc();
114 if (!new_bus) 117 if (!new_bus)
115 goto out; 118 goto out;
@@ -221,7 +224,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
221}; 224};
222MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); 225MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
223 226
224static struct of_platform_driver fs_enet_fec_mdio_driver = { 227static struct platform_driver fs_enet_fec_mdio_driver = {
225 .driver = { 228 .driver = {
226 .name = "fsl-fec-mdio", 229 .name = "fsl-fec-mdio",
227 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
@@ -233,12 +236,12 @@ static struct of_platform_driver fs_enet_fec_mdio_driver = {
233 236
234static int fs_enet_mdio_fec_init(void) 237static int fs_enet_mdio_fec_init(void)
235{ 238{
236 return of_register_platform_driver(&fs_enet_fec_mdio_driver); 239 return platform_driver_register(&fs_enet_fec_mdio_driver);
237} 240}
238 241
239static void fs_enet_mdio_fec_exit(void) 242static void fs_enet_mdio_fec_exit(void)
240{ 243{
241 of_unregister_platform_driver(&fs_enet_fec_mdio_driver); 244 platform_driver_unregister(&fs_enet_fec_mdio_driver);
242} 245}
243 246
244module_init(fs_enet_mdio_fec_init); 247module_init(fs_enet_mdio_fec_init);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 8d3a2ccbc95..52f4e8ad48e 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -265,8 +265,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
265#endif 265#endif
266 266
267 267
268static int fsl_pq_mdio_probe(struct platform_device *ofdev, 268static int fsl_pq_mdio_probe(struct platform_device *ofdev)
269 const struct of_device_id *match)
270{ 269{
271 struct device_node *np = ofdev->dev.of_node; 270 struct device_node *np = ofdev->dev.of_node;
272 struct device_node *tbi; 271 struct device_node *tbi;
@@ -471,7 +470,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
471}; 470};
472MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); 471MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
473 472
474static struct of_platform_driver fsl_pq_mdio_driver = { 473static struct platform_driver fsl_pq_mdio_driver = {
475 .driver = { 474 .driver = {
476 .name = "fsl-pq_mdio", 475 .name = "fsl-pq_mdio",
477 .owner = THIS_MODULE, 476 .owner = THIS_MODULE,
@@ -483,13 +482,13 @@ static struct of_platform_driver fsl_pq_mdio_driver = {
483 482
484int __init fsl_pq_mdio_init(void) 483int __init fsl_pq_mdio_init(void)
485{ 484{
486 return of_register_platform_driver(&fsl_pq_mdio_driver); 485 return platform_driver_register(&fsl_pq_mdio_driver);
487} 486}
488module_init(fsl_pq_mdio_init); 487module_init(fsl_pq_mdio_init);
489 488
490void fsl_pq_mdio_exit(void) 489void fsl_pq_mdio_exit(void)
491{ 490{
492 of_unregister_platform_driver(&fsl_pq_mdio_driver); 491 platform_driver_unregister(&fsl_pq_mdio_driver);
493} 492}
494module_exit(fsl_pq_mdio_exit); 493module_exit(fsl_pq_mdio_exit);
495MODULE_LICENSE("GPL"); 494MODULE_LICENSE("GPL");
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
new file mode 100644
index 00000000000..a31661948c4
--- /dev/null
+++ b/drivers/net/ftmac100.c
@@ -0,0 +1,1198 @@
1/*
2 * Faraday FTMAC100 10/100 Ethernet
3 *
4 * (C) Copyright 2009-2011 Faraday Technology
5 * Po-Yu Chuang <ratbert@faraday-tech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/dma-mapping.h>
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/init.h>
28#include <linux/io.h>
29#include <linux/mii.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include <linux/platform_device.h>
33
34#include "ftmac100.h"
35
36#define DRV_NAME "ftmac100"
37#define DRV_VERSION "0.2"
38
39#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
40#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
41
42#define MAX_PKT_SIZE 1518
43#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */
44
45#if MAX_PKT_SIZE > 0x7ff
46#error invalid MAX_PKT_SIZE
47#endif
48
49#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
50#error invalid RX_BUF_SIZE
51#endif
52
53/******************************************************************************
54 * private data
55 *****************************************************************************/
56struct ftmac100_descs {
57 struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
58 struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES];
59};
60
61struct ftmac100 {
62 struct resource *res;
63 void __iomem *base;
64 int irq;
65
66 struct ftmac100_descs *descs;
67 dma_addr_t descs_dma_addr;
68
69 unsigned int rx_pointer;
70 unsigned int tx_clean_pointer;
71 unsigned int tx_pointer;
72 unsigned int tx_pending;
73
74 spinlock_t tx_lock;
75
76 struct net_device *netdev;
77 struct device *dev;
78 struct napi_struct napi;
79
80 struct mii_if_info mii;
81};
82
83static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
84 struct ftmac100_rxdes *rxdes, gfp_t gfp);
85
86/******************************************************************************
87 * internal functions (hardware register access)
88 *****************************************************************************/
89#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \
90 FTMAC100_INT_NORXBUF | \
91 FTMAC100_INT_XPKT_OK | \
92 FTMAC100_INT_XPKT_LOST | \
93 FTMAC100_INT_RPKT_LOST | \
94 FTMAC100_INT_AHB_ERR | \
95 FTMAC100_INT_PHYSTS_CHG)
96
97#define INT_MASK_ALL_DISABLED 0
98
99static void ftmac100_enable_all_int(struct ftmac100 *priv)
100{
101 iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
102}
103
104static void ftmac100_disable_all_int(struct ftmac100 *priv)
105{
106 iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR);
107}
108
109static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
110{
111 iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR);
112}
113
114static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
115{
116 iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
117}
118
119static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
120{
121 iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
122}
123
124static int ftmac100_reset(struct ftmac100 *priv)
125{
126 struct net_device *netdev = priv->netdev;
127 int i;
128
129 /* NOTE: reset clears all registers */
130 iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR);
131
132 for (i = 0; i < 5; i++) {
133 unsigned int maccr;
134
135 maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
136 if (!(maccr & FTMAC100_MACCR_SW_RST)) {
137 /*
138 * FTMAC100_MACCR_SW_RST cleared does not indicate
139 * that the hardware reset has completed.
140 * We still need to wait for a while.
141 */
142 usleep_range(500, 1000);
143 return 0;
144 }
145
146 usleep_range(1000, 10000);
147 }
148
149 netdev_err(netdev, "software reset failed\n");
150 return -EIO;
151}
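ftmac100_reset() polls a self-clearing reset bit with a bounded retry count rather than spinning indefinitely. A schematic of that pattern (read_maccr() is a stand-in that models the MMIO read; the sleeps are elided):

#include <stdio.h>

#define SW_RST 0x1u

static unsigned int maccr = SW_RST; /* models the MACCR register */

static unsigned int read_maccr(void)
{
        static int reads;

        /* pretend the reset completes after a few reads */
        if (++reads >= 3)
                maccr &= ~SW_RST;
        return maccr;
}

static int reset(void)
{
        int i;

        for (i = 0; i < 5; i++) {
                if (!(read_maccr() & SW_RST))
                        return 0;       /* bit self-cleared: done */
                /* driver sleeps here: usleep_range(1000, 10000) */
        }
        return -1;                      /* bounded failure, no hang */
}

int main(void)
{
        printf("reset %s\n", reset() ? "failed" : "ok");
        return 0;
}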
152
153static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
154{
155 unsigned int maddr = mac[0] << 8 | mac[1];
156 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
157
158 iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR);
159 iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
160}
161
162#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
163 FTMAC100_MACCR_RCV_EN | \
164 FTMAC100_MACCR_XDMA_EN | \
165 FTMAC100_MACCR_RDMA_EN | \
166 FTMAC100_MACCR_CRC_APD | \
167 FTMAC100_MACCR_FULLDUP | \
168 FTMAC100_MACCR_RX_RUNT | \
169 FTMAC100_MACCR_RX_BROADPKT)
170
171static int ftmac100_start_hw(struct ftmac100 *priv)
172{
173 struct net_device *netdev = priv->netdev;
174
175 if (ftmac100_reset(priv))
176 return -EIO;
177
178 /* setup ring buffer base registers */
179 ftmac100_set_rx_ring_base(priv,
180 priv->descs_dma_addr +
181 offsetof(struct ftmac100_descs, rxdes));
182 ftmac100_set_tx_ring_base(priv,
183 priv->descs_dma_addr +
184 offsetof(struct ftmac100_descs, txdes));
185
186 iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC);
187
188 ftmac100_set_mac(priv, netdev->dev_addr);
189
190 iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
191 return 0;
192}
193
194static void ftmac100_stop_hw(struct ftmac100 *priv)
195{
196 iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
197}
198
199/******************************************************************************
200 * internal functions (receive descriptor)
201 *****************************************************************************/
202static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes)
203{
204 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS);
205}
206
207static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes)
208{
209 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS);
210}
211
212static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes)
213{
214 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
215}
216
217static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes)
218{
219 /* clear status bits */
220 rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
221}
222
223static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes)
224{
225 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR);
226}
227
228static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
229{
230 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
231}
232
233static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
234{
235 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
236}
237
238static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
239{
240 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
241}
242
243static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes)
244{
245 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB);
246}
247
248static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes)
249{
250 return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL;
251}
252
253static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes)
254{
255 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST);
256}
257
258static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes,
259 unsigned int size)
260{
261 rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR);
262 rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size));
263}
264
265static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes)
266{
267 rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR);
268}
269
270static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes,
271 dma_addr_t addr)
272{
273 rxdes->rxdes2 = cpu_to_le32(addr);
274}
275
276static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes)
277{
278 return le32_to_cpu(rxdes->rxdes2);
279}
280
281/*
282 * rxdes3 is not used by hardware. We use it to keep track of page.
283 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
284 */
285static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
286{
287 rxdes->rxdes3 = (unsigned int)page;
288}
289
290static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
291{
292 return (struct page *)rxdes->rxdes3;
293}
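rxdes3 is a descriptor word the hardware ignores, so the driver reuses it as a cookie for the backing struct page; the raw cast to unsigned int relies on pointers fitting the 32-bit field, i.e. a 32-bit kernel. A portable sketch of the same trick (stub types, with uintptr_t standing in for the raw word):

#include <stdint.h>
#include <stdio.h>

struct rxdes_stub {
        uint32_t rxdes0, rxdes1, rxdes2;
        uintptr_t cookie;       /* stands in for the rxdes3 word */
};

struct page_stub { int id; };

/* Store and recover a CPU pointer through the spare descriptor
 * field; the DMA engine never reads it, so no byte swapping is
 * needed.
 */
static void set_page_cookie(struct rxdes_stub *d, struct page_stub *p)
{
        d->cookie = (uintptr_t)p;
}

static struct page_stub *get_page_cookie(struct rxdes_stub *d)
{
        return (struct page_stub *)d->cookie;
}

int main(void)
{
        struct page_stub pg = { 42 };
        struct rxdes_stub d = { 0 };

        set_page_cookie(&d, &pg);
        printf("cookie id = %d\n", get_page_cookie(&d)->id);
        return 0;
}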
294
295/******************************************************************************
296 * internal functions (receive)
297 *****************************************************************************/
298static int ftmac100_next_rx_pointer(int pointer)
299{
300 return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
301}
302
303static void ftmac100_rx_pointer_advance(struct ftmac100 *priv)
304{
305 priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer);
306}
307
308static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv)
309{
310 return &priv->descs->rxdes[priv->rx_pointer];
311}
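Both ring sizes are declared as powers of two precisely so the pointer advance can wrap with a mask instead of a modulo. A quick standalone check of that equivalence:

#include <assert.h>
#include <stdio.h>

#define RX_QUEUE_ENTRIES 128 /* must be a power of 2 */

/* Masking is equivalent to modulo only for power-of-2 ring sizes. */
static int next_pointer(int p)
{
        return (p + 1) & (RX_QUEUE_ENTRIES - 1);
}

int main(void)
{
        int p;

        for (p = 0; p < RX_QUEUE_ENTRIES; p++)
                assert(next_pointer(p) == (p + 1) % RX_QUEUE_ENTRIES);

        printf("next(%d) = %d\n", RX_QUEUE_ENTRIES - 1,
               next_pointer(RX_QUEUE_ENTRIES - 1));
        return 0;
}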
312
313static struct ftmac100_rxdes *
314ftmac100_rx_locate_first_segment(struct ftmac100 *priv)
315{
316 struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
317
318 while (!ftmac100_rxdes_owned_by_dma(rxdes)) {
319 if (ftmac100_rxdes_first_segment(rxdes))
320 return rxdes;
321
322 ftmac100_rxdes_set_dma_own(rxdes);
323 ftmac100_rx_pointer_advance(priv);
324 rxdes = ftmac100_current_rxdes(priv);
325 }
326
327 return NULL;
328}
329
330static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
331 struct ftmac100_rxdes *rxdes)
332{
333 struct net_device *netdev = priv->netdev;
334 bool error = false;
335
336 if (unlikely(ftmac100_rxdes_rx_error(rxdes))) {
337 if (net_ratelimit())
338 netdev_info(netdev, "rx err\n");
339
340 netdev->stats.rx_errors++;
341 error = true;
342 }
343
344 if (unlikely(ftmac100_rxdes_crc_error(rxdes))) {
345 if (net_ratelimit())
346 netdev_info(netdev, "rx crc err\n");
347
348 netdev->stats.rx_crc_errors++;
349 error = true;
350 }
351
352 if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
353 if (net_ratelimit())
354 netdev_info(netdev, "rx frame too long\n");
355
356 netdev->stats.rx_length_errors++;
357 error = true;
358 } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
359 if (net_ratelimit())
360 netdev_info(netdev, "rx runt\n");
361
362 netdev->stats.rx_length_errors++;
363 error = true;
364 } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) {
365 if (net_ratelimit())
366 netdev_info(netdev, "rx odd nibble\n");
367
368 netdev->stats.rx_length_errors++;
369 error = true;
370 }
371
372 return error;
373}
374
375static void ftmac100_rx_drop_packet(struct ftmac100 *priv)
376{
377 struct net_device *netdev = priv->netdev;
378 struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
379 bool done = false;
380
381 if (net_ratelimit())
382 netdev_dbg(netdev, "drop packet %p\n", rxdes);
383
384 do {
385 if (ftmac100_rxdes_last_segment(rxdes))
386 done = true;
387
388 ftmac100_rxdes_set_dma_own(rxdes);
389 ftmac100_rx_pointer_advance(priv);
390 rxdes = ftmac100_current_rxdes(priv);
391 } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes));
392
393 netdev->stats.rx_dropped++;
394}
395
396static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
397{
398 struct net_device *netdev = priv->netdev;
399 struct ftmac100_rxdes *rxdes;
400 struct sk_buff *skb;
401 struct page *page;
402 dma_addr_t map;
403 int length;
404
405 rxdes = ftmac100_rx_locate_first_segment(priv);
406 if (!rxdes)
407 return false;
408
409 if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
410 ftmac100_rx_drop_packet(priv);
411 return true;
412 }
413
414 /*
415 * It is impossible to get multi-segment packets
416 * because we always provide big enough receive buffers.
417 */
418 if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
419 BUG();
420
421 /* start processing */
422 skb = netdev_alloc_skb_ip_align(netdev, 128);
423 if (unlikely(!skb)) {
424 if (net_ratelimit())
425 netdev_err(netdev, "rx skb alloc failed\n");
426
427 ftmac100_rx_drop_packet(priv);
428 return true;
429 }
430
431 if (unlikely(ftmac100_rxdes_multicast(rxdes)))
432 netdev->stats.multicast++;
433
434 map = ftmac100_rxdes_get_dma_addr(rxdes);
435 dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
436
437 length = ftmac100_rxdes_frame_length(rxdes);
438 page = ftmac100_rxdes_get_page(rxdes);
439 skb_fill_page_desc(skb, 0, page, 0, length);
440 skb->len += length;
441 skb->data_len += length;
442 skb->truesize += length;
443 __pskb_pull_tail(skb, min(length, 64));
444
445 ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
446
447 ftmac100_rx_pointer_advance(priv);
448
449 skb->protocol = eth_type_trans(skb, netdev);
450
451 netdev->stats.rx_packets++;
452 netdev->stats.rx_bytes += skb->len;
453
454 /* push packet to protocol stack */
455 netif_receive_skb(skb);
456
457 (*processed)++;
458 return true;
459}
460
461/******************************************************************************
462 * internal functions (transmit descriptor)
463 *****************************************************************************/
464static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes)
465{
466 /* clear all except end of ring bit */
467 txdes->txdes0 = 0;
468 txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
469 txdes->txdes2 = 0;
470 txdes->txdes3 = 0;
471}
472
473static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes)
474{
475 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
476}
477
478static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes)
479{
480 /*
481 * Make sure dma own bit will not be set before any other
482 * descriptor fields.
483 */
484 wmb();
485 txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
486}
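The wmb() above is the classic publish step: all other descriptor fields must be visible before the own bit hands the descriptor to the DMA engine. In portable C11 the same ordering can be expressed with a release store (a sketch of the idea, not the kernel barrier API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define OWN_BIT 0x80000000u

struct txdes_stub {
        uint32_t len;
        _Atomic uint32_t txdes0;
};

/* Fill the descriptor, then publish it with a release store so the
 * consumer cannot observe the own bit before the payload fields.
 */
static void publish(struct txdes_stub *d, uint32_t len)
{
        d->len = len;
        atomic_store_explicit(&d->txdes0, OWN_BIT, memory_order_release);
}

int main(void)
{
        struct txdes_stub d = { 0 };

        publish(&d, 64);
        printf("txdes0 = 0x%08x\n", (unsigned int)atomic_load(&d.txdes0));
        return 0;
}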
487
488static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes)
489{
490 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL);
491}
492
493static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes)
494{
495 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL);
496}
497
498static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes)
499{
500 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
501}
502
503static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes)
504{
505 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS);
506}
507
508static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes)
509{
510 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS);
511}
512
513static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes)
514{
515 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC);
516}
517
518static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes,
519 unsigned int len)
520{
521 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len));
522}
523
524static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes,
525 dma_addr_t addr)
526{
527 txdes->txdes2 = cpu_to_le32(addr);
528}
529
530static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes)
531{
532 return le32_to_cpu(txdes->txdes2);
533}
534
535/*
536 * txdes3 is not used by hardware. We use it to keep track of socket buffer.
537 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
538 */
539static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
540{
541 txdes->txdes3 = (unsigned int)skb;
542}
543
544static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes)
545{
546 return (struct sk_buff *)txdes->txdes3;
547}
548
549/******************************************************************************
550 * internal functions (transmit)
551 *****************************************************************************/
552static int ftmac100_next_tx_pointer(int pointer)
553{
554 return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
555}
556
557static void ftmac100_tx_pointer_advance(struct ftmac100 *priv)
558{
559 priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer);
560}
561
562static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv)
563{
564 priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
565}
566
567static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
568{
569 return &priv->descs->txdes[priv->tx_pointer];
570}
571
572static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
573{
574 return &priv->descs->txdes[priv->tx_clean_pointer];
575}
576
577static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
578{
579 struct net_device *netdev = priv->netdev;
580 struct ftmac100_txdes *txdes;
581 struct sk_buff *skb;
582 dma_addr_t map;
583
584 if (priv->tx_pending == 0)
585 return false;
586
587 txdes = ftmac100_current_clean_txdes(priv);
588
589 if (ftmac100_txdes_owned_by_dma(txdes))
590 return false;
591
592 skb = ftmac100_txdes_get_skb(txdes);
593 map = ftmac100_txdes_get_dma_addr(txdes);
594
595 if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
596 ftmac100_txdes_late_collision(txdes))) {
597 /*
598 * packet transmitted to ethernet lost due to late collision
599 * or excessive collision
600 */
601 netdev->stats.tx_aborted_errors++;
602 } else {
603 netdev->stats.tx_packets++;
604 netdev->stats.tx_bytes += skb->len;
605 }
606
607 dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
608 dev_kfree_skb(skb);
609
610 ftmac100_txdes_reset(txdes);
611
612 ftmac100_tx_clean_pointer_advance(priv);
613
614 spin_lock(&priv->tx_lock);
615 priv->tx_pending--;
616 spin_unlock(&priv->tx_lock);
617 netif_wake_queue(netdev);
618
619 return true;
620}
621
622static void ftmac100_tx_complete(struct ftmac100 *priv)
623{
624 while (ftmac100_tx_complete_packet(priv))
625 ;
626}
627
628static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
629 dma_addr_t map)
630{
631 struct net_device *netdev = priv->netdev;
632 struct ftmac100_txdes *txdes;
633 unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
634
635 txdes = ftmac100_current_txdes(priv);
636 ftmac100_tx_pointer_advance(priv);
637
638 /* setup TX descriptor */
639 ftmac100_txdes_set_skb(txdes, skb);
640 ftmac100_txdes_set_dma_addr(txdes, map);
641
642 ftmac100_txdes_set_first_segment(txdes);
643 ftmac100_txdes_set_last_segment(txdes);
644 ftmac100_txdes_set_txint(txdes);
645 ftmac100_txdes_set_buffer_size(txdes, len);
646
647 spin_lock(&priv->tx_lock);
648 priv->tx_pending++;
649 if (priv->tx_pending == TX_QUEUE_ENTRIES)
650 netif_stop_queue(netdev);
651
652 /* start transmit */
653 ftmac100_txdes_set_dma_own(txdes);
654 spin_unlock(&priv->tx_lock);
655
656 ftmac100_txdma_start_polling(priv);
657 return NETDEV_TX_OK;
658}
659
660/******************************************************************************
661 * internal functions (buffer)
662 *****************************************************************************/
663static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
664 struct ftmac100_rxdes *rxdes, gfp_t gfp)
665{
666 struct net_device *netdev = priv->netdev;
667 struct page *page;
668 dma_addr_t map;
669
670 page = alloc_page(gfp);
671 if (!page) {
672 if (net_ratelimit())
673 netdev_err(netdev, "failed to allocate rx page\n");
674 return -ENOMEM;
675 }
676
677 map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
678 if (unlikely(dma_mapping_error(priv->dev, map))) {
679 if (net_ratelimit())
680 netdev_err(netdev, "failed to map rx page\n");
681 __free_page(page);
682 return -ENOMEM;
683 }
684
685 ftmac100_rxdes_set_page(rxdes, page);
686 ftmac100_rxdes_set_dma_addr(rxdes, map);
687 ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
688 ftmac100_rxdes_set_dma_own(rxdes);
689 return 0;
690}
691
692static void ftmac100_free_buffers(struct ftmac100 *priv)
693{
694 int i;
695
696 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
697 struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
698 struct page *page = ftmac100_rxdes_get_page(rxdes);
699 dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);
700
701 if (!page)
702 continue;
703
704 dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
705 __free_page(page);
706 }
707
708 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
709 struct ftmac100_txdes *txdes = &priv->descs->txdes[i];
710 struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
711 dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);
712
713 if (!skb)
714 continue;
715
716 dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
717 dev_kfree_skb(skb);
718 }
719
720 dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs),
721 priv->descs, priv->descs_dma_addr);
722}
723
724static int ftmac100_alloc_buffers(struct ftmac100 *priv)
725{
726 int i;
727
728 priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
729 &priv->descs_dma_addr, GFP_KERNEL);
730 if (!priv->descs)
731 return -ENOMEM;
732
733 memset(priv->descs, 0, sizeof(struct ftmac100_descs));
734
735 /* initialize RX ring */
736 ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
737
738 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
739 struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
740
741 if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
742 goto err;
743 }
744
745 /* initialize TX ring */
746 ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
747 return 0;
748
749err:
750 ftmac100_free_buffers(priv);
751 return -ENOMEM;
752}
753
754/******************************************************************************
755 * struct mii_if_info functions
756 *****************************************************************************/
757static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
758{
759 struct ftmac100 *priv = netdev_priv(netdev);
760 unsigned int phycr;
761 int i;
762
763 phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
764 FTMAC100_PHYCR_REGAD(reg) |
765 FTMAC100_PHYCR_MIIRD;
766
767 iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
768
769 for (i = 0; i < 10; i++) {
770 phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
771
772 if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
773 return phycr & FTMAC100_PHYCR_MIIRDATA;
774
775 usleep_range(100, 1000);
776 }
777
778 netdev_err(netdev, "mdio read timed out\n");
779 return 0;
780}
781
782static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
783 int data)
784{
785 struct ftmac100 *priv = netdev_priv(netdev);
786 unsigned int phycr;
787 int i;
788
789 phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
790 FTMAC100_PHYCR_REGAD(reg) |
791 FTMAC100_PHYCR_MIIWR;
792
793 data = FTMAC100_PHYWDATA_MIIWDATA(data);
794
795 iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA);
796 iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
797
798 for (i = 0; i < 10; i++) {
799 phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
800
801 if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
802 return;
803
804 usleep_range(100, 1000);
805 }
806
807 netdev_err(netdev, "mdio write timed out\n");
808}
809
810/******************************************************************************
811 * struct ethtool_ops functions
812 *****************************************************************************/
813static void ftmac100_get_drvinfo(struct net_device *netdev,
814 struct ethtool_drvinfo *info)
815{
816 strcpy(info->driver, DRV_NAME);
817 strcpy(info->version, DRV_VERSION);
818 strcpy(info->bus_info, dev_name(&netdev->dev));
819}
820
821static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
822{
823 struct ftmac100 *priv = netdev_priv(netdev);
824 return mii_ethtool_gset(&priv->mii, cmd);
825}
826
827static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
828{
829 struct ftmac100 *priv = netdev_priv(netdev);
830 return mii_ethtool_sset(&priv->mii, cmd);
831}
832
833static int ftmac100_nway_reset(struct net_device *netdev)
834{
835 struct ftmac100 *priv = netdev_priv(netdev);
836 return mii_nway_restart(&priv->mii);
837}
838
839static u32 ftmac100_get_link(struct net_device *netdev)
840{
841 struct ftmac100 *priv = netdev_priv(netdev);
842 return mii_link_ok(&priv->mii);
843}
844
845static const struct ethtool_ops ftmac100_ethtool_ops = {
846 .set_settings = ftmac100_set_settings,
847 .get_settings = ftmac100_get_settings,
848 .get_drvinfo = ftmac100_get_drvinfo,
849 .nway_reset = ftmac100_nway_reset,
850 .get_link = ftmac100_get_link,
851};
852
853/******************************************************************************
854 * interrupt handler
855 *****************************************************************************/
856static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
857{
858 struct net_device *netdev = dev_id;
859 struct ftmac100 *priv = netdev_priv(netdev);
860
861 if (likely(netif_running(netdev))) {
862 /* Disable interrupts for polling */
863 ftmac100_disable_all_int(priv);
864 napi_schedule(&priv->napi);
865 }
866
867 return IRQ_HANDLED;
868}
869
870/******************************************************************************
871 * struct napi_struct functions
872 *****************************************************************************/
873static int ftmac100_poll(struct napi_struct *napi, int budget)
874{
875 struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
876 struct net_device *netdev = priv->netdev;
877 unsigned int status;
878 bool completed = true;
879 int rx = 0;
880
881 status = ioread32(priv->base + FTMAC100_OFFSET_ISR);
882
883 if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
884 /*
885 * FTMAC100_INT_RPKT_FINISH:
886 * RX DMA has received packets into RX buffer successfully
887 *
888 * FTMAC100_INT_NORXBUF:
889 * RX buffer unavailable
890 */
891 bool retry;
892
893 do {
894 retry = ftmac100_rx_packet(priv, &rx);
895 } while (retry && rx < budget);
896
897 if (retry && rx == budget)
898 completed = false;
899 }
900
901 if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
902 /*
903 * FTMAC100_INT_XPKT_OK:
904 * packet transmitted to ethernet successfully
905 *
906 * FTMAC100_INT_XPKT_LOST:
907 * packet transmitted to ethernet lost due to late
908 * collision or excessive collision
909 */
910 ftmac100_tx_complete(priv);
911 }
912
913 if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
914 FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
915 if (net_ratelimit())
916 netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
917 status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
918 status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
919 status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
920 status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
921
922 if (status & FTMAC100_INT_NORXBUF) {
923 /* RX buffer unavailable */
924 netdev->stats.rx_over_errors++;
925 }
926
927 if (status & FTMAC100_INT_RPKT_LOST) {
928 /* received packet lost due to RX FIFO full */
929 netdev->stats.rx_fifo_errors++;
930 }
931
932 if (status & FTMAC100_INT_PHYSTS_CHG) {
933 /* PHY link status change */
934 mii_check_link(&priv->mii);
935 }
936 }
937
938 if (completed) {
939 /* stop polling */
940 napi_complete(napi);
941 ftmac100_enable_all_int(priv);
942 }
943
944 return rx;
945}
946
947/******************************************************************************
948 * struct net_device_ops functions
949 *****************************************************************************/
950static int ftmac100_open(struct net_device *netdev)
951{
952 struct ftmac100 *priv = netdev_priv(netdev);
953 int err;
954
955 err = ftmac100_alloc_buffers(priv);
956 if (err) {
957 netdev_err(netdev, "failed to allocate buffers\n");
958 goto err_alloc;
959 }
960
961 err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
962 if (err) {
963 netdev_err(netdev, "failed to request irq %d\n", priv->irq);
964 goto err_irq;
965 }
966
967 priv->rx_pointer = 0;
968 priv->tx_clean_pointer = 0;
969 priv->tx_pointer = 0;
970 priv->tx_pending = 0;
971
972 err = ftmac100_start_hw(priv);
973 if (err)
974 goto err_hw;
975
976 napi_enable(&priv->napi);
977 netif_start_queue(netdev);
978
979 ftmac100_enable_all_int(priv);
980
981 return 0;
982
983err_hw:
984 free_irq(priv->irq, netdev);
985err_irq:
986 ftmac100_free_buffers(priv);
987err_alloc:
988 return err;
989}
990
991static int ftmac100_stop(struct net_device *netdev)
992{
993 struct ftmac100 *priv = netdev_priv(netdev);
994
995 ftmac100_disable_all_int(priv);
996 netif_stop_queue(netdev);
997 napi_disable(&priv->napi);
998 ftmac100_stop_hw(priv);
999 free_irq(priv->irq, netdev);
1000 ftmac100_free_buffers(priv);
1001
1002 return 0;
1003}
1004
1005static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1006{
1007 struct ftmac100 *priv = netdev_priv(netdev);
1008 dma_addr_t map;
1009
1010 if (unlikely(skb->len > MAX_PKT_SIZE)) {
1011 if (net_ratelimit())
1012 netdev_dbg(netdev, "tx packet too big\n");
1013
1014 netdev->stats.tx_dropped++;
1015 dev_kfree_skb(skb);
1016 return NETDEV_TX_OK;
1017 }
1018
1019 map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
1020 if (unlikely(dma_mapping_error(priv->dev, map))) {
1021 /* drop packet */
1022 if (net_ratelimit())
1023 netdev_err(netdev, "map socket buffer failed\n");
1024
1025 netdev->stats.tx_dropped++;
1026 dev_kfree_skb(skb);
1027 return NETDEV_TX_OK;
1028 }
1029
1030 return ftmac100_xmit(priv, skb, map);
1031}
1032
1033/* optional */
1034static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1035{
1036 struct ftmac100 *priv = netdev_priv(netdev);
1037 struct mii_ioctl_data *data = if_mii(ifr);
1038
1039 return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
1040}
1041
1042static const struct net_device_ops ftmac100_netdev_ops = {
1043 .ndo_open = ftmac100_open,
1044 .ndo_stop = ftmac100_stop,
1045 .ndo_start_xmit = ftmac100_hard_start_xmit,
1046 .ndo_set_mac_address = eth_mac_addr,
1047 .ndo_validate_addr = eth_validate_addr,
1048 .ndo_do_ioctl = ftmac100_do_ioctl,
1049};
1050
1051/******************************************************************************
1052 * struct platform_driver functions
1053 *****************************************************************************/
1054static int ftmac100_probe(struct platform_device *pdev)
1055{
1056 struct resource *res;
1057 int irq;
1058 struct net_device *netdev;
1059 struct ftmac100 *priv;
1060 int err;
1061
1062 if (!pdev)
1063 return -ENODEV;
1064
1065 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1066 if (!res)
1067 return -ENXIO;
1068
1069 irq = platform_get_irq(pdev, 0);
1070 if (irq < 0)
1071 return irq;
1072
1073 /* setup net_device */
1074 netdev = alloc_etherdev(sizeof(*priv));
1075 if (!netdev) {
1076 err = -ENOMEM;
1077 goto err_alloc_etherdev;
1078 }
1079
1080 SET_NETDEV_DEV(netdev, &pdev->dev);
1081 SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
1082 netdev->netdev_ops = &ftmac100_netdev_ops;
1083
1084 platform_set_drvdata(pdev, netdev);
1085
1086 /* setup private data */
1087 priv = netdev_priv(netdev);
1088 priv->netdev = netdev;
1089 priv->dev = &pdev->dev;
1090
1091 spin_lock_init(&priv->tx_lock);
1092
1093 /* initialize NAPI */
1094 netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
1095
1096 /* map io memory */
1097 priv->res = request_mem_region(res->start, resource_size(res),
1098 dev_name(&pdev->dev));
1099 if (!priv->res) {
1100 dev_err(&pdev->dev, "Could not reserve memory region\n");
1101 err = -ENOMEM;
1102 goto err_req_mem;
1103 }
1104
1105 priv->base = ioremap(res->start, resource_size(res));
1106 if (!priv->base) {
1107 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1108 err = -EIO;
1109 goto err_ioremap;
1110 }
1111
1112 priv->irq = irq;
1113
1114 /* initialize struct mii_if_info */
1115 priv->mii.phy_id = 0;
1116 priv->mii.phy_id_mask = 0x1f;
1117 priv->mii.reg_num_mask = 0x1f;
1118 priv->mii.dev = netdev;
1119 priv->mii.mdio_read = ftmac100_mdio_read;
1120 priv->mii.mdio_write = ftmac100_mdio_write;
1121
1122 /* register network device */
1123 err = register_netdev(netdev);
1124 if (err) {
1125 dev_err(&pdev->dev, "Failed to register netdev\n");
1126 goto err_register_netdev;
1127 }
1128
1129 netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
1130
1131 if (!is_valid_ether_addr(netdev->dev_addr)) {
1132 random_ether_addr(netdev->dev_addr);
1133 netdev_info(netdev, "generated random MAC address %pM\n",
1134 netdev->dev_addr);
1135 }
1136
1137 return 0;
1138
1139err_register_netdev:
1140 iounmap(priv->base);
1141err_ioremap:
1142 release_resource(priv->res);
1143err_req_mem:
1144 netif_napi_del(&priv->napi);
1145 platform_set_drvdata(pdev, NULL);
1146 free_netdev(netdev);
1147err_alloc_etherdev:
1148 return err;
1149}
1150
1151static int __exit ftmac100_remove(struct platform_device *pdev)
1152{
1153 struct net_device *netdev;
1154 struct ftmac100 *priv;
1155
1156 netdev = platform_get_drvdata(pdev);
1157 priv = netdev_priv(netdev);
1158
1159 unregister_netdev(netdev);
1160
1161 iounmap(priv->base);
1162 release_resource(priv->res);
1163
1164 netif_napi_del(&priv->napi);
1165 platform_set_drvdata(pdev, NULL);
1166 free_netdev(netdev);
1167 return 0;
1168}
1169
1170static struct platform_driver ftmac100_driver = {
1171 .probe = ftmac100_probe,
1172 .remove = __exit_p(ftmac100_remove),
1173 .driver = {
1174 .name = DRV_NAME,
1175 .owner = THIS_MODULE,
1176 },
1177};
1178
1179/******************************************************************************
1180 * initialization / finalization
1181 *****************************************************************************/
1182static int __init ftmac100_init(void)
1183{
1184 pr_info("Loading version " DRV_VERSION " ...\n");
1185 return platform_driver_register(&ftmac100_driver);
1186}
1187
1188static void __exit ftmac100_exit(void)
1189{
1190 platform_driver_unregister(&ftmac100_driver);
1191}
1192
1193module_init(ftmac100_init);
1194module_exit(ftmac100_exit);
1195
1196MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
1197MODULE_DESCRIPTION("FTMAC100 driver");
1198MODULE_LICENSE("GPL");
diff --git a/drivers/net/ftmac100.h b/drivers/net/ftmac100.h
new file mode 100644
index 00000000000..46a0c47b1ee
--- /dev/null
+++ b/drivers/net/ftmac100.h
@@ -0,0 +1,180 @@
1/*
2 * Faraday FTMAC100 10/100 Ethernet
3 *
4 * (C) Copyright 2009-2011 Faraday Technology
5 * Po-Yu Chuang <ratbert@faraday-tech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef __FTMAC100_H
23#define __FTMAC100_H
24
25#define FTMAC100_OFFSET_ISR 0x00
26#define FTMAC100_OFFSET_IMR 0x04
27#define FTMAC100_OFFSET_MAC_MADR 0x08
28#define FTMAC100_OFFSET_MAC_LADR 0x0c
29#define FTMAC100_OFFSET_MAHT0 0x10
30#define FTMAC100_OFFSET_MAHT1 0x14
31#define FTMAC100_OFFSET_TXPD 0x18
32#define FTMAC100_OFFSET_RXPD 0x1c
33#define FTMAC100_OFFSET_TXR_BADR 0x20
34#define FTMAC100_OFFSET_RXR_BADR 0x24
35#define FTMAC100_OFFSET_ITC 0x28
36#define FTMAC100_OFFSET_APTC 0x2c
37#define FTMAC100_OFFSET_DBLAC 0x30
38#define FTMAC100_OFFSET_MACCR 0x88
39#define FTMAC100_OFFSET_MACSR 0x8c
40#define FTMAC100_OFFSET_PHYCR 0x90
41#define FTMAC100_OFFSET_PHYWDATA 0x94
42#define FTMAC100_OFFSET_FCR 0x98
43#define FTMAC100_OFFSET_BPR 0x9c
44#define FTMAC100_OFFSET_TS 0xc4
45#define FTMAC100_OFFSET_DMAFIFOS 0xc8
46#define FTMAC100_OFFSET_TM 0xcc
47#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4
48#define FTMAC100_OFFSET_RPF_AEP 0xd8
49#define FTMAC100_OFFSET_XM_PG 0xdc
50#define FTMAC100_OFFSET_RUNT_TLCC 0xe0
51#define FTMAC100_OFFSET_CRCER_FTL 0xe4
52#define FTMAC100_OFFSET_RLC_RCC 0xe8
53#define FTMAC100_OFFSET_BROC 0xec
54#define FTMAC100_OFFSET_MULCA 0xf0
55#define FTMAC100_OFFSET_RP 0xf4
56#define FTMAC100_OFFSET_XP 0xf8
57
58/*
59 * Interrupt status register & interrupt mask register
60 */
61#define FTMAC100_INT_RPKT_FINISH (1 << 0)
62#define FTMAC100_INT_NORXBUF (1 << 1)
63#define FTMAC100_INT_XPKT_FINISH (1 << 2)
64#define FTMAC100_INT_NOTXBUF (1 << 3)
65#define FTMAC100_INT_XPKT_OK (1 << 4)
66#define FTMAC100_INT_XPKT_LOST (1 << 5)
67#define FTMAC100_INT_RPKT_SAV (1 << 6)
68#define FTMAC100_INT_RPKT_LOST (1 << 7)
69#define FTMAC100_INT_AHB_ERR (1 << 8)
70#define FTMAC100_INT_PHYSTS_CHG (1 << 9)
71
72/*
73 * Interrupt timer control register
74 */
75#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
76#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
77#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7)
78#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
79#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
80#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15)
81
82/*
83 * Automatic polling timer control register
84 */
85#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
86#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
87#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
88#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
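
Note: the ITC and APTC macros above pack count/threshold fields into their registers. A hedged example of programming them (the values are illustrative, not taken from this driver):

        /* Sketch: raise the RX/TX interrupt only after a threshold of
         * packets, and let the MAC poll its descriptor rings on its own. */
        iowrite32(FTMAC100_ITC_RXINT_THR(1) | FTMAC100_ITC_TXINT_THR(1),
                  priv->base + FTMAC100_OFFSET_ITC);
        iowrite32(FTMAC100_APTC_RXPOLL_CNT(1) | FTMAC100_APTC_TXPOLL_CNT(1),
                  priv->base + FTMAC100_OFFSET_APTC);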
89
90/*
91 * DMA burst length and arbitration control register
92 */
93#define FTMAC100_DBLAC_INCR4_EN (1 << 0)
94#define FTMAC100_DBLAC_INCR8_EN (1 << 1)
95#define FTMAC100_DBLAC_INCR16_EN (1 << 2)
96#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3)
97#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6)
98#define FTMAC100_DBLAC_RX_THR_EN (1 << 9)
99
100/*
101 * MAC control register
102 */
103#define FTMAC100_MACCR_XDMA_EN (1 << 0)
104#define FTMAC100_MACCR_RDMA_EN (1 << 1)
105#define FTMAC100_MACCR_SW_RST (1 << 2)
106#define FTMAC100_MACCR_LOOP_EN (1 << 3)
107#define FTMAC100_MACCR_CRC_DIS (1 << 4)
108#define FTMAC100_MACCR_XMT_EN (1 << 5)
109#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6)
110#define FTMAC100_MACCR_RCV_EN (1 << 8)
111#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9)
112#define FTMAC100_MACCR_RX_RUNT (1 << 10)
113#define FTMAC100_MACCR_RX_FTL (1 << 11)
114#define FTMAC100_MACCR_RCV_ALL (1 << 12)
115#define FTMAC100_MACCR_CRC_APD (1 << 14)
116#define FTMAC100_MACCR_FULLDUP (1 << 15)
117#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16)
118#define FTMAC100_MACCR_RX_BROADPKT (1 << 17)
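
Note: ftmac100_start_hw() is outside this hunk; purely as an illustration of how the MACCR bits compose, a plausible (not authoritative) run configuration would be:

        /* Sketch: enable both DMA engines and the MAC datapath, and append
         * the CRC on transmit. Whether FULLDUP is set would follow the MII
         * link state rather than being hard-coded like this. */
        iowrite32(FTMAC100_MACCR_XDMA_EN |
                  FTMAC100_MACCR_RDMA_EN |
                  FTMAC100_MACCR_XMT_EN |
                  FTMAC100_MACCR_RCV_EN |
                  FTMAC100_MACCR_CRC_APD |
                  FTMAC100_MACCR_FULLDUP,
                  priv->base + FTMAC100_OFFSET_MACCR);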
119
120/*
121 * PHY control register
122 */
123#define FTMAC100_PHYCR_MIIRDATA 0xffff
124#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
125#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
126#define FTMAC100_PHYCR_MIIRD (1 << 26)
127#define FTMAC100_PHYCR_MIIWR (1 << 27)
128
129/*
130 * PHY write data register
131 */
132#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff)
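
Note: these PHYCR/PHYWDATA fields imply the MDIO access pattern behind the mdio_read/mdio_write hooks wired up in ftmac100_probe(). A sketch consistent with the bit layout (the poll count and delay are assumptions, not values from the driver):

        static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
        {
                struct ftmac100 *priv = netdev_priv(netdev);
                unsigned int phycr;
                int i;

                /* request a read: PHY address, register address, MIIRD strobe */
                phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
                        FTMAC100_PHYCR_REGAD(reg) |
                        FTMAC100_PHYCR_MIIRD;
                iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);

                /* the hardware clears MIIRD once the 16 data bits are valid */
                for (i = 0; i < 10; i++) {
                        phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
                        if (!(phycr & FTMAC100_PHYCR_MIIRD))
                                return phycr & FTMAC100_PHYCR_MIIRDATA;
                        usleep_range(100, 200);
                }
                return 0xffff;  /* PHY did not respond */
        }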
133
134/*
135 * Transmit descriptor, aligned to 16 bytes
136 */
137struct ftmac100_txdes {
138 unsigned int txdes0;
139 unsigned int txdes1;
140 unsigned int txdes2; /* TXBUF_BADR */
141 unsigned int txdes3; /* not used by HW */
142} __attribute__ ((aligned(16)));
143
144#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0)
145#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1)
146#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31)
147
148#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff)
149#define FTMAC100_TXDES1_LTS (1 << 27)
150#define FTMAC100_TXDES1_FTS (1 << 28)
151#define FTMAC100_TXDES1_TX2FIC (1 << 29)
152#define FTMAC100_TXDES1_TXIC (1 << 30)
153#define FTMAC100_TXDES1_EDOTR (1 << 31)
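
Note: a sketch of how a transmit descriptor would be handed to the DMA engine with the bits above ('txdes', 'map' and 'len' are hypothetical locals). Ownership must be transferred last so the engine never sees a half-initialized descriptor:

        txdes->txdes2 = map;                    /* TXBUF_BADR: DMA address */
        txdes->txdes1 &= FTMAC100_TXDES1_EDOTR; /* preserve end-of-ring bit */
        txdes->txdes1 |= FTMAC100_TXDES1_TXBUF_SIZE(len) |
                         FTMAC100_TXDES1_FTS |  /* first segment of frame */
                         FTMAC100_TXDES1_LTS |  /* last segment of frame */
                         FTMAC100_TXDES1_TXIC;  /* interrupt on completion */
        wmb();          /* descriptor fields visible before ownership flip */
        txdes->txdes0 = FTMAC100_TXDES0_TXDMA_OWN;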
154
155/*
156 * Receive descriptor, aligned to 16 bytes
157 */
158struct ftmac100_rxdes {
159 unsigned int rxdes0;
160 unsigned int rxdes1;
161 unsigned int rxdes2; /* RXBUF_BADR */
162 unsigned int rxdes3; /* not used by HW */
163} __attribute__ ((aligned(16)));
164
165#define FTMAC100_RXDES0_RFL 0x7ff
166#define FTMAC100_RXDES0_MULTICAST (1 << 16)
167#define FTMAC100_RXDES0_BROADCAST (1 << 17)
168#define FTMAC100_RXDES0_RX_ERR (1 << 18)
169#define FTMAC100_RXDES0_CRC_ERR (1 << 19)
170#define FTMAC100_RXDES0_FTL (1 << 20)
171#define FTMAC100_RXDES0_RUNT (1 << 21)
172#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22)
173#define FTMAC100_RXDES0_LRS (1 << 28)
174#define FTMAC100_RXDES0_FRS (1 << 29)
175#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31)
176
177#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff)
178#define FTMAC100_RXDES1_EDORR (1 << 31)
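
Note: the receive side mirrors this ownership protocol; a sketch of the per-descriptor check in the RX path ('rxdes' is again a hypothetical local):

        if (!(rxdes->rxdes0 & FTMAC100_RXDES0_RXDMA_OWN)) {
                /* CPU owns the descriptor: a frame (or an error) is ready */
                unsigned int len = rxdes->rxdes0 & FTMAC100_RXDES0_RFL;
                bool error = rxdes->rxdes0 & (FTMAC100_RXDES0_RX_ERR |
                                              FTMAC100_RXDES0_CRC_ERR |
                                              FTMAC100_RXDES0_FTL |
                                              FTMAC100_RXDES0_RUNT);

                /* ... pass good frames of 'len' bytes up the stack ... */

                rxdes->rxdes0 = FTMAC100_RXDES0_RXDMA_OWN; /* back to DMA */
        }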
179
180#endif /* __FTMAC100_H */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 119aa2000c2..2a0ad9a501b 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -123,8 +123,7 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 static void adjust_link(struct net_device *dev);
 static void init_registers(struct net_device *dev);
 static int init_phy(struct net_device *dev);
-static int gfar_probe(struct platform_device *ofdev,
-		      const struct of_device_id *match);
+static int gfar_probe(struct platform_device *ofdev);
 static int gfar_remove(struct platform_device *ofdev);
 static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
@@ -950,6 +949,11 @@ static void gfar_detect_errata(struct gfar_private *priv)
 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 		priv->errata |= GFAR_ERRATA_A002;
 
+	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
+	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
+	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+		priv->errata |= GFAR_ERRATA_12;
+
 	if (priv->errata)
 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
 			 priv->errata);
@@ -957,8 +961,7 @@ static void gfar_detect_errata(struct gfar_private *priv)
 
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
-static int gfar_probe(struct platform_device *ofdev,
-		      const struct of_device_id *match)
+static int gfar_probe(struct platform_device *ofdev)
 {
 	u32 tempval;
 	struct net_device *dev = NULL;
@@ -1920,7 +1923,7 @@ int startup_gfar(struct net_device *ndev)
 		if (err) {
 			for (j = 0; j < i; j++)
 				free_grp_irqs(&priv->gfargrp[j]);
-				goto irq_fail;
+			goto irq_fail;
 		}
 	}
 
@@ -2156,8 +2159,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Set up checksumming */
 	if (CHECKSUM_PARTIAL == skb->ip_summed) {
 		fcb = gfar_add_fcb(skb);
-		lstatus |= BD_LFLAG(TXBD_TOE);
-		gfar_tx_checksum(skb, fcb);
+		/* as specified by errata */
+		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
+			     && ((unsigned long)fcb % 0x20) > 0x18)) {
+			__skb_pull(skb, GMAC_FCB_LEN);
+			skb_checksum_help(skb);
+		} else {
+			lstatus |= BD_LFLAG(TXBD_TOE);
+			gfar_tx_checksum(skb, fcb);
+		}
 	}
 
 	if (vlan_tx_tag_present(skb)) {
@@ -3256,7 +3266,7 @@ static struct of_device_id gfar_match[] =
 MODULE_DEVICE_TABLE(of, gfar_match);
 
 /* Structure for a device driver */
-static struct of_platform_driver gfar_driver = {
+static struct platform_driver gfar_driver = {
 	.driver = {
 		.name = "fsl-gianfar",
 		.owner = THIS_MODULE,
@@ -3269,12 +3279,12 @@ static struct of_platform_driver gfar_driver = {
 
 static int __init gfar_init(void)
 {
-	return of_register_platform_driver(&gfar_driver);
+	return platform_driver_register(&gfar_driver);
 }
 
 static void __exit gfar_exit(void)
 {
-	of_unregister_platform_driver(&gfar_driver);
+	platform_driver_unregister(&gfar_driver);
 }
 
 module_init(gfar_init);
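
Note on the gfar_start_xmit hunk above: erratum 12 (eTSEC49) is handled by falling back to software checksumming when the frame control block would sit badly within a 32-byte window. A worked instance of the predicate (addresses illustrative; GMAC_FCB_LEN is 8 in gianfar.h):

        /* fcb at 0x1019: 0x1019 % 0x20 == 0x19, and 0x19 > 0x18, so the
         * 8-byte FCB would cross the 32-byte boundary -> pull it off and
         * use skb_checksum_help() instead of the TOE path. */
        unsigned long fcb_addr = 0x1019;
        bool use_sw_csum = (fcb_addr % 0x20) > 0x18;    /* true */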
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 54de4135e93..ec5d595ce2e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1039,6 +1039,7 @@ enum gfar_errata {
 	GFAR_ERRATA_74		= 0x01,
 	GFAR_ERRATA_76		= 0x02,
 	GFAR_ERRATA_A002	= 0x04,
+	GFAR_ERRATA_12		= 0x08, /* a.k.a errata eTSEC49 */
 };
 
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index fdb0333f5cb..396ff7d785d 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1411,7 +1411,7 @@ error:
 }
 
 /* Initialize the GRETH MAC */
-static int __devinit greth_of_probe(struct platform_device *ofdev, const struct of_device_id *match)
+static int __devinit greth_of_probe(struct platform_device *ofdev)
 {
 	struct net_device *dev;
 	struct greth_private *greth;
@@ -1646,7 +1646,7 @@ static struct of_device_id greth_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, greth_of_match);
 
-static struct of_platform_driver greth_of_driver = {
+static struct platform_driver greth_of_driver = {
 	.driver = {
 		.name = "grlib-greth",
 		.owner = THIS_MODULE,
@@ -1658,12 +1658,12 @@ static struct of_platform_driver greth_of_driver = {
 
 static int __init greth_init(void)
 {
-	return of_register_platform_driver(&greth_of_driver);
+	return platform_driver_register(&greth_of_driver);
 }
 
 static void __exit greth_cleanup(void)
 {
-	of_unregister_platform_driver(&greth_of_driver);
+	platform_driver_unregister(&greth_of_driver);
 }
 
 module_init(greth_init);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323c5eb..8931168d3e7 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
 static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct list_head *p;
+	struct bpqdev *bpqdev = v;
 
 	++*pos;
 
 	if (v == SEQ_START_TOKEN)
-		p = rcu_dereference(bpq_devices.next);
+		p = rcu_dereference(list_next_rcu(&bpq_devices));
 	else
-		p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+		p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
 
 	return (p == &bpq_devices) ? NULL
 		: list_entry(p, struct bpqdev, bpq_list);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 6d9275c52e0..3bb990b6651 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2719,8 +2719,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
 	.ndo_change_mtu		= emac_change_mtu,
 };
 
-static int __devinit emac_probe(struct platform_device *ofdev,
-				const struct of_device_id *match)
+static int __devinit emac_probe(struct platform_device *ofdev)
 {
 	struct net_device *ndev;
 	struct emac_instance *dev;
@@ -2994,7 +2993,7 @@ static struct of_device_id emac_match[] =
 };
 MODULE_DEVICE_TABLE(of, emac_match);
 
-static struct of_platform_driver emac_driver = {
+static struct platform_driver emac_driver = {
 	.driver = {
 		.name = "emac",
 		.owner = THIS_MODULE,
@@ -3069,7 +3068,7 @@ static int __init emac_init(void)
 	rc = tah_init();
 	if (rc)
 		goto err_rgmii;
-	rc = of_register_platform_driver(&emac_driver);
+	rc = platform_driver_register(&emac_driver);
 	if (rc)
 		goto err_tah;
 
@@ -3091,7 +3090,7 @@ static void __exit emac_exit(void)
 {
 	int i;
 
-	of_unregister_platform_driver(&emac_driver);
+	platform_driver_unregister(&emac_driver);
 
 	tah_exit();
 	rgmii_exit();
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index d5717e2123e..d268f404b7b 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -517,8 +517,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
 	return regs + 1;
 }
 
-static int __devinit mal_probe(struct platform_device *ofdev,
-			       const struct of_device_id *match)
+static int __devinit mal_probe(struct platform_device *ofdev)
 {
 	struct mal_instance *mal;
 	int err = 0, i, bd_size;
@@ -789,7 +788,7 @@ static struct of_device_id mal_platform_match[] =
 	{},
 };
 
-static struct of_platform_driver mal_of_driver = {
+static struct platform_driver mal_of_driver = {
 	.driver = {
 		.name = "mcmal",
 		.owner = THIS_MODULE,
@@ -801,10 +800,10 @@ static struct of_platform_driver mal_of_driver = {
 
 int __init mal_init(void)
 {
-	return of_register_platform_driver(&mal_of_driver);
+	return platform_driver_register(&mal_of_driver);
 }
 
 void mal_exit(void)
 {
-	of_unregister_platform_driver(&mal_of_driver);
+	platform_driver_unregister(&mal_of_driver);
 }
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index dd61798897a..4fa53f3def6 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -228,8 +228,7 @@ void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
 }
 
 
-static int __devinit rgmii_probe(struct platform_device *ofdev,
-				 const struct of_device_id *match)
+static int __devinit rgmii_probe(struct platform_device *ofdev)
 {
 	struct device_node *np = ofdev->dev.of_node;
 	struct rgmii_instance *dev;
@@ -318,7 +317,7 @@ static struct of_device_id rgmii_match[] =
 	{},
 };
 
-static struct of_platform_driver rgmii_driver = {
+static struct platform_driver rgmii_driver = {
 	.driver = {
 		.name = "emac-rgmii",
 		.owner = THIS_MODULE,
@@ -330,10 +329,10 @@ static struct of_platform_driver rgmii_driver = {
 
 int __init rgmii_init(void)
 {
-	return of_register_platform_driver(&rgmii_driver);
+	return platform_driver_register(&rgmii_driver);
 }
 
 void rgmii_exit(void)
 {
-	of_unregister_platform_driver(&rgmii_driver);
+	platform_driver_unregister(&rgmii_driver);
 }
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 299aa49490c..8ead6a96aba 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -87,8 +87,7 @@ void *tah_dump_regs(struct platform_device *ofdev, void *buf)
 	return regs + 1;
 }
 
-static int __devinit tah_probe(struct platform_device *ofdev,
-			       const struct of_device_id *match)
+static int __devinit tah_probe(struct platform_device *ofdev)
 {
 	struct device_node *np = ofdev->dev.of_node;
 	struct tah_instance *dev;
@@ -165,7 +164,7 @@ static struct of_device_id tah_match[] =
 	{},
 };
 
-static struct of_platform_driver tah_driver = {
+static struct platform_driver tah_driver = {
 	.driver = {
 		.name = "emac-tah",
 		.owner = THIS_MODULE,
@@ -177,10 +176,10 @@ static struct of_platform_driver tah_driver = {
 
 int __init tah_init(void)
 {
-	return of_register_platform_driver(&tah_driver);
+	return platform_driver_register(&tah_driver);
 }
 
 void tah_exit(void)
 {
-	of_unregister_platform_driver(&tah_driver);
+	platform_driver_unregister(&tah_driver);
 }
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 34ed6ee8ca8..97449e786d6 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -231,8 +231,7 @@ void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
 	return regs + 1;
 }
 
-static int __devinit zmii_probe(struct platform_device *ofdev,
-				const struct of_device_id *match)
+static int __devinit zmii_probe(struct platform_device *ofdev)
 {
 	struct device_node *np = ofdev->dev.of_node;
 	struct zmii_instance *dev;
@@ -312,7 +311,7 @@ static struct of_device_id zmii_match[] =
 	{},
 };
 
-static struct of_platform_driver zmii_driver = {
+static struct platform_driver zmii_driver = {
 	.driver = {
 		.name = "emac-zmii",
 		.owner = THIS_MODULE,
@@ -324,10 +323,10 @@ static struct of_platform_driver zmii_driver = {
 
 int __init zmii_init(void)
 {
-	return of_register_platform_driver(&zmii_driver);
+	return platform_driver_register(&zmii_driver);
 }
 
 void zmii_exit(void)
 {
-	of_unregister_platform_driver(&zmii_driver);
+	platform_driver_unregister(&zmii_driver);
 }
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0a2368fa6bc..6b256c275e1 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -64,7 +64,14 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *);
 static s32 igb_read_mac_addr_82575(struct e1000_hw *);
 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
-
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+					       u16 offset);
+static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+						 u16 offset);
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
 static const u16 e1000_82580_rxpbs_table[] =
 	{ 36, 72, 144, 1, 2, 4, 8, 16,
 	  35, 70, 140 };
@@ -129,6 +136,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		break;
 	case E1000_DEV_ID_82580_COPPER:
 	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_QUAD_FIBER:
 	case E1000_DEV_ID_82580_SERDES:
 	case E1000_DEV_ID_82580_SGMII:
 	case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -194,7 +202,11 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		mac->arc_subsystem_valid =
 			(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
 				? true : false;
-
+	/* enable EEE on i350 parts */
+	if (mac->type == e1000_i350)
+		dev_spec->eee_disable = false;
+	else
+		dev_spec->eee_disable = true;
 	/* physical interface link setup */
 	mac->ops.setup_physical_interface =
 		(hw->phy.media_type == e1000_media_type_copper)
@@ -232,14 +244,42 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 	 */
 	size += NVM_WORD_SIZE_BASE_SHIFT;
 
-	/* EEPROM access above 16k is unsupported */
-	if (size > 14)
-		size = 14;
 	nvm->word_size = 1 << size;
+	if (nvm->word_size == (1 << 15))
+		nvm->page_size = 128;
 
-	/* if 82576 then initialize mailbox parameters */
-	if (mac->type == e1000_82576)
+	/* NVM Function Pointers */
+	nvm->ops.acquire = igb_acquire_nvm_82575;
+	if (nvm->word_size < (1 << 15))
+		nvm->ops.read = igb_read_nvm_eerd;
+	else
+		nvm->ops.read = igb_read_nvm_spi;
+
+	nvm->ops.release = igb_release_nvm_82575;
+	switch (hw->mac.type) {
+	case e1000_82580:
+		nvm->ops.validate = igb_validate_nvm_checksum_82580;
+		nvm->ops.update = igb_update_nvm_checksum_82580;
+		break;
+	case e1000_i350:
+		nvm->ops.validate = igb_validate_nvm_checksum_i350;
+		nvm->ops.update = igb_update_nvm_checksum_i350;
+		break;
+	default:
+		nvm->ops.validate = igb_validate_nvm_checksum;
+		nvm->ops.update = igb_update_nvm_checksum;
+	}
+	nvm->ops.write = igb_write_nvm_spi;
+
+	/* if part supports SR-IOV then initialize mailbox parameters */
+	switch (mac->type) {
+	case e1000_82576:
+	case e1000_i350:
 		igb_init_mbx_params_pf(hw);
+		break;
+	default:
+		break;
+	}
 
 	/* setup PHY parameters */
 	if (phy->media_type != e1000_media_type_copper) {
@@ -1747,6 +1787,248 @@ u16 igb_rxpbs_adjust_82580(u32 data)
 	return ret_val;
 }
 
1790/**
1791 * igb_validate_nvm_checksum_with_offset - Validate EEPROM
1792 * checksum
1793 * @hw: pointer to the HW structure
1794 * @offset: offset in words of the checksum protected region
1795 *
1796 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
1797 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
1798 **/
1799s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1800{
1801 s32 ret_val = 0;
1802 u16 checksum = 0;
1803 u16 i, nvm_data;
1804
1805 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
1806 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1807 if (ret_val) {
1808 hw_dbg("NVM Read Error\n");
1809 goto out;
1810 }
1811 checksum += nvm_data;
1812 }
1813
1814 if (checksum != (u16) NVM_SUM) {
1815 hw_dbg("NVM Checksum Invalid\n");
1816 ret_val = -E1000_ERR_NVM;
1817 goto out;
1818 }
1819
1820out:
1821 return ret_val;
1822}
1823
1824/**
1825 * igb_update_nvm_checksum_with_offset - Update EEPROM
1826 * checksum
1827 * @hw: pointer to the HW structure
1828 * @offset: offset in words of the checksum protected region
1829 *
1830 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
1831 * up to the checksum. Then calculates the EEPROM checksum and writes the
1832 * value to the EEPROM.
1833 **/
1834s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1835{
1836 s32 ret_val;
1837 u16 checksum = 0;
1838 u16 i, nvm_data;
1839
1840 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
1841 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1842 if (ret_val) {
1843 hw_dbg("NVM Read Error while updating checksum.\n");
1844 goto out;
1845 }
1846 checksum += nvm_data;
1847 }
1848 checksum = (u16) NVM_SUM - checksum;
1849 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
1850 &checksum);
1851 if (ret_val)
1852 hw_dbg("NVM Write Error while updating checksum.\n");
1853
1854out:
1855 return ret_val;
1856}
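
Note: validate and update form a pair around one invariant, per the kernel-doc above: all words of the protected region, checksum word included, must sum to NVM_SUM (0xBABA) in 16-bit arithmetic. A standalone illustration with made-up payload words:

        u16 words[] = { 0x1234, 0xabcd, 0x0042 };       /* example data */
        u16 sum = 0, checksum;
        int i;

        for (i = 0; i < 3; i++)
                sum += words[i];
        checksum = (u16)0xBABA - sum;   /* what update() writes at the
                                         * NVM_CHECKSUM_REG offset */
        /* validate() recomputes the sum including 'checksum' and requires
         * sum + checksum == 0xBABA (mod 2^16). */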
1857
1858/**
1859 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
1860 * @hw: pointer to the HW structure
1861 *
1862 * Calculates the EEPROM section checksum by reading/adding each word of
1863 * the EEPROM and then verifies that the sum of the EEPROM is
1864 * equal to 0xBABA.
1865 **/
1866static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
1867{
1868 s32 ret_val = 0;
1869 u16 eeprom_regions_count = 1;
1870 u16 j, nvm_data;
1871 u16 nvm_offset;
1872
1873 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
1874 if (ret_val) {
1875 hw_dbg("NVM Read Error\n");
1876 goto out;
1877 }
1878
1879 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
1880		/* if checksums compatibility bit is set, validate checksums
1881 * for all 4 ports. */
1882 eeprom_regions_count = 4;
1883 }
1884
1885 for (j = 0; j < eeprom_regions_count; j++) {
1886 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1887 ret_val = igb_validate_nvm_checksum_with_offset(hw,
1888 nvm_offset);
1889 if (ret_val != 0)
1890 goto out;
1891 }
1892
1893out:
1894 return ret_val;
1895}
1896
1897/**
1898 * igb_update_nvm_checksum_82580 - Update EEPROM checksum
1899 * @hw: pointer to the HW structure
1900 *
1901 * Updates the EEPROM section checksums for all 4 ports by reading/adding
1902 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
1903 * checksum and writes the value to the EEPROM.
1904 **/
1905static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
1906{
1907 s32 ret_val;
1908 u16 j, nvm_data;
1909 u16 nvm_offset;
1910
1911 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
1912 if (ret_val) {
1913 hw_dbg("NVM Read Error while updating checksum"
1914 " compatibility bit.\n");
1915 goto out;
1916 }
1917
1918 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
1919 /* set compatibility bit to validate checksums appropriately */
1920 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
1921 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
1922 &nvm_data);
1923 if (ret_val) {
1924 hw_dbg("NVM Write Error while updating checksum"
1925 " compatibility bit.\n");
1926 goto out;
1927 }
1928 }
1929
1930 for (j = 0; j < 4; j++) {
1931 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1932 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
1933 if (ret_val)
1934 goto out;
1935 }
1936
1937out:
1938 return ret_val;
1939}
1940
1941/**
1942 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
1943 * @hw: pointer to the HW structure
1944 *
1945 * Calculates the EEPROM section checksum by reading/adding each word of
1946 * the EEPROM and then verifies that the sum of the EEPROM is
1947 * equal to 0xBABA.
1948 **/
1949static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
1950{
1951 s32 ret_val = 0;
1952 u16 j;
1953 u16 nvm_offset;
1954
1955 for (j = 0; j < 4; j++) {
1956 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1957 ret_val = igb_validate_nvm_checksum_with_offset(hw,
1958 nvm_offset);
1959 if (ret_val != 0)
1960 goto out;
1961 }
1962
1963out:
1964 return ret_val;
1965}
1966
1967/**
1968 * igb_update_nvm_checksum_i350 - Update EEPROM checksum
1969 * @hw: pointer to the HW structure
1970 *
1971 * Updates the EEPROM section checksums for all 4 ports by reading/adding
1972 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
1973 * checksum and writes the value to the EEPROM.
1974 **/
1975static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
1976{
1977 s32 ret_val = 0;
1978 u16 j;
1979 u16 nvm_offset;
1980
1981 for (j = 0; j < 4; j++) {
1982 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1983 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
1984 if (ret_val != 0)
1985 goto out;
1986 }
1987
1988out:
1989 return ret_val;
1990}
1991/**
1992 * igb_set_eee_i350 - Enable/disable EEE support
1993 * @hw: pointer to the HW structure
1994 *
1995 * Enable/disable EEE based on setting in dev_spec structure.
1996 *
1997 **/
1998s32 igb_set_eee_i350(struct e1000_hw *hw)
1999{
2000 s32 ret_val = 0;
2001 u32 ipcnfg, eeer, ctrl_ext;
2002
2003 ctrl_ext = rd32(E1000_CTRL_EXT);
2004 if ((hw->mac.type != e1000_i350) ||
2005 (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
2006 goto out;
2007 ipcnfg = rd32(E1000_IPCNFG);
2008 eeer = rd32(E1000_EEER);
2009
2010 /* enable or disable per user setting */
2011 if (!(hw->dev_spec._82575.eee_disable)) {
2012 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
2013 E1000_IPCNFG_EEE_100M_AN);
2014 eeer |= (E1000_EEER_TX_LPI_EN |
2015 E1000_EEER_RX_LPI_EN |
2016 E1000_EEER_LPI_FC);
2017
2018 } else {
2019 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2020 E1000_IPCNFG_EEE_100M_AN);
2021 eeer &= ~(E1000_EEER_TX_LPI_EN |
2022 E1000_EEER_RX_LPI_EN |
2023 E1000_EEER_LPI_FC);
2024 }
2025 wr32(E1000_IPCNFG, ipcnfg);
2026 wr32(E1000_EEER, eeer);
2027out:
2028
2029 return ret_val;
2030}
2031
 static struct e1000_mac_operations e1000_mac_ops_82575 = {
 	.init_hw = igb_init_hw_82575,
 	.check_for_link = igb_check_for_link_82575,
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 1d01af2472e..dd6df349899 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -251,5 +251,6 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
 void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
 void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
 u16 igb_rxpbs_adjust_82580(u32 data);
+s32 igb_set_eee_i350(struct e1000_hw *);
 
 #endif
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6319ed902bc..6b80d40110c 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -51,6 +51,7 @@
 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES	0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX	0x00400000
 #define E1000_CTRL_EXT_LINK_MODE_SGMII		0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII		0x00000000
 #define E1000_CTRL_EXT_EIAME			0x01000000
 #define E1000_CTRL_EXT_IRCA			0x00000001
 /* Interrupt delay cancellation */
@@ -110,6 +111,7 @@
 /* Management Control */
 #define E1000_MANC_SMBUS_EN	0x00000001 /* SMBus Enabled - RO */
 #define E1000_MANC_ASF_EN	0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_EN_BMC2OS	0x10000000 /* OSBMC is Enabled or not */
 /* Enable Neighbor Discovery Filtering */
 #define E1000_MANC_RCV_TCO_EN	0x00020000 /* Receive TCO Packets Enabled */
 #define E1000_MANC_BLK_PHY_RST_ON_IDE	0x00040000 /* Block phy resets */
@@ -286,7 +288,34 @@
 #define E1000_TCTL_COLD		0x003ff000 /* collision distance */
 #define E1000_TCTL_RTLC		0x01000000 /* Re-transmit on late collision */
 
-/* Transmit Arbitration Count */
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK		0x00003FFF /* DMA Coalescing
+						    * Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK	0x00FF0000 /* DMA Coalescing Receive
+						    * Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT	16
+#define E1000_DMACR_DMAC_LX_MASK	0x30000000 /* Lx when no PCIe
+						    * transactions */
+#define E1000_DMACR_DMAC_LX_SHIFT	28
+#define E1000_DMACR_DMAC_EN		0x80000000 /* Enable DMA Coalescing */
+
+#define E1000_DMCTXTH_DMCTTHR_MASK	0x00000FFF /* DMA Coalescing Transmit
+						    * Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK		0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK	0x0007FFFF /* Receive Traffic Rate
+						    * Threshold */
+#define E1000_DMCRTRH_LRPRCW		0x80000000 /* Rcv packet rate in
+						    * current window */
+
+#define E1000_DMCCNT_CCOUNT_MASK	0x01FFFFFF /* DMA Coal Rcv Traffic
+						    * Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK	0x0003FFF0 /* Flow ctrl Rcv Threshold
+						    * High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT	4
+#define E1000_PCIEMISC_LX_DECISION	0x00000080 /* Lx power decision */
 
 /* SerDes Control */
 #define E1000_SCTL_DISABLE_SERDES_LOOPBACK	0x0400
@@ -565,6 +594,8 @@
 #define NVM_INIT_CONTROL3_PORT_A	0x0024
 #define NVM_ALT_MAC_ADDR_PTR		0x0037
 #define NVM_CHECKSUM_REG		0x003F
+#define NVM_COMPATIBILITY_REG_3		0x0003
+#define NVM_COMPATIBILITY_BIT_MASK	0x8000
 
 #define E1000_NVM_CFG_DONE_PORT_0	0x040000 /* MNG config cycle done */
 #define E1000_NVM_CFG_DONE_PORT_1	0x080000 /* ...for second port */
@@ -599,6 +630,7 @@
 /* NVM Commands - SPI */
 #define NVM_MAX_RETRY_SPI	5000 /* Max wait of 5ms, for RDY signal */
 #define NVM_WRITE_OPCODE_SPI	0x02 /* NVM write opcode */
+#define NVM_READ_OPCODE_SPI	0x03 /* NVM read opcode */
 #define NVM_A8_OPCODE_SPI	0x08 /* opcode bit-3 = address bit-8 */
 #define NVM_WREN_OPCODE_SPI	0x06 /* NVM set Write Enable latch */
 #define NVM_RDSR_OPCODE_SPI	0x05 /* NVM read Status register */
@@ -757,6 +789,17 @@
 #define E1000_MDIC_ERROR	0x40000000
 #define E1000_MDIC_DEST		0x80000000
 
+/* Thermal Sensor */
+#define E1000_THSTAT_PWR_DOWN		0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE	0x00000002 /* Link Speed Throttle Event */
+
+/* Energy Efficient Ethernet */
+#define E1000_IPCNFG_EEE_1G_AN		0x00000008 /* EEE Enable 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN	0x00000004 /* EEE Enable 100M AN */
+#define E1000_EEER_TX_LPI_EN		0x00010000 /* EEE Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN		0x00020000 /* EEE Rx LPI Enable */
+#define E1000_EEER_LPI_FC		0x00040000 /* EEE Enable on FC */
+
 /* SerDes Control */
 #define E1000_GEN_CTL_READY	0x80000000
 #define E1000_GEN_CTL_ADDRESS_SHIFT	8
@@ -770,4 +813,11 @@
 #define E1000_PCIEMISC_LX_DECISION	0x00000080 /* Lx power decision based
 						      on DMA coal */
 
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA		0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK	0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT	14
+#define E1000_RTTBCNRC_RF_INT_MASK	\
+	(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
 #endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index e2638afb8cd..27153e8d7b1 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,7 @@ struct e1000_hw;
 #define E1000_DEV_ID_82580_SERDES		0x1510
 #define E1000_DEV_ID_82580_SGMII		0x1511
 #define E1000_DEV_ID_82580_COPPER_DUAL		0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER		0x1527
 #define E1000_DEV_ID_DH89XXCC_SGMII		0x0438
 #define E1000_DEV_ID_DH89XXCC_SERDES		0x043A
 #define E1000_DEV_ID_DH89XXCC_BACKPLANE		0x043C
@@ -247,6 +248,10 @@ struct e1000_hw_stats {
 	u64 scvpc;
 	u64 hrmpc;
 	u64 doosync;
+	u64 o2bgptc;
+	u64 o2bspc;
+	u64 b2ospc;
+	u64 b2ogprc;
 };
 
 struct e1000_phy_stats {
@@ -331,6 +336,8 @@ struct e1000_nvm_operations {
 	s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
 	void (*release)(struct e1000_hw *);
 	s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
+	s32  (*update)(struct e1000_hw *);
+	s32  (*validate)(struct e1000_hw *);
 };
 
 struct e1000_info {
@@ -417,7 +424,6 @@ struct e1000_phy_info {
 
 struct e1000_nvm_info {
 	struct e1000_nvm_operations ops;
-
 	enum e1000_nvm_type type;
 	enum e1000_nvm_override override;
 
@@ -483,6 +489,7 @@ struct e1000_mbx_info {
 struct e1000_dev_spec_82575 {
 	bool sgmii_active;
 	bool global_device_reset;
+	bool eee_disable;
 };
 
 struct e1000_hw {
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index c474cdb7004..78d48c7fa85 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
 {
 	struct e1000_mbx_info *mbx = &hw->mbx;
 
-	if (hw->mac.type == e1000_82576) {
-		mbx->timeout = 0;
-		mbx->usec_delay = 0;
-
-		mbx->size = E1000_VFMAILBOX_SIZE;
-
-		mbx->ops.read = igb_read_mbx_pf;
-		mbx->ops.write = igb_write_mbx_pf;
-		mbx->ops.read_posted = igb_read_posted_mbx;
-		mbx->ops.write_posted = igb_write_posted_mbx;
-		mbx->ops.check_for_msg = igb_check_for_msg_pf;
-		mbx->ops.check_for_ack = igb_check_for_ack_pf;
-		mbx->ops.check_for_rst = igb_check_for_rst_pf;
-
-		mbx->stats.msgs_tx = 0;
-		mbx->stats.msgs_rx = 0;
-		mbx->stats.reqs = 0;
-		mbx->stats.acks = 0;
-		mbx->stats.rsts = 0;
-	}
+	mbx->timeout = 0;
+	mbx->usec_delay = 0;
+
+	mbx->size = E1000_VFMAILBOX_SIZE;
+
+	mbx->ops.read = igb_read_mbx_pf;
+	mbx->ops.write = igb_write_mbx_pf;
+	mbx->ops.read_posted = igb_read_posted_mbx;
+	mbx->ops.write_posted = igb_write_posted_mbx;
+	mbx->ops.check_for_msg = igb_check_for_msg_pf;
+	mbx->ops.check_for_ack = igb_check_for_ack_pf;
+	mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
 
 	return 0;
 }
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index 6b5cc2cc453..75bf36a4bae 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -318,6 +318,68 @@ out:
 }
 
 /**
321 * igb_read_nvm_spi - Reads EEPROM using SPI
322 * @hw: pointer to the HW structure
323 * @offset: offset of word in the EEPROM to read
324 * @words: number of words to read
325 * @data: word read from the EEPROM
326 *
327 * Reads a 16 bit word from the EEPROM.
328 **/
329s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
330{
331 struct e1000_nvm_info *nvm = &hw->nvm;
332 u32 i = 0;
333 s32 ret_val;
334 u16 word_in;
335 u8 read_opcode = NVM_READ_OPCODE_SPI;
336
337 /*
338 * A check for invalid values: offset too large, too many words,
339 * and not enough words.
340 */
341 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
342 (words == 0)) {
343 hw_dbg("nvm parameter(s) out of bounds\n");
344 ret_val = -E1000_ERR_NVM;
345 goto out;
346 }
347
348 ret_val = nvm->ops.acquire(hw);
349 if (ret_val)
350 goto out;
351
352 ret_val = igb_ready_nvm_eeprom(hw);
353 if (ret_val)
354 goto release;
355
356 igb_standby_nvm(hw);
357
358 if ((nvm->address_bits == 8) && (offset >= 128))
359 read_opcode |= NVM_A8_OPCODE_SPI;
360
361 /* Send the READ command (opcode + addr) */
362 igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
363 igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
364
365 /*
366 * Read the data. SPI NVMs increment the address with each byte
367 * read and will roll over if reading beyond the end. This allows
368 * us to read the whole NVM from any offset
369 */
370 for (i = 0; i < words; i++) {
371 word_in = igb_shift_in_eec_bits(hw, 16);
372 data[i] = (word_in >> 8) | (word_in << 8);
373 }
374
375release:
376 nvm->ops.release(hw);
377
378out:
379 return ret_val;
380}
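
Note: the swap in the read loop above byte-swaps each 16-bit word clocked in from the SPI EEPROM into host order; for a u16 it computes exactly what swab16() from <linux/swab.h> does:

        data[i] = swab16(word_in);      /* == (word_in >> 8) | (word_in << 8) */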
381
382/**
  * igb_read_nvm_eerd - Reads EEPROM using EERD register
  * @hw: pointer to the HW structure
  * @offset: offset of word in the EEPROM to read
@@ -353,7 +415,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 			break;
 
 		data[i] = (rd32(E1000_EERD) >>
-			E1000_NVM_RW_REG_DATA);
+			   E1000_NVM_RW_REG_DATA);
 	}
 
 out:
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 29c956a84bd..7f43564c4bc 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -35,6 +35,7 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
 s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
 			 u32 part_num_size);
 s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
 s32 igb_update_nvm_checksum(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 8ac83c5190d..958ca3bda48 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -106,6 +106,19 @@
 
 #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
 
+/* DMA Coalescing registers */
+#define E1000_DMACR	0x02508 /* Control Register */
+#define E1000_DMCTXTH	0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX	0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH	0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT	0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC	0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */
+
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL	0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRC	0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+
 /* Split and Replication RX Control - RW */
 #define E1000_RXPBS	0x02404 /* Rx Packet Buffer Size - RW */
 /*
@@ -324,4 +337,18 @@
 
 /* DMA Coalescing registers */
 #define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */
+
+/* Energy Efficient Ethernet "EEE" register */
+#define E1000_IPCNFG	0x0E38 /* Internal PHY Configuration */
+#define E1000_EEER	0x0E30 /* Energy Efficient Ethernet */
+
+/* Thermal Sensor Register */
+#define E1000_THSTAT	0x08110 /* Thermal Sensor Status */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC	0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC	0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC	0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC	0x0415C /* OS2BMC packets transmitted by host */
+
 #endif
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 92a4ef09e55..1c687e298d5 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -77,6 +77,7 @@ struct vf_data_storage {
 	unsigned long last_nack;
 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 	u16 pf_qos;
+	u16 tx_rate;
 };
 
 #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -323,6 +324,7 @@ struct igb_adapter {
 	u16 rx_ring_count;
 	unsigned int vfs_allocated_count;
 	struct vf_data_storage *vf_data;
+	int vf_rate_link_speed;
 	u32 rss_queues;
 	u32 wvbr;
 };
@@ -331,6 +333,12 @@ struct igb_adapter {
 #define IGB_FLAG_DCA_ENABLED	(1 << 1)
 #define IGB_FLAG_QUAD_PORT_A	(1 << 2)
 #define IGB_FLAG_QUEUE_PAIRS	(1 << 3)
+#define IGB_FLAG_DMAC		(1 << 4)
+
+/* DMA Coalescing defines */
+#define IGB_MIN_TXPBSIZE	20408
+#define IGB_TX_BUF_4096		4096
+#define IGB_DMCTLX_DCFLUSH_DIS	0x80000000 /* Disable DMA Coal Flush */
 
 #define IGB_82576_TSYNC_SHIFT	19
 #define IGB_82580_TSYNC_SHIFT	24
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a70e16bcfa7..d976733bbcc 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -86,6 +86,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
86 IGB_STAT("tx_smbus", stats.mgptc), 86 IGB_STAT("tx_smbus", stats.mgptc),
87 IGB_STAT("rx_smbus", stats.mgprc), 87 IGB_STAT("rx_smbus", stats.mgprc),
88 IGB_STAT("dropped_smbus", stats.mgpdc), 88 IGB_STAT("dropped_smbus", stats.mgpdc),
89 IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
90 IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
91 IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
92 IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
89}; 93};
90 94
91#define IGB_NETDEV_STAT(_net_stat) { \ 95#define IGB_NETDEV_STAT(_net_stat) { \
@@ -603,7 +607,10 @@ static void igb_get_regs(struct net_device *netdev,
603 regs_buff[548] = rd32(E1000_TDFT); 607 regs_buff[548] = rd32(E1000_TDFT);
604 regs_buff[549] = rd32(E1000_TDFHS); 608 regs_buff[549] = rd32(E1000_TDFHS);
605 regs_buff[550] = rd32(E1000_TDFPC); 609 regs_buff[550] = rd32(E1000_TDFPC);
606 610 regs_buff[551] = adapter->stats.o2bgptc;
611 regs_buff[552] = adapter->stats.b2ospc;
612 regs_buff[553] = adapter->stats.o2bspc;
613 regs_buff[554] = adapter->stats.b2ogprc;
607} 614}
608 615
609static int igb_get_eeprom_len(struct net_device *netdev) 616static int igb_get_eeprom_len(struct net_device *netdev)
@@ -714,7 +721,7 @@ static int igb_set_eeprom(struct net_device *netdev,
714 /* Update the checksum over the first part of the EEPROM if needed 721 /* Update the checksum over the first part of the EEPROM if needed
715 * and flush shadow RAM for 82573 controllers */ 722 * and flush shadow RAM for 82573 controllers */
716 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 723 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
717 igb_update_nvm_checksum(hw); 724 hw->nvm.ops.update(hw);
718 725
719 kfree(eeprom_buff); 726 kfree(eeprom_buff);
720 return ret_val; 727 return ret_val;
@@ -727,8 +734,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
727 char firmware_version[32]; 734 char firmware_version[32];
728 u16 eeprom_data; 735 u16 eeprom_data;
729 736
730 strncpy(drvinfo->driver, igb_driver_name, 32); 737 strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
731 strncpy(drvinfo->version, igb_driver_version, 32); 738 strncpy(drvinfo->version, igb_driver_version,
739 sizeof(drvinfo->version) - 1);
732 740
733 /* EEPROM image version # is reported as firmware version # for 741 /* EEPROM image version # is reported as firmware version # for
734 * 82575 controllers */ 742 * 82575 controllers */
@@ -738,8 +746,10 @@ static void igb_get_drvinfo(struct net_device *netdev,
738 (eeprom_data & 0x0FF0) >> 4, 746 (eeprom_data & 0x0FF0) >> 4,
739 eeprom_data & 0x000F); 747 eeprom_data & 0x000F);
740 748
741 strncpy(drvinfo->fw_version, firmware_version, 32); 749 strncpy(drvinfo->fw_version, firmware_version,
742 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 750 sizeof(drvinfo->fw_version) - 1);
751 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
752 sizeof(drvinfo->bus_info) - 1);
743 drvinfo->n_stats = IGB_STATS_LEN; 753 drvinfo->n_stats = IGB_STATS_LEN;
744 drvinfo->testinfo_len = IGB_TEST_LEN; 754 drvinfo->testinfo_len = IGB_TEST_LEN;
745 drvinfo->regdump_len = igb_get_regs_len(netdev); 755 drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -1070,7 +1080,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1070 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1080 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1071 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 1081 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
1072 wr32(reg, (_test[pat] & write)); 1082 wr32(reg, (_test[pat] & write));
1073 val = rd32(reg); 1083 val = rd32(reg) & mask;
1074 if (val != (_test[pat] & write & mask)) { 1084 if (val != (_test[pat] & write & mask)) {
1075 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 1085 dev_err(&adapter->pdev->dev, "pattern test reg %04X "
1076 "failed: got 0x%08X expected 0x%08X\n", 1086 "failed: got 0x%08X expected 0x%08X\n",
@@ -1999,6 +2009,12 @@ static int igb_set_coalesce(struct net_device *netdev,
1999 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) 2009 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
2000 return -EINVAL; 2010 return -EINVAL;
2001 2011
2012 /* If ITR is disabled, disable DMAC */
2013 if (ec->rx_coalesce_usecs == 0) {
2014 if (adapter->flags & IGB_FLAG_DMAC)
2015 adapter->flags &= ~IGB_FLAG_DMAC;
2016 }
2017
2002 /* convert to rate of irq's per second */ 2018 /* convert to rate of irq's per second */
2003 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) 2019 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
2004 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2020 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 58c665b7513..3d850af0cdd 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -50,12 +50,17 @@
50#endif 50#endif
51#include "igb.h" 51#include "igb.h"
52 52
53#define DRV_VERSION "2.1.0-k2" 53#define MAJ 3
54#define MIN 0
55#define BUILD 6
56#define KFIX 2
57#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
58__stringify(BUILD) "-k" __stringify(KFIX)
54char igb_driver_name[] = "igb"; 59char igb_driver_name[] = "igb";
55char igb_driver_version[] = DRV_VERSION; 60char igb_driver_version[] = DRV_VERSION;
56static const char igb_driver_string[] = 61static const char igb_driver_string[] =
57 "Intel(R) Gigabit Ethernet Network Driver"; 62 "Intel(R) Gigabit Ethernet Network Driver";
58static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation."; 63static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
59 64
60static const struct e1000_info *igb_info_tbl[] = { 65static const struct e1000_info *igb_info_tbl[] = {
61 [board_82575] = &e1000_82575_info, 66 [board_82575] = &e1000_82575_info,
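
The new version string is assembled by the preprocessor from the MAJ/MIN/BUILD/KFIX numbers above. A minimal user-space sketch of the same two-step stringification trick (a local STR1/STR2 pair stands in for the kernel's __stringify() from <linux/stringify.h>):

	#include <stdio.h>

	/* Two-step expansion: STR1 expands the macro argument, STR2 quotes it. */
	#define STR2(x) #x
	#define STR1(x) STR2(x)

	#define MAJ 3
	#define MIN 0
	#define BUILD 6
	#define KFIX 2
	#define DRV_VERSION STR1(MAJ) "." STR1(MIN) "." STR1(BUILD) "-k" STR1(KFIX)

	int main(void)
	{
		puts(DRV_VERSION);	/* prints "3.0.6-k2" */
		return 0;
	}

The indirection through STR2 matters: stringizing MAJ directly would yield "MAJ" rather than "3".
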
@@ -68,6 +73,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, 78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, 79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -100,6 +106,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
100static void igb_setup_mrqc(struct igb_adapter *); 106static void igb_setup_mrqc(struct igb_adapter *);
101static int igb_probe(struct pci_dev *, const struct pci_device_id *); 107static int igb_probe(struct pci_dev *, const struct pci_device_id *);
102static void __devexit igb_remove(struct pci_dev *pdev); 108static void __devexit igb_remove(struct pci_dev *pdev);
109static void igb_init_hw_timer(struct igb_adapter *adapter);
103static int igb_sw_init(struct igb_adapter *); 110static int igb_sw_init(struct igb_adapter *);
104static int igb_open(struct net_device *); 111static int igb_open(struct net_device *);
105static int igb_close(struct net_device *); 112static int igb_close(struct net_device *);
@@ -149,6 +156,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
149static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 156static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
150static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, 157static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
151 struct ifla_vf_info *ivi); 158 struct ifla_vf_info *ivi);
159static void igb_check_vf_rate_limit(struct igb_adapter *);
152 160
153#ifdef CONFIG_PM 161#ifdef CONFIG_PM
154static int igb_suspend(struct pci_dev *, pm_message_t); 162static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -1672,7 +1680,58 @@ void igb_reset(struct igb_adapter *adapter)
1672 1680
1673 if (hw->mac.ops.init_hw(hw)) 1681 if (hw->mac.ops.init_hw(hw))
1674 dev_err(&pdev->dev, "Hardware Error\n"); 1682 dev_err(&pdev->dev, "Hardware Error\n");
1683 if (hw->mac.type > e1000_82580) {
1684 if (adapter->flags & IGB_FLAG_DMAC) {
1685 u32 reg;
1675 1686
 1687 /*
 1688 * DMA Coalescing high water mark needs to be higher
 1689 * than the Rx threshold. The Rx threshold is
 1690 * currently pba - 6, so we should use a high water
 1691 * mark of pba - 4. */
1692 hwm = (pba - 4) << 10;
1693
1694 reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
1695 & E1000_DMACR_DMACTHR_MASK);
1696
1697 /* transition to L0x or L1 if available..*/
1698 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1699
 1700 /* watchdog timer = +/-1000 usec in 32 usec intervals */
1701 reg |= (1000 >> 5);
1702 wr32(E1000_DMACR, reg);
1703
 1704 /* no lower threshold to disable coalescing (smart fifo)
 1705 * - UTRESH=0 */
1706 wr32(E1000_DMCRTRH, 0);
1707
1708 /* set hwm to PBA - 2 * max frame size */
1709 wr32(E1000_FCRTC, hwm);
1710
 1711 /*
 1712 * This sets the time to wait before requesting transition to
 1713 * low power state to the number of usecs needed to receive
 1714 * 1 512-byte frame at gigabit line rate
 1715 */
1716 reg = rd32(E1000_DMCTLX);
1717 reg |= IGB_DMCTLX_DCFLUSH_DIS;
1718
1719 /* Delay 255 usec before entering Lx state. */
1720 reg |= 0xFF;
1721 wr32(E1000_DMCTLX, reg);
1722
1723 /* free space in Tx packet buffer to wake from DMAC */
1724 wr32(E1000_DMCTXTH,
1725 (IGB_MIN_TXPBSIZE -
1726 (IGB_TX_BUF_4096 + adapter->max_frame_size))
1727 >> 6);
1728
1729 /* make low power state decision controlled by DMAC */
1730 reg = rd32(E1000_PCIEMISC);
1731 reg |= E1000_PCIEMISC_LX_DECISION;
1732 wr32(E1000_PCIEMISC, reg);
1733 } /* end if IGB_FLAG_DMAC set */
1734 }
1676 if (hw->mac.type == e1000_82580) { 1735 if (hw->mac.type == e1000_82580) {
1677 u32 reg = rd32(E1000_PCIEMISC); 1736 u32 reg = rd32(E1000_PCIEMISC);
1678 wr32(E1000_PCIEMISC, 1737 wr32(E1000_PCIEMISC,
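
For concreteness, a small user-space sketch of the DMA coalescing threshold arithmetic performed in the hunk above; the DMACR field layout is an assumption based on the driver's defines, and the pba value is purely illustrative:

	#include <stdio.h>

	/* DMACR threshold field layout, assumed from the driver's defines. */
	#define E1000_DMACR_DMACTHR_SHIFT	16
	#define E1000_DMACR_DMACTHR_MASK	0x00FF0000

	int main(void)
	{
		unsigned int pba = 34;              /* illustrative Rx buffer size, KB */
		unsigned int hwm = (pba - 4) << 10; /* FCRTC high water mark */
		unsigned int dmacr;

		/* Rx threshold of pba - 6, placed in the DMACTHR field */
		dmacr = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT) &
			E1000_DMACR_DMACTHR_MASK;
		/* watchdog timer: ~1000 usec expressed in 32 usec units */
		dmacr |= 1000 >> 5;

		printf("FCRTC = %u, DMACR = 0x%08x\n", hwm, dmacr);
		return 0;
	}
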
@@ -1882,7 +1941,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1882 hw->mac.ops.reset_hw(hw); 1941 hw->mac.ops.reset_hw(hw);
1883 1942
1884 /* make sure the NVM is good */ 1943 /* make sure the NVM is good */
1885 if (igb_validate_nvm_checksum(hw) < 0) { 1944 if (hw->nvm.ops.validate(hw) < 0) {
1886 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 1945 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1887 err = -EIO; 1946 err = -EIO;
1888 goto err_eeprom; 1947 goto err_eeprom;
@@ -1990,6 +2049,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1990 } 2049 }
1991 2050
1992#endif 2051#endif
2052 /* do hw tstamp init after resetting */
2053 igb_init_hw_timer(adapter);
2054
1993 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2055 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1994 /* print bus type/speed/width info */ 2056 /* print bus type/speed/width info */
1995 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2057 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2012,7 +2074,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2012 adapter->msix_entries ? "MSI-X" : 2074 adapter->msix_entries ? "MSI-X" :
2013 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", 2075 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2014 adapter->num_rx_queues, adapter->num_tx_queues); 2076 adapter->num_rx_queues, adapter->num_tx_queues);
2015 2077 switch (hw->mac.type) {
2078 case e1000_i350:
2079 igb_set_eee_i350(hw);
2080 break;
2081 default:
2082 break;
2083 }
2016 return 0; 2084 return 0;
2017 2085
2018err_register: 2086err_register:
@@ -2149,6 +2217,9 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2149 random_ether_addr(mac_addr); 2217 random_ether_addr(mac_addr);
2150 igb_set_vf_mac(adapter, i, mac_addr); 2218 igb_set_vf_mac(adapter, i, mac_addr);
2151 } 2219 }
2220 /* DMA Coalescing is not supported in IOV mode. */
2221 if (adapter->flags & IGB_FLAG_DMAC)
2222 adapter->flags &= ~IGB_FLAG_DMAC;
2152 } 2223 }
2153#endif /* CONFIG_PCI_IOV */ 2224#endif /* CONFIG_PCI_IOV */
2154} 2225}
@@ -2286,9 +2357,19 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2286 2357
2287 spin_lock_init(&adapter->stats64_lock); 2358 spin_lock_init(&adapter->stats64_lock);
2288#ifdef CONFIG_PCI_IOV 2359#ifdef CONFIG_PCI_IOV
2289 if (hw->mac.type == e1000_82576) 2360 switch (hw->mac.type) {
2290 adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs; 2361 case e1000_82576:
2291 2362 case e1000_i350:
2363 if (max_vfs > 7) {
2364 dev_warn(&pdev->dev,
2365 "Maximum of 7 VFs per PF, using max\n");
2366 adapter->vfs_allocated_count = 7;
2367 } else
2368 adapter->vfs_allocated_count = max_vfs;
2369 break;
2370 default:
2371 break;
2372 }
2292#endif /* CONFIG_PCI_IOV */ 2373#endif /* CONFIG_PCI_IOV */
2293 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 2374 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
2294 2375
@@ -2307,12 +2388,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2307 return -ENOMEM; 2388 return -ENOMEM;
2308 } 2389 }
2309 2390
2310 igb_init_hw_timer(adapter);
2311 igb_probe_vfs(adapter); 2391 igb_probe_vfs(adapter);
2312 2392
2313 /* Explicitly disable IRQ since the NIC can be in any state. */ 2393 /* Explicitly disable IRQ since the NIC can be in any state. */
2314 igb_irq_disable(adapter); 2394 igb_irq_disable(adapter);
2315 2395
2396 if (hw->mac.type == e1000_i350)
2397 adapter->flags &= ~IGB_FLAG_DMAC;
2398
2316 set_bit(__IGB_DOWN, &adapter->state); 2399 set_bit(__IGB_DOWN, &adapter->state);
2317 return 0; 2400 return 0;
2318} 2401}
@@ -3467,7 +3550,7 @@ static void igb_watchdog_task(struct work_struct *work)
3467 watchdog_task); 3550 watchdog_task);
3468 struct e1000_hw *hw = &adapter->hw; 3551 struct e1000_hw *hw = &adapter->hw;
3469 struct net_device *netdev = adapter->netdev; 3552 struct net_device *netdev = adapter->netdev;
3470 u32 link; 3553 u32 link, ctrl_ext, thstat;
3471 int i; 3554 int i;
3472 3555
3473 link = igb_has_link(adapter); 3556 link = igb_has_link(adapter);
@@ -3491,6 +3574,25 @@ static void igb_watchdog_task(struct work_struct *work)
3491 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 3574 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3492 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); 3575 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3493 3576
3577 /* check for thermal sensor event on i350,
3578 * copper only */
3579 if (hw->mac.type == e1000_i350) {
3580 thstat = rd32(E1000_THSTAT);
3581 ctrl_ext = rd32(E1000_CTRL_EXT);
3582 if ((hw->phy.media_type ==
3583 e1000_media_type_copper) && !(ctrl_ext &
3584 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3585 if (thstat &
3586 E1000_THSTAT_LINK_THROTTLE) {
3587 printk(KERN_INFO "igb: %s The "
3588 "network adapter link "
3589 "speed was downshifted "
3590 "because it "
3591 "overheated.\n",
3592 netdev->name);
3593 }
3594 }
3595 }
3494 /* adjust timeout factor according to speed/duplex */ 3596 /* adjust timeout factor according to speed/duplex */
3495 adapter->tx_timeout_factor = 1; 3597 adapter->tx_timeout_factor = 1;
3496 switch (adapter->link_speed) { 3598 switch (adapter->link_speed) {
@@ -3505,6 +3607,7 @@ static void igb_watchdog_task(struct work_struct *work)
3505 netif_carrier_on(netdev); 3607 netif_carrier_on(netdev);
3506 3608
3507 igb_ping_all_vfs(adapter); 3609 igb_ping_all_vfs(adapter);
3610 igb_check_vf_rate_limit(adapter);
3508 3611
3509 /* link state has changed, schedule phy info update */ 3612 /* link state has changed, schedule phy info update */
3510 if (!test_bit(__IGB_DOWN, &adapter->state)) 3613 if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -3515,6 +3618,22 @@ static void igb_watchdog_task(struct work_struct *work)
3515 if (netif_carrier_ok(netdev)) { 3618 if (netif_carrier_ok(netdev)) {
3516 adapter->link_speed = 0; 3619 adapter->link_speed = 0;
3517 adapter->link_duplex = 0; 3620 adapter->link_duplex = 0;
3621 /* check for thermal sensor event on i350
3622 * copper only*/
3623 if (hw->mac.type == e1000_i350) {
3624 thstat = rd32(E1000_THSTAT);
3625 ctrl_ext = rd32(E1000_CTRL_EXT);
3626 if ((hw->phy.media_type ==
3627 e1000_media_type_copper) && !(ctrl_ext &
3628 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3629 if (thstat & E1000_THSTAT_PWR_DOWN) {
3630 printk(KERN_ERR "igb: %s The "
3631 "network adapter was stopped "
3632 "because it overheated.\n",
3633 netdev->name);
3634 }
3635 }
3636 }
3518 /* Links status message must follow this format */ 3637 /* Links status message must follow this format */
3519 printk(KERN_INFO "igb: %s NIC Link is Down\n", 3638 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3520 netdev->name); 3639 netdev->name);
@@ -4547,6 +4666,15 @@ void igb_update_stats(struct igb_adapter *adapter,
4547 adapter->stats.mgptc += rd32(E1000_MGTPTC); 4666 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4548 adapter->stats.mgprc += rd32(E1000_MGTPRC); 4667 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4549 adapter->stats.mgpdc += rd32(E1000_MGTPDC); 4668 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4669
4670 /* OS2BMC Stats */
4671 reg = rd32(E1000_MANC);
4672 if (reg & E1000_MANC_EN_BMC2OS) {
4673 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4674 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4675 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4676 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4677 }
4550} 4678}
4551 4679
4552static irqreturn_t igb_msix_other(int irq, void *data) 4680static irqreturn_t igb_msix_other(int irq, void *data)
@@ -6593,9 +6721,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6593 return igb_set_vf_mac(adapter, vf, mac); 6721 return igb_set_vf_mac(adapter, vf, mac);
6594} 6722}
6595 6723
6724static int igb_link_mbps(int internal_link_speed)
6725{
6726 switch (internal_link_speed) {
6727 case SPEED_100:
6728 return 100;
6729 case SPEED_1000:
6730 return 1000;
6731 default:
6732 return 0;
6733 }
6734}
6735
6736static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6737 int link_speed)
6738{
6739 int rf_dec, rf_int;
6740 u32 bcnrc_val;
6741
6742 if (tx_rate != 0) {
6743 /* Calculate the rate factor values to set */
6744 rf_int = link_speed / tx_rate;
6745 rf_dec = (link_speed - (rf_int * tx_rate));
6746 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
6747
6748 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6749 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6750 E1000_RTTBCNRC_RF_INT_MASK);
6751 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6752 } else {
6753 bcnrc_val = 0;
6754 }
6755
6756 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6757 wr32(E1000_RTTBCNRC, bcnrc_val);
6758}
6759
6760static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6761{
6762 int actual_link_speed, i;
6763 bool reset_rate = false;
6764
6765 /* VF TX rate limit was not set or not supported */
6766 if ((adapter->vf_rate_link_speed == 0) ||
6767 (adapter->hw.mac.type != e1000_82576))
6768 return;
6769
6770 actual_link_speed = igb_link_mbps(adapter->link_speed);
6771 if (actual_link_speed != adapter->vf_rate_link_speed) {
6772 reset_rate = true;
6773 adapter->vf_rate_link_speed = 0;
6774 dev_info(&adapter->pdev->dev,
6775 "Link speed has been changed. VF Transmit "
6776 "rate is disabled\n");
6777 }
6778
6779 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6780 if (reset_rate)
6781 adapter->vf_data[i].tx_rate = 0;
6782
6783 igb_set_vf_rate_limit(&adapter->hw, i,
6784 adapter->vf_data[i].tx_rate,
6785 actual_link_speed);
6786 }
6787}
6788
6596static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 6789static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6597{ 6790{
6598 return -EOPNOTSUPP; 6791 struct igb_adapter *adapter = netdev_priv(netdev);
6792 struct e1000_hw *hw = &adapter->hw;
6793 int actual_link_speed;
6794
6795 if (hw->mac.type != e1000_82576)
6796 return -EOPNOTSUPP;
6797
6798 actual_link_speed = igb_link_mbps(adapter->link_speed);
6799 if ((vf >= adapter->vfs_allocated_count) ||
6800 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6801 (tx_rate < 0) || (tx_rate > actual_link_speed))
6802 return -EINVAL;
6803
6804 adapter->vf_rate_link_speed = actual_link_speed;
6805 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6806 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6807
6808 return 0;
6599} 6809}
6600 6810
6601static int igb_ndo_get_vf_config(struct net_device *netdev, 6811static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -6606,7 +6816,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
6606 return -EINVAL; 6816 return -EINVAL;
6607 ivi->vf = vf; 6817 ivi->vf = vf;
6608 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); 6818 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
6609 ivi->tx_rate = 0; 6819 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
6610 ivi->vlan = adapter->vf_data[vf].pf_vlan; 6820 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6611 ivi->qos = adapter->vf_data[vf].pf_qos; 6821 ivi->qos = adapter->vf_data[vf].pf_qos;
6612 return 0; 6822 return 0;
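
The RTTBCNRC value written by igb_set_vf_rate_limit() above is a fixed-point ratio of link speed to the requested cap: rf_int carries the integer part and rf_dec a 14-bit fraction. A self-contained sketch of the same arithmetic (register masks assumed to match the driver's defines; the 300 Mbps cap is illustrative):

	#include <stdio.h>

	/* Constants assumed to match the driver's e1000_defines.h. */
	#define E1000_RTTBCNRC_RS_ENA		0x80000000u
	#define E1000_RTTBCNRC_RF_INT_SHIFT	14
	#define E1000_RTTBCNRC_RF_DEC_MASK	0x00003FFFu
	#define E1000_RTTBCNRC_RF_INT_MASK	\
		(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)

	int main(void)
	{
		int link_speed = 1000;	/* Mbps, as from igb_link_mbps() */
		int tx_rate = 300;	/* requested VF cap, Mbps */
		int rf_int = link_speed / tx_rate;
		int rf_dec = link_speed - rf_int * tx_rate;
		unsigned int bcnrc_val;

		/* 14-bit fractional part, as in igb_set_vf_rate_limit() */
		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val  = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= (rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
			     E1000_RTTBCNRC_RF_INT_MASK;
		bcnrc_val |= rf_dec & E1000_RTTBCNRC_RF_DEC_MASK;

		printf("RTTBCNRC = 0x%08x (rf_int=%d rf_dec=%d)\n",
		       bcnrc_val, rf_int, rf_dec);
		return 0;
	}

At 1000 Mbps with a 300 Mbps cap this yields rf_int = 3 and rf_dec = 5461, i.e. a scheduling factor of about 3.333.
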
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index ed6e3d91024..1d943aa7c7a 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -201,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev,
201 struct igbvf_adapter *adapter = netdev_priv(netdev); 201 struct igbvf_adapter *adapter = netdev_priv(netdev);
202 struct e1000_hw *hw = &adapter->hw; 202 struct e1000_hw *hw = &adapter->hw;
203 u32 *regs_buff = p; 203 u32 *regs_buff = p;
204 u8 revision_id;
205 204
206 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); 205 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
207 206
208 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); 207 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
209 208 adapter->pdev->device;
210 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
211 209
212 regs_buff[0] = er32(CTRL); 210 regs_buff[0] = er32(CTRL);
213 regs_buff[1] = er32(STATUS); 211 regs_buff[1] = er32(STATUS);
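
The replacement packs the register-dump version word from values the PCI core already holds. A one-line check of the packing, with IDs chosen purely for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int revision = 0x01, device = 0x10ca;	/* example IDs */
		unsigned int version = (1u << 24) | (revision << 16) | device;

		printf("regs version = 0x%08x\n", version);	/* 0x010110ca */
		return 0;
	}
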
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 990c329e6c3..d5dad5d607d 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -201,9 +201,6 @@ struct igbvf_adapter {
201 unsigned int restart_queue; 201 unsigned int restart_queue;
202 u32 txd_cmd; 202 u32 txd_cmd;
203 203
204 bool detect_tx_hung;
205 u8 tx_timeout_factor;
206
207 u32 tx_int_delay; 204 u32 tx_int_delay;
208 u32 tx_abs_int_delay; 205 u32 tx_abs_int_delay;
209 206
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 6352c8158e6..6ccc32fd733 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -396,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
396 buffer_info->time_stamp = 0; 396 buffer_info->time_stamp = 0;
397} 397}
398 398
399static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
400{
401 struct igbvf_ring *tx_ring = adapter->tx_ring;
402 unsigned int i = tx_ring->next_to_clean;
403 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
404 union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
405
406 /* detected Tx unit hang */
407 dev_err(&adapter->pdev->dev,
408 "Detected Tx Unit Hang:\n"
409 " TDH <%x>\n"
410 " TDT <%x>\n"
411 " next_to_use <%x>\n"
412 " next_to_clean <%x>\n"
413 "buffer_info[next_to_clean]:\n"
414 " time_stamp <%lx>\n"
415 " next_to_watch <%x>\n"
416 " jiffies <%lx>\n"
417 " next_to_watch.status <%x>\n",
418 readl(adapter->hw.hw_addr + tx_ring->head),
419 readl(adapter->hw.hw_addr + tx_ring->tail),
420 tx_ring->next_to_use,
421 tx_ring->next_to_clean,
422 tx_ring->buffer_info[eop].time_stamp,
423 eop,
424 jiffies,
425 eop_desc->wb.status);
426}
427
428/** 399/**
429 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) 400 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
430 * @adapter: board private structure 401 * @adapter: board private structure
@@ -771,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
771static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) 742static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
772{ 743{
773 struct igbvf_adapter *adapter = tx_ring->adapter; 744 struct igbvf_adapter *adapter = tx_ring->adapter;
774 struct e1000_hw *hw = &adapter->hw;
775 struct net_device *netdev = adapter->netdev; 745 struct net_device *netdev = adapter->netdev;
776 struct igbvf_buffer *buffer_info; 746 struct igbvf_buffer *buffer_info;
777 struct sk_buff *skb; 747 struct sk_buff *skb;
@@ -832,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
832 } 802 }
833 } 803 }
834 804
835 if (adapter->detect_tx_hung) {
836 /* Detect a transmit hang in hardware, this serializes the
837 * check with the clearing of time_stamp and movement of i */
838 adapter->detect_tx_hung = false;
839 if (tx_ring->buffer_info[i].time_stamp &&
840 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
841 (adapter->tx_timeout_factor * HZ)) &&
842 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
843
844 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
845 /* detected Tx unit hang */
846 igbvf_print_tx_hang(adapter);
847
848 netif_stop_queue(netdev);
849 }
850 }
851 adapter->net_stats.tx_bytes += total_bytes; 805 adapter->net_stats.tx_bytes += total_bytes;
852 adapter->net_stats.tx_packets += total_packets; 806 adapter->net_stats.tx_packets += total_packets;
853 return count < tx_ring->count; 807 return count < tx_ring->count;
@@ -1863,17 +1817,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1863 &adapter->link_duplex); 1817 &adapter->link_duplex);
1864 igbvf_print_link_info(adapter); 1818 igbvf_print_link_info(adapter);
1865 1819
1866 /* adjust timeout factor according to speed/duplex */
1867 adapter->tx_timeout_factor = 1;
1868 switch (adapter->link_speed) {
1869 case SPEED_10:
1870 adapter->tx_timeout_factor = 16;
1871 break;
1872 case SPEED_100:
1873 /* maybe add some timeout factor ? */
1874 break;
1875 }
1876
1877 netif_carrier_on(netdev); 1820 netif_carrier_on(netdev);
1878 netif_wake_queue(netdev); 1821 netif_wake_queue(netdev);
1879 } 1822 }
@@ -1907,9 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1907 /* Cause software interrupt to ensure Rx ring is cleaned */ 1850 /* Cause software interrupt to ensure Rx ring is cleaned */
1908 ew32(EICS, adapter->rx_ring->eims_value); 1851 ew32(EICS, adapter->rx_ring->eims_value);
1909 1852
1910 /* Force detection of hung controller every watchdog period */
1911 adapter->detect_tx_hung = 1;
1912
1913 /* Reset the timer */ 1853 /* Reset the timer */
1914 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1854 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1915 mod_timer(&adapter->watchdog_timer, 1855 mod_timer(&adapter->watchdog_timer,
@@ -2699,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2699 hw->device_id = pdev->device; 2639 hw->device_id = pdev->device;
2700 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2640 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2701 hw->subsystem_device_id = pdev->subsystem_device; 2641 hw->subsystem_device_id = pdev->subsystem_device;
2702 2642 hw->revision_id = pdev->revision;
2703 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2704 2643
2705 err = -EIO; 2644 err = -EIO;
2706 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), 2645 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8b009..af3822f9ea9 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
220 * The parameter rar_count will usually be hw->mac.rar_entry_count 220 * The parameter rar_count will usually be hw->mac.rar_entry_count
221 * unless there are workarounds that change this. 221 * unless there are workarounds that change this.
222 **/ 222 **/
223void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, 223static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
224 u8 *mc_addr_list, u32 mc_addr_count, 224 u8 *mc_addr_list, u32 mc_addr_count,
225 u32 rar_used_count, u32 rar_count) 225 u32 rar_used_count, u32 rar_count)
226{ 226{
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index aa93655c3aa..a5b0f0e194b 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev)
2025 2025
2026 if (phyaddr != 0x1f) { 2026 if (phyaddr != 0x1f) {
2027 u16 mii_phyctrl, mii_1000cr; 2027 u16 mii_phyctrl, mii_1000cr;
2028 u8 revisionid = 0;
2029 2028
2030 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); 2029 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2031 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | 2030 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
@@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev)
2035 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); 2034 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2036 2035
2037 /* Set default phyparam */ 2036 /* Set default phyparam */
2038 pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); 2037 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2039 ipg_set_phy_default_param(revisionid, dev, phyaddr);
2040 2038
2041 /* Reset PHY */ 2039 /* Reset PHY */
2042 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; 2040 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
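
This hunk, like the two igbvf hunks above, drops a PCI config-space round trip in favor of the revision byte the PCI core caches in struct pci_dev during enumeration. A kernel-style sketch of the before/after pattern (the surrounding helper is hypothetical, not part of any driver):

	#include <linux/pci.h>

	/* Hypothetical probe helper illustrating both styles. */
	static void example_cache_revision(struct pci_dev *pdev, u8 *revision_out)
	{
		u8 revision_id;

		/* before: explicit round trip through PCI config space */
		pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
		*revision_out = revision_id;

		/* after: the PCI core cached the byte at enumeration time */
		*revision_out = pdev->revision;
	}
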
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index ee1dde52e8f..3352b2443e5 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -167,7 +167,7 @@ static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
167 * let's be careful... Jean II 167 * let's be careful... Jean II
168 */ 168 */
169 IRDA_ASSERT(priv->tty->ops->tiocmset != NULL, return -1;); 169 IRDA_ASSERT(priv->tty->ops->tiocmset != NULL, return -1;);
170 priv->tty->ops->tiocmset(priv->tty, NULL, set, clear); 170 priv->tty->ops->tiocmset(priv->tty, set, clear);
171 171
172 return 0; 172 return 0;
173} 173}
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f54281..4488bd581ec 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
635 635
636 ret = sh_irda_set_baudrate(self, speed); 636 ret = sh_irda_set_baudrate(self, speed);
637 if (ret < 0) 637 if (ret < 0)
638 return ret; 638 goto sh_irda_hard_xmit_end;
639 639
640 self->tx_buff.len = 0; 640 self->tx_buff.len = 0;
641 if (skb->len) { 641 if (skb->len) {
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
652 652
653 sh_irda_write(self, IRTFLR, self->tx_buff.len); 653 sh_irda_write(self, IRTFLR, self->tx_buff.len);
654 sh_irda_write(self, IRTCTR, ARMOD | TE); 654 sh_irda_write(self, IRTCTR, ARMOD | TE);
655 } 655 } else
656 goto sh_irda_hard_xmit_end;
656 657
657 dev_kfree_skb(skb); 658 dev_kfree_skb(skb);
658 659
659 return 0; 660 return 0;
661
662sh_irda_hard_xmit_end:
663 sh_irda_set_baudrate(self, 9600);
664 netif_wake_queue(self->ndev);
665 sh_irda_rcv_ctrl(self, 1);
666 dev_kfree_skb(skb);
667
668 return ret;
669
660} 670}
661 671
662static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) 672static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 521c0c73299..8f3df044e81 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -149,7 +149,7 @@ struct ixgb_desc_ring {
149 149
150struct ixgb_adapter { 150struct ixgb_adapter {
151 struct timer_list watchdog_timer; 151 struct timer_list watchdog_timer;
152 struct vlan_group *vlgrp; 152 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
153 u32 bd_number; 153 u32 bd_number;
154 u32 rx_buffer_len; 154 u32 rx_buffer_len;
155 u32 part_num; 155 u32 part_num;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 43994c19999..cc53aa1541b 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -706,6 +706,43 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
706 } 706 }
707} 707}
708 708
709static int ixgb_set_flags(struct net_device *netdev, u32 data)
710{
711 struct ixgb_adapter *adapter = netdev_priv(netdev);
712 bool need_reset;
713 int rc;
714
715 /*
716 * Tx VLAN insertion does not work per HW design when Rx stripping is
717 * disabled. Disable txvlan when rxvlan is turned off, and enable
718 * rxvlan when txvlan is turned on.
719 */
720 if (!(data & ETH_FLAG_RXVLAN) &&
721 (netdev->features & NETIF_F_HW_VLAN_TX))
722 data &= ~ETH_FLAG_TXVLAN;
723 else if (data & ETH_FLAG_TXVLAN)
724 data |= ETH_FLAG_RXVLAN;
725
726 need_reset = (data & ETH_FLAG_RXVLAN) !=
727 (netdev->features & NETIF_F_HW_VLAN_RX);
728
729 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
730 ETH_FLAG_TXVLAN);
731 if (rc)
732 return rc;
733
734 if (need_reset) {
735 if (netif_running(netdev)) {
736 ixgb_down(adapter, true);
737 ixgb_up(adapter);
738 ixgb_set_speed_duplex(netdev);
739 } else
740 ixgb_reset(adapter);
741 }
742
743 return 0;
744}
745
709static const struct ethtool_ops ixgb_ethtool_ops = { 746static const struct ethtool_ops ixgb_ethtool_ops = {
710 .get_settings = ixgb_get_settings, 747 .get_settings = ixgb_get_settings,
711 .set_settings = ixgb_set_settings, 748 .set_settings = ixgb_set_settings,
@@ -732,6 +769,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
732 .phys_id = ixgb_phys_id, 769 .phys_id = ixgb_phys_id,
733 .get_sset_count = ixgb_get_sset_count, 770 .get_sset_count = ixgb_get_sset_count,
734 .get_ethtool_stats = ixgb_get_ethtool_stats, 771 .get_ethtool_stats = ixgb_get_ethtool_stats,
772 .get_flags = ethtool_op_get_flags,
773 .set_flags = ixgb_set_flags,
735}; 774};
736 775
737void ixgb_set_ethtool_ops(struct net_device *netdev) 776void ixgb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 5639cccb493..0f681ac2da8 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -100,8 +100,6 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
100 100
101static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); 101static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
102static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); 102static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
103static void ixgb_vlan_rx_register(struct net_device *netdev,
104 struct vlan_group *grp);
105static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 103static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
106static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 104static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
107static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 105static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
@@ -336,7 +334,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
336 .ndo_set_mac_address = ixgb_set_mac, 334 .ndo_set_mac_address = ixgb_set_mac,
337 .ndo_change_mtu = ixgb_change_mtu, 335 .ndo_change_mtu = ixgb_change_mtu,
338 .ndo_tx_timeout = ixgb_tx_timeout, 336 .ndo_tx_timeout = ixgb_tx_timeout,
339 .ndo_vlan_rx_register = ixgb_vlan_rx_register,
340 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, 337 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
341 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, 338 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
342#ifdef CONFIG_NET_POLL_CONTROLLER 339#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1508,7 +1505,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1508 DESC_NEEDED))) 1505 DESC_NEEDED)))
1509 return NETDEV_TX_BUSY; 1506 return NETDEV_TX_BUSY;
1510 1507
1511 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 1508 if (vlan_tx_tag_present(skb)) {
1512 tx_flags |= IXGB_TX_FLAGS_VLAN; 1509 tx_flags |= IXGB_TX_FLAGS_VLAN;
1513 vlan_id = vlan_tx_tag_get(skb); 1510 vlan_id = vlan_tx_tag_get(skb);
1514 } 1511 }
@@ -2049,12 +2046,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
2049 ixgb_rx_checksum(adapter, rx_desc, skb); 2046 ixgb_rx_checksum(adapter, rx_desc, skb);
2050 2047
2051 skb->protocol = eth_type_trans(skb, netdev); 2048 skb->protocol = eth_type_trans(skb, netdev);
2052 if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { 2049 if (status & IXGB_RX_DESC_STATUS_VP)
2053 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2050 __vlan_hwaccel_put_tag(skb,
2054 le16_to_cpu(rx_desc->special)); 2051 le16_to_cpu(rx_desc->special));
2055 } else { 2052
2056 netif_receive_skb(skb); 2053 netif_receive_skb(skb);
2057 }
2058 2054
2059rxdesc_done: 2055rxdesc_done:
2060 /* clean up descriptor, might be written over by hw */ 2056 /* clean up descriptor, might be written over by hw */
@@ -2152,20 +2148,6 @@ map_skb:
2152 } 2148 }
2153} 2149}
2154 2150
2155/**
2156 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
2157 *
2158 * @param netdev network interface device structure
2159 * @param grp indicates to enable or disable tagging/stripping
2160 **/
2161static void
2162ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2163{
2164 struct ixgb_adapter *adapter = netdev_priv(netdev);
2165
2166 adapter->vlgrp = grp;
2167}
2168
2169static void 2151static void
2170ixgb_vlan_strip_enable(struct ixgb_adapter *adapter) 2152ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2171{ 2153{
@@ -2200,6 +2182,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2200 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); 2182 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2201 vfta |= (1 << (vid & 0x1F)); 2183 vfta |= (1 << (vid & 0x1F));
2202 ixgb_write_vfta(&adapter->hw, index, vfta); 2184 ixgb_write_vfta(&adapter->hw, index, vfta);
2185 set_bit(vid, adapter->active_vlans);
2203} 2186}
2204 2187
2205static void 2188static void
@@ -2208,35 +2191,22 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2208 struct ixgb_adapter *adapter = netdev_priv(netdev); 2191 struct ixgb_adapter *adapter = netdev_priv(netdev);
2209 u32 vfta, index; 2192 u32 vfta, index;
2210 2193
2211 ixgb_irq_disable(adapter);
2212
2213 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2214
2215 /* don't enable interrupts unless we are UP */
2216 if (adapter->netdev->flags & IFF_UP)
2217 ixgb_irq_enable(adapter);
2218
2219 /* remove VID from filter table */ 2194 /* remove VID from filter table */
2220 2195
2221 index = (vid >> 5) & 0x7F; 2196 index = (vid >> 5) & 0x7F;
2222 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); 2197 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2223 vfta &= ~(1 << (vid & 0x1F)); 2198 vfta &= ~(1 << (vid & 0x1F));
2224 ixgb_write_vfta(&adapter->hw, index, vfta); 2199 ixgb_write_vfta(&adapter->hw, index, vfta);
2200 clear_bit(vid, adapter->active_vlans);
2225} 2201}
2226 2202
2227static void 2203static void
2228ixgb_restore_vlan(struct ixgb_adapter *adapter) 2204ixgb_restore_vlan(struct ixgb_adapter *adapter)
2229{ 2205{
2230 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); 2206 u16 vid;
2231 2207
2232 if (adapter->vlgrp) { 2208 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2233 u16 vid; 2209 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2234 for (vid = 0; vid < VLAN_N_VID; vid++) {
2235 if (!vlan_group_get_device(adapter->vlgrp, vid))
2236 continue;
2237 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2238 }
2239 }
2240} 2210}
2241 2211
2242#ifdef CONFIG_NET_POLL_CONTROLLER 2212#ifdef CONFIG_NET_POLL_CONTROLLER
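
With vlan_group gone, the driver tracks VLAN membership in a plain bitmap and replays it into the 128-word VFTA hardware table: bits 11:5 of the VLAN ID pick the word, bits 4:0 the bit. A self-contained sketch of that indexing and of the restore walk (plain array arithmetic stands in for the kernel's set_bit()/for_each_set_bit()):

	#include <stdio.h>

	#define VLAN_N_VID	4096
	#define BITS_PER_LONG	(8 * sizeof(unsigned long))

	static unsigned long active_vlans[VLAN_N_VID / BITS_PER_LONG];
	static unsigned int vfta[128];	/* shadow of the VLAN filter table */

	static void add_vid(unsigned int vid)
	{
		unsigned int index = (vid >> 5) & 0x7F;	/* word: VID bits 11:5 */

		vfta[index] |= 1u << (vid & 0x1F);	/* bit: VID bits 4:0 */
		active_vlans[vid / BITS_PER_LONG] |= 1ul << (vid % BITS_PER_LONG);
	}

	int main(void)
	{
		unsigned int vid;

		add_vid(100);
		add_vid(1000);

		/* restore path: replay each set bit, as for_each_set_bit() would */
		for (vid = 0; vid < VLAN_N_VID; vid++)
			if (active_vlans[vid / BITS_PER_LONG] &
			    (1ul << (vid % BITS_PER_LONG)))
				printf("vid %u -> vfta[%u] bit %u\n",
				       vid, (vid >> 5) & 0x7F, vid & 0x1F);
		return 0;
	}
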
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3b8c9246361..8d468028bb5 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -118,6 +118,7 @@ struct vf_data_storage {
118 bool pf_set_mac; 118 bool pf_set_mac;
119 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 119 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
120 u16 pf_qos; 120 u16 pf_qos;
121 u16 tx_rate;
121}; 122};
122 123
123/* wrapper around a pointer to a socket buffer, 124/* wrapper around a pointer to a socket buffer,
@@ -209,6 +210,7 @@ struct ixgbe_ring {
209 * associated with this ring, which is 210 * associated with this ring, which is
210 * different for DCB and RSS modes 211 * different for DCB and RSS modes
211 */ 212 */
213 u8 dcb_tc;
212 214
213 u16 work_limit; /* max work per interrupt */ 215 u16 work_limit; /* max work per interrupt */
214 216
@@ -243,7 +245,7 @@ enum ixgbe_ring_f_enum {
243 RING_F_ARRAY_SIZE /* must be last in enum set */ 245 RING_F_ARRAY_SIZE /* must be last in enum set */
244}; 246};
245 247
246#define IXGBE_MAX_DCB_INDICES 8 248#define IXGBE_MAX_DCB_INDICES 64
247#define IXGBE_MAX_RSS_INDICES 16 249#define IXGBE_MAX_RSS_INDICES 16
248#define IXGBE_MAX_VMDQ_INDICES 64 250#define IXGBE_MAX_VMDQ_INDICES 64
249#define IXGBE_MAX_FDIR_INDICES 64 251#define IXGBE_MAX_FDIR_INDICES 64
@@ -334,9 +336,14 @@ struct ixgbe_adapter {
334 u16 bd_number; 336 u16 bd_number;
335 struct work_struct reset_task; 337 struct work_struct reset_task;
336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 338 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
339
340 /* DCB parameters */
341 struct ieee_pfc *ixgbe_ieee_pfc;
342 struct ieee_ets *ixgbe_ieee_ets;
337 struct ixgbe_dcb_config dcb_cfg; 343 struct ixgbe_dcb_config dcb_cfg;
338 struct ixgbe_dcb_config temp_dcb_cfg; 344 struct ixgbe_dcb_config temp_dcb_cfg;
339 u8 dcb_set_bitmap; 345 u8 dcb_set_bitmap;
346 u8 dcbx_cap;
340 enum ixgbe_fc_mode last_lfc_mode; 347 enum ixgbe_fc_mode last_lfc_mode;
341 348
342 /* Interrupt Throttle Rate */ 349 /* Interrupt Throttle Rate */
@@ -462,6 +469,7 @@ struct ixgbe_adapter {
462 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 469 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
463 unsigned int num_vfs; 470 unsigned int num_vfs;
464 struct vf_data_storage *vfinfo; 471 struct vf_data_storage *vfinfo;
472 int vf_rate_link_speed;
465}; 473};
466 474
467enum ixbge_state_t { 475enum ixbge_state_t {
@@ -521,7 +529,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
521extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); 529extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
522extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 530extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
523extern int ethtool_ioctl(struct ifreq *ifr); 531extern int ethtool_ioctl(struct ifreq *ifr);
524extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
525extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 532extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
526extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 533extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
527extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 534extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -538,6 +545,7 @@ extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
538extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, 545extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
539 struct ixgbe_ring *ring); 546 struct ixgbe_ring *ring);
540extern void ixgbe_set_rx_mode(struct net_device *netdev); 547extern void ixgbe_set_rx_mode(struct net_device *netdev);
548extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
541#ifdef IXGBE_FCOE 549#ifdef IXGBE_FCOE
542extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 550extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
543extern int ixgbe_fso(struct ixgbe_adapter *adapter, 551extern int ixgbe_fso(struct ixgbe_adapter *adapter,
@@ -549,6 +557,8 @@ extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
549 struct sk_buff *skb); 557 struct sk_buff *skb);
550extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 558extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
551 struct scatterlist *sgl, unsigned int sgc); 559 struct scatterlist *sgl, unsigned int sgc);
560extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
561 struct scatterlist *sgl, unsigned int sgc);
552extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); 562extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
553extern int ixgbe_fcoe_enable(struct net_device *netdev); 563extern int ixgbe_fcoe_enable(struct net_device *netdev);
554extern int ixgbe_fcoe_disable(struct net_device *netdev); 564extern int ixgbe_fcoe_disable(struct net_device *netdev);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index d0f1d9d2c41..845c679c8b8 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -158,6 +158,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
158 158
159 switch (hw->phy.type) { 159 switch (hw->phy.type) {
160 case ixgbe_phy_tn: 160 case ixgbe_phy_tn:
161 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
161 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 162 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
162 phy->ops.get_firmware_version = 163 phy->ops.get_firmware_version =
163 &ixgbe_get_phy_firmware_version_tnx; 164 &ixgbe_get_phy_firmware_version_tnx;
@@ -280,10 +281,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
280{ 281{
281 enum ixgbe_media_type media_type; 282 enum ixgbe_media_type media_type;
282 283
284 /* Detect if there is a copper PHY attached. */
285 switch (hw->phy.type) {
286 case ixgbe_phy_cu_unknown:
287 case ixgbe_phy_tn:
288 case ixgbe_phy_aq:
289 media_type = ixgbe_media_type_copper;
290 goto out;
291 default:
292 break;
293 }
294
283 /* Media type for I82598 is based on device ID */ 295 /* Media type for I82598 is based on device ID */
284 switch (hw->device_id) { 296 switch (hw->device_id) {
285 case IXGBE_DEV_ID_82598: 297 case IXGBE_DEV_ID_82598:
286 case IXGBE_DEV_ID_82598_BX: 298 case IXGBE_DEV_ID_82598_BX:
299 /* Default device ID is mezzanine card KX/KX4 */
287 media_type = ixgbe_media_type_backplane; 300 media_type = ixgbe_media_type_backplane;
288 break; 301 break;
289 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 302 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
@@ -306,7 +319,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
306 media_type = ixgbe_media_type_unknown; 319 media_type = ixgbe_media_type_unknown;
307 break; 320 break;
308 } 321 }
309 322out:
310 return media_type; 323 return media_type;
311} 324}
312 325
@@ -354,7 +367,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
354 367
355 /* Negotiate the fc mode to use */ 368 /* Negotiate the fc mode to use */
356 ret_val = ixgbe_fc_autoneg(hw); 369 ret_val = ixgbe_fc_autoneg(hw);
357 if (ret_val) 370 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
358 goto out; 371 goto out;
359 372
360 /* Disable any previous flow control settings */ 373 /* Disable any previous flow control settings */
@@ -372,10 +385,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
372 * 2: Tx flow control is enabled (we can send pause frames but 385 * 2: Tx flow control is enabled (we can send pause frames but
373 * we do not support receiving pause frames). 386 * we do not support receiving pause frames).
374 * 3: Both Rx and Tx flow control (symmetric) are enabled. 387 * 3: Both Rx and Tx flow control (symmetric) are enabled.
375 * other: Invalid.
376#ifdef CONFIG_DCB 388#ifdef CONFIG_DCB
377 * 4: Priority Flow Control is enabled. 389 * 4: Priority Flow Control is enabled.
378#endif 390#endif
391 * other: Invalid.
379 */ 392 */
380 switch (hw->fc.current_mode) { 393 switch (hw->fc.current_mode) {
381 case ixgbe_fc_none: 394 case ixgbe_fc_none:
@@ -432,9 +445,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
432 reg = (rx_pba_size - hw->fc.low_water) << 6; 445 reg = (rx_pba_size - hw->fc.low_water) << 6;
433 if (hw->fc.send_xon) 446 if (hw->fc.send_xon)
434 reg |= IXGBE_FCRTL_XONE; 447 reg |= IXGBE_FCRTL_XONE;
448
435 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); 449 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
436 450
437 reg = (rx_pba_size - hw->fc.high_water) << 10; 451 reg = (rx_pba_size - hw->fc.high_water) << 6;
438 reg |= IXGBE_FCRTH_FCEN; 452 reg |= IXGBE_FCRTH_FCEN;
439 453
440 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); 454 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
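
The last hunk above also corrects a scaling bug: the high-water mark was shifted by 10 while the low-water mark used 6, overstating the threshold sixteenfold. A sketch of the corrected computation with illustrative buffer and watermark values (XON/FCEN bit values assumed from ixgbe_type.h):

	#include <stdio.h>

	/* Enable-bit values assumed from ixgbe_type.h. */
	#define IXGBE_FCRTL_XONE	0x80000000u
	#define IXGBE_FCRTH_FCEN	0x80000000u

	int main(void)
	{
		unsigned int rx_pba_size = 512;	/* illustrative buffer size */
		unsigned int low_water = 64, high_water = 16; /* illustrative */

		unsigned int fcrtl = ((rx_pba_size - low_water) << 6) |
				     IXGBE_FCRTL_XONE;
		/* the patched line: << 6 to match FCRTL, not the old << 10 */
		unsigned int fcrth = ((rx_pba_size - high_water) << 6) |
				     IXGBE_FCRTH_FCEN;

		printf("FCRTL = 0x%08x, FCRTH = 0x%08x\n", fcrtl, fcrth);
		return 0;
	}
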
@@ -627,13 +641,12 @@ out:
627 return 0; 641 return 0;
628} 642}
629 643
630
631/** 644/**
632 * ixgbe_setup_mac_link_82598 - Set MAC link speed 645 * ixgbe_setup_mac_link_82598 - Set MAC link speed
633 * @hw: pointer to hardware structure 646 * @hw: pointer to hardware structure
634 * @speed: new link speed 647 * @speed: new link speed
635 * @autoneg: true if auto-negotiation enabled 648 * @autoneg: true if auto-negotiation enabled
636 * @autoneg_wait_to_complete: true if waiting is needed to complete 649 * @autoneg_wait_to_complete: true when waiting for completion is needed
637 * 650 *
638 * Set the link speed in the AUTOC register and restarts link. 651 * Set the link speed in the AUTOC register and restarts link.
639 **/ 652 **/
@@ -672,7 +685,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
672 * ixgbe_hw This will write the AUTOC register based on the new 685 * ixgbe_hw This will write the AUTOC register based on the new
673 * stored values 686 * stored values
674 */ 687 */
675 status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 688 status = ixgbe_start_mac_link_82598(hw,
689 autoneg_wait_to_complete);
676 } 690 }
677 691
678 return status; 692 return status;
@@ -698,7 +712,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
698 /* Setup the PHY according to input speed */ 712 /* Setup the PHY according to input speed */
699 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 713 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
700 autoneg_wait_to_complete); 714 autoneg_wait_to_complete);
701
702 /* Set up MAC */ 715 /* Set up MAC */
703 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 716 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
704 717
@@ -770,7 +783,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
770 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) 783 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
771 goto no_phy_reset; 784 goto no_phy_reset;
772 785
773
774 hw->phy.ops.reset(hw); 786 hw->phy.ops.reset(hw);
775 } 787 }
776 788
@@ -779,12 +791,9 @@ no_phy_reset:
 779 * Prevent the PCI-E bus from hanging by disabling PCI-E master 791
780 * access and verify no pending requests before reset 792 * access and verify no pending requests before reset
781 */ 793 */
782 status = ixgbe_disable_pcie_master(hw); 794 ixgbe_disable_pcie_master(hw);
783 if (status != 0) {
784 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
785 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
786 }
787 795
796mac_reset_top:
788 /* 797 /*
789 * Issue global reset to the MAC. This needs to be a SW reset. 798 * Issue global reset to the MAC. This needs to be a SW reset.
790 * If link reset is used, it might reset the MAC when mng is using it 799 * If link reset is used, it might reset the MAC when mng is using it
@@ -805,6 +814,19 @@ no_phy_reset:
805 hw_dbg(hw, "Reset polling failed to complete.\n"); 814 hw_dbg(hw, "Reset polling failed to complete.\n");
806 } 815 }
807 816
817 /*
818 * Double resets are required for recovery from certain error
819 * conditions. Between resets, it is necessary to stall to allow time
820 * for any pending HW events to complete. We use 1usec since that is
821 * what is needed for ixgbe_disable_pcie_master(). The second reset
822 * then clears out any effects of those events.
823 */
824 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
825 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
826 udelay(1);
827 goto mac_reset_top;
828 }
829
808 msleep(50); 830 msleep(50);
809 831
810 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); 832 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@@ -824,15 +846,15 @@ no_phy_reset:
824 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); 846 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
825 } 847 }
826 848
849 /* Store the permanent mac address */
850 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
851
827 /* 852 /*
828 * Store MAC address from RAR0, clear receive address registers, and 853 * Store MAC address from RAR0, clear receive address registers, and
829 * clear the multicast table 854 * clear the multicast table
830 */ 855 */
831 hw->mac.ops.init_rx_addrs(hw); 856 hw->mac.ops.init_rx_addrs(hw);
832 857
833 /* Store the permanent mac address */
834 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
835
836reset_hw_out: 858reset_hw_out:
837 if (phy_status) 859 if (phy_status)
838 status = phy_status; 860 status = phy_status;
@@ -849,6 +871,13 @@ reset_hw_out:
849static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 871static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
850{ 872{
851 u32 rar_high; 873 u32 rar_high;
874 u32 rar_entries = hw->mac.num_rar_entries;
875
876 /* Make sure we are using a valid rar index range */
877 if (rar >= rar_entries) {
878 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
879 return IXGBE_ERR_INVALID_ARGUMENT;
880 }
852 881
853 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 882 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
854 rar_high &= ~IXGBE_RAH_VIND_MASK; 883 rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -868,14 +897,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
868 u32 rar_high; 897 u32 rar_high;
869 u32 rar_entries = hw->mac.num_rar_entries; 898 u32 rar_entries = hw->mac.num_rar_entries;
870 899
871 if (rar < rar_entries) { 900
872 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 901 /* Make sure we are using a valid rar index range */
873 if (rar_high & IXGBE_RAH_VIND_MASK) { 902 if (rar >= rar_entries) {
874 rar_high &= ~IXGBE_RAH_VIND_MASK;
875 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
876 }
877 } else {
878 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 903 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
904 return IXGBE_ERR_INVALID_ARGUMENT;
905 }
906
907 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
908 if (rar_high & IXGBE_RAH_VIND_MASK) {
909 rar_high &= ~IXGBE_RAH_VIND_MASK;
910 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
879 } 911 }
880 912
881 return 0; 913 return 0;
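
Both the set_vmdq and clear_vmdq hunks above move the range check to an early return, so the register manipulation no longer nests inside an if/else. The shape they now share, shown once:

        /* Validate first; out-of-range indexes never reach RAH/RAL. */
        if (rar >= hw->mac.num_rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", rar);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
        rar_high &= ~IXGBE_RAH_VIND_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
        return 0;
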
@@ -994,13 +1026,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
994} 1026}
995 1027
996/** 1028/**
997 * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module 1029 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
998 * over I2C interface through an intermediate phy.
999 * @hw: pointer to hardware structure 1030 * @hw: pointer to hardware structure
1000 * @byte_offset: EEPROM byte offset to read 1031 * @byte_offset: EEPROM byte offset to read
1001 * @eeprom_data: value read 1032 * @eeprom_data: value read
1002 * 1033 *
1003 * Performs byte read operation to SFP module's EEPROM over I2C interface. 1034 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1004 **/ 1035 **/
1005static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 1036static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1006 u8 *eeprom_data) 1037 u8 *eeprom_data)
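
For context, callers reach this routine through the phy ops table. A minimal use, assuming the IXGBE_SFF_IDENTIFIER and IXGBE_SFF_IDENTIFIER_SFP constants from ixgbe_phy.h (byte 0 of the module EEPROM holds the SFF-8472 identifier, 0x03 for SFP/SFP+):

        u8 identifier;
        s32 status;

        /* Read one byte of the SFP+ module EEPROM over I2C */
        status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
                                             &identifier);
        if (status == 0 && identifier == IXGBE_SFF_IDENTIFIER_SFP)
                hw_dbg(hw, "SFP/SFP+ module detected\n");
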
@@ -1074,10 +1105,12 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1074 1105
1075 /* Copper PHY must be checked before AUTOC LMS to determine correct 1106 /* Copper PHY must be checked before AUTOC LMS to determine correct
1076 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ 1107 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1077 if (hw->phy.type == ixgbe_phy_tn || 1108 switch (hw->phy.type) {
1078 hw->phy.type == ixgbe_phy_cu_unknown) { 1109 case ixgbe_phy_tn:
1079 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1110 case ixgbe_phy_aq:
1080 &ext_ability); 1111 case ixgbe_phy_cu_unknown:
1112 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
1113 MDIO_MMD_PMAPMD, &ext_ability);
1081 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1114 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
1082 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1115 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1083 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1116 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1085,6 +1118,8 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1085 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1118 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
1086 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1119 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1087 goto out; 1120 goto out;
1121 default:
1122 break;
1088 } 1123 }
1089 1124
1090 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1125 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
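
The copper cases above reduce to a pure bit mapping from the MDIO PMA extended-ability register to the physical-layer mask; isolated for clarity, with the same names as in the hunk:

        /* Map MDIO PMA extended-ability bits to physical layer flags. */
        u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;

        if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
                physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
        if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
                physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
        if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
                physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
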
@@ -1179,13 +1214,14 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1179 .set_vmdq = &ixgbe_set_vmdq_82598, 1214 .set_vmdq = &ixgbe_set_vmdq_82598,
1180 .clear_vmdq = &ixgbe_clear_vmdq_82598, 1215 .clear_vmdq = &ixgbe_clear_vmdq_82598,
1181 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 1216 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1182 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
1183 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 1217 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
1184 .enable_mc = &ixgbe_enable_mc_generic, 1218 .enable_mc = &ixgbe_enable_mc_generic,
1185 .disable_mc = &ixgbe_disable_mc_generic, 1219 .disable_mc = &ixgbe_disable_mc_generic,
1186 .clear_vfta = &ixgbe_clear_vfta_82598, 1220 .clear_vfta = &ixgbe_clear_vfta_82598,
1187 .set_vfta = &ixgbe_set_vfta_82598, 1221 .set_vfta = &ixgbe_set_vfta_82598,
1188 .fc_enable = &ixgbe_fc_enable_82598, 1222 .fc_enable = &ixgbe_fc_enable_82598,
1223 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1224 .release_swfw_sync = &ixgbe_release_swfw_sync,
1189}; 1225};
1190 1226
1191static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1227static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
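
The mac_ops table is the dispatch point this patch leans on: by adding acquire_swfw_sync/release_swfw_sync entries, callers stop naming ixgbe_acquire_swfw_sync() directly and each MAC generation can bind its own implementation. A typical call site looks roughly like this (the semaphore id is illustrative):

        s32 err;

        /* Dispatch through the per-MAC ops table */
        err = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
        if (err == 0) {
                /* ... touch shared PHY registers ... */
                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
        }
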
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a21f5817685..00aeba385a2 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -112,7 +112,8 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
112 goto setup_sfp_out; 112 goto setup_sfp_out;
113 113
114 /* PHY config will finish before releasing the semaphore */ 114 /* PHY config will finish before releasing the semaphore */
115 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 115 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
116 IXGBE_GSSR_MAC_CSR_SM);
116 if (ret_val != 0) { 117 if (ret_val != 0) {
117 ret_val = IXGBE_ERR_SWFW_SYNC; 118 ret_val = IXGBE_ERR_SWFW_SYNC;
118 goto setup_sfp_out; 119 goto setup_sfp_out;
@@ -329,11 +330,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
329 enum ixgbe_media_type media_type; 330 enum ixgbe_media_type media_type;
330 331
331 /* Detect if there is a copper PHY attached. */ 332 /* Detect if there is a copper PHY attached. */
332 if (hw->phy.type == ixgbe_phy_cu_unknown || 333 switch (hw->phy.type) {
333 hw->phy.type == ixgbe_phy_tn || 334 case ixgbe_phy_cu_unknown:
334 hw->phy.type == ixgbe_phy_aq) { 335 case ixgbe_phy_tn:
336 case ixgbe_phy_aq:
335 media_type = ixgbe_media_type_copper; 337 media_type = ixgbe_media_type_copper;
336 goto out; 338 goto out;
339 default:
340 break;
337 } 341 }
338 342
339 switch (hw->device_id) { 343 switch (hw->device_id) {
@@ -354,6 +358,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
354 case IXGBE_DEV_ID_82599_CX4: 358 case IXGBE_DEV_ID_82599_CX4:
355 media_type = ixgbe_media_type_cx4; 359 media_type = ixgbe_media_type_cx4;
356 break; 360 break;
361 case IXGBE_DEV_ID_82599_T3_LOM:
362 media_type = ixgbe_media_type_copper;
363 break;
357 default: 364 default:
358 media_type = ixgbe_media_type_unknown; 365 media_type = ixgbe_media_type_unknown;
359 break; 366 break;
@@ -411,14 +418,14 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
411 return status; 418 return status;
412} 419}
413 420
414 /** 421/**
415 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 422 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
416 * @hw: pointer to hardware structure 423 * @hw: pointer to hardware structure
417 * 424 *
418 * The base drivers may require better control over SFP+ module 425 * The base drivers may require better control over SFP+ module
419 * PHY states. This includes selectively shutting down the Tx 426 * PHY states. This includes selectively shutting down the Tx
420 * laser on the PHY, effectively halting physical link. 427 * laser on the PHY, effectively halting physical link.
421 **/ 428 **/
422static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 429static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
423{ 430{
424 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 431 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -463,8 +470,6 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
463 **/ 470 **/
464static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 471static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
465{ 472{
466 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
467
468 if (hw->mac.autotry_restart) { 473 if (hw->mac.autotry_restart) {
469 ixgbe_disable_tx_laser_multispeed_fiber(hw); 474 ixgbe_disable_tx_laser_multispeed_fiber(hw);
470 ixgbe_enable_tx_laser_multispeed_fiber(hw); 475 ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -487,17 +492,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
487 bool autoneg_wait_to_complete) 492 bool autoneg_wait_to_complete)
488{ 493{
489 s32 status = 0; 494 s32 status = 0;
490 ixgbe_link_speed phy_link_speed; 495 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
491 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 496 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
492 u32 speedcnt = 0; 497 u32 speedcnt = 0;
493 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 498 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
499 u32 i = 0;
494 bool link_up = false; 500 bool link_up = false;
495 bool negotiation; 501 bool negotiation;
496 int i;
497 502
498 /* Mask off requested but non-supported speeds */ 503 /* Mask off requested but non-supported speeds */
499 hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation); 504 status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
500 speed &= phy_link_speed; 505 &negotiation);
506 if (status != 0)
507 return status;
508
509 speed &= link_speed;
501 510
502 /* 511 /*
503 * Try each speed one by one, highest priority first. We do this in 512 * Try each speed one by one, highest priority first. We do this in
@@ -508,9 +517,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
508 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 517 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
509 518
510 /* If we already have link at this speed, just jump out */ 519 /* If we already have link at this speed, just jump out */
511 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 520 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
521 false);
522 if (status != 0)
523 return status;
512 524
513 if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) 525 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
514 goto out; 526 goto out;
515 527
516 /* Set the module link speed */ 528 /* Set the module link speed */
@@ -522,9 +534,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
522 msleep(40); 534 msleep(40);
523 535
524 status = ixgbe_setup_mac_link_82599(hw, 536 status = ixgbe_setup_mac_link_82599(hw,
525 IXGBE_LINK_SPEED_10GB_FULL, 537 IXGBE_LINK_SPEED_10GB_FULL,
526 autoneg, 538 autoneg,
527 autoneg_wait_to_complete); 539 autoneg_wait_to_complete);
528 if (status != 0) 540 if (status != 0)
529 return status; 541 return status;
530 542
@@ -536,14 +548,16 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
536 * Section 73.10.2, we may have to wait up to 500ms if KR is 548 * Section 73.10.2, we may have to wait up to 500ms if KR is
537 * attempted. 82599 uses the same timing for 10g SFI. 549 * attempted. 82599 uses the same timing for 10g SFI.
538 */ 550 */
539
540 for (i = 0; i < 5; i++) { 551 for (i = 0; i < 5; i++) {
541 /* Wait for the link partner to also set speed */ 552 /* Wait for the link partner to also set speed */
542 msleep(100); 553 msleep(100);
543 554
544 /* If we have link, just jump out */ 555 /* If we have link, just jump out */
545 hw->mac.ops.check_link(hw, &phy_link_speed, 556 status = hw->mac.ops.check_link(hw, &link_speed,
546 &link_up, false); 557 &link_up, false);
558 if (status != 0)
559 return status;
560
547 if (link_up) 561 if (link_up)
548 goto out; 562 goto out;
549 } 563 }
@@ -555,9 +569,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
555 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 569 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
556 570
557 /* If we already have link at this speed, just jump out */ 571 /* If we already have link at this speed, just jump out */
558 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 572 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
573 false);
574 if (status != 0)
575 return status;
559 576
560 if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) 577 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
561 goto out; 578 goto out;
562 579
563 /* Set the module link speed */ 580 /* Set the module link speed */
@@ -570,9 +587,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
570 msleep(40); 587 msleep(40);
571 588
572 status = ixgbe_setup_mac_link_82599(hw, 589 status = ixgbe_setup_mac_link_82599(hw,
573 IXGBE_LINK_SPEED_1GB_FULL, 590 IXGBE_LINK_SPEED_1GB_FULL,
574 autoneg, 591 autoneg,
575 autoneg_wait_to_complete); 592 autoneg_wait_to_complete);
576 if (status != 0) 593 if (status != 0)
577 return status; 594 return status;
578 595
@@ -583,7 +600,11 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
583 msleep(100); 600 msleep(100);
584 601
585 /* If we have link, just jump out */ 602 /* If we have link, just jump out */
586 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 603 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
604 false);
605 if (status != 0)
606 return status;
607
587 if (link_up) 608 if (link_up)
588 goto out; 609 goto out;
589 } 610 }
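
The repeated 5 x 100 ms polls that this hunk instruments with status checks implement the up-to-500 ms wait that IEEE 802.3 clause 73 allows for KR autoneg (82599 reuses the timing for 10g SFI). The core loop, with the new error propagation:

        /* Poll for link up to 500 ms, failing fast on a check_link error */
        for (i = 0; i < 5; i++) {
                msleep(100);    /* wait for the link partner to set speed */
                status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
                                                false);
                if (status != 0)
                        return status;
                if (link_up)
                        break;
        }
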
@@ -626,13 +647,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
626 bool autoneg_wait_to_complete) 647 bool autoneg_wait_to_complete)
627{ 648{
628 s32 status = 0; 649 s32 status = 0;
629 ixgbe_link_speed link_speed; 650 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
630 s32 i, j; 651 s32 i, j;
631 bool link_up = false; 652 bool link_up = false;
632 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 653 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
633 struct ixgbe_adapter *adapter = hw->back;
634
635 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
636 654
637 /* Set autoneg_advertised value based on input link speed */ 655 /* Set autoneg_advertised value based on input link speed */
638 hw->phy.autoneg_advertised = 0; 656 hw->phy.autoneg_advertised = 0;
@@ -658,7 +676,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
658 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { 676 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
659 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 677 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
660 autoneg_wait_to_complete); 678 autoneg_wait_to_complete);
661 if (status) 679 if (status != 0)
662 goto out; 680 goto out;
663 681
664 /* 682 /*
@@ -671,8 +689,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
671 mdelay(100); 689 mdelay(100);
672 690
673 /* If we have link, just jump out */ 691 /* If we have link, just jump out */
674 hw->mac.ops.check_link(hw, &link_speed, 692 status = hw->mac.ops.check_link(hw, &link_speed,
675 &link_up, false); 693 &link_up, false);
694 if (status != 0)
695 goto out;
696
676 if (link_up) 697 if (link_up)
677 goto out; 698 goto out;
678 } 699 }
@@ -690,7 +711,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
690 hw->phy.smart_speed_active = true; 711 hw->phy.smart_speed_active = true;
691 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 712 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
692 autoneg_wait_to_complete); 713 autoneg_wait_to_complete);
693 if (status) 714 if (status != 0)
694 goto out; 715 goto out;
695 716
696 /* 717 /*
@@ -703,8 +724,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
703 mdelay(100); 724 mdelay(100);
704 725
705 /* If we have link, just jump out */ 726 /* If we have link, just jump out */
706 hw->mac.ops.check_link(hw, &link_speed, 727 status = hw->mac.ops.check_link(hw, &link_speed,
707 &link_up, false); 728 &link_up, false);
729 if (status != 0)
730 goto out;
731
708 if (link_up) 732 if (link_up)
709 goto out; 733 goto out;
710 } 734 }
@@ -716,7 +740,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
716 740
717out: 741out:
718 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 742 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
719 e_info(hw, "Smartspeed has downgraded the link speed from " 743 hw_dbg(hw, "Smartspeed has downgraded the link speed from "
720 "the maximum advertised\n"); 744 "the maximum advertised\n");
721 return status; 745 return status;
722} 746}
@@ -748,6 +772,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
748 772
749 /* Check to see if speed passed in is supported. */ 773 /* Check to see if speed passed in is supported. */
750 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); 774 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
775 if (status != 0)
776 goto out;
777
751 speed &= link_capabilities; 778 speed &= link_capabilities;
752 779
753 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 780 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
@@ -761,7 +788,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
761 else 788 else
762 orig_autoc = autoc; 789 orig_autoc = autoc;
763 790
764
765 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 791 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
766 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 792 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
767 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 793 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
@@ -878,7 +904,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
878 904
879 /* PHY ops must be identified and initialized prior to reset */ 905 /* PHY ops must be identified and initialized prior to reset */
880 906
881 /* Init PHY and function pointers, perform SFP setup */ 907 /* Identify PHY and related function pointers */
882 status = hw->phy.ops.init(hw); 908 status = hw->phy.ops.init(hw);
883 909
884 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 910 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -890,6 +916,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
890 hw->phy.sfp_setup_needed = false; 916 hw->phy.sfp_setup_needed = false;
891 } 917 }
892 918
919 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
920 goto reset_hw_out;
921
893 /* Reset PHY */ 922 /* Reset PHY */
894 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 923 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
895 hw->phy.ops.reset(hw); 924 hw->phy.ops.reset(hw);
@@ -898,12 +927,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
898 * Prevent the PCI-E bus from hanging by disabling PCI-E master 927 * Prevent the PCI-E bus from hanging by disabling PCI-E master
899 * access and verify no pending requests before reset 928 * access and verify no pending requests before reset
900 */ 929 */
901 status = ixgbe_disable_pcie_master(hw); 930 ixgbe_disable_pcie_master(hw);
902 if (status != 0) {
903 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
904 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
905 }
906 931
932mac_reset_top:
907 /* 933 /*
908 * Issue global reset to the MAC. This needs to be a SW reset. 934 * Issue global reset to the MAC. This needs to be a SW reset.
909 * If link reset is used, it might reset the MAC when mng is using it 935 * If link reset is used, it might reset the MAC when mng is using it
@@ -924,6 +950,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
924 hw_dbg(hw, "Reset polling failed to complete.\n"); 950 hw_dbg(hw, "Reset polling failed to complete.\n");
925 } 951 }
926 952
953 /*
954 * Double resets are required for recovery from certain error
955 * conditions. Between resets, it is necessary to stall to allow time
956 * for any pending HW events to complete. We use 1usec since that is
957 * what is needed for ixgbe_disable_pcie_master(). The second reset
958 * then clears out any effects of those events.
959 */
960 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
961 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
962 udelay(1);
963 goto mac_reset_top;
964 }
965
927 msleep(50); 966 msleep(50);
928 967
929 /* 968 /*
@@ -951,6 +990,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
951 } 990 }
952 } 991 }
953 992
993 /* Store the permanent mac address */
994 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
995
954 /* 996 /*
955 * Store MAC address from RAR0, clear receive address registers, and 997 * Store MAC address from RAR0, clear receive address registers, and
956 * clear the multicast table. Also reset num_rar_entries to 128, 998 * clear the multicast table. Also reset num_rar_entries to 128,
@@ -959,9 +1001,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
959 hw->mac.num_rar_entries = 128; 1001 hw->mac.num_rar_entries = 128;
960 hw->mac.ops.init_rx_addrs(hw); 1002 hw->mac.ops.init_rx_addrs(hw);
961 1003
962 /* Store the permanent mac address */
963 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
964
965 /* Store the permanent SAN mac address */ 1004 /* Store the permanent SAN mac address */
966 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 1005 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
967 1006
@@ -1733,13 +1772,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1733 * @hw: pointer to hardware structure 1772 * @hw: pointer to hardware structure
1734 * 1773 *
1735 * Determines the physical layer module found on the current adapter. 1774 * Determines the physical layer module found on the current adapter.
1775 * If PHY already detected, maintains current PHY type in hw struct,
1776 * otherwise executes the PHY detection routine.
1736 **/ 1777 **/
1737static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1778s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1738{ 1779{
1739 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1780 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1781
1782 /* Detect PHY if not unknown - returns success if already detected. */
1740 status = ixgbe_identify_phy_generic(hw); 1783 status = ixgbe_identify_phy_generic(hw);
1741 if (status != 0) 1784 if (status != 0) {
1742 status = ixgbe_identify_sfp_module_generic(hw); 1785 /* 82599 10GBASE-T requires an external PHY */
1786 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1787 goto out;
1788 else
1789 status = ixgbe_identify_sfp_module_generic(hw);
1790 }
1791
1792 /* Set PHY type none if no PHY detected */
1793 if (hw->phy.type == ixgbe_phy_unknown) {
1794 hw->phy.type = ixgbe_phy_none;
1795 status = 0;
1796 }
1797
1798 /* Return error if SFP module has been detected but is not supported */
1799 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1800 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1801
1802out:
1743 return status; 1803 return status;
1744} 1804}
1745 1805
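
The rewritten identify routine is a three-step fallback: generic MDIO probe, then (for non-copper media) SFP probe, then a downgrade of "unknown" to "none" so a missing PHY is not an error while an unsupported SFP still is. Condensed:

        status = ixgbe_identify_phy_generic(hw);        /* 1: MDIO probe */
        if (status != 0) {
                /* copper (10GBASE-T) needs the external PHY: keep error */
                if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
                        return status;
                status = ixgbe_identify_sfp_module_generic(hw); /* 2: SFP */
        }
        if (hw->phy.type == ixgbe_phy_unknown) {        /* 3: no PHY is OK */
                hw->phy.type = ixgbe_phy_none;
                status = 0;
        }
        if (hw->phy.type == ixgbe_phy_sfp_unsupported)  /* a bad SFP is not */
                status = IXGBE_ERR_SFP_NOT_SUPPORTED;
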
@@ -1763,11 +1823,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1763 1823
1764 hw->phy.ops.identify(hw); 1824 hw->phy.ops.identify(hw);
1765 1825
1766 if (hw->phy.type == ixgbe_phy_tn || 1826 switch (hw->phy.type) {
1767 hw->phy.type == ixgbe_phy_aq || 1827 case ixgbe_phy_tn:
1768 hw->phy.type == ixgbe_phy_cu_unknown) { 1828 case ixgbe_phy_aq:
1829 case ixgbe_phy_cu_unknown:
1769 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1830 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
1770 &ext_ability); 1831 &ext_ability);
1771 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1832 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
1772 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1833 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1773 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1834 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1775,6 +1836,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1775 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1836 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
1776 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1837 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1777 goto out; 1838 goto out;
1839 default:
1840 break;
1778 } 1841 }
1779 1842
1780 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1843 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1886,6 +1949,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
1886 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 1949 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
1887 break; 1950 break;
1888 else 1951 else
1952 /* udelay() busy-waits, so it is safe in atomic context */
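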
1889 udelay(10); 1953 udelay(10);
1890 } 1954 }
1891 1955
@@ -1995,7 +2059,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
1995 .set_vmdq = &ixgbe_set_vmdq_generic, 2059 .set_vmdq = &ixgbe_set_vmdq_generic,
1996 .clear_vmdq = &ixgbe_clear_vmdq_generic, 2060 .clear_vmdq = &ixgbe_clear_vmdq_generic,
1997 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 2061 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1998 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
1999 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 2062 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
2000 .enable_mc = &ixgbe_enable_mc_generic, 2063 .enable_mc = &ixgbe_enable_mc_generic,
2001 .disable_mc = &ixgbe_disable_mc_generic, 2064 .disable_mc = &ixgbe_disable_mc_generic,
@@ -2006,31 +2069,34 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2006 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2069 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
2007 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 2070 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
2008 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2071 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2072 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2073 .release_swfw_sync = &ixgbe_release_swfw_sync,
2074
2009}; 2075};
2010 2076
2011static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2077static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2012 .init_params = &ixgbe_init_eeprom_params_generic, 2078 .init_params = &ixgbe_init_eeprom_params_generic,
2013 .read = &ixgbe_read_eerd_generic, 2079 .read = &ixgbe_read_eerd_generic,
2014 .write = &ixgbe_write_eeprom_generic, 2080 .write = &ixgbe_write_eeprom_generic,
2015 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, 2081 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2016 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2082 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2017 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2083 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2018}; 2084};
2019 2085
2020static struct ixgbe_phy_operations phy_ops_82599 = { 2086static struct ixgbe_phy_operations phy_ops_82599 = {
2021 .identify = &ixgbe_identify_phy_82599, 2087 .identify = &ixgbe_identify_phy_82599,
2022 .identify_sfp = &ixgbe_identify_sfp_module_generic, 2088 .identify_sfp = &ixgbe_identify_sfp_module_generic,
2023 .init = &ixgbe_init_phy_ops_82599, 2089 .init = &ixgbe_init_phy_ops_82599,
2024 .reset = &ixgbe_reset_phy_generic, 2090 .reset = &ixgbe_reset_phy_generic,
2025 .read_reg = &ixgbe_read_phy_reg_generic, 2091 .read_reg = &ixgbe_read_phy_reg_generic,
2026 .write_reg = &ixgbe_write_phy_reg_generic, 2092 .write_reg = &ixgbe_write_phy_reg_generic,
2027 .setup_link = &ixgbe_setup_phy_link_generic, 2093 .setup_link = &ixgbe_setup_phy_link_generic,
2028 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 2094 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
2029 .read_i2c_byte = &ixgbe_read_i2c_byte_generic, 2095 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
2030 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2096 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
2031 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2097 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
2032 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2098 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
2033 .check_overtemp = &ixgbe_tn_check_overtemp, 2099 .check_overtemp = &ixgbe_tn_check_overtemp,
2034}; 2100};
2035 2101
2036struct ixgbe_info ixgbe_82599_info = { 2102struct ixgbe_info ixgbe_82599_info = {
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2df3e4..bcd952916eb 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -46,10 +46,13 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48 48
49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 50static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
51static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
52static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
53static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
53static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
54 57
55/** 58/**
@@ -139,17 +142,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
139 IXGBE_READ_REG(hw, IXGBE_MRFC); 142 IXGBE_READ_REG(hw, IXGBE_MRFC);
140 IXGBE_READ_REG(hw, IXGBE_RLEC); 143 IXGBE_READ_REG(hw, IXGBE_RLEC);
141 IXGBE_READ_REG(hw, IXGBE_LXONTXC); 144 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
142 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
143 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 145 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
144 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 146 if (hw->mac.type >= ixgbe_mac_82599EB) {
147 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
148 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
149 } else {
150 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
151 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
152 }
145 153
146 for (i = 0; i < 8; i++) { 154 for (i = 0; i < 8; i++) {
147 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 155 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
148 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
149 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 156 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
150 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 157 if (hw->mac.type >= ixgbe_mac_82599EB) {
158 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
159 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
160 } else {
161 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
162 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
163 }
151 } 164 }
152 165 if (hw->mac.type >= ixgbe_mac_82599EB)
166 for (i = 0; i < 8; i++)
167 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
153 IXGBE_READ_REG(hw, IXGBE_PRC64); 168 IXGBE_READ_REG(hw, IXGBE_PRC64);
154 IXGBE_READ_REG(hw, IXGBE_PRC127); 169 IXGBE_READ_REG(hw, IXGBE_PRC127);
155 IXGBE_READ_REG(hw, IXGBE_PRC255); 170 IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -187,9 +202,26 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
187 IXGBE_READ_REG(hw, IXGBE_BPTC); 202 IXGBE_READ_REG(hw, IXGBE_BPTC);
188 for (i = 0; i < 16; i++) { 203 for (i = 0; i < 16; i++) {
189 IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 204 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
190 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
191 IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 205 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
192 IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 206 if (hw->mac.type >= ixgbe_mac_82599EB) {
207 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
208 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
209 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
210 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
211 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
212 } else {
213 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
214 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
215 }
216 }
217
218 if (hw->mac.type == ixgbe_mac_X540) {
219 if (hw->phy.id == 0)
220 hw->phy.ops.identify(hw);
221 hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
222 hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
223 hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
224 hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
193 } 225 }
194 226
195 return 0; 227 return 0;
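
These statistics registers are clear-on-read: the discarded IXGBE_READ_REG() calls exist only to zero the counters, and the new 82599/X540 branches are needed because several Rx counters moved or widened on newer MACs (PXONRXC became PXONRXCNT, QBRC split into _L/_H halves). The idiom in isolation:

        /* Reading a clear-on-read counter zeroes it; the value is dropped */
        if (hw->mac.type >= ixgbe_mac_82599EB)
                IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);    /* 82599/X540 offset */
        else
                IXGBE_READ_REG(hw, IXGBE_LXONRXC);      /* 82598 offset */
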
@@ -454,8 +486,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
454 * Prevent the PCI-E bus from hanging by disabling PCI-E master 486 * Prevent the PCI-E bus from hanging by disabling PCI-E master
455 * access and verify no pending requests 487 * access and verify no pending requests
456 */ 488 */
457 if (ixgbe_disable_pcie_master(hw) != 0) 489 ixgbe_disable_pcie_master(hw);
458 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
459 490
460 return 0; 491 return 0;
461} 492}
@@ -603,7 +634,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
603 ixgbe_shift_out_eeprom_bits(hw, data, 16); 634 ixgbe_shift_out_eeprom_bits(hw, data, 16);
604 ixgbe_standby_eeprom(hw); 635 ixgbe_standby_eeprom(hw);
605 636
606 msleep(hw->eeprom.semaphore_delay);
607 /* Done with writing - release the EEPROM */ 637 /* Done with writing - release the EEPROM */
608 ixgbe_release_eeprom(hw); 638 ixgbe_release_eeprom(hw);
609 } 639 }
@@ -747,10 +777,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
747static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 777static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
748{ 778{
749 s32 status = 0; 779 s32 status = 0;
750 u32 eec = 0; 780 u32 eec;
751 u32 i; 781 u32 i;
752 782
753 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) 783 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
754 status = IXGBE_ERR_SWFW_SYNC; 784 status = IXGBE_ERR_SWFW_SYNC;
755 785
756 if (status == 0) { 786 if (status == 0) {
@@ -773,18 +803,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
773 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 803 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
774 hw_dbg(hw, "Could not acquire EEPROM grant\n"); 804 hw_dbg(hw, "Could not acquire EEPROM grant\n");
775 805
776 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 806 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
777 status = IXGBE_ERR_EEPROM; 807 status = IXGBE_ERR_EEPROM;
778 } 808 }
779 }
780 809
781 /* Setup EEPROM for Read/Write */ 810 /* Setup EEPROM for Read/Write */
782 if (status == 0) { 811 if (status == 0) {
783 /* Clear CS and SK */ 812 /* Clear CS and SK */
784 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 813 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
785 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 814 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
786 IXGBE_WRITE_FLUSH(hw); 815 IXGBE_WRITE_FLUSH(hw);
787 udelay(1); 816 udelay(1);
817 }
788 } 818 }
789 return status; 819 return status;
790} 820}
@@ -798,13 +828,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
798static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 828static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
799{ 829{
800 s32 status = IXGBE_ERR_EEPROM; 830 s32 status = IXGBE_ERR_EEPROM;
801 u32 timeout; 831 u32 timeout = 2000;
802 u32 i; 832 u32 i;
803 u32 swsm; 833 u32 swsm;
804 834
805 /* Set timeout value based on size of EEPROM */
806 timeout = hw->eeprom.word_size + 1;
807
808 /* Get SMBI software semaphore between device drivers first */ 835 /* Get SMBI software semaphore between device drivers first */
809 for (i = 0; i < timeout; i++) { 836 for (i = 0; i < timeout; i++) {
810 /* 837 /*
@@ -816,7 +843,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
816 status = 0; 843 status = 0;
817 break; 844 break;
818 } 845 }
819 msleep(1); 846 udelay(50);
820 } 847 }
821 848
822 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 849 /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -844,11 +871,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
844 * was not granted because we don't have access to the EEPROM 871 * was not granted because we don't have access to the EEPROM
845 */ 872 */
846 if (i >= timeout) { 873 if (i >= timeout) {
847 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 874 hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
848 "not granted.\n"); 875 "not granted.\n");
849 ixgbe_release_eeprom_semaphore(hw); 876 ixgbe_release_eeprom_semaphore(hw);
850 status = IXGBE_ERR_EEPROM; 877 status = IXGBE_ERR_EEPROM;
851 } 878 }
879 } else {
880 hw_dbg(hw, "Software semaphore SMBI between device drivers "
881 "not granted.\n");
852 } 882 }
853 883
854 return status; 884 return status;
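
EEPROM arbitration is two nested semaphores: SMBI arbitrates between device drivers/functions, then SWESMBI between software and firmware. The first stage now polls a fixed count rather than scaling the timeout with EEPROM size; its pattern:

        /* Stage 1: poll SMBI; 2000 iterations x 50 us is ~100 ms worst case */
        for (i = 0; i < timeout; i++) {
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
                if (!(swsm & IXGBE_SWSM_SMBI)) {
                        status = 0;     /* semaphore granted */
                        break;
                }
                udelay(50);
        }
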
@@ -1080,11 +1110,14 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1080 eec &= ~IXGBE_EEC_REQ; 1110 eec &= ~IXGBE_EEC_REQ;
1081 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1111 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1082 1112
1083 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1113 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1114
1115 /* Delay before attempt to obtain semaphore again to allow FW access */
1116 msleep(hw->eeprom.semaphore_delay);
1084} 1117}
1085 1118
1086/** 1119/**
1087 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 1120 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1088 * @hw: pointer to hardware structure 1121 * @hw: pointer to hardware structure
1089 **/ 1122 **/
1090u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) 1123u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
@@ -1190,7 +1223,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1190 if (status == 0) { 1223 if (status == 0) {
1191 checksum = hw->eeprom.ops.calc_checksum(hw); 1224 checksum = hw->eeprom.ops.calc_checksum(hw);
1192 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1225 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1193 checksum); 1226 checksum);
1194 } else { 1227 } else {
1195 hw_dbg(hw, "EEPROM read failed\n"); 1228 hw_dbg(hw, "EEPROM read failed\n");
1196 } 1229 }
@@ -1238,37 +1271,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1238 u32 rar_low, rar_high; 1271 u32 rar_low, rar_high;
1239 u32 rar_entries = hw->mac.num_rar_entries; 1272 u32 rar_entries = hw->mac.num_rar_entries;
1240 1273
1274 /* Make sure we are using a valid rar index range */
1275 if (index >= rar_entries) {
1276 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1277 return IXGBE_ERR_INVALID_ARGUMENT;
1278 }
1279
1241 /* setup VMDq pool selection before this RAR gets enabled */ 1280 /* setup VMDq pool selection before this RAR gets enabled */
1242 hw->mac.ops.set_vmdq(hw, index, vmdq); 1281 hw->mac.ops.set_vmdq(hw, index, vmdq);
1243 1282
1244 /* Make sure we are using a valid rar index range */ 1283 /*
1245 if (index < rar_entries) { 1284 * HW expects these in little endian so we reverse the byte
1246 /* 1285 * order from network order (big endian) to little endian
1247 * HW expects these in little endian so we reverse the byte 1286 */
1248 * order from network order (big endian) to little endian 1287 rar_low = ((u32)addr[0] |
1249 */ 1288 ((u32)addr[1] << 8) |
1250 rar_low = ((u32)addr[0] | 1289 ((u32)addr[2] << 16) |
1251 ((u32)addr[1] << 8) | 1290 ((u32)addr[3] << 24));
1252 ((u32)addr[2] << 16) | 1291 /*
1253 ((u32)addr[3] << 24)); 1292 * Some parts put the VMDq setting in the extra RAH bits,
1254 /* 1293 * so save everything except the lower 16 bits that hold part
1255 * Some parts put the VMDq setting in the extra RAH bits, 1294 * of the address and the address valid bit.
1256 * so save everything except the lower 16 bits that hold part 1295 */
1257 * of the address and the address valid bit. 1296 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1258 */ 1297 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1259 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1298 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1260 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1261 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1262 1299
1263 if (enable_addr != 0) 1300 if (enable_addr != 0)
1264 rar_high |= IXGBE_RAH_AV; 1301 rar_high |= IXGBE_RAH_AV;
1265 1302
1266 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1303 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1267 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1304 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1268 } else {
1269 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1270 return IXGBE_ERR_RAR_INDEX;
1271 }
1272 1305
1273 return 0; 1306 return 0;
1274} 1307}
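
The RAL/RAH packing above is pure byte shuffling: the MAC address arrives in network (big-endian) order, the registers want little endian, and bytes 4-5 share RAH with the address-valid bit. A standalone check of the math, with an illustrative address and IXGBE_RAH_AV written out as 0x80000000:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* example address in network order */
        uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
        uint32_t rar_low, rar_high;

        rar_low = (uint32_t)addr[0] |
                  ((uint32_t)addr[1] << 8) |
                  ((uint32_t)addr[2] << 16) |
                  ((uint32_t)addr[3] << 24);
        rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
        rar_high |= 0x80000000u;                /* IXGBE_RAH_AV */

        /* Expect RAL=0x3c211b00 RAH=0x80005e4d */
        printf("RAL=0x%08x RAH=0x%08x\n", rar_low, rar_high);
        return 0;
}
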
@@ -1286,58 +1319,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1286 u32 rar_entries = hw->mac.num_rar_entries; 1319 u32 rar_entries = hw->mac.num_rar_entries;
1287 1320
1288 /* Make sure we are using a valid rar index range */ 1321 /* Make sure we are using a valid rar index range */
1289 if (index < rar_entries) { 1322 if (index >= rar_entries) {
1290 /*
1291 * Some parts put the VMDq setting in the extra RAH bits,
1292 * so save everything except the lower 16 bits that hold part
1293 * of the address and the address valid bit.
1294 */
1295 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1296 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1297
1298 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1299 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1300 } else {
1301 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1323 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1302 return IXGBE_ERR_RAR_INDEX; 1324 return IXGBE_ERR_INVALID_ARGUMENT;
1303 } 1325 }
1304 1326
1305 /* clear VMDq pool/queue selection for this RAR */ 1327 /*
1306 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1328 * Some parts put the VMDq setting in the extra RAH bits,
1307 1329 * so save everything except the lower 16 bits that hold part
1308 return 0; 1330 * of the address and the address valid bit.
1309} 1331 */
1310
1311/**
1312 * ixgbe_enable_rar - Enable Rx address register
1313 * @hw: pointer to hardware structure
1314 * @index: index into the RAR table
1315 *
1316 * Enables the select receive address register.
1317 **/
1318static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1319{
1320 u32 rar_high;
1321
1322 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1332 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1323 rar_high |= IXGBE_RAH_AV; 1333 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1334
1335 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1324 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1336 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1325}
1326 1337
1327/** 1338 /* clear VMDq pool/queue selection for this RAR */
1328 * ixgbe_disable_rar - Disable Rx address register 1339 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1329 * @hw: pointer to hardware structure
1330 * @index: index into the RAR table
1331 *
1332 * Disables the select receive address register.
1333 **/
1334static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1335{
1336 u32 rar_high;
1337 1340
1338 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1341 return 0;
1339 rar_high &= (~IXGBE_RAH_AV);
1340 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1341} 1342}
1342 1343
1343/** 1344/**
@@ -1370,6 +1371,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); 1371 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1371 1372
1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1373 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1374
1375 /* clear VMDq pool/queue selection for RAR 0 */
1376 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1373 } 1377 }
1374 hw->addr_ctrl.overflow_promisc = 0; 1378 hw->addr_ctrl.overflow_promisc = 0;
1375 1379
@@ -1383,7 +1387,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1383 } 1387 }
1384 1388
1385 /* Clear the MTA */ 1389 /* Clear the MTA */
1386 hw->addr_ctrl.mc_addr_in_rar_count = 0;
1387 hw->addr_ctrl.mta_in_use = 0; 1390 hw->addr_ctrl.mta_in_use = 0;
1388 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1391 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1389 1392
@@ -1398,105 +1401,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1398} 1401}
1399 1402
1400/** 1403/**
1401 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1402 * @hw: pointer to hardware structure
1403 * @addr: new address
1404 *
1405 * Adds it to unused receive address register or goes into promiscuous mode.
1406 **/
1407static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1408{
1409 u32 rar_entries = hw->mac.num_rar_entries;
1410 u32 rar;
1411
1412 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1413 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1414
1415 /*
1416 * Place this address in the RAR if there is room,
1417 * else put the controller into promiscuous mode
1418 */
1419 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1420 rar = hw->addr_ctrl.rar_used_count -
1421 hw->addr_ctrl.mc_addr_in_rar_count;
1422 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1423 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1424 hw->addr_ctrl.rar_used_count++;
1425 } else {
1426 hw->addr_ctrl.overflow_promisc++;
1427 }
1428
1429 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1430}
1431
1432/**
1433 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1434 * @hw: pointer to hardware structure
1435 * @netdev: pointer to net device structure
1436 *
1437 * The given list replaces any existing list. Clears the secondary addrs from
1438 * receive address registers. Uses unused receive address registers for the
1439 * first secondary addresses, and falls back to promiscuous mode as needed.
1440 *
1441 * Drivers using secondary unicast addresses must set user_set_promisc when
1442 * manually putting the device into promiscuous mode.
1443 **/
1444s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1445 struct net_device *netdev)
1446{
1447 u32 i;
1448 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1449 u32 uc_addr_in_use;
1450 u32 fctrl;
1451 struct netdev_hw_addr *ha;
1452
1453 /*
1454 * Clear accounting of old secondary address list,
1455 * don't count RAR[0]
1456 */
1457 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1458 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1459 hw->addr_ctrl.overflow_promisc = 0;
1460
1461 /* Zero out the other receive addresses */
1462 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
1463 for (i = 0; i < uc_addr_in_use; i++) {
1464 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1465 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1466 }
1467
1468 /* Add the new addresses */
1469 netdev_for_each_uc_addr(ha, netdev) {
1470 hw_dbg(hw, " Adding the secondary addresses:\n");
1471 ixgbe_add_uc_addr(hw, ha->addr, 0);
1472 }
1473
1474 if (hw->addr_ctrl.overflow_promisc) {
1475 /* enable promisc if not already in overflow or set by user */
1476 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1477 hw_dbg(hw, " Entering address overflow promisc mode\n");
1478 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1479 fctrl |= IXGBE_FCTRL_UPE;
1480 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1481 hw->addr_ctrl.uc_set_promisc = true;
1482 }
1483 } else {
1484 /* only disable if set by overflow, not by user */
1485 if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
1486 !(hw->addr_ctrl.user_set_promisc)) {
1487 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1488 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1489 fctrl &= ~IXGBE_FCTRL_UPE;
1490 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1491 hw->addr_ctrl.uc_set_promisc = false;
1492 }
1493 }
1494
1495 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
1496 return 0;
1497}
1498
1499/**
1500 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 1404 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1501 * @hw: pointer to hardware structure 1405 * @hw: pointer to hardware structure
1502 * @mc_addr: the multicast address 1406 * @mc_addr: the multicast address
@@ -1547,7 +1451,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1547 u32 vector; 1451 u32 vector;
1548 u32 vector_bit; 1452 u32 vector_bit;
1549 u32 vector_reg; 1453 u32 vector_reg;
1550 u32 mta_reg;
1551 1454
1552 hw->addr_ctrl.mta_in_use++; 1455 hw->addr_ctrl.mta_in_use++;
1553 1456
@@ -1565,9 +1468,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1565 */ 1468 */
1566 vector_reg = (vector >> 5) & 0x7F; 1469 vector_reg = (vector >> 5) & 0x7F;
1567 vector_bit = vector & 0x1F; 1470 vector_bit = vector & 0x1F;
1568 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); 1471 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1569 mta_reg |= (1 << vector_bit);
1570 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
1571} 1472}
1572 1473
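
ixgbe_mta_vector() (unchanged by this patch) yields a 12-bit vector from the multicast address; the split above selects one of 128 32-bit MTA words with bits 11:5 and a bit within that word with bits 4:0. The shadow copy lets the driver batch all words into one register sweep instead of read-modify-writing live MTA registers. A quick standalone check, using an assumed vector value of 0x96a:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vector = 0x96a;        /* illustrative 12-bit hash value */
        uint32_t vector_reg = (vector >> 5) & 0x7F;     /* which MTA word  */
        uint32_t vector_bit = vector & 0x1F;            /* which bit in it */

        /* 0x96a >> 5 = 75, 0x96a & 0x1f = 10 */
        printf("mta_shadow[%u] |= 1 << %u\n", vector_reg, vector_bit);
        return 0;
}
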
1573/** 1474/**
@@ -1593,18 +1494,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1593 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); 1494 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1594 hw->addr_ctrl.mta_in_use = 0; 1495 hw->addr_ctrl.mta_in_use = 0;
1595 1496
1596 /* Clear the MTA */ 1497 /* Clear mta_shadow */
1597 hw_dbg(hw, " Clearing MTA\n"); 1498 hw_dbg(hw, " Clearing MTA\n");
1598 for (i = 0; i < hw->mac.mcft_size; i++) 1499 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
1599 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1600 1500
1601 /* Add the new addresses */ 1501 /* Update mta shadow */
1602 netdev_for_each_mc_addr(ha, netdev) { 1502 netdev_for_each_mc_addr(ha, netdev) {
1603 hw_dbg(hw, " Adding the multicast addresses:\n"); 1503 hw_dbg(hw, " Adding the multicast addresses:\n");
1604 ixgbe_set_mta(hw, ha->addr); 1504 ixgbe_set_mta(hw, ha->addr);
1605 } 1505 }
1606 1506
1607 /* Enable mta */ 1507 /* Enable mta */
1508 for (i = 0; i < hw->mac.mcft_size; i++)
1509 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
1510 hw->mac.mta_shadow[i]);
1511
1608 if (hw->addr_ctrl.mta_in_use > 0) 1512 if (hw->addr_ctrl.mta_in_use > 0)
1609 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1513 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1610 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1514 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1621,15 +1525,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1621 **/ 1525 **/
1622s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 1526s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1623{ 1527{
1624 u32 i;
1625 u32 rar_entries = hw->mac.num_rar_entries;
1626 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1528 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1627 1529
1628 if (a->mc_addr_in_rar_count > 0)
1629 for (i = (rar_entries - a->mc_addr_in_rar_count);
1630 i < rar_entries; i++)
1631 ixgbe_enable_rar(hw, i);
1632
1633 if (a->mta_in_use > 0) 1530 if (a->mta_in_use > 0)
1634 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 1531 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1635 hw->mac.mc_filter_type); 1532 hw->mac.mc_filter_type);
@@ -1645,15 +1542,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1645 **/ 1542 **/
1646s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 1543s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1647{ 1544{
1648 u32 i;
1649 u32 rar_entries = hw->mac.num_rar_entries;
1650 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1545 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1651 1546
1652 if (a->mc_addr_in_rar_count > 0)
1653 for (i = (rar_entries - a->mc_addr_in_rar_count);
1654 i < rar_entries; i++)
1655 ixgbe_disable_rar(hw, i);
1656
1657 if (a->mta_in_use > 0) 1547 if (a->mta_in_use > 0)
1658 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1548 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1659 1549
@@ -1682,7 +1572,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1682#endif /* CONFIG_DCB */ 1572#endif /* CONFIG_DCB */
1683 /* Negotiate the fc mode to use */ 1573 /* Negotiate the fc mode to use */
1684 ret_val = ixgbe_fc_autoneg(hw); 1574 ret_val = ixgbe_fc_autoneg(hw);
1685 if (ret_val) 1575 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1686 goto out; 1576 goto out;
1687 1577
1688 /* Disable any previous flow control settings */ 1578 /* Disable any previous flow control settings */
@@ -1700,7 +1590,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1700 * 2: Tx flow control is enabled (we can send pause frames but 1590 * 2: Tx flow control is enabled (we can send pause frames but
1701 * we do not support receiving pause frames). 1591 * we do not support receiving pause frames).
1702 * 3: Both Rx and Tx flow control (symmetric) are enabled. 1592 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1593#ifdef CONFIG_DCB
1703 * 4: Priority Flow Control is enabled. 1594 * 4: Priority Flow Control is enabled.
1595#endif
1704 * other: Invalid. 1596 * other: Invalid.
1705 */ 1597 */
1706 switch (hw->fc.current_mode) { 1598 switch (hw->fc.current_mode) {
@@ -1788,12 +1680,13 @@ out:
1788 **/ 1680 **/
1789s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 1681s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1790{ 1682{
1791 s32 ret_val = 0; 1683 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1792 ixgbe_link_speed speed; 1684 ixgbe_link_speed speed;
1793 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1794 u32 links2, anlp1_reg, autoc_reg, links;
1795 bool link_up; 1685 bool link_up;
1796 1686
1687 if (hw->fc.disable_fc_autoneg)
1688 goto out;
1689
1797 /* 1690 /*
1798 * AN should have completed when the cable was plugged in. 1691 * AN should have completed when the cable was plugged in.
1799 * Look for reasons to bail out. Bail out if: 1692 * Look for reasons to bail out. Bail out if:
@@ -1804,153 +1697,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1804 * So use link_up_wait_to_complete=false. 1697 * So use link_up_wait_to_complete=false.
1805 */ 1698 */
1806 hw->mac.ops.check_link(hw, &speed, &link_up, false); 1699 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1807 1700 if (!link_up) {
1808 if (hw->fc.disable_fc_autoneg || (!link_up)) { 1701 ret_val = IXGBE_ERR_FLOW_CONTROL;
1809 hw->fc.fc_was_autonegged = false;
1810 hw->fc.current_mode = hw->fc.requested_mode;
1811 goto out; 1702 goto out;
1812 } 1703 }
1813 1704
1814 /* 1705 switch (hw->phy.media_type) {
1815 * On backplane, bail out if 1706 /* Autoneg flow control on fiber adapters */
1816 * - backplane autoneg was not completed, or if 1707 case ixgbe_media_type_fiber:
1817 * - we are 82599 and link partner is not AN enabled 1708 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
1818 */ 1709 ret_val = ixgbe_fc_autoneg_fiber(hw);
1819 if (hw->phy.media_type == ixgbe_media_type_backplane) { 1710 break;
1820 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1821 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1822 hw->fc.fc_was_autonegged = false;
1823 hw->fc.current_mode = hw->fc.requested_mode;
1824 goto out;
1825 }
1826 1711
1827 if (hw->mac.type == ixgbe_mac_82599EB) { 1712 /* Autoneg flow control on backplane adapters */
1828 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 1713 case ixgbe_media_type_backplane:
1829 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 1714 ret_val = ixgbe_fc_autoneg_backplane(hw);
1830 hw->fc.fc_was_autonegged = false; 1715 break;
1831 hw->fc.current_mode = hw->fc.requested_mode; 1716
1832 goto out; 1717 /* Autoneg flow control on copper adapters */
1833 } 1718 case ixgbe_media_type_copper:
1834 } 1719 if (ixgbe_device_supports_autoneg_fc(hw) == 0)
1720 ret_val = ixgbe_fc_autoneg_copper(hw);
1721 break;
1722
1723 default:
1724 break;
1835 } 1725 }
1836 1726
1727out:
1728 if (ret_val == 0) {
1729 hw->fc.fc_was_autonegged = true;
1730 } else {
1731 hw->fc.fc_was_autonegged = false;
1732 hw->fc.current_mode = hw->fc.requested_mode;
1733 }
1734 return ret_val;
1735}
1736
1737/**
1738 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
1739 * @hw: pointer to hardware structure
1740 *
1741 * Enable flow control as autonegotiated on 1 gig fiber.
1742 **/
1743static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
1744{
1745 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1746 s32 ret_val;
1747
1837 /* 1748 /*
1838 * On multispeed fiber at 1g, bail out if 1749 * On multispeed fiber at 1g, bail out if
1839 * - link is up but AN did not complete, or if 1750 * - link is up but AN did not complete, or if
1840 * - link is up and AN completed but timed out 1751 * - link is up and AN completed but timed out
1841 */ 1752 */
1842 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) { 1753
1843 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 1754 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1844 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 1755 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1845 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 1756 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1846 hw->fc.fc_was_autonegged = false; 1757 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1847 hw->fc.current_mode = hw->fc.requested_mode; 1758 goto out;
1848 goto out;
1849 }
1850 } 1759 }
1851 1760
1761 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1762 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1763
1764 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
1765 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
1766 IXGBE_PCS1GANA_ASM_PAUSE,
1767 IXGBE_PCS1GANA_SYM_PAUSE,
1768 IXGBE_PCS1GANA_ASM_PAUSE);
1769
1770out:
1771 return ret_val;
1772}
1773
1774/**
 1775 * ixgbe_fc_autoneg_backplane - Enable flow control per IEEE clause 37
1776 * @hw: pointer to hardware structure
1777 *
1778 * Enable flow control according to IEEE clause 37.
1779 **/
1780static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
1781{
1782 u32 links2, anlp1_reg, autoc_reg, links;
1783 s32 ret_val;
1784
1852 /* 1785 /*
1853 * Bail out on 1786 * On backplane, bail out if
1854 * - copper or CX4 adapters 1787 * - backplane autoneg was not completed, or if
1855 * - fiber adapters running at 10gig 1788 * - we are 82599 and link partner is not AN enabled
1856 */ 1789 */
1857 if ((hw->phy.media_type == ixgbe_media_type_copper) || 1790 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1858 (hw->phy.media_type == ixgbe_media_type_cx4) || 1791 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1859 ((hw->phy.media_type == ixgbe_media_type_fiber) &&
1860 (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
1861 hw->fc.fc_was_autonegged = false; 1792 hw->fc.fc_was_autonegged = false;
1862 hw->fc.current_mode = hw->fc.requested_mode; 1793 hw->fc.current_mode = hw->fc.requested_mode;
1794 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1863 goto out; 1795 goto out;
1864 } 1796 }
1865 1797
1798 if (hw->mac.type == ixgbe_mac_82599EB) {
1799 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1800 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
1801 hw->fc.fc_was_autonegged = false;
1802 hw->fc.current_mode = hw->fc.requested_mode;
1803 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1804 goto out;
1805 }
1806 }
1866 /* 1807 /*
1867 * Read the AN advertisement and LP ability registers and resolve 1808 * Read the 10g AN autoc and LP ability registers and resolve
1868 * local flow control settings accordingly 1809 * local flow control settings accordingly
1869 */ 1810 */
1870 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 1811 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1871 (hw->phy.media_type != ixgbe_media_type_backplane)) { 1812 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1872 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1873 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1874 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1875 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1876 /*
1877 * Now we need to check if the user selected Rx ONLY
1878 * of pause frames. In this case, we had to advertise
1879 * FULL flow control because we could not advertise RX
1880 * ONLY. Hence, we must now check to see if we need to
1881 * turn OFF the TRANSMISSION of PAUSE frames.
1882 */
1883 if (hw->fc.requested_mode == ixgbe_fc_full) {
1884 hw->fc.current_mode = ixgbe_fc_full;
1885 hw_dbg(hw, "Flow Control = FULL.\n");
1886 } else {
1887 hw->fc.current_mode = ixgbe_fc_rx_pause;
1888 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1889 }
1890 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1891 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1892 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1893 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1894 hw->fc.current_mode = ixgbe_fc_tx_pause;
1895 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1896 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1897 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1898 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1899 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1900 hw->fc.current_mode = ixgbe_fc_rx_pause;
1901 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1902 } else {
1903 hw->fc.current_mode = ixgbe_fc_none;
1904 hw_dbg(hw, "Flow Control = NONE.\n");
1905 }
1906 }
1907 1813
1908 if (hw->phy.media_type == ixgbe_media_type_backplane) { 1814 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
1815 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
1816 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
1817
1818out:
1819 return ret_val;
1820}
1821
1822/**
 1823 * ixgbe_fc_autoneg_copper - Enable flow control per IEEE clause 37
1824 * @hw: pointer to hardware structure
1825 *
1826 * Enable flow control according to IEEE clause 37.
1827 **/
1828static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
1829{
1830 u16 technology_ability_reg = 0;
1831 u16 lp_technology_ability_reg = 0;
1832
1833 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
1834 MDIO_MMD_AN,
1835 &technology_ability_reg);
1836 hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
1837 MDIO_MMD_AN,
1838 &lp_technology_ability_reg);
1839
1840 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
1841 (u32)lp_technology_ability_reg,
1842 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
1843 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
1844}
1845
1846/**
1847 * ixgbe_negotiate_fc - Negotiate flow control
1848 * @hw: pointer to hardware structure
1849 * @adv_reg: flow control advertised settings
1850 * @lp_reg: link partner's flow control settings
1851 * @adv_sym: symmetric pause bit in advertisement
1852 * @adv_asm: asymmetric pause bit in advertisement
1853 * @lp_sym: symmetric pause bit in link partner advertisement
1854 * @lp_asm: asymmetric pause bit in link partner advertisement
1855 *
1856 * Find the intersection between advertised settings and link partner's
1857 * advertised settings
1858 **/
1859static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1860 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
1861{
1862 if ((!(adv_reg)) || (!(lp_reg)))
1863 return IXGBE_ERR_FC_NOT_NEGOTIATED;
1864
1865 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
1909 /* 1866 /*
1910 * Read the 10g AN autoc and LP ability registers and resolve 1867 * Now we need to check if the user selected Rx ONLY
1911 * local flow control settings accordingly 1868 * of pause frames. In this case, we had to advertise
1869 * FULL flow control because we could not advertise RX
1870 * ONLY. Hence, we must now check to see if we need to
1871 * turn OFF the TRANSMISSION of PAUSE frames.
1912 */ 1872 */
1913 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1873 if (hw->fc.requested_mode == ixgbe_fc_full) {
1914 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 1874 hw->fc.current_mode = ixgbe_fc_full;
1915 1875 hw_dbg(hw, "Flow Control = FULL.\n");
1916 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1917 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1918 /*
1919 * Now we need to check if the user selected Rx ONLY
1920 * of pause frames. In this case, we had to advertise
1921 * FULL flow control because we could not advertise RX
1922 * ONLY. Hence, we must now check to see if we need to
1923 * turn OFF the TRANSMISSION of PAUSE frames.
1924 */
1925 if (hw->fc.requested_mode == ixgbe_fc_full) {
1926 hw->fc.current_mode = ixgbe_fc_full;
1927 hw_dbg(hw, "Flow Control = FULL.\n");
1928 } else {
1929 hw->fc.current_mode = ixgbe_fc_rx_pause;
1930 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1931 }
1932 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1933 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1934 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1935 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1936 hw->fc.current_mode = ixgbe_fc_tx_pause;
1937 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1938 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1939 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1940 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1941 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1942 hw->fc.current_mode = ixgbe_fc_rx_pause;
1943 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1944 } else { 1876 } else {
1945 hw->fc.current_mode = ixgbe_fc_none; 1877 hw->fc.current_mode = ixgbe_fc_rx_pause;
1946 hw_dbg(hw, "Flow Control = NONE.\n"); 1878 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
1947 } 1879 }
1880 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
1881 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
1882 hw->fc.current_mode = ixgbe_fc_tx_pause;
1883 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1884 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
1885 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
1886 hw->fc.current_mode = ixgbe_fc_rx_pause;
1887 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1888 } else {
1889 hw->fc.current_mode = ixgbe_fc_none;
1890 hw_dbg(hw, "Flow Control = NONE.\n");
1948 } 1891 }
1949 /* Record that current_mode is the result of a successful autoneg */ 1892 return 0;
1950 hw->fc.fc_was_autonegged = true;
1951
1952out:
1953 return ret_val;
1954} 1893}
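
ixgbe_negotiate_fc() is the heart of this rework: a single copy of the IEEE 802.3 pause resolution table, parameterized by the symmetric/asymmetric bit masks of whichever register pair the media type uses (PCS1GANA/PCS1GANLP for fiber, AUTOC/ANLP1 for backplane, or the clause 45 AN advertisement registers for copper). The resolution logic can be exercised in isolation; in this hedged sketch the masks are arbitrary illustration values, not hardware bit positions:

#include <stdio.h>

#define ADV_SYM 0x1	/* illustrative mask, not a register value */
#define ADV_ASM 0x2

static const char *negotiate_fc(unsigned adv, unsigned lp, int requested_full)
{
	if (!adv || !lp)
		return "not negotiated";

	if ((adv & ADV_SYM) && (lp & ADV_SYM))
		/* Both ends advertised symmetric PAUSE; honor an Rx-only
		 * request by refusing to transmit PAUSE frames. */
		return requested_full ? "full" : "rx pause only";
	if (!(adv & ADV_SYM) && (adv & ADV_ASM) &&
	    (lp & ADV_SYM) && (lp & ADV_ASM))
		return "tx pause only";
	if ((adv & ADV_SYM) && (adv & ADV_ASM) &&
	    !(lp & ADV_SYM) && (lp & ADV_ASM))
		return "rx pause only";
	return "none";
}

int main(void)
{
	/* local advertises sym+asm, partner asm only -> rx pause only */
	printf("%s\n", negotiate_fc(ADV_SYM | ADV_ASM, ADV_ASM, 1));
	/* both advertise sym, user requested full -> full */
	printf("%s\n", negotiate_fc(ADV_SYM, ADV_SYM, 1));
	return 0;
}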
1955 1894
1956/** 1895/**
@@ -1962,7 +1901,8 @@ out:
1962static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 1901static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1963{ 1902{
1964 s32 ret_val = 0; 1903 s32 ret_val = 0;
1965 u32 reg; 1904 u32 reg = 0, reg_bp = 0;
1905 u16 reg_cu = 0;
1966 1906
1967#ifdef CONFIG_DCB 1907#ifdef CONFIG_DCB
1968 if (hw->fc.requested_mode == ixgbe_fc_pfc) { 1908 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1970,7 +1910,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1970 goto out; 1910 goto out;
1971 } 1911 }
1972 1912
1973#endif 1913#endif /* CONFIG_DCB */
1974 /* Validate the packetbuf configuration */ 1914 /* Validate the packetbuf configuration */
1975 if (packetbuf_num < 0 || packetbuf_num > 7) { 1915 if (packetbuf_num < 0 || packetbuf_num > 7) {
1976 hw_dbg(hw, "Invalid packet buffer number [%d], expected range " 1916 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
@@ -2008,11 +1948,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2008 hw->fc.requested_mode = ixgbe_fc_full; 1948 hw->fc.requested_mode = ixgbe_fc_full;
2009 1949
2010 /* 1950 /*
2011 * Set up the 1G flow control advertisement registers so the HW will be 1951 * Set up the 1G and 10G flow control advertisement registers so the
2012 * able to do fc autoneg once the cable is plugged in. If we end up 1952 * HW will be able to do fc autoneg once the cable is plugged in. If
2013 * using 10g instead, this is harmless. 1953 * we link at 10G, the 1G advertisement is harmless and vice versa.
2014 */ 1954 */
2015 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 1955
1956 switch (hw->phy.media_type) {
1957 case ixgbe_media_type_fiber:
1958 case ixgbe_media_type_backplane:
1959 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1960 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1961 break;
1962
1963 case ixgbe_media_type_copper:
1964 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
1965 MDIO_MMD_AN, &reg_cu);
1966 break;
1967
1968 default:
1969 ;
1970 }
2016 1971
2017 /* 1972 /*
2018 * The possible values of fc.requested_mode are: 1973 * The possible values of fc.requested_mode are:
@@ -2031,6 +1986,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2031 case ixgbe_fc_none: 1986 case ixgbe_fc_none:
2032 /* Flow control completely disabled by software override. */ 1987 /* Flow control completely disabled by software override. */
2033 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 1988 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1989 if (hw->phy.media_type == ixgbe_media_type_backplane)
1990 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
1991 IXGBE_AUTOC_ASM_PAUSE);
1992 else if (hw->phy.media_type == ixgbe_media_type_copper)
1993 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2034 break; 1994 break;
2035 case ixgbe_fc_rx_pause: 1995 case ixgbe_fc_rx_pause:
2036 /* 1996 /*
@@ -2042,6 +2002,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2042 * disable the adapter's ability to send PAUSE frames. 2002 * disable the adapter's ability to send PAUSE frames.
2043 */ 2003 */
2044 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2004 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2005 if (hw->phy.media_type == ixgbe_media_type_backplane)
2006 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2007 IXGBE_AUTOC_ASM_PAUSE);
2008 else if (hw->phy.media_type == ixgbe_media_type_copper)
2009 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2045 break; 2010 break;
2046 case ixgbe_fc_tx_pause: 2011 case ixgbe_fc_tx_pause:
2047 /* 2012 /*
@@ -2050,10 +2015,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2050 */ 2015 */
2051 reg |= (IXGBE_PCS1GANA_ASM_PAUSE); 2016 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2052 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); 2017 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2018 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2019 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2020 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2021 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
2022 reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2023 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2024 }
2053 break; 2025 break;
2054 case ixgbe_fc_full: 2026 case ixgbe_fc_full:
2055 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2027 /* Flow control (both Rx and Tx) is enabled by SW override. */
2056 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2028 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2029 if (hw->phy.media_type == ixgbe_media_type_backplane)
2030 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2031 IXGBE_AUTOC_ASM_PAUSE);
2032 else if (hw->phy.media_type == ixgbe_media_type_copper)
2033 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2057 break; 2034 break;
2058#ifdef CONFIG_DCB 2035#ifdef CONFIG_DCB
2059 case ixgbe_fc_pfc: 2036 case ixgbe_fc_pfc:
@@ -2067,80 +2044,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2067 break; 2044 break;
2068 } 2045 }
2069 2046
2070 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 2047 if (hw->mac.type != ixgbe_mac_X540) {
2071 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 2048 /*
2072 2049 * Enable auto-negotiation between the MAC & PHY;
2073 /* Disable AN timeout */ 2050 * the MAC will advertise clause 37 flow control.
2074 if (hw->fc.strict_ieee) 2051 */
2075 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 2052 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2053 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2076 2054
2077 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 2055 /* Disable AN timeout */
2078 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 2056 if (hw->fc.strict_ieee)
2057 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2079 2058
2080 /* 2059 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2081 * Set up the 10G flow control advertisement registers so the HW 2060 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2082 * can do fc autoneg once the cable is plugged in. If we end up 2061 }
2083 * using 1g instead, this is harmless.
2084 */
2085 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2086 2062
2087 /* 2063 /*
2088 * The possible values of fc.requested_mode are: 2064 * AUTOC restart handles negotiation of 1G and 10G on backplane
2089 * 0: Flow control is completely disabled 2065 * and copper. There is no need to set the PCS1GCTL register.
2090 * 1: Rx flow control is enabled (we can receive pause frames, 2066 *
2091 * but not send pause frames).
2092 * 2: Tx flow control is enabled (we can send pause frames but
2093 * we do not support receiving pause frames).
2094 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2095 * other: Invalid.
2096 */ 2067 */
2097 switch (hw->fc.requested_mode) { 2068 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2098 case ixgbe_fc_none: 2069 reg_bp |= IXGBE_AUTOC_AN_RESTART;
2099 /* Flow control completely disabled by software override. */ 2070 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2100 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); 2071 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2101 break; 2072 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
2102 case ixgbe_fc_rx_pause: 2073 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
2103 /* 2074 MDIO_MMD_AN, reg_cu);
2104 * Rx Flow control is enabled and Tx Flow control is
2105 * disabled by software override. Since there really
2106 * isn't a way to advertise that we are capable of RX
2107 * Pause ONLY, we will advertise that we support both
2108 * symmetric and asymmetric Rx PAUSE. Later, we will
2109 * disable the adapter's ability to send PAUSE frames.
2110 */
2111 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2112 break;
2113 case ixgbe_fc_tx_pause:
2114 /*
2115 * Tx Flow control is enabled, and Rx Flow control is
2116 * disabled by software override.
2117 */
2118 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2119 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2120 break;
2121 case ixgbe_fc_full:
2122 /* Flow control (both Rx and Tx) is enabled by SW override. */
2123 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2124 break;
2125#ifdef CONFIG_DCB
2126 case ixgbe_fc_pfc:
2127 goto out;
2128 break;
2129#endif /* CONFIG_DCB */
2130 default:
2131 hw_dbg(hw, "Flow control param set incorrectly\n");
2132 ret_val = IXGBE_ERR_CONFIG;
2133 goto out;
2134 break;
2135 } 2075 }
2136 /*
2137 * AUTOC restart handles negotiation of 1G and 10G. There is
2138 * no need to set the PCS1GCTL register.
2139 */
2140 reg |= IXGBE_AUTOC_AN_RESTART;
2141 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2142 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2143 2076
2077 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2144out: 2078out:
2145 return ret_val; 2079 return ret_val;
2146} 2080}
@@ -2156,10 +2090,16 @@ out:
2156 **/ 2090 **/
2157s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2091s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2158{ 2092{
2093 struct ixgbe_adapter *adapter = hw->back;
2159 u32 i; 2094 u32 i;
2160 u32 reg_val; 2095 u32 reg_val;
2161 u32 number_of_queues; 2096 u32 number_of_queues;
2162 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2097 s32 status = 0;
2098 u16 dev_status = 0;
2099
2100 /* Just jump out if bus mastering is already disabled */
2101 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2102 goto out;
2163 2103
2164 /* Disable the receive unit by stopping each queue */ 2104 /* Disable the receive unit by stopping each queue */
2165 number_of_queues = hw->mac.max_rx_queues; 2105 number_of_queues = hw->mac.max_rx_queues;
@@ -2176,13 +2116,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2176 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 2116 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2177 2117
2178 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2118 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2179 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { 2119 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2180 status = 0; 2120 goto check_device_status;
2121 udelay(100);
2122 }
2123
2124 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
2125 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2126
2127 /*
2128 * Before proceeding, make sure that the PCIe block does not have
2129 * transactions pending.
2130 */
2131check_device_status:
2132 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2133 pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
2134 &dev_status);
2135 if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2181 break; 2136 break;
2182 }
2183 udelay(100); 2137 udelay(100);
2184 } 2138 }
2185 2139
2140 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
2141 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2142 else
2143 goto out;
2144
2145 /*
2146 * Two consecutive resets are required via CTRL.RST per datasheet
2147 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2148 * of this need. The first reset prevents new master requests from
2149 * being issued by our device. We then must wait 1usec for any
2150 * remaining completions from the PCIe bus to trickle in, and then reset
2151 * again to clear out any effects they may have had on our device.
2152 */
2153 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2154
2155out:
2186 return status; 2156 return status;
2187} 2157}
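
The master-disable rework above is a two-stage bounded poll: first on the internal GIO master status, then, as a backstop, on the PCIe Device Status transaction-pending bit read from config space. Only when the pending bit never clears does IXGBE_FLAGS_DOUBLE_RESET_REQUIRED ask the reset path for the two back-to-back resets the datasheet mandates. A user-space sketch of that shape, with the register reads stubbed out and the limit and return codes illustrative:

#include <stdio.h>

#define POLL_LIMIT 800	/* stands in for IXGBE_PCI_MASTER_DISABLE_TIMEOUT */

/* Stubbed status reads: busy for a few polls, then idle. */
static int gio_master_busy(void) { static int n; return n++ < 3; }
static int txn_pending(void)     { static int n; return n++ < 5; }

static int disable_master(int *need_double_reset)
{
	int status = 0;
	int i;

	*need_double_reset = 0;

	for (i = 0; i < POLL_LIMIT; i++)
		if (!gio_master_busy())
			goto check_device_status;

	status = -1;	/* GIO bit never cleared; still interrogate the PCIe block */

check_device_status:
	for (i = 0; i < POLL_LIMIT; i++)
		if (!txn_pending())
			return status;

	/* The transaction-pending bit never cleared either: ask the reset
	 * path for two back-to-back resets (datasheet 5.2.5.3.2). */
	*need_double_reset = 1;
	return status;
}

int main(void)
{
	int dbl;
	int status = disable_master(&dbl);

	printf("status=%d double_reset=%d\n", status, dbl);
	return 0;
}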
2188 2158
@@ -2192,7 +2162,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2192 * @hw: pointer to hardware structure 2162 * @hw: pointer to hardware structure
2193 * @mask: Mask to specify which semaphore to acquire 2163 * @mask: Mask to specify which semaphore to acquire
2194 * 2164 *
2195 * Acquires the SWFW semaphore thought the GSSR register for the specified 2165 * Acquires the SWFW semaphore through the GSSR register for the specified
2196 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2166 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2197 **/ 2167 **/
2198s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2168s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2203,6 +2173,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2203 s32 timeout = 200; 2173 s32 timeout = 200;
2204 2174
2205 while (timeout) { 2175 while (timeout) {
2176 /*
2177 * SW EEPROM semaphore bit is used for access to all
2178 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2179 */
2206 if (ixgbe_get_eeprom_semaphore(hw)) 2180 if (ixgbe_get_eeprom_semaphore(hw))
2207 return IXGBE_ERR_SWFW_SYNC; 2181 return IXGBE_ERR_SWFW_SYNC;
2208 2182
@@ -2220,7 +2194,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2220 } 2194 }
2221 2195
2222 if (!timeout) { 2196 if (!timeout) {
2223 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); 2197 hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
2224 return IXGBE_ERR_SWFW_SYNC; 2198 return IXGBE_ERR_SWFW_SYNC;
2225 } 2199 }
2226 2200
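
The new comment documents a layered locking scheme: the single EEPROM semaphore serializes every update to the per-resource GSSR bits, and acquisition retries under a bounded count. A single-threaded illustration of the protocol follows; the global-semaphore stubs and the 200-iteration budget mirror the driver only in shape:

#include <stdio.h>

#define ERR_SWFW_SYNC -1

static unsigned gssr;	/* per-resource busy bits; stands in for the GSSR register */

/* Global (EEPROM) semaphore stubs; in the driver these are
 * ixgbe_get_eeprom_semaphore() and its release counterpart. */
static int  get_global_semaphore(void) { return 0; }
static void put_global_semaphore(void) { }

static int acquire_swfw_sync(unsigned mask)
{
	int timeout = 200;	/* bounded retry budget, as in the driver */

	while (timeout) {
		/* the one global semaphore guards every GSSR bit update */
		if (get_global_semaphore())
			return ERR_SWFW_SYNC;

		if (!(gssr & mask)) {
			gssr |= mask;	/* resource free: claim it */
			put_global_semaphore();
			return 0;
		}

		put_global_semaphore();	/* busy: drop the lock and retry */
		timeout--;
	}

	return ERR_SWFW_SYNC;
}

int main(void)
{
	int first = acquire_swfw_sync(0x2);
	int second = acquire_swfw_sync(0x2);	/* never released: times out */

	printf("first=%d second=%d\n", first, second);
	return 0;
}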
@@ -2236,7 +2210,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2236 * @hw: pointer to hardware structure 2210 * @hw: pointer to hardware structure
2237 * @mask: Mask to specify which semaphore to release 2211 * @mask: Mask to specify which semaphore to release
2238 * 2212 *
2239 * Releases the SWFW semaphore thought the GSSR register for the specified 2213 * Releases the SWFW semaphore through the GSSR register for the specified
2240 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2214 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2241 **/ 2215 **/
2242void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2216void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2424,37 +2398,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2424 u32 mpsar_lo, mpsar_hi; 2398 u32 mpsar_lo, mpsar_hi;
2425 u32 rar_entries = hw->mac.num_rar_entries; 2399 u32 rar_entries = hw->mac.num_rar_entries;
2426 2400
2427 if (rar < rar_entries) { 2401 /* Make sure we are using a valid rar index range */
2428 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2402 if (rar >= rar_entries) {
2429 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2403 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2404 return IXGBE_ERR_INVALID_ARGUMENT;
2405 }
2430 2406
2431 if (!mpsar_lo && !mpsar_hi) 2407 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2432 goto done; 2408 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2433 2409
2434 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2410 if (!mpsar_lo && !mpsar_hi)
2435 if (mpsar_lo) { 2411 goto done;
2436 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2437 mpsar_lo = 0;
2438 }
2439 if (mpsar_hi) {
2440 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2441 mpsar_hi = 0;
2442 }
2443 } else if (vmdq < 32) {
2444 mpsar_lo &= ~(1 << vmdq);
2445 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2446 } else {
2447 mpsar_hi &= ~(1 << (vmdq - 32));
2448 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2449 }
2450 2412
2451 /* was that the last pool using this rar? */ 2413 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2452 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2414 if (mpsar_lo) {
2453 hw->mac.ops.clear_rar(hw, rar); 2415 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2416 mpsar_lo = 0;
2417 }
2418 if (mpsar_hi) {
2419 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2420 mpsar_hi = 0;
2421 }
2422 } else if (vmdq < 32) {
2423 mpsar_lo &= ~(1 << vmdq);
2424 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2454 } else { 2425 } else {
2455 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2426 mpsar_hi &= ~(1 << (vmdq - 32));
2427 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2456 } 2428 }
2457 2429
2430 /* was that the last pool using this rar? */
2431 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2432 hw->mac.ops.clear_rar(hw, rar);
2458done: 2433done:
2459 return 0; 2434 return 0;
2460} 2435}
@@ -2470,18 +2445,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2470 u32 mpsar; 2445 u32 mpsar;
2471 u32 rar_entries = hw->mac.num_rar_entries; 2446 u32 rar_entries = hw->mac.num_rar_entries;
2472 2447
2473 if (rar < rar_entries) { 2448 /* Make sure we are using a valid rar index range */
2474 if (vmdq < 32) { 2449 if (rar >= rar_entries) {
2475 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2476 mpsar |= 1 << vmdq;
2477 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2478 } else {
2479 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2480 mpsar |= 1 << (vmdq - 32);
2481 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2482 }
2483 } else {
2484 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2450 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2451 return IXGBE_ERR_INVALID_ARGUMENT;
2452 }
2453
2454 if (vmdq < 32) {
2455 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2456 mpsar |= 1 << vmdq;
2457 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2458 } else {
2459 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2460 mpsar |= 1 << (vmdq - 32);
2461 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2485 } 2462 }
2486 return 0; 2463 return 0;
2487} 2464}
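
Both VMDq hunks above now validate the RAR index up front, then manipulate a 64-pool membership bitmap that the hardware splits across two 32-bit registers per RAR entry (MPSAR_LO for pools 0-31, MPSAR_HI for 32-63). Separated from the register I/O, the split-bitmap arithmetic is just this; the struct and names are stand-ins:

#include <stdio.h>
#include <stdint.h>

struct mpsar { uint32_t lo, hi; };	/* stand-in for MPSAR_LO/MPSAR_HI */

static void set_pool(struct mpsar *m, unsigned vmdq)
{
	if (vmdq < 32)
		m->lo |= 1u << vmdq;
	else
		m->hi |= 1u << (vmdq - 32);
}

static void clear_pool(struct mpsar *m, unsigned vmdq)
{
	if (vmdq < 32)
		m->lo &= ~(1u << vmdq);
	else
		m->hi &= ~(1u << (vmdq - 32));
}

static int rar_unused(const struct mpsar *m)
{
	/* mirrors the "was that the last pool using this rar?" check */
	return m->lo == 0 && m->hi == 0;
}

int main(void)
{
	struct mpsar m = { 0, 0 };

	set_pool(&m, 5);
	set_pool(&m, 40);
	clear_pool(&m, 5);
	printf("lo=%08x hi=%08x unused=%d\n",
	       (unsigned)m.lo, (unsigned)m.hi, rar_unused(&m));
	clear_pool(&m, 40);
	printf("unused now: %d\n", rar_unused(&m));
	return 0;
}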
@@ -2494,7 +2471,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2494{ 2471{
2495 int i; 2472 int i;
2496 2473
2497
2498 for (i = 0; i < 128; i++) 2474 for (i = 0; i < 128; i++)
2499 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 2475 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2500 2476
@@ -2723,12 +2699,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
2723 * Reads the links register to determine if link is up and the current speed 2699 * Reads the links register to determine if link is up and the current speed
2724 **/ 2700 **/
2725s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 2701s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2726 bool *link_up, bool link_up_wait_to_complete) 2702 bool *link_up, bool link_up_wait_to_complete)
2727{ 2703{
2728 u32 links_reg; 2704 u32 links_reg, links_orig;
2729 u32 i; 2705 u32 i;
2730 2706
2707 /* clear the old state */
2708 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
2709
2731 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 2710 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2711
2712 if (links_orig != links_reg) {
2713 hw_dbg(hw, "LINKS changed from %08X to %08X\n",
2714 links_orig, links_reg);
2715 }
2716
2732 if (link_up_wait_to_complete) { 2717 if (link_up_wait_to_complete) {
2733 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 2718 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
2734 if (links_reg & IXGBE_LINKS_UP) { 2719 if (links_reg & IXGBE_LINKS_UP) {
@@ -2751,10 +2736,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2751 IXGBE_LINKS_SPEED_10G_82599) 2736 IXGBE_LINKS_SPEED_10G_82599)
2752 *speed = IXGBE_LINK_SPEED_10GB_FULL; 2737 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2753 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 2738 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2754 IXGBE_LINKS_SPEED_1G_82599) 2739 IXGBE_LINKS_SPEED_1G_82599)
2755 *speed = IXGBE_LINK_SPEED_1GB_FULL; 2740 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2756 else 2741 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2742 IXGBE_LINKS_SPEED_100_82599)
2757 *speed = IXGBE_LINK_SPEED_100_FULL; 2743 *speed = IXGBE_LINK_SPEED_100_FULL;
2744 else
2745 *speed = IXGBE_LINK_SPEED_UNKNOWN;
2758 2746
2759 /* if link is down, zero out the current_mode */ 2747 /* if link is down, zero out the current_mode */
2760 if (*link_up == false) { 2748 if (*link_up == false) {
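
The speed-decode hunk above gains an explicit 100 Mb/s case and an unknown fallback, instead of silently reporting 100 Mb/s for any unrecognized encoding. A compact sketch of the decode; the mask values follow the ixgbe_type.h layout of this era but should be treated as illustrative:

#include <stdio.h>
#include <stdint.h>

/* LINKS field layout; values shown for illustration only. */
#define LINKS_UP         0x40000000u
#define LINKS_SPEED_MASK 0x30000000u
#define LINKS_SPEED_10G  0x30000000u
#define LINKS_SPEED_1G   0x20000000u
#define LINKS_SPEED_100  0x10000000u

static const char *decode_speed(uint32_t links)
{
	switch (links & LINKS_SPEED_MASK) {
	case LINKS_SPEED_10G: return "10G";
	case LINKS_SPEED_1G:  return "1G";
	case LINKS_SPEED_100: return "100M";
	default:              return "unknown";	/* the new fall-through */
	}
}

int main(void)
{
	uint32_t links = LINKS_UP | LINKS_SPEED_1G;

	printf("link %s at %s\n",
	       (links & LINKS_UP) ? "up" : "down", decode_speed(links));
	return 0;
}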
@@ -2811,6 +2799,28 @@ wwn_prefix_out:
2811} 2799}
2812 2800
2813/** 2801/**
2802 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
2803 * control
2804 * @hw: pointer to hardware structure
2805 *
2806 * There are several phys that do not support autoneg flow control. This
 2807 * function checks the device id to see if the associated phy supports
2808 * autoneg flow control.
2809 **/
2810static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
2811{
2812
2813 switch (hw->device_id) {
2814 case IXGBE_DEV_ID_X540T:
2815 return 0;
2816 case IXGBE_DEV_ID_82599_T3_LOM:
2817 return 0;
2818 default:
2819 return IXGBE_ERR_FC_NOT_SUPPORTED;
2820 }
2821}
2822
2823/**
2814 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 2824 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
2815 * @hw: pointer to hardware structure 2825 * @hw: pointer to hardware structure
2816 * @enable: enable or disable switch for anti-spoofing 2826 * @enable: enable or disable switch for anti-spoofing
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 66ed045a8cf..508f635fc2c 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,7 @@
29#define _IXGBE_COMMON_H_ 29#define _IXGBE_COMMON_H_
30 30
31#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#include "ixgbe.h"
32 33
33u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); 34u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
34s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -62,8 +63,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
62s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 63s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
63s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, 64s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
64 struct net_device *netdev); 65 struct net_device *netdev);
65s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
66 struct net_device *netdev);
67s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 66s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
68s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 67s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
69s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 68s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
@@ -110,9 +109,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
110 109
111#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) 110#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
112 111
113extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
114#define hw_dbg(hw, format, arg...) \ 112#define hw_dbg(hw, format, arg...) \
115 netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg) 113 netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
116#define e_dev_info(format, arg...) \ 114#define e_dev_info(format, arg...) \
117 dev_info(&adapter->pdev->dev, format, ## arg) 115 dev_info(&adapter->pdev->dev, format, ## arg)
118#define e_dev_warn(format, arg...) \ 116#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index d16c260c1f5..41c529fac0a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,42 @@
34#include "ixgbe_dcb_82599.h" 34#include "ixgbe_dcb_82599.h"
35 35
36/** 36/**
 37 * ixgbe_ieee_credits - This calculates the IEEE traffic class
 38 * credits from the configured bandwidth percentages. Credits
 39 * are the smallest unit programmable into the underlying
 40 * hardware. The IEEE 802.1Qaz specification does not use bandwidth
 41 * groups, so this is much simplified from the CEE case.
42 */
43s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
44{
45 int min_percent = 100;
46 int min_credit, multiplier;
47 int i;
48
49 min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
50 DCB_CREDIT_QUANTUM;
51
52 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
53 if (bw[i] < min_percent && bw[i])
54 min_percent = bw[i];
55 }
56
57 multiplier = (min_credit / min_percent) + 1;
58
59 /* Find out the hw credits for each TC */
60 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
61 int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
62
63 if (val < min_credit)
64 val = min_credit;
65 refill[i] = val;
66
67 max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
68 }
69 return 0;
70}
71
72/**
37 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits 73 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
38 * @ixgbe_dcb_config: Struct containing DCB settings. 74 * @ixgbe_dcb_config: Struct containing DCB settings.
39 * @direction: Configuring either Tx or Rx. 75 * @direction: Configuring either Tx or Rx.
@@ -141,6 +177,59 @@ out:
141 return ret_val; 177 return ret_val;
142} 178}
143 179
180void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
181{
182 int i;
183
184 *pfc_en = 0;
185 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
186 *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
187}
188
189void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
190 u16 *refill)
191{
192 struct tc_bw_alloc *p;
193 int i;
194
195 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
196 p = &cfg->tc_config[i].path[direction];
197 refill[i] = p->data_credits_refill;
198 }
199}
200
201void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
202{
203 int i;
204
205 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
206 max[i] = cfg->tc_config[i].desc_credits_max;
207}
208
209void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
210 u8 *bwgid)
211{
212 struct tc_bw_alloc *p;
213 int i;
214
215 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
216 p = &cfg->tc_config[i].path[direction];
217 bwgid[i] = p->bwg_id;
218 }
219}
220
221void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
222 u8 *ptype)
223{
224 struct tc_bw_alloc *p;
225 int i;
226
227 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
228 p = &cfg->tc_config[i].path[direction];
229 ptype[i] = p->prio_type;
230 }
231}
232
144/** 233/**
145 * ixgbe_dcb_hw_config - Config and enable DCB 234 * ixgbe_dcb_hw_config - Config and enable DCB
146 * @hw: pointer to hardware structure 235 * @hw: pointer to hardware structure
@@ -152,13 +241,32 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
152 struct ixgbe_dcb_config *dcb_config) 241 struct ixgbe_dcb_config *dcb_config)
153{ 242{
154 s32 ret = 0; 243 s32 ret = 0;
244 u8 pfc_en;
245 u8 ptype[MAX_TRAFFIC_CLASS];
246 u8 bwgid[MAX_TRAFFIC_CLASS];
247 u16 refill[MAX_TRAFFIC_CLASS];
248 u16 max[MAX_TRAFFIC_CLASS];
 249 /* CEE does not define a priority-to-tc mapping, so map 1:1 */
250 u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
251
252 /* Unpack CEE standard containers */
253 ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
254 ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
255 ixgbe_dcb_unpack_max(dcb_config, max);
256 ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
257 ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
258
155 switch (hw->mac.type) { 259 switch (hw->mac.type) {
156 case ixgbe_mac_82598EB: 260 case ixgbe_mac_82598EB:
157 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); 261 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
262 pfc_en, refill, max, bwgid,
263 ptype);
158 break; 264 break;
159 case ixgbe_mac_82599EB: 265 case ixgbe_mac_82599EB:
160 case ixgbe_mac_X540: 266 case ixgbe_mac_X540:
161 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); 267 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
268 pfc_en, refill, max, bwgid,
269 ptype, prio_tc);
162 break; 270 break;
163 default: 271 default:
164 break; 272 break;
@@ -166,3 +274,49 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
166 return ret; 274 return ret;
167} 275}
168 276
277/* Helper routines to abstract HW specifics from DCB netlink ops */
278s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
279{
280 int ret = -EINVAL;
281
282 switch (hw->mac.type) {
283 case ixgbe_mac_82598EB:
284 ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
285 break;
286 case ixgbe_mac_82599EB:
287 case ixgbe_mac_X540:
288 ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
289 break;
290 default:
291 break;
292 }
293 return ret;
294}
295
296s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
297 u16 *refill, u16 *max, u8 *bwg_id,
298 u8 *prio_type, u8 *prio_tc)
299{
300 switch (hw->mac.type) {
301 case ixgbe_mac_82598EB:
302 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
303 prio_type);
304 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
305 bwg_id, prio_type);
306 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
307 bwg_id, prio_type);
308 break;
309 case ixgbe_mac_82599EB:
310 case ixgbe_mac_X540:
311 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
312 bwg_id, prio_type, prio_tc);
313 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
314 bwg_id, prio_type);
315 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
316 prio_type, prio_tc);
317 break;
318 default:
319 break;
320 }
321 return 0;
322}
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 1cfe38ee164..944838fc7b5 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; 139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ 140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
141 bool pfc_mode_enable; 141 bool pfc_mode_enable;
142 bool round_robin_enable;
143 142
144 enum dcb_rx_pba_cfg rx_pba_cfg; 143 enum dcb_rx_pba_cfg rx_pba_cfg;
145 144
@@ -148,12 +147,21 @@ struct ixgbe_dcb_config {
148}; 147};
149 148
150/* DCB driver APIs */ 149/* DCB driver APIs */
150void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
151void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
152void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
153void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
154void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
151 155
152/* DCB credits calculation */ 156/* DCB credits calculation */
157s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, 158s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
154 struct ixgbe_dcb_config *, int, u8); 159 struct ixgbe_dcb_config *, int, u8);
155 160
156/* DCB hw initialization */ 161/* DCB hw initialization */
162s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
163 u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
164s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
157s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 165s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
158 166
159/* DCB definitions for credit calculation */ 167/* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 9a5e89c12e0..1bc57e52cee 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -38,15 +38,14 @@
38 * 38 *
39 * Configure packet buffers for DCB mode. 39 * Configure packet buffers for DCB mode.
40 */ 40 */
41static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, 41static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
42 struct ixgbe_dcb_config *dcb_config)
43{ 42{
44 s32 ret_val = 0; 43 s32 ret_val = 0;
45 u32 value = IXGBE_RXPBSIZE_64KB; 44 u32 value = IXGBE_RXPBSIZE_64KB;
46 u8 i = 0; 45 u8 i = 0;
47 46
48 /* Setup Rx packet buffer sizes */ 47 /* Setup Rx packet buffer sizes */
49 switch (dcb_config->rx_pba_cfg) { 48 switch (rx_pba) {
50 case pba_80_48: 49 case pba_80_48:
51 /* Setup the first four at 80KB */ 50 /* Setup the first four at 80KB */
52 value = IXGBE_RXPBSIZE_80KB; 51 value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
78 * 77 *
79 * Configure Rx Data Arbiter and credits for each traffic class. 78 * Configure Rx Data Arbiter and credits for each traffic class.
80 */ 79 */
81static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, 80s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
82 struct ixgbe_dcb_config *dcb_config) 81 u16 *refill,
82 u16 *max,
83 u8 *prio_type)
83{ 84{
84 struct tc_bw_alloc *p;
85 u32 reg = 0; 85 u32 reg = 0;
86 u32 credit_refill = 0; 86 u32 credit_refill = 0;
87 u32 credit_max = 0; 87 u32 credit_max = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
102 102
103 /* Configure traffic class credits and priority */ 103 /* Configure traffic class credits and priority */
104 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 104 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
105 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; 105 credit_refill = refill[i];
106 credit_refill = p->data_credits_refill; 106 credit_max = max[i];
107 credit_max = p->data_credits_max;
108 107
109 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); 108 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
110 109
111 if (p->prio_type == prio_link) 110 if (prio_type[i] == prio_link)
112 reg |= IXGBE_RT2CR_LSP; 111 reg |= IXGBE_RT2CR_LSP;
113 112
114 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); 113 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
135 * 134 *
136 * Configure Tx Descriptor Arbiter and credits for each traffic class. 135 * Configure Tx Descriptor Arbiter and credits for each traffic class.
137 */ 136 */
138static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, 137s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
139 struct ixgbe_dcb_config *dcb_config) 138 u16 *refill,
139 u16 *max,
140 u8 *bwg_id,
141 u8 *prio_type)
140{ 142{
141 struct tc_bw_alloc *p;
142 u32 reg, max_credits; 143 u32 reg, max_credits;
143 u8 i; 144 u8 i;
144 145
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
146 147
147 /* Enable arbiter */ 148 /* Enable arbiter */
148 reg &= ~IXGBE_DPMCS_ARBDIS; 149 reg &= ~IXGBE_DPMCS_ARBDIS;
149 if (!(dcb_config->round_robin_enable)) { 150 /* Enable DFP and Recycle mode */
150 /* Enable DFP and Recycle mode */ 151 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
151 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
152 }
153 reg |= IXGBE_DPMCS_TSOEF; 152 reg |= IXGBE_DPMCS_TSOEF;
154 /* Configure Max TSO packet size 34KB including payload and headers */ 153 /* Configure Max TSO packet size 34KB including payload and headers */
155 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); 154 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
158 157
159 /* Configure traffic class credits and priority */ 158 /* Configure traffic class credits and priority */
160 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 159 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
161 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 160 max_credits = max[i];
162 max_credits = dcb_config->tc_config[i].desc_credits_max;
163 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; 161 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
164 reg |= p->data_credits_refill; 162 reg |= refill[i];
165 reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT; 163 reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
166 164
167 if (p->prio_type == prio_group) 165 if (prio_type[i] == prio_group)
168 reg |= IXGBE_TDTQ2TCCR_GSP; 166 reg |= IXGBE_TDTQ2TCCR_GSP;
169 167
170 if (p->prio_type == prio_link) 168 if (prio_type[i] == prio_link)
171 reg |= IXGBE_TDTQ2TCCR_LSP; 169 reg |= IXGBE_TDTQ2TCCR_LSP;
172 170
173 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); 171 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
183 * 181 *
184 * Configure Tx Data Arbiter and credits for each traffic class. 182 * Configure Tx Data Arbiter and credits for each traffic class.
185 */ 183 */
186static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, 184s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
187 struct ixgbe_dcb_config *dcb_config) 185 u16 *refill,
186 u16 *max,
187 u8 *bwg_id,
188 u8 *prio_type)
188{ 189{
189 struct tc_bw_alloc *p;
190 u32 reg; 190 u32 reg;
191 u8 i; 191 u8 i;
192 192
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
200 200
201 /* Configure traffic class credits and priority */ 201 /* Configure traffic class credits and priority */
202 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 202 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
203 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 203 reg = refill[i];
204 reg = p->data_credits_refill; 204 reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
205 reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT; 205 reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
206 reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
207 206
208 if (p->prio_type == prio_group) 207 if (prio_type[i] == prio_group)
209 reg |= IXGBE_TDPT2TCCR_GSP; 208 reg |= IXGBE_TDPT2TCCR_GSP;
210 209
211 if (p->prio_type == prio_link) 210 if (prio_type[i] == prio_link)
212 reg |= IXGBE_TDPT2TCCR_LSP; 211 reg |= IXGBE_TDPT2TCCR_LSP;
213 212
214 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); 213 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,59 +228,57 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
229 * 228 *
230 * Configure Priority Flow Control for each traffic class. 229 * Configure Priority Flow Control for each traffic class.
231 */ 230 */
232s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, 231s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
233 struct ixgbe_dcb_config *dcb_config)
234{ 232{
235 u32 reg, rx_pba_size; 233 u32 reg, rx_pba_size;
236 u8 i; 234 u8 i;
237 235
238 if (!dcb_config->pfc_mode_enable) 236 if (pfc_en) {
239 goto out; 237 /* Enable Transmit Priority Flow Control */
240 238 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
241 /* Enable Transmit Priority Flow Control */ 239 reg &= ~IXGBE_RMCS_TFCE_802_3X;
242 reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 240 /* correct the reporting of our flow control status */
243 reg &= ~IXGBE_RMCS_TFCE_802_3X; 241 reg |= IXGBE_RMCS_TFCE_PRIORITY;
244 /* correct the reporting of our flow control status */ 242 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
245 reg |= IXGBE_RMCS_TFCE_PRIORITY; 243
246 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); 244 /* Enable Receive Priority Flow Control */
247 245 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
248 /* Enable Receive Priority Flow Control */ 246 reg &= ~IXGBE_FCTRL_RFCE;
249 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 247 reg |= IXGBE_FCTRL_RPFCE;
250 reg &= ~IXGBE_FCTRL_RFCE; 248 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
251 reg |= IXGBE_FCTRL_RPFCE; 249
252 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); 250 /* Configure pause time */
251 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
252 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
253
254 /* Configure flow control refresh threshold value */
255 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
256 }
253 257
254 /* 258 /*
255 * Configure flow control thresholds and enable priority flow control 259 * Configure flow control thresholds and enable priority flow control
256 * for each traffic class. 260 * for each traffic class.
257 */ 261 */
258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 262 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
263 int enabled = pfc_en & (1 << i);
259 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 264 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
260 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; 265 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
261 reg = (rx_pba_size - hw->fc.low_water) << 10; 266 reg = (rx_pba_size - hw->fc.low_water) << 10;
262 267
263 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 268 if (enabled == pfc_enabled_tx ||
264 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 269 enabled == pfc_enabled_full)
265 reg |= IXGBE_FCRTL_XONE; 270 reg |= IXGBE_FCRTL_XONE;
266 271
267 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); 272 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
268 273
269 reg = (rx_pba_size - hw->fc.high_water) << 10; 274 reg = (rx_pba_size - hw->fc.high_water) << 10;
270 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 275 if (enabled == pfc_enabled_tx ||
271 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 276 enabled == pfc_enabled_full)
272 reg |= IXGBE_FCRTH_FCEN; 277 reg |= IXGBE_FCRTH_FCEN;
273 278
274 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); 279 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
275 } 280 }
276 281
277 /* Configure pause time */
278 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
279 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
280
281 /* Configure flow control refresh threshold value */
282 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
283
284out:
285 return 0; 282 return 0;
286} 283}
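
In the PFC rework above the per-TC thresholds are computed as (packet-buffer size - watermark) << 10: RXPBSIZE shifted down gives the buffer size in KB, the watermark (also in KB) is subtracted as headroom, and the shift converts the result to the byte units the FCRTL/FCRTH registers expect. A small sketch of just the unit arithmetic; the shift constant mirrors IXGBE_RXPBSIZE_SHIFT and the watermark numbers are made up:

#include <stdio.h>
#include <stdint.h>

#define RXPBSIZE_SHIFT 10	/* mirrors IXGBE_RXPBSIZE_SHIFT; illustrative */

int main(void)
{
	uint32_t rxpbsize_reg = 80u << RXPBSIZE_SHIFT;	/* an 80KB Rx packet buffer */
	uint32_t rx_pba_kb = rxpbsize_reg >> RXPBSIZE_SHIFT;
	uint32_t low_water = 16, high_water = 8;	/* headroom watermarks, in KB */

	/* the registers are programmed in bytes, hence the << 10 */
	uint32_t fcrtl = (rx_pba_kb - low_water) << 10;
	uint32_t fcrth = (rx_pba_kb - high_water) << 10;

	printf("FCRTL=%u bytes, FCRTH=%u bytes\n",
	       (unsigned)fcrtl, (unsigned)fcrth);
	return 0;
}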
287 284
@@ -292,7 +289,7 @@ out:
 292 * Configure queue statistics registers; all queues belonging to the same traffic 289 * Configure queue statistics registers; all queues belonging to the same traffic
 293 * class use a single set of queue statistics counters. 290 * class use a single set of queue statistics counters.
294 */ 291 */
295static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) 292s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
296{ 293{
297 u32 reg = 0; 294 u32 reg = 0;
298 u8 i = 0; 295 u8 i = 0;
@@ -325,13 +322,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
325 * Configure dcb settings and enable dcb mode. 322 * Configure dcb settings and enable dcb mode.
326 */ 323 */
327s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, 324s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
328 struct ixgbe_dcb_config *dcb_config) 325 u8 rx_pba, u8 pfc_en, u16 *refill,
326 u16 *max, u8 *bwg_id, u8 *prio_type)
329{ 327{
330 ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config); 328 ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
331 ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); 329 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
332 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); 330 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
333 ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); 331 bwg_id, prio_type);
334 ixgbe_dcb_config_pfc_82598(hw, dcb_config); 332 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
333 bwg_id, prio_type);
334 ixgbe_dcb_config_pfc_82598(hw, pfc_en);
335 ixgbe_dcb_config_tc_stats_82598(hw); 335 ixgbe_dcb_config_tc_stats_82598(hw);
336 336
337 return 0; 337 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index abc03ccfa08..1e9750c2b46 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -71,9 +71,28 @@
71/* DCB hardware-specific driver APIs */ 71/* DCB hardware-specific driver APIs */
72 72
73/* DCB PFC functions */ 73/* DCB PFC functions */
74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
75 75
76/* DCB hw initialization */ 76/* DCB hw initialization */
77s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 77s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
78 u16 *refill,
79 u16 *max,
80 u8 *prio_type);
81
82s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
83 u16 *refill,
84 u16 *max,
85 u8 *bwg_id,
86 u8 *prio_type);
87
88s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
89 u16 *refill,
90 u16 *max,
91 u8 *bwg_id,
92 u8 *prio_type);
93
94s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
95 u8 rx_pba, u8 pfc_en, u16 *refill,
96 u16 *max, u8 *bwg_id, u8 *prio_type);
78 97
79#endif /* _DCB_82598_CONFIG_H */ 98#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 374e1f74d0f..025af8c53dd 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -33,19 +33,18 @@
 /**
  * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
  *
  * Configure packet buffers for DCB mode.
  */
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
-                                                 struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
 {
 	s32 ret_val = 0;
 	u32 value = IXGBE_RXPBSIZE_64KB;
 	u8  i = 0;
 
 	/* Setup Rx packet buffer sizes */
-	switch (dcb_config->rx_pba_cfg) {
+	switch (rx_pba) {
 	case pba_80_48:
 		/* Setup the first four at 80KB */
 		value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,20 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Rx Packet Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
-                                             struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+                                      u16 *refill,
+                                      u16 *max,
+                                      u8 *bwg_id,
+                                      u8 *prio_type,
+                                      u8 *prio_tc)
 {
-	struct tc_bw_alloc    *p;
 	u32    reg           = 0;
 	u32    credit_refill = 0;
 	u32    credit_max    = 0;
@@ -98,20 +103,18 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
 	/* Map all traffic classes to their UP, 1 to 1 */
 	reg = 0;
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-		reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+		reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
 	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-
-		credit_refill = p->data_credits_refill;
-		credit_max    = p->data_credits_max;
+		credit_refill = refill[i];
+		credit_max    = max[i];
 		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
 
-		reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_RTRPT4C_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +133,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
-                                                  struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+                                           u16 *refill,
+                                           u16 *max,
+                                           u8 *bwg_id,
+                                           u8 *prio_type)
 {
-	struct tc_bw_alloc *p;
 	u32 reg, max_credits;
 	u8  i;
 
@@ -149,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-		max_credits = dcb_config->tc_config[i].desc_credits_max;
+		max_credits = max[i];
 		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
-		reg |= p->data_credits_refill;
-		reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
 
-		if (p->prio_type == prio_group)
+		if (prio_type[i] == prio_group)
 			reg |= IXGBE_RTTDT2C_GSP;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_RTTDT2C_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +184,20 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Packet Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
-                                                  struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+                                           u16 *refill,
+                                           u16 *max,
+                                           u8 *bwg_id,
+                                           u8 *prio_type,
+                                           u8 *prio_tc)
 {
-	struct tc_bw_alloc *p;
 	u32 reg;
 	u8 i;
 
@@ -200,20 +213,19 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
 	/* Map all traffic classes to their UP, 1 to 1 */
 	reg = 0;
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-		reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+		reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
 	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-		reg = p->data_credits_refill;
-		reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
-		reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
+		reg = refill[i];
+		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
 
-		if (p->prio_type == prio_group)
+		if (prio_type[i] == prio_group)
 			reg |= IXGBE_RTTPT2C_GSP;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_RTTPT2C_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,63 +245,59 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure Priority Flow Control (PFC) for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
 {
 	u32 i, reg, rx_pba_size;
 
-	/* If PFC is disabled globally then fall back to LFC. */
-	if (!dcb_config->pfc_mode_enable) {
-		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-			hw->mac.ops.fc_enable(hw, i);
-		goto out;
-	}
-
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		int enabled = pfc_en & (1 << i);
 		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 
 		reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+		if (enabled)
 			reg |= IXGBE_FCRTL_XONE;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
 
 		reg = (rx_pba_size - hw->fc.high_water) << 10;
-		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+		if (enabled)
 			reg |= IXGBE_FCRTH_FCEN;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
 	}
 
-	/* Configure pause time (2 TCs per register) */
-	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
-	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
-		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-	/* Configure flow control refresh threshold value */
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-	/* Enable Transmit PFC */
-	reg = IXGBE_FCCFG_TFCE_PRIORITY;
-	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
+	if (pfc_en) {
+		/* Configure pause time (2 TCs per register) */
+		reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+		for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+			IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
 
-	/*
-	 * Enable Receive PFC
-	 * We will always honor XOFF frames we receive when
-	 * we are in PFC mode.
-	 */
-	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-	reg &= ~IXGBE_MFLCN_RFCE;
-	reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
-	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-out:
+		/* Configure flow control refresh threshold value */
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+		reg = IXGBE_FCCFG_TFCE_PRIORITY;
+		IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
+		/*
+		 * Enable Receive PFC
+		 * We will always honor XOFF frames we receive when
+		 * we are in PFC mode.
+		 */
+		reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+		reg &= ~IXGBE_MFLCN_RFCE;
+		reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
+		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+	} else {
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+			hw->mac.ops.fc_enable(hw, i);
+	}
+
 	return 0;
 }
 
@@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_config_82599 - Configure general DCB parameters
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
  *
  * Configure general DCB parameters.
  */
@@ -406,19 +413,28 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *dcb_config)
+                              u8 rx_pba, u8 pfc_en, u16 *refill,
+                              u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
 {
-	ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
+	ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
 	ixgbe_dcb_config_82599(hw);
-	ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
-	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
-	ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
-	ixgbe_dcb_config_pfc_82599(hw, dcb_config);
+	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+	                                  prio_type, prio_tc);
+	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+	                                       bwg_id, prio_type);
+	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+	                                       bwg_id, prio_type, prio_tc);
+	ixgbe_dcb_config_pfc_82599(hw, pfc_en);
 	ixgbe_dcb_config_tc_stats_82599(hw);
 
 	return 0;
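
The arbiter loops above compose one 32-bit credit word per traffic class from the flat arrays. A standalone sketch of that composition follows; the field offsets and the strict-priority flag below are assumed stand-ins for the IXGBE_RTRPT4C_* definitions in ixgbe_type.h, invented here only to make the bit layout concrete:

#include <stdint.h>
#include <stdio.h>

#define RTRPT4C_MCL_SHIFT 12	/* assumed: max-credit-limit field offset */
#define RTRPT4C_BWG_SHIFT 9	/* assumed: bandwidth-group field offset */
#define RTRPT4C_LSP (1u << 31)	/* assumed: link strict priority flag */

enum { prio_none, prio_group, prio_link };

static uint32_t pack_rx_credit_word(uint16_t refill, uint16_t max,
				    uint8_t bwg_id, uint8_t prio_type)
{
	/* refill occupies the low bits, max and bwg their own fields */
	uint32_t reg = refill | ((uint32_t)max << RTRPT4C_MCL_SHIFT);

	reg |= (uint32_t)bwg_id << RTRPT4C_BWG_SHIFT;
	if (prio_type == prio_link)
		reg |= RTRPT4C_LSP;	/* strict priority wins arbitration */
	return reg;
}

int main(void)
{
	/* TC 0: 64 refill credits, 512 max, bandwidth group 0, ETS */
	printf("0x%08x\n", pack_rx_credit_word(64, 512, 0, prio_group));
	return 0;
}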
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 3841649fb95..148fd8b477a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -102,11 +102,32 @@
 /* DCB hardware-specific driver APIs */
 
 /* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config);
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
 
 /* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+                                      u16 *refill,
+                                      u16 *max,
+                                      u8 *bwg_id,
+                                      u8 *prio_type,
+                                      u8 *prio_tc);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+                                           u16 *refill,
+                                           u16 *max,
+                                           u8 *bwg_id,
+                                           u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+                                           u16 *refill,
+                                           u16 *max,
+                                           u8 *bwg_id,
+                                           u8 *prio_type,
+                                           u8 *prio_tc);
+
 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *config);
+                              u8 rx_pba, u8 pfc_en, u16 *refill,
+                              u16 *max, u8 *bwg_id, u8 *prio_type,
+                              u8 *prio_tc);
 
 #endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bf566e8a455..fec4c724c37 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
 #define BIT_PG_RX	0x04
 #define BIT_PG_TX	0x08
 #define BIT_APP_UPCHG	0x10
-#define BIT_RESETLINK	0x40
 #define BIT_LINKSPEED	0x80
 
 /* Responses for the DCB_C_SET_ALL command */
@@ -130,7 +129,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 		netdev->netdev_ops->ndo_stop(netdev);
 		ixgbe_clear_interrupt_scheme(adapter);
 
-		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 		switch (adapter->hw.mac.type) {
 		case ixgbe_mac_82598EB:
 			adapter->last_lfc_mode = adapter->hw.fc.current_mode;
@@ -146,6 +144,9 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 		}
 
 		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+		if (!netdev_get_num_tc(netdev))
+			ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
+
 		ixgbe_init_interrupt_scheme(adapter);
 		if (netif_running(netdev))
 			netdev->netdev_ops->ndo_open(netdev);
@@ -160,7 +161,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
 		adapter->dcb_cfg.pfc_mode_enable = false;
 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-		adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 		switch (adapter->hw.mac.type) {
 		case ixgbe_mac_82599EB:
 		case ixgbe_mac_X540:
@@ -170,6 +170,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 			break;
 		}
 
+		ixgbe_setup_tc(netdev, 0);
+
 		ixgbe_init_interrupt_scheme(adapter);
 		if (netif_running(netdev))
 			netdev->netdev_ops->ndo_open(netdev);
@@ -225,10 +227,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
 	     adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
 	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
+	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
 		adapter->dcb_set_bitmap |= BIT_PG_TX;
-		adapter->dcb_set_bitmap |= BIT_RESETLINK;
-	}
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +239,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 	adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
 
 	if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-	    adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
+	    adapter->dcb_cfg.bw_percentage[0][bwg_id])
 		adapter->dcb_set_bitmap |= BIT_PG_TX;
-		adapter->dcb_set_bitmap |= BIT_RESETLINK;
-	}
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +267,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
 	     adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
 	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
+	     adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
 		adapter->dcb_set_bitmap |= BIT_PG_RX;
-		adapter->dcb_set_bitmap |= BIT_RESETLINK;
-	}
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +279,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 	adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
 
 	if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-	    adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
+	    adapter->dcb_cfg.bw_percentage[1][bwg_id])
 		adapter->dcb_set_bitmap |= BIT_PG_RX;
-		adapter->dcb_set_bitmap |= BIT_RESETLINK;
-	}
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -355,31 +349,28 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int ret;
 
-	if (!adapter->dcb_set_bitmap)
+	if (!adapter->dcb_set_bitmap ||
+	    !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return DCB_NO_HW_CHG;
 
 	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-				 adapter->ring_feature[RING_F_DCB].indices);
+				 MAX_TRAFFIC_CLASS);
 
 	if (ret)
 		return DCB_NO_HW_CHG;
 
 	/*
-	 * Only take down the adapter if the configuration change
-	 * requires a reset.
+	 * Only take down the adapter if an app change occurred. FCoE
+	 * may shuffle tx rings in this case and this can not be done
+	 * without a reset currently.
 	 */
-	if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
 		while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 			msleep(1);
 
-		if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-			if (netif_running(netdev))
-				netdev->netdev_ops->ndo_stop(netdev);
-			ixgbe_clear_interrupt_scheme(adapter);
-		} else {
-			if (netif_running(netdev))
-				ixgbe_down(adapter);
-		}
+		if (netif_running(netdev))
+			netdev->netdev_ops->ndo_stop(netdev);
+		ixgbe_clear_interrupt_scheme(adapter);
 	}
 
 	if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +399,53 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 		}
 	}
 
-	if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
-		if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-			ixgbe_init_interrupt_scheme(adapter);
-			if (netif_running(netdev))
-				netdev->netdev_ops->ndo_open(netdev);
-		} else {
-			if (netif_running(netdev))
-				ixgbe_up(adapter);
-		}
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+		ixgbe_init_interrupt_scheme(adapter);
+		if (netif_running(netdev))
+			netdev->netdev_ops->ndo_open(netdev);
 		ret = DCB_HW_CHG_RST;
-	} else if (adapter->dcb_set_bitmap & BIT_PFC) {
-		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-			ixgbe_dcb_config_pfc_82598(&adapter->hw,
-						   &adapter->dcb_cfg);
-		else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			ixgbe_dcb_config_pfc_82599(&adapter->hw,
-						   &adapter->dcb_cfg);
+	}
+
+	if (adapter->dcb_set_bitmap & BIT_PFC) {
+		u8 pfc_en;
+		ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+		ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
 		ret = DCB_HW_CHG;
 	}
+
+	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
+		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
+		u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+		/* Priority to TC mapping in CEE case default to 1:1 */
+		u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
+		int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef CONFIG_FCOE
+		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+					       max_frame, DCB_TX_CONFIG);
+		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+					       max_frame, DCB_RX_CONFIG);
+
+		ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
+					DCB_TX_CONFIG, refill);
+		ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
+		ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
+				       DCB_TX_CONFIG, bwg_id);
+		ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
+				      DCB_TX_CONFIG, prio_type);
+
+		ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+					bwg_id, prio_type, prio_tc);
+	}
+
 	if (adapter->dcb_cfg.pfc_mode_enable)
 		adapter->hw.fc.current_mode = ixgbe_fc_pfc;
 
-	if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
 		clear_bit(__IXGBE_RESETTING, &adapter->state);
 	adapter->dcb_set_bitmap = 0x00;
 	return ret;
@@ -439,40 +454,38 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u8 rval = 0;
 
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-		switch (capid) {
-		case DCB_CAP_ATTR_PG:
-			*cap = true;
-			break;
-		case DCB_CAP_ATTR_PFC:
-			*cap = true;
-			break;
-		case DCB_CAP_ATTR_UP2TC:
-			*cap = false;
-			break;
-		case DCB_CAP_ATTR_PG_TCS:
-			*cap = 0x80;
-			break;
-		case DCB_CAP_ATTR_PFC_TCS:
-			*cap = 0x80;
-			break;
-		case DCB_CAP_ATTR_GSP:
-			*cap = true;
-			break;
-		case DCB_CAP_ATTR_BCN:
-			*cap = false;
-			break;
-		default:
-			rval = -EINVAL;
-			break;
-		}
-	} else {
-		rval = -EINVAL;
+	switch (capid) {
+	case DCB_CAP_ATTR_PG:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_PFC:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_UP2TC:
+		*cap = false;
+		break;
+	case DCB_CAP_ATTR_PG_TCS:
+		*cap = 0x80;
+		break;
+	case DCB_CAP_ATTR_PFC_TCS:
+		*cap = 0x80;
+		break;
+	case DCB_CAP_ATTR_GSP:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_BCN:
+		*cap = false;
+		break;
+	case DCB_CAP_ATTR_DCBX:
+		*cap = adapter->dcbx_cap;
+		break;
+	default:
+		*cap = false;
+		break;
 	}
 
-	return rval;
+	return 0;
 }
 
 static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -533,21 +546,16 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
  */
 static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
 {
-	u8 rval = 0;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct dcb_app app = {
+				.selector = idtype,
+				.protocol = id,
+			     };
 
-	switch (idtype) {
-	case DCB_APP_IDTYPE_ETHTYPE:
-#ifdef IXGBE_FCOE
-		if (id == ETH_P_FCOE)
-			rval = ixgbe_fcoe_getapp(netdev_priv(netdev));
-#endif
-		break;
-	case DCB_APP_IDTYPE_PORTNUM:
-		break;
-	default:
-		break;
-	}
-	return rval;
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+		return 0;
+
+	return dcb_getapp(netdev, &app);
 }
 
 /**
@@ -562,24 +570,45 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
 static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
                              u8 idtype, u16 id, u8 up)
 {
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	u8 rval = 1;
+	struct dcb_app app = {
+			      .selector = idtype,
+			      .protocol = id,
+			      .priority = up
+			     };
+
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+		return rval;
+
+	rval = dcb_setapp(netdev, &app);
 
 	switch (idtype) {
 	case DCB_APP_IDTYPE_ETHTYPE:
 #ifdef IXGBE_FCOE
 		if (id == ETH_P_FCOE) {
-			u8 tc;
-			struct ixgbe_adapter *adapter;
+			u8 old_tc;
 
-			adapter = netdev_priv(netdev);
-			tc = adapter->fcoe.tc;
+			/* Get current programmed tc */
+			old_tc = adapter->fcoe.tc;
 			rval = ixgbe_fcoe_setapp(adapter, up);
-			if ((!rval) && (tc != adapter->fcoe.tc) &&
-			    (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-			    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+
+			if (rval ||
+			    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
+			    !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+				break;
+
+			/* The FCoE application priority may be changed
+			 * multiple times in quick succession with switches
+			 * that build up TLVs. To avoid creating unneeded
+			 * device resets this checks the actual HW
+			 * configuration and clears BIT_APP_UPCHG if a HW
+			 * configuration change is not needed.
+			 */
+			if (old_tc == adapter->fcoe.tc)
+				adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
+			else
 				adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-				adapter->dcb_set_bitmap |= BIT_RESETLINK;
-			}
 		}
 #endif
 		break;
@@ -591,7 +620,204 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
 	return rval;
 }
 
+static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
+				   struct ieee_ets *ets)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+
+	/* No IEEE ETS settings available */
+	if (!my_ets)
+		return -EINVAL;
+
+	ets->ets_cap = MAX_TRAFFIC_CLASS;
+	ets->cbs = my_ets->cbs;
+	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+	return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
+				   struct ieee_ets *ets)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+	__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int i, err;
+	__u64 *p = (__u64 *) ets->prio_tc;
+	/* naively give each TC a bwg to map onto CEE hardware */
+	__u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	if (!adapter->ixgbe_ieee_ets) {
+		adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
+						  GFP_KERNEL);
+		if (!adapter->ixgbe_ieee_ets)
+			return -ENOMEM;
+	}
+
+	memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
+	/* Map TSA onto CEE prio type */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			prio_type[i] = 2;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			prio_type[i] = 0;
+			break;
+		default:
+			/* Hardware only supports priority strict or
+			 * ETS transmission selection algorithms; if
+			 * we receive some other value from dcbnl,
+			 * throw an error.
+			 */
+			return -EINVAL;
+		}
+	}
+
+	if (*p)
+		ixgbe_dcbnl_set_state(dev, 1);
+	else
+		ixgbe_dcbnl_set_state(dev, 0);
+
+	ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+	err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+				      bwg_id, prio_type, ets->prio_tc);
+	return err;
+}
+
+static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
+				   struct ieee_pfc *pfc)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
+	int i;
+
+	/* No IEEE PFC settings available */
+	if (!my_pfc)
+		return -EINVAL;
+
+	pfc->pfc_cap = MAX_TRAFFIC_CLASS;
+	pfc->pfc_en = my_pfc->pfc_en;
+	pfc->mbc = my_pfc->mbc;
+	pfc->delay = my_pfc->delay;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		pfc->requests[i] = adapter->stats.pxoffrxc[i];
+		pfc->indications[i] = adapter->stats.pxofftxc[i];
+	}
+
+	return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
+				   struct ieee_pfc *pfc)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err;
+
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	if (!adapter->ixgbe_ieee_pfc) {
+		adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
+						  GFP_KERNEL);
+		if (!adapter->ixgbe_ieee_pfc)
+			return -ENOMEM;
+	}
+
+	memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
+	err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
+	return err;
+}
+
+static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
+				   struct dcb_app *app)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+#ifdef IXGBE_FCOE
+	if (app->selector == 1 && app->protocol == ETH_P_FCOE) {
+		if (adapter->fcoe.tc == app->priority)
+			goto setapp;
+
+		/* In IEEE mode map up to tc 1:1 */
+		adapter->fcoe.tc = app->priority;
+		adapter->fcoe.up = app->priority;
+
+		/* Force hardware reset required to push FCoE
+		 * setup on {tx|rx}_rings
+		 */
+		adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
+		ixgbe_dcbnl_set_all(dev);
+	}
+
+setapp:
+#endif
+	dcb_setapp(dev, app);
+	return 0;
+}
+
+static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	return adapter->dcbx_cap;
+}
+
+static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ieee_ets ets = {0};
+	struct ieee_pfc pfc = {0};
+
+	/* no support for LLD_MANAGED modes or CEE+IEEE */
+	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+	    ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+	    !(mode & DCB_CAP_DCBX_HOST))
+		return 1;
+
+	if (mode == adapter->dcbx_cap)
+		return 0;
+
+	adapter->dcbx_cap = mode;
+
+	/* ETS and PFC defaults */
+	ets.ets_cap = 8;
+	pfc.pfc_cap = 8;
+
+	if (mode & DCB_CAP_DCBX_VER_IEEE) {
+		ixgbe_dcbnl_ieee_setets(dev, &ets);
+		ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
+	} else if (mode & DCB_CAP_DCBX_VER_CEE) {
+		adapter->dcb_set_bitmap |= (BIT_PFC | BIT_PG_TX | BIT_PG_RX);
+		ixgbe_dcbnl_set_all(dev);
+	} else {
+		/* Drop into single TC mode strict priority as this
+		 * indicates CEE and IEEE versions are disabled
+		 */
+		ixgbe_dcbnl_ieee_setets(dev, &ets);
+		ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
+		ixgbe_dcbnl_set_state(dev, 0);
+	}
+
+	return 0;
+}
+
 const struct dcbnl_rtnl_ops dcbnl_ops = {
+	.ieee_getets	= ixgbe_dcbnl_ieee_getets,
+	.ieee_setets	= ixgbe_dcbnl_ieee_setets,
+	.ieee_getpfc	= ixgbe_dcbnl_ieee_getpfc,
+	.ieee_setpfc	= ixgbe_dcbnl_ieee_setpfc,
+	.ieee_setapp	= ixgbe_dcbnl_ieee_setapp,
 	.getstate	= ixgbe_dcbnl_get_state,
 	.setstate	= ixgbe_dcbnl_set_state,
 	.getpermhwaddr	= ixgbe_dcbnl_get_perm_hw_addr,
@@ -613,5 +839,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
 	.setpfcstate	= ixgbe_dcbnl_setpfcstate,
 	.getapp		= ixgbe_dcbnl_getapp,
 	.setapp		= ixgbe_dcbnl_setapp,
+	.getdcbx	= ixgbe_dcbnl_getdcbx,
+	.setdcbx	= ixgbe_dcbnl_setdcbx,
 };
-
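
The new ixgbe_dcbnl_ieee_setets() above translates the 802.1Qaz transmission selection algorithm per TC into the driver's CEE-style prio_type values. A standalone sketch of that mapping follows; the TSA constants are assumed to match the kernel's IEEE_8021QAZ_TSA_* values, and the function and main() are invented for illustration:

#include <stdint.h>

#define IEEE_8021QAZ_MAX_TCS 8
#define TSA_STRICT 0	/* assumed to match IEEE_8021QAZ_TSA_STRICT */
#define TSA_ETS    2	/* assumed to match IEEE_8021QAZ_TSA_ETS */

/* Returns 0 on success, -1 if a TSA value the hardware can't do appears. */
static int map_tsa_to_prio_type(const uint8_t tsa[IEEE_8021QAZ_MAX_TCS],
				uint8_t prio_type[IEEE_8021QAZ_MAX_TCS])
{
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		switch (tsa[i]) {
		case TSA_STRICT:
			prio_type[i] = 2;	/* link strict in CEE terms */
			break;
		case TSA_ETS:
			prio_type[i] = 0;	/* group/ETS in CEE terms */
			break;
		default:
			return -1;	/* only strict and ETS supported */
		}
	}
	return 0;
}

int main(void)
{
	uint8_t tsa[IEEE_8021QAZ_MAX_TCS] = { TSA_ETS, TSA_ETS, TSA_STRICT,
					      TSA_ETS, TSA_ETS, TSA_ETS,
					      TSA_ETS, TSA_ETS };
	uint8_t prio_type[IEEE_8021QAZ_MAX_TCS];

	return map_tsa_to_prio_type(tsa, prio_type) ? 1 : 0;
}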
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2002ea88ca2..76380a2b35a 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -152,20 +152,35 @@ static int ixgbe_get_settings(struct net_device *netdev,
 		ecmd->supported |= (SUPPORTED_1000baseT_Full |
 				    SUPPORTED_Autoneg);
 
+		switch (hw->mac.type) {
+		case ixgbe_mac_X540:
+			ecmd->supported |= SUPPORTED_100baseT_Full;
+			break;
+		default:
+			break;
+		}
+
 		ecmd->advertising = ADVERTISED_Autoneg;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-			ecmd->advertising |= ADVERTISED_10000baseT_Full;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-			ecmd->advertising |= ADVERTISED_1000baseT_Full;
-		/*
-		 * It's possible that phy.autoneg_advertised may not be
-		 * set yet.  If so display what the default would be -
-		 * both 1G and 10G supported.
-		 */
-		if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
-					   ADVERTISED_10000baseT_Full)))
+		if (hw->phy.autoneg_advertised) {
+			if (hw->phy.autoneg_advertised &
+			    IXGBE_LINK_SPEED_100_FULL)
+				ecmd->advertising |= ADVERTISED_100baseT_Full;
+			if (hw->phy.autoneg_advertised &
+			    IXGBE_LINK_SPEED_10GB_FULL)
+				ecmd->advertising |= ADVERTISED_10000baseT_Full;
+			if (hw->phy.autoneg_advertised &
+			    IXGBE_LINK_SPEED_1GB_FULL)
+				ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		} else {
+			/*
+			 * Default advertised modes in case
+			 * phy.autoneg_advertised isn't set.
+			 */
 			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
 					      ADVERTISED_1000baseT_Full);
+			if (hw->mac.type == ixgbe_mac_X540)
+				ecmd->advertising |= ADVERTISED_100baseT_Full;
+		}
 
 		if (hw->phy.media_type == ixgbe_media_type_copper) {
 			ecmd->supported |= SUPPORTED_TP;
@@ -271,8 +286,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
 
 	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 	if (link_up) {
-		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-		               SPEED_10000 : SPEED_1000;
+		switch (link_speed) {
+		case IXGBE_LINK_SPEED_10GB_FULL:
+			ecmd->speed = SPEED_10000;
+			break;
+		case IXGBE_LINK_SPEED_1GB_FULL:
+			ecmd->speed = SPEED_1000;
+			break;
+		case IXGBE_LINK_SPEED_100_FULL:
+			ecmd->speed = SPEED_100;
+			break;
+		default:
+			break;
+		}
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
 		ecmd->speed = -1;
@@ -306,6 +332,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
 		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
 			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
+		if (ecmd->advertising & ADVERTISED_100baseT_Full)
+			advertised |= IXGBE_LINK_SPEED_100_FULL;
+
 		if (old == advertised)
 			return err;
 		/* this sets the link speed and restarts auto-neg */
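
The ethtool changes above replace a two-way ternary with a switch so that the X540's 100 Mb/s mode can be reported. A standalone sketch of the same translation follows; the link-speed encodings are assumed stand-ins for the driver's IXGBE_LINK_SPEED_* constants, and the helper is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define LINK_SPEED_100_FULL  0x0008	/* assumed encodings */
#define LINK_SPEED_1GB_FULL  0x0020
#define LINK_SPEED_10GB_FULL 0x0080

static int link_speed_to_mbps(uint32_t link_speed)
{
	switch (link_speed) {
	case LINK_SPEED_10GB_FULL:
		return 10000;
	case LINK_SPEED_1GB_FULL:
		return 1000;
	case LINK_SPEED_100_FULL:
		return 100;	/* new with X540 support */
	default:
		return -1;	/* unknown: leave the ethtool speed unset */
	}
}

int main(void)
{
	printf("%d Mb/s\n", link_speed_to_mbps(LINK_SPEED_100_FULL));
	return 0;
}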
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d485979..dba7d77588e 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -135,22 +135,19 @@ out_ddp_put:
 	return len;
 }
 
-
 /**
- * ixgbe_fcoe_ddp_get - called to set up ddp context
+ * ixgbe_fcoe_ddp_setup - called to set up ddp context
  * @netdev: the corresponding net_device
  * @xid: the exchange id requesting ddp
  * @sgl: the scatter-gather list for this request
  * @sgc: the number of scatter-gather items
  *
- * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
- * and is expected to be called from ULD, e.g., FCP layer of libfc
- * to set up ddp for the corresponding xid of the given sglist for
- * the corresponding I/O.
- *
  * Returns : 1 for success and 0 for no ddp
  */
-int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-		       struct scatterlist *sgl, unsigned int sgc)
+static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+				struct scatterlist *sgl, unsigned int sgc,
+				int target_mode)
 {
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_hw *hw;
@@ -159,13 +156,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
-	static const unsigned int bufflen = 4096;
+	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
 	unsigned int firstoff = 0;
 	unsigned int lastsize;
 	unsigned int thisoff = 0;
 	unsigned int thislen = 0;
-	u32 fcbuff, fcdmarw, fcfltrw;
-	dma_addr_t addr;
+	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
+	dma_addr_t addr = 0;
 
 	if (!netdev || !sgl)
 		return 0;
@@ -254,9 +251,30 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
+	/*
+	 * lastsize can not be buffer len.
+	 * If it is then adding another buffer with lastsize = 1.
+	 */
+	if (lastsize == bufflen) {
+		if (j >= IXGBE_BUFFCNT_MAX) {
+			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+			      "not enough user buffers. We need an extra "
+			      "buffer because lastsize is bufflen.\n",
+			      xid, i, j, dmacount, (u64)addr);
+			goto out_noddp_free;
+		}
+
+		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+		j++;
+		lastsize = 1;
+	}
+
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
+	/* Set WRCONTX bit to allow DDP for target */
+	if (target_mode)
+		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
 	fcbuff |= (IXGBE_FCBUFF_VALID);
 
 	fcdmarw = xid;
@@ -269,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	/* program DMA context */
 	hw = &adapter->hw;
 	spin_lock_bh(&fcoe->lock);
+
+	/* turn on last frame indication for target mode as FCP_RSP target is
+	 * supposed to send FCP_RSP when it is done. */
+	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
+		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
+		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
+		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
+	}
+
 	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
 	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
 	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
@@ -277,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
 	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
 	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+
 	spin_unlock_bh(&fcoe->lock);
 
 	return 1;
@@ -291,6 +320,47 @@ out_noddp_unmap:
 }
 
 /**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+		       struct scatterlist *sgl, unsigned int sgc)
+{
+	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+/**
+ * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+			  struct scatterlist *sgl, unsigned int sgc)
+{
+	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
+/**
  * ixgbe_fcoe_ddp - check ddp status and mark it done
  * @adapter: ixgbe adapter
  * @rx_desc: advanced rx descriptor
@@ -313,6 +383,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_fcoe_ddp *ddp;
 	struct fc_frame_header *fh;
+	struct fcoe_crc_eof *crc;
 
 	if (!ixgbe_rx_is_fcoe(rx_desc))
 		goto ddp_out;
@@ -366,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 		else if (ddp->len)
 			rc = ddp->len;
 	}
-
+	/* In target mode, check the last data frame of the sequence.
+	 * For DDP in target mode, data is already DDPed but the header
+	 * indication of the last data frame would allow us to tell if we
+	 * got all the data and the ULP can send FCP_RSP back. As this is
+	 * not a full fcoe frame, we fill the trailer here so it won't be
+	 * dropped by the ULP stack.
+	 */
+	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
+	    (fctl & FC_FC_END_SEQ)) {
+		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+		crc->fcoe_eof = FC_EOF_T;
+	}
 ddp_out:
 	return rc;
 }
@@ -532,6 +614,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			e_err(drv, "failed to allocate FCoE DDP pool\n");
 
 		spin_lock_init(&fcoe->lock);
+
+		/* Extra buffer to be shared by all DDPs for HW work around */
+		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+		if (fcoe->extra_ddp_buffer == NULL) {
+			e_err(drv, "failed to allocate extra DDP buffer\n");
+			goto out_extra_ddp_buffer_alloc;
+		}
+
+		fcoe->extra_ddp_buffer_dma =
+			dma_map_single(&adapter->pdev->dev,
+				       fcoe->extra_ddp_buffer,
+				       IXGBE_FCBUFF_MIN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev,
+				      fcoe->extra_ddp_buffer_dma)) {
+			e_err(drv, "failed to map extra DDP buffer\n");
+			goto out_extra_ddp_buffer_dma;
+		}
 	}
 
 	/* Enable L2 eth type filter for FCoE */
@@ -581,6 +681,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		}
 	}
 #endif
+
+	return;
+
+out_extra_ddp_buffer_dma:
+	kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+	pci_pool_destroy(fcoe->pool);
+	fcoe->pool = NULL;
 }
 
 /**
@@ -600,6 +708,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
 	if (fcoe->pool) {
 		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 			ixgbe_fcoe_ddp_put(adapter->netdev, i);
+		dma_unmap_single(&adapter->pdev->dev,
+				 fcoe->extra_ddp_buffer_dma,
+				 IXGBE_FCBUFF_MIN,
+				 DMA_FROM_DEVICE);
+		kfree(fcoe->extra_ddp_buffer);
 		pci_pool_destroy(fcoe->pool);
 		fcoe->pool = NULL;
 	}
@@ -700,21 +813,6 @@ out_disable:
 
 #ifdef CONFIG_IXGBE_DCB
 /**
- * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- *
- * Finds out the corresponding user priority bitmap from the current
- * traffic class that FCoE belongs to.  Returns 0 as the invalid user
- * priority bitmap to indicate an error.
- *
- * Returns : 802.1p user priority bitmap for FCoE
- */
-u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
-{
-	return 1 << adapter->fcoe.up;
-}
-
-/**
  * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
  * @adapter : ixgbe adapter
  * @up : 802.1p user priority bitmap
@@ -791,5 +889,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
 	}
 	return rc;
 }
-
-
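
The "lastsize == bufflen" workaround above exists because the DDP context must end in a partially filled buffer. A standalone sketch of the buffer accounting follows; the constants are assumed stand-ins for IXGBE_FCBUFF_MIN and IXGBE_BUFFCNT_MAX, and the helper and main() are invented for illustration:

#include <stdio.h>

#define FCBUFF_MIN   4096	/* assumed IXGBE_FCBUFF_MIN (4KB buffers) */
#define BUFFCNT_MAX  256	/* assumed IXGBE_BUFFCNT_MAX */

/* Returns the buffer count to program, or -1 if the exchange needs more
 * buffers than one DDP context can describe. */
static int ddp_buffer_count(unsigned int total_len)
{
	unsigned int j = (total_len + FCBUFF_MIN - 1) / FCBUFF_MIN;
	unsigned int lastsize = total_len - (j - 1) * FCBUFF_MIN;

	if (lastsize == FCBUFF_MIN) {	/* exact fit: append the shared buffer */
		if (j >= BUFFCNT_MAX)
			return -1;
		j++;			/* would point at extra_ddp_buffer_dma */
	}
	return j;
}

int main(void)
{
	/* 8192 bytes fill two 4KB buffers exactly, so a third is added */
	printf("%d buffers for 8192 bytes\n", ddp_buffer_count(8192));
	return 0;
}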
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c551c8d..5a650a4ace6 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,9 @@
 /* fcerr */
 #define IXGBE_FCERR_BADCRC       0x00100000
 
+/* FCoE DDP for target mode */
+#define __IXGBE_FCOE_TARGET	1
+
 struct ixgbe_fcoe_ddp {
 	int len;
 	u32 err;
@@ -66,10 +69,13 @@ struct ixgbe_fcoe {
 	u8 tc;
 	u8 up;
 #endif
+	unsigned long mode;
 	atomic_t refcnt;
 	spinlock_t lock;
 	struct pci_pool *pool;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+	unsigned char *extra_ddp_buffer;
+	dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 602078b8489..f17e4a7ee73 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -52,9 +52,10 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "3.0.12-k2" 55#define DRV_VERSION "3.2.9-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static const char ixgbe_copyright[] =
58 "Copyright (c) 1999-2011 Intel Corporation.";
58 59
59static const struct ixgbe_info *ixgbe_info_tbl[] = { 60static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info, 61 [board_82598] = &ixgbe_82598_info,
@@ -648,10 +649,10 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
648 * 649 *
649 * Returns : a tc index for use in range 0-7, or 0-3 650 * Returns : a tc index for use in range 0-7, or 0-3
650 */ 651 */
651u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) 652static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
652{ 653{
653 int tc = -1; 654 int tc = -1;
654 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 655 int dcb_i = netdev_get_num_tc(adapter->netdev);
655 656
656 /* if DCB is not enabled the queues have no TC */ 657 /* if DCB is not enabled the queues have no TC */
657 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 658 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
@@ -2597,6 +2598,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2597 2598
2598 i--; 2599 i--;
2599 for (; i >= 0; i--) { 2600 for (; i >= 0; i--) {
2601 /* free only the irqs that were actually requested */
2602 if (!adapter->q_vector[i]->rxr_count &&
2603 !adapter->q_vector[i]->txr_count)
2604 continue;
2605
2600 free_irq(adapter->msix_entries[i].vector, 2606 free_irq(adapter->msix_entries[i].vector,
2601 adapter->q_vector[i]); 2607 adapter->q_vector[i]);
2602 } 2608 }
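
The new guard only balances if the request side applies the same filter; a vector with neither Rx nor Tx rings never had request_irq() called on it, so free_irq() there would hit an unrequested vector. A sketch of the matching request loop this assumes (handler and name are placeholders, not driver identifiers):

	/* sketch: request side paired with the guarded free loop */
	for (i = 0; i < num_q_vectors; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];

		if (!qv->rxr_count && !qv->txr_count)
			continue;	/* no rings, no handler needed */

		err = request_irq(adapter->msix_entries[i].vector,
				  handler, 0, name, qv);
	}
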
@@ -2886,17 +2892,20 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2886 ); 2892 );
2887 2893
2888 switch (mask) { 2894 switch (mask) {
2895#ifdef CONFIG_IXGBE_DCB
2896 case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
2897 mrqc = IXGBE_MRQC_RTRSS8TCEN;
2898 break;
2899 case (IXGBE_FLAG_DCB_ENABLED):
2900 mrqc = IXGBE_MRQC_RT8TCEN;
2901 break;
2902#endif /* CONFIG_IXGBE_DCB */
2889 case (IXGBE_FLAG_RSS_ENABLED): 2903 case (IXGBE_FLAG_RSS_ENABLED):
2890 mrqc = IXGBE_MRQC_RSSEN; 2904 mrqc = IXGBE_MRQC_RSSEN;
2891 break; 2905 break;
2892 case (IXGBE_FLAG_SRIOV_ENABLED): 2906 case (IXGBE_FLAG_SRIOV_ENABLED):
2893 mrqc = IXGBE_MRQC_VMDQEN; 2907 mrqc = IXGBE_MRQC_VMDQEN;
2894 break; 2908 break;
2895#ifdef CONFIG_IXGBE_DCB
2896 case (IXGBE_FLAG_DCB_ENABLED):
2897 mrqc = IXGBE_MRQC_RT8TCEN;
2898 break;
2899#endif /* CONFIG_IXGBE_DCB */
2900 default: 2909 default:
2901 break; 2910 break;
2902 } 2911 }
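
Worth spelling out why the DCB cases moved ahead of the plain-RSS case and gained a combined entry: with both flags set, the old switch matched nothing and fell through to default, apparently leaving mrqc unprogrammed for that combination. The mapping after this hunk:

	DCB + RSS -> IXGBE_MRQC_RTRSS8TCEN   (8 TCs, RSS within each TC)
	DCB       -> IXGBE_MRQC_RT8TCEN      (8 TCs, no per-TC RSS)
	RSS       -> IXGBE_MRQC_RSSEN
	SRIOV     -> IXGBE_MRQC_VMDQEN

(The per-TC RSS reading of RTRSS8TCEN is an inference from the register name and the RQTC hashing set up in ixgbe_configure_dcb() further down.)
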
@@ -3077,6 +3086,14 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3077 ixgbe_configure_srrctl(adapter, ring); 3086 ixgbe_configure_srrctl(adapter, ring);
3078 ixgbe_configure_rscctl(adapter, ring); 3087 ixgbe_configure_rscctl(adapter, ring);
3079 3088
3089 /* If operating in IOV mode set RLPML for X540 */
3090 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3091 hw->mac.type == ixgbe_mac_X540) {
3092 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
3093 rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
3094 ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
3095 }
3096
3080 if (hw->mac.type == ixgbe_mac_82598EB) { 3097 if (hw->mac.type == ixgbe_mac_82598EB) {
3081 /* 3098 /*
3082 * enable cache line friendly hardware writes: 3099 * enable cache line friendly hardware writes:
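
A quick worked example of the RLPML value programmed above: at the default MTU of 1500, the ring limit comes out to 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes, and IXGBE_RXDCTL_RLPML_EN arms the check, so an X540 PF running with VFs clips oversized frames in hardware.
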
@@ -3176,9 +3193,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3176 u32 mhadd, hlreg0; 3193 u32 mhadd, hlreg0;
3177 3194
3178 /* Decide whether to use packet split mode or not */ 3195 /* Decide whether to use packet split mode or not */
3196 /* On by default */
3197 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3198
3179 /* Do not use packet split if we're in SR-IOV Mode */ 3199 /* Do not use packet split if we're in SR-IOV Mode */
3180 if (!adapter->num_vfs) 3200 if (adapter->num_vfs)
3181 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 3201 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3202
3203 /* Disable packet split due to 82599 erratum #45 */
3204 if (hw->mac.type == ixgbe_mac_82599EB)
3205 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3182 3206
3183 /* Set the RX buffer length according to the mode */ 3207 /* Set the RX buffer length according to the mode */
3184 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 3208 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -3634,15 +3658,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3634 if (hw->mac.type == ixgbe_mac_82598EB) 3658 if (hw->mac.type == ixgbe_mac_82598EB)
3635 netif_set_gso_max_size(adapter->netdev, 32768); 3659 netif_set_gso_max_size(adapter->netdev, 32768);
3636 3660
3637#ifdef CONFIG_FCOE
3638 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3639 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3640#endif
3641
3642 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3643 DCB_TX_CONFIG);
3644 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3645 DCB_RX_CONFIG);
3646 3661
3647 /* Enable VLAN tag insert/strip */ 3662 /* Enable VLAN tag insert/strip */
3648 adapter->netdev->features |= NETIF_F_HW_VLAN_RX; 3663 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
@@ -3650,7 +3665,43 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3650 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3665 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3651 3666
3652 /* reconfigure the hardware */ 3667 /* reconfigure the hardware */
3653 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); 3668 if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
3669#ifdef CONFIG_FCOE
3670 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3671 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3672#endif
3673 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3674 DCB_TX_CONFIG);
3675 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3676 DCB_RX_CONFIG);
3677 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3678 } else {
3679 struct net_device *dev = adapter->netdev;
3680
3681 if (adapter->ixgbe_ieee_ets)
3682 dev->dcbnl_ops->ieee_setets(dev,
3683 adapter->ixgbe_ieee_ets);
3684 if (adapter->ixgbe_ieee_pfc)
3685 dev->dcbnl_ops->ieee_setpfc(dev,
3686 adapter->ixgbe_ieee_pfc);
3687 }
3688
3689 /* Enable RSS Hash per TC */
3690 if (hw->mac.type != ixgbe_mac_82598EB) {
3691 int i;
3692 u32 reg = 0;
3693
3694 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
3695 u8 msb = 0;
3696 u8 cnt = adapter->netdev->tc_to_txq[i].count;
3697
3698 while (cnt >>= 1)
3699 msb++;
3700
3701 reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
3702 }
3703 IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
3704 }
3654} 3705}
3655 3706
3656#endif 3707#endif
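
The msb loop in the RSS-hash-per-TC block above is an integer log2; a worked example for a TC that owns four Tx queues:

	u8 msb = 0, cnt = 4;

	while (cnt >>= 1)	/* 4 -> 2 -> 1 -> 0 */
		msb++;		/* ends at msb == 2 == log2(4) */

so the RQTC field for that TC is written as 2, i.e. hash RSS across 2^2 = 4 queues.
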
@@ -3721,7 +3772,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3721 * We need to try and force an autonegotiation 3772 * We need to try and force an autonegotiation
3722 * session, then bring up link. 3773 * session, then bring up link.
3723 */ 3774 */
3724 hw->mac.ops.setup_sfp(hw); 3775 if (hw->mac.ops.setup_sfp)
3776 hw->mac.ops.setup_sfp(hw);
3725 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 3777 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3726 schedule_work(&adapter->multispeed_fiber_task); 3778 schedule_work(&adapter->multispeed_fiber_task);
3727 } else { 3779 } else {
@@ -3753,7 +3805,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3753 if (ret) 3805 if (ret)
3754 goto link_cfg_out; 3806 goto link_cfg_out;
3755 3807
3756 if (hw->mac.ops.get_link_capabilities) 3808 autoneg = hw->phy.autoneg_advertised;
3809 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3757 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, 3810 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3758 &negotiation); 3811 &negotiation);
3759 if (ret) 3812 if (ret)
@@ -3868,7 +3921,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3868 * If we're not hot-pluggable SFP+, we just need to configure link 3921 * If we're not hot-pluggable SFP+, we just need to configure link
3869 * and bring it up. 3922 * and bring it up.
3870 */ 3923 */
3871 if (hw->phy.type == ixgbe_phy_unknown) 3924 if (hw->phy.type == ixgbe_phy_none)
3872 schedule_work(&adapter->sfp_config_module_task); 3925 schedule_work(&adapter->sfp_config_module_task);
3873 3926
3874 /* enable transmits */ 3927 /* enable transmits */
@@ -4235,24 +4288,6 @@ static void ixgbe_reset_task(struct work_struct *work)
4235 ixgbe_reinit_locked(adapter); 4288 ixgbe_reinit_locked(adapter);
4236} 4289}
4237 4290
4238#ifdef CONFIG_IXGBE_DCB
4239static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
4240{
4241 bool ret = false;
4242 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
4243
4244 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4245 return ret;
4246
4247 f->mask = 0x7 << 3;
4248 adapter->num_rx_queues = f->indices;
4249 adapter->num_tx_queues = f->indices;
4250 ret = true;
4251
4252 return ret;
4253}
4254#endif
4255
4256/** 4291/**
4257 * ixgbe_set_rss_queues: Allocate queues for RSS 4292 * ixgbe_set_rss_queues: Allocate queues for RSS
4258 * @adapter: board private structure to initialize 4293 * @adapter: board private structure to initialize
@@ -4323,19 +4358,26 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
4323 **/ 4358 **/
4324static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) 4359static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4325{ 4360{
4326 bool ret = false;
4327 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 4361 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4328 4362
4329 f->indices = min((int)num_online_cpus(), f->indices); 4363 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4330 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 4364 return false;
4331 adapter->num_rx_queues = 1; 4365
4332 adapter->num_tx_queues = 1; 4366 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4333#ifdef CONFIG_IXGBE_DCB 4367#ifdef CONFIG_IXGBE_DCB
4334 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4368 int tc;
4335 e_info(probe, "FCoE enabled with DCB\n"); 4369 struct net_device *dev = adapter->netdev;
4336 ixgbe_set_dcb_queues(adapter); 4370
4337 } 4371 tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
4372 f->indices = dev->tc_to_txq[tc].count;
4373 f->mask = dev->tc_to_txq[tc].offset;
4338#endif 4374#endif
4375 } else {
4376 f->indices = min((int)num_online_cpus(), f->indices);
4377
4378 adapter->num_rx_queues = 1;
4379 adapter->num_tx_queues = 1;
4380
4339 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4381 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4340 e_info(probe, "FCoE enabled with RSS\n"); 4382 e_info(probe, "FCoE enabled with RSS\n");
4341 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4383 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
@@ -4348,14 +4390,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4348 f->mask = adapter->num_rx_queues; 4390 f->mask = adapter->num_rx_queues;
4349 adapter->num_rx_queues += f->indices; 4391 adapter->num_rx_queues += f->indices;
4350 adapter->num_tx_queues += f->indices; 4392 adapter->num_tx_queues += f->indices;
4393 }
4351 4394
4352 ret = true; 4395 return true;
4396}
4397#endif /* IXGBE_FCOE */
4398
4399#ifdef CONFIG_IXGBE_DCB
4400static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
4401{
4402 bool ret = false;
4403 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
4404 int i, q;
4405
4406 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4407 return ret;
4408
4409 f->indices = 0;
4410 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
4411 q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
4412 f->indices += q;
4353 } 4413 }
4354 4414
4415 f->mask = 0x7 << 3;
4416 adapter->num_rx_queues = f->indices;
4417 adapter->num_tx_queues = f->indices;
4418 ret = true;
4419
4420#ifdef IXGBE_FCOE
 4421 /* FCoE-enabled queues require special configuration, done through
 4422 * configure_fcoe() and others. Here we map the FCoE indices onto
 4423 * the DCB queue pairs, allowing FCoE to own its configuration later.
 4424 */
4425 ixgbe_set_fcoe_queues(adapter);
4426#endif
4427
4355 return ret; 4428 return ret;
4356} 4429}
4430#endif
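
Worked example of the sizing just added: with eight or more online CPUs, q = min(num_online_cpus(), MAX_TRAFFIC_CLASS) = 8 on every pass, so f->indices accumulates to 8 * 8 = 64 queue pairs for num_rx_queues/num_tx_queues, while f->mask keeps its historical 0x7 << 3 value from the removed version of the function.
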
4357 4431
4358#endif /* IXGBE_FCOE */
4359/** 4432/**
4360 * ixgbe_set_sriov_queues: Allocate queues for IOV use 4433 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4361 * @adapter: board private structure to initialize 4434 * @adapter: board private structure to initialize
@@ -4391,16 +4464,16 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4391 if (ixgbe_set_sriov_queues(adapter)) 4464 if (ixgbe_set_sriov_queues(adapter))
4392 goto done; 4465 goto done;
4393 4466
4394#ifdef IXGBE_FCOE
4395 if (ixgbe_set_fcoe_queues(adapter))
4396 goto done;
4397
4398#endif /* IXGBE_FCOE */
4399#ifdef CONFIG_IXGBE_DCB 4467#ifdef CONFIG_IXGBE_DCB
4400 if (ixgbe_set_dcb_queues(adapter)) 4468 if (ixgbe_set_dcb_queues(adapter))
4401 goto done; 4469 goto done;
4402 4470
4403#endif 4471#endif
4472#ifdef IXGBE_FCOE
4473 if (ixgbe_set_fcoe_queues(adapter))
4474 goto done;
4475
4476#endif /* IXGBE_FCOE */
4404 if (ixgbe_set_fdir_queues(adapter)) 4477 if (ixgbe_set_fdir_queues(adapter))
4405 goto done; 4478 goto done;
4406 4479
@@ -4492,6 +4565,110 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4492} 4565}
4493 4566
4494#ifdef CONFIG_IXGBE_DCB 4567#ifdef CONFIG_IXGBE_DCB
4568
 4569/* ixgbe_get_first_reg_idx - Return first Tx/Rx register indices for a traffic class */
4570void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
4571 unsigned int *tx, unsigned int *rx)
4572{
4573 struct net_device *dev = adapter->netdev;
4574 struct ixgbe_hw *hw = &adapter->hw;
4575 u8 num_tcs = netdev_get_num_tc(dev);
4576
4577 *tx = 0;
4578 *rx = 0;
4579
4580 switch (hw->mac.type) {
4581 case ixgbe_mac_82598EB:
4582 *tx = tc << 3;
4583 *rx = tc << 2;
4584 break;
4585 case ixgbe_mac_82599EB:
4586 case ixgbe_mac_X540:
4587 if (num_tcs == 8) {
4588 if (tc < 3) {
4589 *tx = tc << 5;
4590 *rx = tc << 4;
4591 } else if (tc < 5) {
4592 *tx = ((tc + 2) << 4);
4593 *rx = tc << 4;
4594 } else if (tc < num_tcs) {
4595 *tx = ((tc + 8) << 3);
4596 *rx = tc << 4;
4597 }
4598 } else if (num_tcs == 4) {
4599 *rx = tc << 5;
4600 switch (tc) {
4601 case 0:
4602 *tx = 0;
4603 break;
4604 case 1:
4605 *tx = 64;
4606 break;
4607 case 2:
4608 *tx = 96;
4609 break;
4610 case 3:
4611 *tx = 112;
4612 break;
4613 default:
4614 break;
4615 }
4616 }
4617 break;
4618 default:
4619 break;
4620 }
4621}
4622
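
Worked example against the table the old ixgbe_cache_ring_dcb() spelled out inline (removed further down): in 8-TC mode, TC 2 gives *tx = 2 << 5 = 64 and *rx = 2 << 4 = 32; TC 3 takes the middle branch, *tx = (3 + 2) << 4 = 80; TC 6 takes the last, *tx = (6 + 8) << 3 = 112 — reproducing the Tx start queues 0/32/64/80/96/104/112/120 of the old comment.
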
4623#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
4624
4625/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
4626 * classes.
4627 *
 4628 * @dev: net device to configure
4629 * @tc: number of traffic classes to enable
4630 */
4631int ixgbe_setup_tc(struct net_device *dev, u8 tc)
4632{
4633 int i;
4634 unsigned int q, offset = 0;
4635
4636 if (!tc) {
4637 netdev_reset_tc(dev);
4638 } else {
4639 struct ixgbe_adapter *adapter = netdev_priv(dev);
4640
4641 /* Hardware supports up to 8 traffic classes */
4642 if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
4643 return -EINVAL;
4644
4645 /* Partition Tx queues evenly amongst traffic classes */
4646 for (i = 0; i < tc; i++) {
4647 q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
4648 netdev_set_prio_tc_map(dev, i, i);
4649 netdev_set_tc_queue(dev, i, q, offset);
4650 offset += q;
4651 }
4652
4653 /* This enables multiple traffic class support in the hardware
 4654 * which defaults to strict priority transmission.
 4655 * If traffic classes are already enabled, perhaps through the
 4656 * DCB code path, then the existing configuration will be used.
4657 */
4658 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
4659 dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
4660 struct ieee_ets ets = {
4661 .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
4662 };
4663 u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4664
4665 dev->dcbnl_ops->setdcbx(dev, mode);
4666 dev->dcbnl_ops->ieee_setets(dev, &ets);
4667 }
4668 }
4669 return 0;
4670}
4671
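
For illustration, suppose ixgbe_setup_tc(dev, 4) runs on a 4-CPU machine (whether IXGBE_MAX_Q_PER_TC caps q depends on IXGBE_MAX_DCB_INDICES, which is defined outside this diff): each TC gets q = 4 Tx queues, priorities 0-3 map one-to-one onto TCs 0-3, and netdev_set_tc_queue() hands out the ranges 0-3, 4-7, 8-11 and 12-15.
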
4495/** 4672/**
4496 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB 4673 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4497 * @adapter: board private structure to initialize 4674 * @adapter: board private structure to initialize
@@ -4501,72 +4678,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4501 **/ 4678 **/
4502static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) 4679static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4503{ 4680{
4504 int i; 4681 struct net_device *dev = adapter->netdev;
4505 bool ret = false; 4682 int i, j, k;
4506 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 4683 u8 num_tcs = netdev_get_num_tc(dev);
4507 4684
4508 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 4685 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4509 return false; 4686 return false;
4510 4687
4511 /* the number of queues is assumed to be symmetric */ 4688 for (i = 0, k = 0; i < num_tcs; i++) {
4512 switch (adapter->hw.mac.type) { 4689 unsigned int tx_s, rx_s;
4513 case ixgbe_mac_82598EB: 4690 u16 count = dev->tc_to_txq[i].count;
4514 for (i = 0; i < dcb_i; i++) { 4691
4515 adapter->rx_ring[i]->reg_idx = i << 3; 4692 ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
4516 adapter->tx_ring[i]->reg_idx = i << 2; 4693 for (j = 0; j < count; j++, k++) {
4517 } 4694 adapter->tx_ring[k]->reg_idx = tx_s + j;
4518 ret = true; 4695 adapter->rx_ring[k]->reg_idx = rx_s + j;
4519 break; 4696 adapter->tx_ring[k]->dcb_tc = i;
4520 case ixgbe_mac_82599EB: 4697 adapter->rx_ring[k]->dcb_tc = i;
4521 case ixgbe_mac_X540:
4522 if (dcb_i == 8) {
4523 /*
4524 * Tx TC0 starts at: descriptor queue 0
4525 * Tx TC1 starts at: descriptor queue 32
4526 * Tx TC2 starts at: descriptor queue 64
4527 * Tx TC3 starts at: descriptor queue 80
4528 * Tx TC4 starts at: descriptor queue 96
4529 * Tx TC5 starts at: descriptor queue 104
4530 * Tx TC6 starts at: descriptor queue 112
4531 * Tx TC7 starts at: descriptor queue 120
4532 *
4533 * Rx TC0-TC7 are offset by 16 queues each
4534 */
4535 for (i = 0; i < 3; i++) {
4536 adapter->tx_ring[i]->reg_idx = i << 5;
4537 adapter->rx_ring[i]->reg_idx = i << 4;
4538 }
4539 for ( ; i < 5; i++) {
4540 adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
4541 adapter->rx_ring[i]->reg_idx = i << 4;
4542 }
4543 for ( ; i < dcb_i; i++) {
4544 adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
4545 adapter->rx_ring[i]->reg_idx = i << 4;
4546 }
4547 ret = true;
4548 } else if (dcb_i == 4) {
4549 /*
4550 * Tx TC0 starts at: descriptor queue 0
4551 * Tx TC1 starts at: descriptor queue 64
4552 * Tx TC2 starts at: descriptor queue 96
4553 * Tx TC3 starts at: descriptor queue 112
4554 *
4555 * Rx TC0-TC3 are offset by 32 queues each
4556 */
4557 adapter->tx_ring[0]->reg_idx = 0;
4558 adapter->tx_ring[1]->reg_idx = 64;
4559 adapter->tx_ring[2]->reg_idx = 96;
4560 adapter->tx_ring[3]->reg_idx = 112;
4561 for (i = 0 ; i < dcb_i; i++)
4562 adapter->rx_ring[i]->reg_idx = i << 5;
4563 ret = true;
4564 } 4698 }
4565 break;
4566 default:
4567 break;
4568 } 4699 }
4569 return ret; 4700
4701 return true;
4570} 4702}
4571#endif 4703#endif
4572 4704
@@ -4612,33 +4744,6 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4612 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 4744 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4613 return false; 4745 return false;
4614 4746
4615#ifdef CONFIG_IXGBE_DCB
4616 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4617 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4618
4619 ixgbe_cache_ring_dcb(adapter);
4620 /* find out queues in TC for FCoE */
4621 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4622 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4623 /*
4624 * In 82599, the number of Tx queues for each traffic
4625 * class for both 8-TC and 4-TC modes are:
4626 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4627 * 8 TCs: 32 32 16 16 8 8 8 8
4628 * 4 TCs: 64 64 32 32
4629 * We have max 8 queues for FCoE, where 8 the is
4630 * FCoE redirection table size. If TC for FCoE is
4631 * less than or equal to TC3, we have enough queues
4632 * to add max of 8 queues for FCoE, so we start FCoE
4633 * Tx queue from the next one, i.e., reg_idx + 1.
4634 * If TC for FCoE is above TC3, implying 8 TC mode,
4635 * and we need 8 for FCoE, we have to take all queues
4636 * in that traffic class for FCoE.
4637 */
4638 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4639 fcoe_tx_i--;
4640 }
4641#endif /* CONFIG_IXGBE_DCB */
4642 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4747 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4643 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4748 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4644 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 4749 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
@@ -4695,16 +4800,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4695 if (ixgbe_cache_ring_sriov(adapter)) 4800 if (ixgbe_cache_ring_sriov(adapter))
4696 return; 4801 return;
4697 4802
4803#ifdef CONFIG_IXGBE_DCB
4804 if (ixgbe_cache_ring_dcb(adapter))
4805 return;
4806#endif
4807
4698#ifdef IXGBE_FCOE 4808#ifdef IXGBE_FCOE
4699 if (ixgbe_cache_ring_fcoe(adapter)) 4809 if (ixgbe_cache_ring_fcoe(adapter))
4700 return; 4810 return;
4701
4702#endif /* IXGBE_FCOE */ 4811#endif /* IXGBE_FCOE */
4703#ifdef CONFIG_IXGBE_DCB
4704 if (ixgbe_cache_ring_dcb(adapter))
4705 return;
4706 4812
4707#endif
4708 if (ixgbe_cache_ring_fdir(adapter)) 4813 if (ixgbe_cache_ring_fdir(adapter))
4709 return; 4814 return;
4710 4815
@@ -4863,16 +4968,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4863{ 4968{
4864 int q_idx, num_q_vectors; 4969 int q_idx, num_q_vectors;
4865 struct ixgbe_q_vector *q_vector; 4970 struct ixgbe_q_vector *q_vector;
4866 int napi_vectors;
4867 int (*poll)(struct napi_struct *, int); 4971 int (*poll)(struct napi_struct *, int);
4868 4972
4869 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 4973 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4870 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4974 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4871 napi_vectors = adapter->num_rx_queues;
4872 poll = &ixgbe_clean_rxtx_many; 4975 poll = &ixgbe_clean_rxtx_many;
4873 } else { 4976 } else {
4874 num_q_vectors = 1; 4977 num_q_vectors = 1;
4875 napi_vectors = 1;
4876 poll = &ixgbe_poll; 4978 poll = &ixgbe_poll;
4877 } 4979 }
4878 4980
@@ -5169,10 +5271,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5169 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 5271 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5170 adapter->dcb_cfg.rx_pba_cfg = pba_equal; 5272 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
5171 adapter->dcb_cfg.pfc_mode_enable = false; 5273 adapter->dcb_cfg.pfc_mode_enable = false;
5172 adapter->dcb_cfg.round_robin_enable = false;
5173 adapter->dcb_set_bitmap = 0x00; 5274 adapter->dcb_set_bitmap = 0x00;
5275 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5174 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 5276 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
5175 adapter->ring_feature[RING_F_DCB].indices); 5277 MAX_TRAFFIC_CLASS);
5176 5278
5177#endif 5279#endif
5178 5280
@@ -5437,8 +5539,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5437 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5539 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5438 5540
5439 /* MTU < 68 is an error and causes problems on some kernels */ 5541 /* MTU < 68 is an error and causes problems on some kernels */
5440 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 5542 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
5441 return -EINVAL; 5543 hw->mac.type != ixgbe_mac_X540) {
5544 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
5545 return -EINVAL;
5546 } else {
5547 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5548 return -EINVAL;
5549 }
5442 5550
5443 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5551 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5444 /* must set new MTU before calling down or up */ 5552 /* must set new MTU before calling down or up */
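
The practical effect of the split check: with VFs active on anything but an X540, the PF refuses jumbo MTUs — max_frame may not exceed MAXIMUM_ETHERNET_VLAN_SIZE (1522 bytes if it carries the usual kernel value of a 1500-byte MTU plus VLAN framing and FCS), presumably because existing VF drivers cannot be told about larger receive buffers. X540 is exempt thanks to the per-ring RLPML programming added in ixgbe_configure_rx_ring() above.
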
@@ -5606,6 +5714,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5606 } 5714 }
5607 5715
5608 ixgbe_clear_interrupt_scheme(adapter); 5716 ixgbe_clear_interrupt_scheme(adapter);
5717#ifdef CONFIG_DCB
5718 kfree(adapter->ixgbe_ieee_pfc);
5719 kfree(adapter->ixgbe_ieee_ets);
5720#endif
5609 5721
5610#ifdef CONFIG_PM 5722#ifdef CONFIG_PM
5611 retval = pci_save_state(pdev); 5723 retval = pci_save_state(pdev);
@@ -5964,7 +6076,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
5964 unregister_netdev(adapter->netdev); 6076 unregister_netdev(adapter->netdev);
5965 return; 6077 return;
5966 } 6078 }
5967 hw->mac.ops.setup_sfp(hw); 6079 if (hw->mac.ops.setup_sfp)
6080 hw->mac.ops.setup_sfp(hw);
5968 6081
5969 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 6082 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
5970 /* This will also work for DA Twinax connections */ 6083 /* This will also work for DA Twinax connections */
@@ -6095,12 +6208,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
6095 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 6208 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6096 "10 Gbps" : 6209 "10 Gbps" :
6097 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 6210 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6098 "1 Gbps" : "unknown speed")), 6211 "1 Gbps" :
6212 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6213 "100 Mbps" :
6214 "unknown speed"))),
6099 ((flow_rx && flow_tx) ? "RX/TX" : 6215 ((flow_rx && flow_tx) ? "RX/TX" :
6100 (flow_rx ? "RX" : 6216 (flow_rx ? "RX" :
6101 (flow_tx ? "TX" : "None")))); 6217 (flow_tx ? "TX" : "None"))));
6102 6218
6103 netif_carrier_on(netdev); 6219 netif_carrier_on(netdev);
6220 ixgbe_check_vf_rate_limit(adapter);
6104 } else { 6221 } else {
6105 /* Force detection of hung controller */ 6222 /* Force detection of hung controller */
6106 for (i = 0; i < adapter->num_tx_queues; i++) { 6223 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -6630,18 +6747,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6630 6747
6631 protocol = vlan_get_protocol(skb); 6748 protocol = vlan_get_protocol(skb);
6632 6749
6633 if ((protocol == htons(ETH_P_FCOE)) || 6750 if (((protocol == htons(ETH_P_FCOE)) ||
6634 (protocol == htons(ETH_P_FIP))) { 6751 (protocol == htons(ETH_P_FIP))) &&
6635 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 6752 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
6636 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6753 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6637 txq += adapter->ring_feature[RING_F_FCOE].mask; 6754 txq += adapter->ring_feature[RING_F_FCOE].mask;
6638 return txq; 6755 return txq;
6639#ifdef CONFIG_IXGBE_DCB
6640 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6641 txq = adapter->fcoe.up;
6642 return txq;
6643#endif
6644 }
6645 } 6756 }
6646#endif 6757#endif
6647 6758
@@ -6651,15 +6762,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6651 return txq; 6762 return txq;
6652 } 6763 }
6653 6764
6654 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6655 if (skb->priority == TC_PRIO_CONTROL)
6656 txq = adapter->ring_feature[RING_F_DCB].indices-1;
6657 else
6658 txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
6659 >> 13;
6660 return txq;
6661 }
6662
6663 return skb_tx_hash(dev, skb); 6765 return skb_tx_hash(dev, skb);
6664} 6766}
6665 6767
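
The surviving FCoE branch leans on RING_F_FCOE.indices being a power of two: the AND is a cheap modulo and mask is the base queue offset. With illustrative values of indices = 8 and mask = 64, any incoming txq lands in the range 64-71.
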
@@ -6681,13 +6783,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6681 tx_flags |= vlan_tx_tag_get(skb); 6783 tx_flags |= vlan_tx_tag_get(skb);
6682 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6784 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6683 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; 6785 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
6684 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 6786 tx_flags |= tx_ring->dcb_tc << 13;
6685 } 6787 }
6686 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6788 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6687 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6789 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6688 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && 6790 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
6689 skb->priority != TC_PRIO_CONTROL) { 6791 skb->priority != TC_PRIO_CONTROL) {
6690 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 6792 tx_flags |= tx_ring->dcb_tc << 13;
6691 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6793 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6692 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6794 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6693 } 6795 }
@@ -6696,20 +6798,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6696 /* for FCoE with DCB, we force the priority to what 6798 /* for FCoE with DCB, we force the priority to what
6697 * was specified by the switch */ 6799 * was specified by the switch */
6698 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6800 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6699 (protocol == htons(ETH_P_FCOE) || 6801 (protocol == htons(ETH_P_FCOE)))
6700 protocol == htons(ETH_P_FIP))) { 6802 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6701#ifdef CONFIG_IXGBE_DCB
6702 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6703 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
6704 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6705 tx_flags |= ((adapter->fcoe.up << 13)
6706 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6707 }
6708#endif
6709 /* flag for FCoE offloads */
6710 if (protocol == htons(ETH_P_FCOE))
6711 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6712 }
6713#endif 6803#endif
6714 6804
6715 /* four things can cause us to need a context descriptor */ 6805 /* four things can cause us to need a context descriptor */
@@ -6982,11 +7072,15 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6982 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, 7072 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6983 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 7073 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
6984 .ndo_get_stats64 = ixgbe_get_stats64, 7074 .ndo_get_stats64 = ixgbe_get_stats64,
7075#ifdef CONFIG_IXGBE_DCB
7076 .ndo_setup_tc = ixgbe_setup_tc,
7077#endif
6985#ifdef CONFIG_NET_POLL_CONTROLLER 7078#ifdef CONFIG_NET_POLL_CONTROLLER
6986 .ndo_poll_controller = ixgbe_netpoll, 7079 .ndo_poll_controller = ixgbe_netpoll,
6987#endif 7080#endif
6988#ifdef IXGBE_FCOE 7081#ifdef IXGBE_FCOE
6989 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 7082 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
7083 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
6990 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 7084 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
6991 .ndo_fcoe_enable = ixgbe_fcoe_enable, 7085 .ndo_fcoe_enable = ixgbe_fcoe_enable,
6992 .ndo_fcoe_disable = ixgbe_fcoe_disable, 7086 .ndo_fcoe_disable = ixgbe_fcoe_disable,
@@ -7122,8 +7216,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7122 else 7216 else
7123 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); 7217 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
7124 7218
7219#if defined(CONFIG_DCB)
7125 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); 7220 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
7126#ifdef IXGBE_FCOE 7221#elif defined(IXGBE_FCOE)
7127 indices += min_t(unsigned int, num_possible_cpus(), 7222 indices += min_t(unsigned int, num_possible_cpus(),
7128 IXGBE_MAX_FCOE_INDICES); 7223 IXGBE_MAX_FCOE_INDICES);
7129#endif 7224#endif
@@ -7279,8 +7374,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7279 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 7374 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7280 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | 7375 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
7281 IXGBE_FLAG_DCB_ENABLED); 7376 IXGBE_FLAG_DCB_ENABLED);
7282 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
7283 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
7284 7377
7285#ifdef CONFIG_IXGBE_DCB 7378#ifdef CONFIG_IXGBE_DCB
7286 netdev->dcbnl_ops = &dcbnl_ops; 7379 netdev->dcbnl_ops = &dcbnl_ops;
@@ -7700,16 +7793,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7700 7793
7701#endif /* CONFIG_IXGBE_DCA */ 7794#endif /* CONFIG_IXGBE_DCA */
7702 7795
7703/**
7704 * ixgbe_get_hw_dev return device
7705 * used by hardware layer to print debugging information
7706 **/
7707struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
7708{
7709 struct ixgbe_adapter *adapter = hw->back;
7710 return adapter->netdev;
7711}
7712
7713module_exit(ixgbe_exit_module); 7796module_exit(ixgbe_exit_module);
7714 7797
7715/* ixgbe_main.c */ 7798/* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index ea82c5a1cd3..1ff0eefcfd0 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -154,9 +154,6 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
154 udelay(mbx->usec_delay); 154 udelay(mbx->usec_delay);
155 } 155 }
156 156
157 /* if we failed, all future posted messages fail until reset */
158 if (!countdown)
159 mbx->timeout = 0;
160out: 157out:
161 return countdown ? 0 : IXGBE_ERR_MBX; 158 return countdown ? 0 : IXGBE_ERR_MBX;
162} 159}
@@ -183,9 +180,6 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
183 udelay(mbx->usec_delay); 180 udelay(mbx->usec_delay);
184 } 181 }
185 182
186 /* if we failed, all future posted messages fail until reset */
187 if (!countdown)
188 mbx->timeout = 0;
189out: 183out:
190 return countdown ? 0 : IXGBE_ERR_MBX; 184 return countdown ? 0 : IXGBE_ERR_MBX;
191} 185}
@@ -437,6 +431,7 @@ out_no_read:
437 return ret_val; 431 return ret_val;
438} 432}
439 433
434#ifdef CONFIG_PCI_IOV
440/** 435/**
441 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox 436 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
442 * @hw: pointer to the HW structure 437 * @hw: pointer to the HW structure
@@ -447,24 +442,22 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
447{ 442{
448 struct ixgbe_mbx_info *mbx = &hw->mbx; 443 struct ixgbe_mbx_info *mbx = &hw->mbx;
449 444
450 switch (hw->mac.type) { 445 if (hw->mac.type != ixgbe_mac_82599EB &&
451 case ixgbe_mac_82599EB: 446 hw->mac.type != ixgbe_mac_X540)
452 case ixgbe_mac_X540: 447 return;
453 mbx->timeout = 0;
454 mbx->usec_delay = 0;
455 448
456 mbx->size = IXGBE_VFMAILBOX_SIZE; 449 mbx->timeout = 0;
450 mbx->usec_delay = 0;
457 451
458 mbx->stats.msgs_tx = 0; 452 mbx->stats.msgs_tx = 0;
459 mbx->stats.msgs_rx = 0; 453 mbx->stats.msgs_rx = 0;
460 mbx->stats.reqs = 0; 454 mbx->stats.reqs = 0;
461 mbx->stats.acks = 0; 455 mbx->stats.acks = 0;
462 mbx->stats.rsts = 0; 456 mbx->stats.rsts = 0;
463 break; 457
464 default: 458 mbx->size = IXGBE_VFMAILBOX_SIZE;
465 break;
466 }
467} 459}
460#endif /* CONFIG_PCI_IOV */
468 461
469struct ixgbe_mbx_operations mbx_ops_generic = { 462struct ixgbe_mbx_operations mbx_ops_generic = {
470 .read = ixgbe_read_mbx_pf, 463 .read = ixgbe_read_mbx_pf,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 3df9b159021..fe6ea81dc7f 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); 86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); 87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
89#ifdef CONFIG_PCI_IOV
89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 90void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
91#endif /* CONFIG_PCI_IOV */
90 92
91extern struct ixgbe_mbx_operations mbx_ops_generic; 93extern struct ixgbe_mbx_operations mbx_ops_generic;
92 94
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8f7123e8fc0..f72f705f618 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
57{ 57{
58 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 58 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
59 u32 phy_addr; 59 u32 phy_addr;
60 u16 ext_ability = 0;
60 61
61 if (hw->phy.type == ixgbe_phy_unknown) { 62 if (hw->phy.type == ixgbe_phy_unknown) {
62 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 63 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -65,12 +66,29 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
65 ixgbe_get_phy_id(hw); 66 ixgbe_get_phy_id(hw);
66 hw->phy.type = 67 hw->phy.type =
67 ixgbe_get_phy_type_from_id(hw->phy.id); 68 ixgbe_get_phy_type_from_id(hw->phy.id);
69
70 if (hw->phy.type == ixgbe_phy_unknown) {
71 hw->phy.ops.read_reg(hw,
72 MDIO_PMA_EXTABLE,
73 MDIO_MMD_PMAPMD,
74 &ext_ability);
75 if (ext_ability &
76 (MDIO_PMA_EXTABLE_10GBT |
77 MDIO_PMA_EXTABLE_1000BT))
78 hw->phy.type =
79 ixgbe_phy_cu_unknown;
80 else
81 hw->phy.type =
82 ixgbe_phy_generic;
83 }
84
68 status = 0; 85 status = 0;
69 break; 86 break;
70 } 87 }
71 } 88 }
72 /* clear value if nothing found */ 89 /* clear value if nothing found */
73 hw->phy.mdio.prtad = 0; 90 if (status != 0)
91 hw->phy.mdio.prtad = 0;
74 } else { 92 } else {
75 status = 0; 93 status = 0;
76 } 94 }
@@ -138,17 +156,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
138 **/ 156 **/
139s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 157s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
140{ 158{
159 u32 i;
160 u16 ctrl = 0;
161 s32 status = 0;
162
163 if (hw->phy.type == ixgbe_phy_unknown)
164 status = ixgbe_identify_phy_generic(hw);
165
166 if (status != 0 || hw->phy.type == ixgbe_phy_none)
167 goto out;
168
141 /* Don't reset PHY if it's shut down due to overtemp. */ 169 /* Don't reset PHY if it's shut down due to overtemp. */
142 if (!hw->phy.reset_if_overtemp && 170 if (!hw->phy.reset_if_overtemp &&
143 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 171 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
144 return 0; 172 goto out;
145 173
146 /* 174 /*
147 * Perform soft PHY reset to the PHY_XS. 175 * Perform soft PHY reset to the PHY_XS.
148 * This will cause a soft reset to the PHY 176 * This will cause a soft reset to the PHY
149 */ 177 */
150 return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 178 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
151 MDIO_CTRL1_RESET); 179 MDIO_MMD_PHYXS,
180 MDIO_CTRL1_RESET);
181
182 /*
183 * Poll for reset bit to self-clear indicating reset is complete.
184 * Some PHYs could take up to 3 seconds to complete and need about
185 * 1.7 usec delay after the reset is complete.
186 */
187 for (i = 0; i < 30; i++) {
188 msleep(100);
189 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
190 MDIO_MMD_PHYXS, &ctrl);
191 if (!(ctrl & MDIO_CTRL1_RESET)) {
192 udelay(2);
193 break;
194 }
195 }
196
197 if (ctrl & MDIO_CTRL1_RESET) {
198 status = IXGBE_ERR_RESET_FAILED;
199 hw_dbg(hw, "PHY reset polling failed to complete.\n");
200 }
201
202out:
203 return status;
152} 204}
153 205
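
Timing check on the new poll loop: 30 iterations of msleep(100) gives the PHY the full 3 seconds the comment allows for, and the udelay(2) once MDIO_CTRL1_RESET clears covers the ~1.7 usec the comment says the PHY needs after the reset completes.
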
154/** 206/**
@@ -171,7 +223,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
171 else 223 else
172 gssr = IXGBE_GSSR_PHY0_SM; 224 gssr = IXGBE_GSSR_PHY0_SM;
173 225
174 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 226 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
175 status = IXGBE_ERR_SWFW_SYNC; 227 status = IXGBE_ERR_SWFW_SYNC;
176 228
177 if (status == 0) { 229 if (status == 0) {
@@ -243,7 +295,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
243 } 295 }
244 } 296 }
245 297
246 ixgbe_release_swfw_sync(hw, gssr); 298 hw->mac.ops.release_swfw_sync(hw, gssr);
247 } 299 }
248 300
249 return status; 301 return status;
@@ -269,7 +321,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
269 else 321 else
270 gssr = IXGBE_GSSR_PHY0_SM; 322 gssr = IXGBE_GSSR_PHY0_SM;
271 323
272 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 324 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
273 status = IXGBE_ERR_SWFW_SYNC; 325 status = IXGBE_ERR_SWFW_SYNC;
274 326
275 if (status == 0) { 327 if (status == 0) {
@@ -336,7 +388,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
336 } 388 }
337 } 389 }
338 390
339 ixgbe_release_swfw_sync(hw, gssr); 391 hw->mac.ops.release_swfw_sync(hw, gssr);
340 } 392 }
341 393
342 return status; 394 return status;
@@ -350,49 +402,89 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
350 **/ 402 **/
351s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) 403s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
352{ 404{
353 s32 status = IXGBE_NOT_IMPLEMENTED; 405 s32 status = 0;
354 u32 time_out; 406 u32 time_out;
355 u32 max_time_out = 10; 407 u32 max_time_out = 10;
356 u16 autoneg_reg; 408 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
409 bool autoneg = false;
410 ixgbe_link_speed speed;
357 411
358 /* 412 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
359 * Set advertisement settings in PHY based on autoneg_advertised 413
360 * settings. If autoneg_advertised = 0, then advertise default values 414 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
361 * tnx devices cannot be "forced" to a autoneg 10G and fail. But can 415 /* Set or unset auto-negotiation 10G advertisement */
362 * for a 1G. 416 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
363 */ 417 MDIO_MMD_AN,
364 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); 418 &autoneg_reg);
365 419
366 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
367 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; 420 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
368 else 421 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
369 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; 422 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
423
424 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
425 MDIO_MMD_AN,
426 autoneg_reg);
427 }
428
429 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
430 /* Set or unset auto-negotiation 1G advertisement */
431 hw->phy.ops.read_reg(hw,
432 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
433 MDIO_MMD_AN,
434 &autoneg_reg);
435
436 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
437 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
438 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
439
440 hw->phy.ops.write_reg(hw,
441 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
442 MDIO_MMD_AN,
443 autoneg_reg);
444 }
445
446 if (speed & IXGBE_LINK_SPEED_100_FULL) {
447 /* Set or unset auto-negotiation 100M advertisement */
448 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
449 MDIO_MMD_AN,
450 &autoneg_reg);
370 451
371 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); 452 autoneg_reg &= ~ADVERTISE_100FULL;
453 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
454 autoneg_reg |= ADVERTISE_100FULL;
455
456 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
457 MDIO_MMD_AN,
458 autoneg_reg);
459 }
372 460
373 /* Restart PHY autonegotiation and wait for completion */ 461 /* Restart PHY autonegotiation and wait for completion */
374 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); 462 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
463 MDIO_MMD_AN, &autoneg_reg);
375 464
376 autoneg_reg |= MDIO_AN_CTRL1_RESTART; 465 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
377 466
378 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); 467 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
468 MDIO_MMD_AN, autoneg_reg);
379 469
380 /* Wait for autonegotiation to finish */ 470 /* Wait for autonegotiation to finish */
381 for (time_out = 0; time_out < max_time_out; time_out++) { 471 for (time_out = 0; time_out < max_time_out; time_out++) {
382 udelay(10); 472 udelay(10);
383 /* Restart PHY autonegotiation and wait for completion */ 473 /* Restart PHY autonegotiation and wait for completion */
384 status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, 474 status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
385 &autoneg_reg); 475 MDIO_MMD_AN,
476 &autoneg_reg);
386 477
387 autoneg_reg &= MDIO_AN_STAT1_COMPLETE; 478 autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
388 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { 479 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
389 status = 0;
390 break; 480 break;
391 } 481 }
392 } 482 }
393 483
394 if (time_out == max_time_out) 484 if (time_out == max_time_out) {
395 status = IXGBE_ERR_LINK_SETUP; 485 status = IXGBE_ERR_LINK_SETUP;
 486 hw_dbg(hw, "ixgbe_setup_phy_link_generic: timed out");
487 }
396 488
397 return status; 489 return status;
398} 490}
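
The three per-speed blocks above share one read-modify-write shape; a condensed sketch, where reg, adv_bit and speed_bit stand in for the per-speed register and bits (placeholders, not driver names):

	/* sketch: per-speed advertisement update */
	hw->phy.ops.read_reg(hw, reg, MDIO_MMD_AN, &autoneg_reg);
	autoneg_reg &= ~adv_bit;	/* always clear the stale bit */
	if (hw->phy.autoneg_advertised & speed_bit)
		autoneg_reg |= adv_bit;	/* re-set only if requested */
	hw->phy.ops.write_reg(hw, reg, MDIO_MMD_AN, autoneg_reg);

Clearing before conditionally re-setting is what lets a speed drop out of the advertisement; the old code only ever toggled the 10G bit, and only on an exact match against 1G.
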
@@ -421,6 +513,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
421 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 513 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
422 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 514 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
423 515
516 if (speed & IXGBE_LINK_SPEED_100_FULL)
517 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
518
424 /* Setup link based on the new speed settings */ 519 /* Setup link based on the new speed settings */
425 hw->phy.ops.setup_link(hw); 520 hw->phy.ops.setup_link(hw);
426 521
@@ -461,6 +556,180 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
461} 556}
462 557
463/** 558/**
559 * ixgbe_check_phy_link_tnx - Determine link and speed status
560 * @hw: pointer to hardware structure
561 *
562 * Reads the VS1 register to determine if link is up and the current speed for
563 * the PHY.
564 **/
565s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
566 bool *link_up)
567{
568 s32 status = 0;
569 u32 time_out;
570 u32 max_time_out = 10;
571 u16 phy_link = 0;
572 u16 phy_speed = 0;
573 u16 phy_data = 0;
574
575 /* Initialize speed and link to default case */
576 *link_up = false;
577 *speed = IXGBE_LINK_SPEED_10GB_FULL;
578
579 /*
580 * Check current speed and link status of the PHY register.
581 * This is a vendor specific register and may have to
582 * be changed for other copper PHYs.
583 */
584 for (time_out = 0; time_out < max_time_out; time_out++) {
585 udelay(10);
586 status = hw->phy.ops.read_reg(hw,
587 MDIO_STAT1,
588 MDIO_MMD_VEND1,
589 &phy_data);
590 phy_link = phy_data &
591 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
592 phy_speed = phy_data &
593 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
594 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
595 *link_up = true;
596 if (phy_speed ==
597 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
598 *speed = IXGBE_LINK_SPEED_1GB_FULL;
599 break;
600 }
601 }
602
603 return status;
604}
605
606/**
607 * ixgbe_setup_phy_link_tnx - Set and restart autoneg
608 * @hw: pointer to hardware structure
609 *
610 * Restart autonegotiation and PHY and waits for completion.
611 **/
612s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
613{
614 s32 status = 0;
615 u32 time_out;
616 u32 max_time_out = 10;
617 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
618 bool autoneg = false;
619 ixgbe_link_speed speed;
620
621 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
622
623 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
624 /* Set or unset auto-negotiation 10G advertisement */
625 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
626 MDIO_MMD_AN,
627 &autoneg_reg);
628
629 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
630 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
631 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
632
633 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
634 MDIO_MMD_AN,
635 autoneg_reg);
636 }
637
638 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
639 /* Set or unset auto-negotiation 1G advertisement */
640 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
641 MDIO_MMD_AN,
642 &autoneg_reg);
643
644 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
645 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
646 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
647
648 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
649 MDIO_MMD_AN,
650 autoneg_reg);
651 }
652
653 if (speed & IXGBE_LINK_SPEED_100_FULL) {
654 /* Set or unset auto-negotiation 100M advertisement */
655 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
656 MDIO_MMD_AN,
657 &autoneg_reg);
658
659 autoneg_reg &= ~ADVERTISE_100FULL;
660 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
661 autoneg_reg |= ADVERTISE_100FULL;
662
663 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
664 MDIO_MMD_AN,
665 autoneg_reg);
666 }
667
668 /* Restart PHY autonegotiation and wait for completion */
669 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
670 MDIO_MMD_AN, &autoneg_reg);
671
672 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
673
674 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
675 MDIO_MMD_AN, autoneg_reg);
676
677 /* Wait for autonegotiation to finish */
678 for (time_out = 0; time_out < max_time_out; time_out++) {
679 udelay(10);
680 /* Restart PHY autonegotiation and wait for completion */
681 status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
682 MDIO_MMD_AN,
683 &autoneg_reg);
684
685 autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
686 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
687 break;
688 }
689
690 if (time_out == max_time_out) {
691 status = IXGBE_ERR_LINK_SETUP;
 692 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: timed out");
693 }
694
695 return status;
696}
697
698/**
699 * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
700 * @hw: pointer to hardware structure
701 * @firmware_version: pointer to the PHY Firmware Version
702 **/
703s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
704 u16 *firmware_version)
705{
706 s32 status = 0;
707
708 status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
709 MDIO_MMD_VEND1,
710 firmware_version);
711
712 return status;
713}
714
715/**
716 * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
717 * @hw: pointer to hardware structure
718 * @firmware_version: pointer to the PHY Firmware Version
719 **/
720s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
721 u16 *firmware_version)
722{
723 s32 status = 0;
724
725 status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
726 MDIO_MMD_VEND1,
727 firmware_version);
728
729 return status;
730}
731
732/**
464 * ixgbe_reset_phy_nl - Performs a PHY reset 733 * ixgbe_reset_phy_nl - Performs a PHY reset
465 * @hw: pointer to hardware structure 734 * @hw: pointer to hardware structure
466 **/ 735 **/
@@ -556,11 +825,10 @@ out:
556} 825}
557 826
558/** 827/**
559 * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns 828 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
560 * the PHY type.
561 * @hw: pointer to hardware structure 829 * @hw: pointer to hardware structure
562 * 830 *
563 * Searches for and indentifies the SFP module. Assings appropriate PHY type. 831 * Searches for and identifies the SFP module and assigns appropriate PHY type.
564 **/ 832 **/
565s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 833s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
566{ 834{
@@ -581,41 +849,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
581 goto out; 849 goto out;
582 } 850 }
583 851
584 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, 852 status = hw->phy.ops.read_i2c_eeprom(hw,
853 IXGBE_SFF_IDENTIFIER,
585 &identifier); 854 &identifier);
586 855
587 if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) { 856 if (status == IXGBE_ERR_SWFW_SYNC ||
588 status = IXGBE_ERR_SFP_NOT_PRESENT; 857 status == IXGBE_ERR_I2C ||
589 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 858 status == IXGBE_ERR_SFP_NOT_PRESENT)
590 if (hw->phy.type != ixgbe_phy_nl) { 859 goto err_read_i2c_eeprom;
591 hw->phy.id = 0;
592 hw->phy.type = ixgbe_phy_unknown;
593 }
594 goto out;
595 }
596 860
597 if (identifier == IXGBE_SFF_IDENTIFIER_SFP) { 861 /* LAN ID is needed for sfp_type determination */
598 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, 862 hw->mac.ops.set_lan_id(hw);
599 &comp_codes_1g); 863
600 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, 864 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
601 &comp_codes_10g); 865 hw->phy.type = ixgbe_phy_sfp_unsupported;
602 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, 866 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
603 &cable_tech); 867 } else {
604 868 status = hw->phy.ops.read_i2c_eeprom(hw,
605 /* ID Module 869 IXGBE_SFF_1GBE_COMP_CODES,
606 * ========= 870 &comp_codes_1g);
607 * 0 SFP_DA_CU 871
608 * 1 SFP_SR 872 if (status == IXGBE_ERR_SWFW_SYNC ||
609 * 2 SFP_LR 873 status == IXGBE_ERR_I2C ||
610 * 3 SFP_DA_CORE0 - 82599-specific 874 status == IXGBE_ERR_SFP_NOT_PRESENT)
611 * 4 SFP_DA_CORE1 - 82599-specific 875 goto err_read_i2c_eeprom;
612 * 5 SFP_SR/LR_CORE0 - 82599-specific 876
613 * 6 SFP_SR/LR_CORE1 - 82599-specific 877 status = hw->phy.ops.read_i2c_eeprom(hw,
614 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific 878 IXGBE_SFF_10GBE_COMP_CODES,
615 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 879 &comp_codes_10g);
616 * 9 SFP_1g_cu_CORE0 - 82599-specific 880
617 * 10 SFP_1g_cu_CORE1 - 82599-specific 881 if (status == IXGBE_ERR_SWFW_SYNC ||
618 */ 882 status == IXGBE_ERR_I2C ||
883 status == IXGBE_ERR_SFP_NOT_PRESENT)
884 goto err_read_i2c_eeprom;
885 status = hw->phy.ops.read_i2c_eeprom(hw,
886 IXGBE_SFF_CABLE_TECHNOLOGY,
887 &cable_tech);
888
889 if (status == IXGBE_ERR_SWFW_SYNC ||
890 status == IXGBE_ERR_I2C ||
891 status == IXGBE_ERR_SFP_NOT_PRESENT)
892 goto err_read_i2c_eeprom;
893
894 /* ID Module
895 * =========
896 * 0 SFP_DA_CU
897 * 1 SFP_SR
898 * 2 SFP_LR
899 * 3 SFP_DA_CORE0 - 82599-specific
900 * 4 SFP_DA_CORE1 - 82599-specific
901 * 5 SFP_SR/LR_CORE0 - 82599-specific
902 * 6 SFP_SR/LR_CORE1 - 82599-specific
903 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
904 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
905 * 9 SFP_1g_cu_CORE0 - 82599-specific
906 * 10 SFP_1g_cu_CORE1 - 82599-specific
907 */
619 if (hw->mac.type == ixgbe_mac_82598EB) { 908 if (hw->mac.type == ixgbe_mac_82598EB) {
620 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 909 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
621 hw->phy.sfp_type = ixgbe_sfp_type_da_cu; 910 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
@@ -647,31 +936,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
647 ixgbe_sfp_type_da_act_lmt_core1; 936 ixgbe_sfp_type_da_act_lmt_core1;
648 } else { 937 } else {
649 hw->phy.sfp_type = 938 hw->phy.sfp_type =
650 ixgbe_sfp_type_unknown; 939 ixgbe_sfp_type_unknown;
651 } 940 }
652 } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 941 } else if (comp_codes_10g &
942 (IXGBE_SFF_10GBASESR_CAPABLE |
943 IXGBE_SFF_10GBASELR_CAPABLE)) {
653 if (hw->bus.lan_id == 0) 944 if (hw->bus.lan_id == 0)
654 hw->phy.sfp_type = 945 hw->phy.sfp_type =
655 ixgbe_sfp_type_srlr_core0; 946 ixgbe_sfp_type_srlr_core0;
656 else 947 else
657 hw->phy.sfp_type = 948 hw->phy.sfp_type =
658 ixgbe_sfp_type_srlr_core1; 949 ixgbe_sfp_type_srlr_core1;
659 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 950 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
660 if (hw->bus.lan_id == 0)
661 hw->phy.sfp_type =
662 ixgbe_sfp_type_srlr_core0;
663 else
664 hw->phy.sfp_type =
665 ixgbe_sfp_type_srlr_core1;
666 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
667 if (hw->bus.lan_id == 0) 951 if (hw->bus.lan_id == 0)
668 hw->phy.sfp_type = 952 hw->phy.sfp_type =
669 ixgbe_sfp_type_1g_cu_core0; 953 ixgbe_sfp_type_1g_cu_core0;
670 else 954 else
671 hw->phy.sfp_type = 955 hw->phy.sfp_type =
672 ixgbe_sfp_type_1g_cu_core1; 956 ixgbe_sfp_type_1g_cu_core1;
673 else 957 } else {
674 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 958 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
959 }
675 } 960 }
676 961
677 if (hw->phy.sfp_type != stored_sfp_type) 962 if (hw->phy.sfp_type != stored_sfp_type)
@@ -688,16 +973,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
688 /* Determine PHY vendor */ 973 /* Determine PHY vendor */
689 if (hw->phy.type != ixgbe_phy_nl) { 974 if (hw->phy.type != ixgbe_phy_nl) {
690 hw->phy.id = identifier; 975 hw->phy.id = identifier;
691 hw->phy.ops.read_i2c_eeprom(hw, 976 status = hw->phy.ops.read_i2c_eeprom(hw,
692 IXGBE_SFF_VENDOR_OUI_BYTE0, 977 IXGBE_SFF_VENDOR_OUI_BYTE0,
693 &oui_bytes[0]); 978 &oui_bytes[0]);
694 hw->phy.ops.read_i2c_eeprom(hw, 979
980 if (status == IXGBE_ERR_SWFW_SYNC ||
981 status == IXGBE_ERR_I2C ||
982 status == IXGBE_ERR_SFP_NOT_PRESENT)
983 goto err_read_i2c_eeprom;
984
985 status = hw->phy.ops.read_i2c_eeprom(hw,
695 IXGBE_SFF_VENDOR_OUI_BYTE1, 986 IXGBE_SFF_VENDOR_OUI_BYTE1,
696 &oui_bytes[1]); 987 &oui_bytes[1]);
697 hw->phy.ops.read_i2c_eeprom(hw, 988
989 if (status == IXGBE_ERR_SWFW_SYNC ||
990 status == IXGBE_ERR_I2C ||
991 status == IXGBE_ERR_SFP_NOT_PRESENT)
992 goto err_read_i2c_eeprom;
993
994 status = hw->phy.ops.read_i2c_eeprom(hw,
698 IXGBE_SFF_VENDOR_OUI_BYTE2, 995 IXGBE_SFF_VENDOR_OUI_BYTE2,
699 &oui_bytes[2]); 996 &oui_bytes[2]);
700 997
998 if (status == IXGBE_ERR_SWFW_SYNC ||
999 status == IXGBE_ERR_I2C ||
1000 status == IXGBE_ERR_SFP_NOT_PRESENT)
1001 goto err_read_i2c_eeprom;
1002
701 vendor_oui = 1003 vendor_oui =
702 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | 1004 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
703 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | 1005 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
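
For reference, the three bytes read above are packed into vendor_oui with the 24/16/8-bit shifts from ixgbe_phy.h (the BYTE2 shift is not visible in this excerpt and is assumed to be 8). A minimal standalone sketch, using the 00-90-65 OUI that matches the IXGBE_SFF_VENDOR_OUI_FTL case handled below:

    /* Not driver code: demonstrates the OUI packing only. */
    #include <stdint.h>
    #include <stdio.h>

    #define OUI_BYTE0_SHIFT 24
    #define OUI_BYTE1_SHIFT 16
    #define OUI_BYTE2_SHIFT 8

    int main(void)
    {
        uint8_t oui_bytes[3] = { 0x00, 0x90, 0x65 };
        uint32_t vendor_oui =
            ((uint32_t)oui_bytes[0] << OUI_BYTE0_SHIFT) |
            ((uint32_t)oui_bytes[1] << OUI_BYTE1_SHIFT) |
            ((uint32_t)oui_bytes[2] << OUI_BYTE2_SHIFT);

        printf("vendor_oui = 0x%08x\n", vendor_oui); /* 0x00906500 */
        return 0;
    }
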
@@ -707,7 +1009,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
707 case IXGBE_SFF_VENDOR_OUI_TYCO: 1009 case IXGBE_SFF_VENDOR_OUI_TYCO:
708 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
709 hw->phy.type = 1011 hw->phy.type =
710 ixgbe_phy_sfp_passive_tyco; 1012 ixgbe_phy_sfp_passive_tyco;
711 break; 1013 break;
712 case IXGBE_SFF_VENDOR_OUI_FTL: 1014 case IXGBE_SFF_VENDOR_OUI_FTL:
713 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 1015 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -724,7 +1026,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
724 default: 1026 default:
725 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 1027 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
726 hw->phy.type = 1028 hw->phy.type =
727 ixgbe_phy_sfp_passive_unknown; 1029 ixgbe_phy_sfp_passive_unknown;
728 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 1030 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
729 hw->phy.type = 1031 hw->phy.type =
730 ixgbe_phy_sfp_active_unknown; 1032 ixgbe_phy_sfp_active_unknown;
@@ -734,7 +1036,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
734 } 1036 }
735 } 1037 }
736 1038
737 /* All passive DA cables are supported */ 1039 /* Allow any DA cable vendor */
738 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | 1040 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
739 IXGBE_SFF_DA_ACTIVE_CABLE)) { 1041 IXGBE_SFF_DA_ACTIVE_CABLE)) {
740 status = 0; 1042 status = 0;
@@ -756,7 +1058,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
756 goto out; 1058 goto out;
757 } 1059 }
758 1060
759 /* This is guaranteed to be 82599, no need to check for NULL */
760 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 1061 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
761 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && 1062 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
762 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || 1063 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
@@ -776,15 +1077,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
776 1077
777out: 1078out:
778 return status; 1079 return status;
1080
1081err_read_i2c_eeprom:
1082 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1083 if (hw->phy.type != ixgbe_phy_nl) {
1084 hw->phy.id = 0;
1085 hw->phy.type = ixgbe_phy_unknown;
1086 }
1087 return IXGBE_ERR_SFP_NOT_PRESENT;
779} 1088}
780 1089
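
The restructured identify routine funnels every failed EEPROM read through the single err_read_i2c_eeprom label added at the bottom of the function, rather than repeating the not-present cleanup inline. A standalone sketch of the pattern, with illustrative stubs:

    /* read_a/read_b are illustrative stand-ins for the EEPROM reads. */
    #include <stdio.h>

    static int read_a(void) { return 0; }
    static int read_b(void) { return -1; }  /* simulate a failed read */

    static int identify(void)
    {
        if (read_a() != 0)
            goto err_read;
        if (read_b() != 0)
            goto err_read;
        return 0;

    err_read:
        puts("module not present: clearing cached PHY state");
        return -2;      /* one error code for every failed read */
    }

    int main(void)
    {
        return identify() == -2 ? 0 : 1;
    }
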
781/** 1090/**
782 * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see 1091 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
783 * if it supports a given SFP+ module type, if so it returns the offsets to the
784 * phy init sequence block.
785 * @hw: pointer to hardware structure 1092 * @hw: pointer to hardware structure
786 * @list_offset: offset to the SFP ID list 1093 * @list_offset: offset to the SFP ID list
787 * @data_offset: offset to the SFP data block 1094 * @data_offset: offset to the SFP data block
1095 *
1096 * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
1097 * so it returns the offsets to the phy init sequence block.
788 **/ 1098 **/
789s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 1099s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
790 u16 *list_offset, 1100 u16 *list_offset,
@@ -899,11 +1209,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
899 u8 dev_addr, u8 *data) 1209 u8 dev_addr, u8 *data)
900{ 1210{
901 s32 status = 0; 1211 s32 status = 0;
902 u32 max_retry = 1; 1212 u32 max_retry = 10;
903 u32 retry = 0; 1213 u32 retry = 0;
1214 u16 swfw_mask = 0;
904 bool nack = 1; 1215 bool nack = 1;
905 1216
1217 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1218 swfw_mask = IXGBE_GSSR_PHY1_SM;
1219 else
1220 swfw_mask = IXGBE_GSSR_PHY0_SM;
1221
906 do { 1222 do {
1223 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
1224 status = IXGBE_ERR_SWFW_SYNC;
1225 goto read_byte_out;
1226 }
1227
907 ixgbe_i2c_start(hw); 1228 ixgbe_i2c_start(hw);
908 1229
909 /* Device Address and write indication */ 1230 /* Device Address and write indication */
@@ -946,6 +1267,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
946 break; 1267 break;
947 1268
948fail: 1269fail:
1270 ixgbe_release_swfw_sync(hw, swfw_mask);
1271 msleep(100);
949 ixgbe_i2c_bus_clear(hw); 1272 ixgbe_i2c_bus_clear(hw);
950 retry++; 1273 retry++;
951 if (retry < max_retry) 1274 if (retry < max_retry)
@@ -955,6 +1278,9 @@ fail:
955 1278
956 } while (retry < max_retry); 1279 } while (retry < max_retry);
957 1280
1281 ixgbe_release_swfw_sync(hw, swfw_mask);
1282
1283read_byte_out:
958 return status; 1284 return status;
959} 1285}
960 1286
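
The reworked read path takes the PHY software/firmware semaphore around each attempt and, on failure, releases it and sleeps before retrying, so firmware can use the I2C bus while the driver backs off. A minimal standalone sketch of that shape, with stub helpers standing in for the driver's semaphore and transfer routines:

    /* Stub lock and transfer helpers stand in for the driver's. */
    #include <stdbool.h>
    #include <stdio.h>

    static int attempts;

    static bool acquire_sem(void) { return true; }            /* always wins here */
    static void release_sem(void) { }
    static bool i2c_read(void)    { return ++attempts >= 3; } /* fail twice */

    static int read_byte(void)
    {
        const int max_retry = 10;
        int retry;

        for (retry = 0; retry < max_retry; retry++) {
            if (!acquire_sem())
                return -1;              /* IXGBE_ERR_SWFW_SYNC */
            if (i2c_read()) {
                release_sem();
                return 0;               /* success */
            }
            release_sem();              /* driver sleeps 100 ms here */
        }
        return -2;                      /* retries exhausted */
    }

    int main(void)
    {
        printf("read_byte() = %d after %d attempts\n", read_byte(), attempts);
        return 0;
    }
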
@@ -973,6 +1299,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
973 s32 status = 0; 1299 s32 status = 0;
974 u32 max_retry = 1; 1300 u32 max_retry = 1;
975 u32 retry = 0; 1301 u32 retry = 0;
1302 u16 swfw_mask = 0;
1303
1304 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1305 swfw_mask = IXGBE_GSSR_PHY1_SM;
1306 else
1307 swfw_mask = IXGBE_GSSR_PHY0_SM;
1308
1309 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
1310 status = IXGBE_ERR_SWFW_SYNC;
1311 goto write_byte_out;
1312 }
976 1313
977 do { 1314 do {
978 ixgbe_i2c_start(hw); 1315 ixgbe_i2c_start(hw);
@@ -1013,6 +1350,9 @@ fail:
1013 hw_dbg(hw, "I2C byte write error.\n"); 1350 hw_dbg(hw, "I2C byte write error.\n");
1014 } while (retry < max_retry); 1351 } while (retry < max_retry);
1015 1352
1353 ixgbe_release_swfw_sync(hw, swfw_mask);
1354
1355write_byte_out:
1016 return status; 1356 return status;
1017} 1357}
1018 1358
@@ -1331,6 +1671,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1331 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1671 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1332 u32 i; 1672 u32 i;
1333 1673
1674 ixgbe_i2c_start(hw);
1675
1334 ixgbe_set_i2c_data(hw, &i2cctl, 1); 1676 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1335 1677
1336 for (i = 0; i < 9; i++) { 1678 for (i = 0; i < 9; i++) {
@@ -1345,91 +1687,13 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1345 udelay(IXGBE_I2C_T_LOW); 1687 udelay(IXGBE_I2C_T_LOW);
1346 } 1688 }
1347 1689
1690 ixgbe_i2c_start(hw);
1691
1348 /* Put the i2c bus back to default state */ 1692 /* Put the i2c bus back to default state */
1349 ixgbe_i2c_stop(hw); 1693 ixgbe_i2c_stop(hw);
1350} 1694}
1351 1695
1352/** 1696/**
1353 * ixgbe_check_phy_link_tnx - Determine link and speed status
1354 * @hw: pointer to hardware structure
1355 *
1356 * Reads the VS1 register to determine if link is up and the current speed for
1357 * the PHY.
1358 **/
1359s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
1360 bool *link_up)
1361{
1362 s32 status = 0;
1363 u32 time_out;
1364 u32 max_time_out = 10;
1365 u16 phy_link = 0;
1366 u16 phy_speed = 0;
1367 u16 phy_data = 0;
1368
1369 /* Initialize speed and link to default case */
1370 *link_up = false;
1371 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1372
1373 /*
1374 * Check current speed and link status of the PHY register.
1375 * This is a vendor specific register and may have to
1376 * be changed for other copper PHYs.
1377 */
1378 for (time_out = 0; time_out < max_time_out; time_out++) {
1379 udelay(10);
1380 status = hw->phy.ops.read_reg(hw,
1381 IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
1382 MDIO_MMD_VEND1,
1383 &phy_data);
1384 phy_link = phy_data &
1385 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
1386 phy_speed = phy_data &
1387 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
1388 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
1389 *link_up = true;
1390 if (phy_speed ==
1391 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
1392 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1393 break;
1394 }
1395 }
1396
1397 return status;
1398}
1399
1400/**
1401 * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
1402 * @hw: pointer to hardware structure
1403 * @firmware_version: pointer to the PHY Firmware Version
1404 **/
1405s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
1406 u16 *firmware_version)
1407{
1408 s32 status = 0;
1409
1410 status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1,
1411 firmware_version);
1412
1413 return status;
1414}
1415
1416/**
1417 * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
1418 * @hw: pointer to hardware structure
1419 * @firmware_version: pointer to the PHY Firmware Version
1420**/
1421s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
1422 u16 *firmware_version)
1423{
1424 s32 status = 0;
1425
1426 status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
1427 firmware_version);
1428
1429 return status;
1430}
1431
1432/**
1433 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 1697 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
1434 * @hw: pointer to hardware structure 1698 * @hw: pointer to hardware structure
1435 * 1699 *
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index e2c6b7eac64..197bdd13106 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -58,6 +58,10 @@
58#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 58#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
59#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 59#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
60 60
61/* Flow control defines */
62#define IXGBE_TAF_SYM_PAUSE 0x400
63#define IXGBE_TAF_ASM_PAUSE 0x800
64
61/* Bit-shift macros */ 65/* Bit-shift macros */
62#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 66#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
63#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 67#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
@@ -104,6 +108,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
104s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 108s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
105 ixgbe_link_speed *speed, 109 ixgbe_link_speed *speed,
106 bool *link_up); 110 bool *link_up);
111s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
107s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 112s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
108 u16 *firmware_version); 113 u16 *firmware_version);
109s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, 114s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b15738b00..6e50d832894 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -110,12 +110,37 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
111} 111}
112 112
113void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
114{
115 struct ixgbe_hw *hw = &adapter->hw;
116 int new_mtu = msgbuf[1];
117 u32 max_frs;
118 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
119
120 /* Only X540 supports jumbo frames in IOV mode */
121 if (adapter->hw.mac.type != ixgbe_mac_X540)
122 return;
123
124 /* MTU < 68 is an error and causes problems on some kernels */
125 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
126 e_err(drv, "VF mtu %d out of range\n", new_mtu);
127 return;
128 }
129
130 max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
131 IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
132 if (max_frs < new_mtu) {
133 max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
134 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
135 }
136
137 e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
138}
113 139
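
For scale, the bounds check in ixgbe_set_vf_lpe works out as below; ETH_HLEN = 14 and ETH_FCS_LEN = 4 are the standard Linux values, and 9728 is assumed for IXGBE_MAX_JUMBO_FRAME_SIZE:

    #include <stdio.h>

    #define ETH_HLEN            14
    #define ETH_FCS_LEN         4
    #define MAX_JUMBO_FRAME     9728    /* assumed IXGBE_MAX_JUMBO_FRAME_SIZE */

    int main(void)
    {
        int mtus[] = { 60, 1500, 9710, 9711 };
        int i;

        for (i = 0; i < 4; i++) {
            int max_frame = mtus[i] + ETH_HLEN + ETH_FCS_LEN;
            int bad = (mtus[i] < 68) || (max_frame > MAX_JUMBO_FRAME);

            printf("mtu %4d -> frame %4d: %s\n",
                   mtus[i], max_frame, bad ? "rejected" : "accepted");
        }
        return 0;
    }
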
114static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 140static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
115{ 141{
116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 142 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
117 vmolr |= (IXGBE_VMOLR_ROMPE | 143 vmolr |= (IXGBE_VMOLR_ROMPE |
118 IXGBE_VMOLR_ROPE |
119 IXGBE_VMOLR_BAM); 144 IXGBE_VMOLR_BAM);
120 if (aupe) 145 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE; 146 vmolr |= IXGBE_VMOLR_AUPE;
@@ -304,7 +329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
304 hash_list, vf); 329 hash_list, vf);
305 break; 330 break;
306 case IXGBE_VF_SET_LPE: 331 case IXGBE_VF_SET_LPE:
307 WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE); 332 ixgbe_set_vf_lpe(adapter, msgbuf);
308 break; 333 break;
309 case IXGBE_VF_SET_VLAN: 334 case IXGBE_VF_SET_VLAN:
310 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) 335 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
@@ -453,9 +478,90 @@ out:
453 return err; 478 return err;
454} 479}
455 480
481static int ixgbe_link_mbps(int internal_link_speed)
482{
483 switch (internal_link_speed) {
484 case IXGBE_LINK_SPEED_100_FULL:
485 return 100;
486 case IXGBE_LINK_SPEED_1GB_FULL:
487 return 1000;
488 case IXGBE_LINK_SPEED_10GB_FULL:
489 return 10000;
490 default:
491 return 0;
492 }
493}
494
495static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
496 int link_speed)
497{
498 int rf_dec, rf_int;
499 u32 bcnrc_val;
500
501 if (tx_rate != 0) {
502 /* Calculate the rate factor values to set */
503 rf_int = link_speed / tx_rate;
504 rf_dec = (link_speed - (rf_int * tx_rate));
505 rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
506
507 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
508 bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
509 IXGBE_RTTBCNRC_RF_INT_MASK);
510 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
511 } else {
512 bcnrc_val = 0;
513 }
514
515 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
516 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
517}
518
519void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
520{
521 int actual_link_speed, i;
522 bool reset_rate = false;
523
524 /* VF Tx rate limit was not set */
525 if (adapter->vf_rate_link_speed == 0)
526 return;
527
528 actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
529 if (actual_link_speed != adapter->vf_rate_link_speed) {
530 reset_rate = true;
531 adapter->vf_rate_link_speed = 0;
532 dev_info(&adapter->pdev->dev,
533 "Link speed has been changed. VF Transmit rate "
534 "is disabled\n");
535 }
536
537 for (i = 0; i < adapter->num_vfs; i++) {
538 if (reset_rate)
539 adapter->vfinfo[i].tx_rate = 0;
540
541 ixgbe_set_vf_rate_limit(&adapter->hw, i,
542 adapter->vfinfo[i].tx_rate,
543 actual_link_speed);
544 }
545}
546
456int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 547int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
457{ 548{
458 return -EOPNOTSUPP; 549 struct ixgbe_adapter *adapter = netdev_priv(netdev);
550 struct ixgbe_hw *hw = &adapter->hw;
551 int actual_link_speed;
552
553 actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
554 if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
555 (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
556 ((tx_rate != 0) && (tx_rate <= 10)))
557 /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
558 return -EINVAL;
559
560 adapter->vf_rate_link_speed = actual_link_speed;
561 adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
562 ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
563
564 return 0;
459} 565}
460 566
461int ixgbe_ndo_get_vf_config(struct net_device *netdev, 567int ixgbe_ndo_get_vf_config(struct net_device *netdev,
@@ -466,7 +572,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
466 return -EINVAL; 572 return -EINVAL;
467 ivi->vf = vf; 573 ivi->vf = vf;
468 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); 574 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
469 ivi->tx_rate = 0; 575 ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
470 ivi->vlan = adapter->vfinfo[vf].pf_vlan; 576 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
471 ivi->qos = adapter->vfinfo[vf].pf_qos; 577 ivi->qos = adapter->vfinfo[vf].pf_qos;
472 return 0; 578 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 49dc14debef..34175564bb7 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -40,6 +40,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
40int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 40int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
41int ixgbe_ndo_get_vf_config(struct net_device *netdev, 41int ixgbe_ndo_get_vf_config(struct net_device *netdev,
42 int vf, struct ifla_vf_info *ivi); 42 int vf, struct ifla_vf_info *ivi);
43void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
43 44
44#endif /* _IXGBE_SRIOV_H_ */ 45#endif /* _IXGBE_SRIOV_H_ */
45 46
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fd3358f5413..25c1fb7eda0 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -91,7 +91,7 @@
91 91
92/* General Receive Control */ 92/* General Receive Control */
93#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ 93#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
94#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */ 94#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
95 95
96#define IXGBE_VPDDIAG0 0x10204 96#define IXGBE_VPDDIAG0 0x10204
97#define IXGBE_VPDDIAG1 0x10208 97#define IXGBE_VPDDIAG1 0x10208
@@ -342,7 +342,7 @@
342/* Wake Up Control */ 342/* Wake Up Control */
343#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ 343#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
344#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ 344#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
345#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/ 345#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
346 346
347/* Wake Up Filter Control */ 347/* Wake Up Filter Control */
348#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 348#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -533,6 +533,12 @@
533#define IXGBE_RTTDTECC 0x04990 533#define IXGBE_RTTDTECC 0x04990
534#define IXGBE_RTTDTECC_NO_BCN 0x00000100 534#define IXGBE_RTTDTECC_NO_BCN 0x00000100
535#define IXGBE_RTTBCNRC 0x04984 535#define IXGBE_RTTBCNRC 0x04984
536#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
537#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
538#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
539#define IXGBE_RTTBCNRC_RF_INT_MASK \
540 (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
541
536 542
537/* FCoE registers */ 543/* FCoE registers */
538#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ 544#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
@@ -659,6 +665,8 @@
659#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ 665#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
660#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ 666#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
661#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ 667#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
668#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
669#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
662#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ 670#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
663#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ 671#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
664#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ 672#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
@@ -669,6 +677,11 @@
669#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ 677#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
670#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ 678#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
671#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ 679#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
680#define IXGBE_PCRC8ECL 0x0E810
681#define IXGBE_PCRC8ECH 0x0E811
682#define IXGBE_PCRC8ECH_MASK 0x1F
683#define IXGBE_LDPCECL 0x0E820
684#define IXGBE_LDPCECH 0x0E821
672 685
673/* Management */ 686/* Management */
674#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ 687#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1002,6 +1015,13 @@
1002#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ 1015#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
1003#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ 1016#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
1004 1017
1018/* MII clause 22/28 definitions */
1019#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
1020#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
1021#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
1022#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
1023#define IXGBE_MII_AUTONEG_REG 0x0
1024
1005#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 1025#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
1006#define IXGBE_MAX_PHY_ADDR 32 1026#define IXGBE_MAX_PHY_ADDR 32
1007 1027
@@ -1614,6 +1634,8 @@
1614#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ 1634#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
1615 1635
1616/* PCI Bus Info */ 1636/* PCI Bus Info */
1637#define IXGBE_PCI_DEVICE_STATUS 0xAA
1638#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
1617#define IXGBE_PCI_LINK_STATUS 0xB2 1639#define IXGBE_PCI_LINK_STATUS 0xB2
1618#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1640#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
1619#define IXGBE_PCI_LINK_WIDTH 0x3F0 1641#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -1680,6 +1702,8 @@
1680#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ 1702#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
1681#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 1703#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
1682#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ 1704#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
1705#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
1706#define IXGBE_RXDCTL_RLPML_EN 0x00008000
1683 1707
1684#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ 1708#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
1685#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ 1709#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -2240,6 +2264,7 @@ enum ixgbe_mac_type {
2240 2264
2241enum ixgbe_phy_type { 2265enum ixgbe_phy_type {
2242 ixgbe_phy_unknown = 0, 2266 ixgbe_phy_unknown = 0,
2267 ixgbe_phy_none,
2243 ixgbe_phy_tn, 2268 ixgbe_phy_tn,
2244 ixgbe_phy_aq, 2269 ixgbe_phy_aq,
2245 ixgbe_phy_cu_unknown, 2270 ixgbe_phy_cu_unknown,
@@ -2328,32 +2353,31 @@ enum ixgbe_bus_type {
2328/* PCI bus speeds */ 2353/* PCI bus speeds */
2329enum ixgbe_bus_speed { 2354enum ixgbe_bus_speed {
2330 ixgbe_bus_speed_unknown = 0, 2355 ixgbe_bus_speed_unknown = 0,
2331 ixgbe_bus_speed_33, 2356 ixgbe_bus_speed_33 = 33,
2332 ixgbe_bus_speed_66, 2357 ixgbe_bus_speed_66 = 66,
2333 ixgbe_bus_speed_100, 2358 ixgbe_bus_speed_100 = 100,
2334 ixgbe_bus_speed_120, 2359 ixgbe_bus_speed_120 = 120,
2335 ixgbe_bus_speed_133, 2360 ixgbe_bus_speed_133 = 133,
2336 ixgbe_bus_speed_2500, 2361 ixgbe_bus_speed_2500 = 2500,
2337 ixgbe_bus_speed_5000, 2362 ixgbe_bus_speed_5000 = 5000,
2338 ixgbe_bus_speed_reserved 2363 ixgbe_bus_speed_reserved
2339}; 2364};
2340 2365
2341/* PCI bus widths */ 2366/* PCI bus widths */
2342enum ixgbe_bus_width { 2367enum ixgbe_bus_width {
2343 ixgbe_bus_width_unknown = 0, 2368 ixgbe_bus_width_unknown = 0,
2344 ixgbe_bus_width_pcie_x1, 2369 ixgbe_bus_width_pcie_x1 = 1,
2345 ixgbe_bus_width_pcie_x2, 2370 ixgbe_bus_width_pcie_x2 = 2,
2346 ixgbe_bus_width_pcie_x4 = 4, 2371 ixgbe_bus_width_pcie_x4 = 4,
2347 ixgbe_bus_width_pcie_x8 = 8, 2372 ixgbe_bus_width_pcie_x8 = 8,
2348 ixgbe_bus_width_32, 2373 ixgbe_bus_width_32 = 32,
2349 ixgbe_bus_width_64, 2374 ixgbe_bus_width_64 = 64,
2350 ixgbe_bus_width_reserved 2375 ixgbe_bus_width_reserved
2351}; 2376};
2352 2377
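
Giving the bus enums their natural numeric values lets callers treat them directly as the speed and lane count instead of translating through a table. A trivial sketch of the idea; the 5 GT/s per lane figure is illustrative (PCIe gen2):

    #include <stdio.h>

    enum bus_width {
        bus_width_pcie_x1 = 1,
        bus_width_pcie_x4 = 4,
        bus_width_pcie_x8 = 8,
    };

    int main(void)
    {
        enum bus_width w = bus_width_pcie_x8;

        /* the enum is the lane count: no lookup table needed */
        printf("x%d link, ~%d GT/s total\n", w, w * 5);
        return 0;
    }
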
2353struct ixgbe_addr_filter_info { 2378struct ixgbe_addr_filter_info {
2354 u32 num_mc_addrs; 2379 u32 num_mc_addrs;
2355 u32 rar_used_count; 2380 u32 rar_used_count;
2356 u32 mc_addr_in_rar_count;
2357 u32 mta_in_use; 2381 u32 mta_in_use;
2358 u32 overflow_promisc; 2382 u32 overflow_promisc;
2359 bool uc_set_promisc; 2383 bool uc_set_promisc;
@@ -2491,6 +2515,8 @@ struct ixgbe_mac_operations {
2491 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); 2515 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
2492 s32 (*setup_sfp)(struct ixgbe_hw *); 2516 s32 (*setup_sfp)(struct ixgbe_hw *);
2493 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2517 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2518 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
2519 void (*release_swfw_sync)(struct ixgbe_hw *, u16);
2494 2520
2495 /* Link */ 2521 /* Link */
2496 void (*disable_tx_laser)(struct ixgbe_hw *); 2522 void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2513,7 +2539,6 @@ struct ixgbe_mac_operations {
2513 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2539 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2514 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2540 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2515 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2541 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2516 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2517 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); 2542 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2518 s32 (*enable_mc)(struct ixgbe_hw *); 2543 s32 (*enable_mc)(struct ixgbe_hw *);
2519 s32 (*disable_mc)(struct ixgbe_hw *); 2544 s32 (*disable_mc)(struct ixgbe_hw *);
@@ -2554,6 +2579,7 @@ struct ixgbe_eeprom_info {
2554 u16 address_bits; 2579 u16 address_bits;
2555}; 2580};
2556 2581
2582#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
2557struct ixgbe_mac_info { 2583struct ixgbe_mac_info {
2558 struct ixgbe_mac_operations ops; 2584 struct ixgbe_mac_operations ops;
2559 enum ixgbe_mac_type type; 2585 enum ixgbe_mac_type type;
@@ -2564,6 +2590,8 @@ struct ixgbe_mac_info {
2564 u16 wwnn_prefix; 2590 u16 wwnn_prefix;
2565 /* prefix for World Wide Port Name (WWPN) */ 2591 /* prefix for World Wide Port Name (WWPN) */
2566 u16 wwpn_prefix; 2592 u16 wwpn_prefix;
2593#define IXGBE_MAX_MTA 128
2594 u32 mta_shadow[IXGBE_MAX_MTA];
2567 s32 mc_filter_type; 2595 s32 mc_filter_type;
2568 u32 mcft_size; 2596 u32 mcft_size;
2569 u32 vft_size; 2597 u32 vft_size;
@@ -2576,6 +2604,7 @@ struct ixgbe_mac_info {
2576 u32 orig_autoc2; 2604 u32 orig_autoc2;
2577 bool orig_link_settings_stored; 2605 bool orig_link_settings_stored;
2578 bool autotry_restart; 2606 bool autotry_restart;
2607 u8 flags;
2579}; 2608};
2580 2609
2581struct ixgbe_phy_info { 2610struct ixgbe_phy_info {
@@ -2682,7 +2711,9 @@ struct ixgbe_info {
2682#define IXGBE_ERR_EEPROM_VERSION -24 2711#define IXGBE_ERR_EEPROM_VERSION -24
2683#define IXGBE_ERR_NO_SPACE -25 2712#define IXGBE_ERR_NO_SPACE -25
2684#define IXGBE_ERR_OVERTEMP -26 2713#define IXGBE_ERR_OVERTEMP -26
2685#define IXGBE_ERR_RAR_INDEX -27 2714#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
2715#define IXGBE_ERR_FC_NOT_SUPPORTED -28
2716#define IXGBE_ERR_FLOW_CONTROL -29
2686#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 2717#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2687#define IXGBE_ERR_PBA_SECTION -31 2718#define IXGBE_ERR_PBA_SECTION -31
2688#define IXGBE_ERR_INVALID_ARGUMENT -32 2719#define IXGBE_ERR_INVALID_ARGUMENT -32
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a8923993ce..f47e93fe32b 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,7 +31,6 @@
31 31
32#include "ixgbe.h" 32#include "ixgbe.h"
33#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
34//#include "ixgbe_mbx.h"
35 34
36#define IXGBE_X540_MAX_TX_QUEUES 128 35#define IXGBE_X540_MAX_TX_QUEUES 128
37#define IXGBE_X540_MAX_RX_QUEUES 128 36#define IXGBE_X540_MAX_RX_QUEUES 128
@@ -110,12 +109,9 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
110 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 109 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
111 * access and verify no pending requests before reset 110 * access and verify no pending requests before reset
112 */ 111 */
113 status = ixgbe_disable_pcie_master(hw); 112 ixgbe_disable_pcie_master(hw);
114 if (status != 0) {
115 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
116 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
117 }
118 113
114mac_reset_top:
119 /* 115 /*
120 * Issue global reset to the MAC. Needs to be SW reset if link is up. 116 * Issue global reset to the MAC. Needs to be SW reset if link is up.
121 * If link reset is used when link is up, it might reset the PHY when 117 * If link reset is used when link is up, it might reset the PHY when
@@ -133,21 +129,34 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
133 } 129 }
134 130
135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 131 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); 132 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
137 IXGBE_WRITE_FLUSH(hw); 133 IXGBE_WRITE_FLUSH(hw);
138 134
139 /* Poll for reset bit to self-clear indicating reset is complete */ 135 /* Poll for reset bit to self-clear indicating reset is complete */
140 for (i = 0; i < 10; i++) { 136 for (i = 0; i < 10; i++) {
141 udelay(1); 137 udelay(1);
142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 138 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
143 if (!(ctrl & IXGBE_CTRL_RST)) 139 if (!(ctrl & reset_bit))
144 break; 140 break;
145 } 141 }
146 if (ctrl & IXGBE_CTRL_RST) { 142 if (ctrl & reset_bit) {
147 status = IXGBE_ERR_RESET_FAILED; 143 status = IXGBE_ERR_RESET_FAILED;
148 hw_dbg(hw, "Reset polling failed to complete.\n"); 144 hw_dbg(hw, "Reset polling failed to complete.\n");
149 } 145 }
150 146
147 /*
148 * Double resets are required for recovery from certain error
149 * conditions. Between resets, it is necessary to stall to allow time
150 * for any pending HW events to complete. We use 1usec since that is
151 * what is needed for ixgbe_disable_pcie_master(). The second reset
152 * then clears out any effects of those events.
153 */
154 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
155 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
156 udelay(1);
157 goto mac_reset_top;
158 }
159
151 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ 160 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
152 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 161 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
153 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 162 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
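
In isolation, the flag-driven retry added here reduces to the control flow below; FLAG_DOUBLE_RESET is an illustrative stand-in for IXGBE_FLAGS_DOUBLE_RESET_REQUIRED:

    #include <stdio.h>

    #define FLAG_DOUBLE_RESET 0x01  /* stand-in for the driver flag */

    static void do_reset(void) { puts("reset issued"); }

    int main(void)
    {
        unsigned int flags = FLAG_DOUBLE_RESET;

    reset_top:
        do_reset();
        if (flags & FLAG_DOUBLE_RESET) {
            flags &= ~FLAG_DOUBLE_RESET;
            /* driver stalls ~1 usec here for pending HW events */
            goto reset_top;
        }
        return 0;
    }
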
@@ -191,7 +200,7 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
191 * clear the multicast table. Also reset num_rar_entries to 128, 200 * clear the multicast table. Also reset num_rar_entries to 128,
192 * since we modify this value when programming the SAN MAC address. 201 * since we modify this value when programming the SAN MAC address.
193 */ 202 */
194 hw->mac.num_rar_entries = 128; 203 hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
195 hw->mac.ops.init_rx_addrs(hw); 204 hw->mac.ops.init_rx_addrs(hw);
196 205
197 /* Store the permanent mac address */ 206 /* Store the permanent mac address */
@@ -242,8 +251,11 @@ static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
242} 251}
243 252
244/** 253/**
245 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params 254 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
246 * @hw: pointer to hardware structure 255 * @hw: pointer to hardware structure
256 *
257 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
258 * ixgbe_hw struct in order to set up EEPROM access.
247 **/ 259 **/
248static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) 260static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
249{ 261{
@@ -262,7 +274,7 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
262 IXGBE_EEPROM_WORD_SIZE_SHIFT); 274 IXGBE_EEPROM_WORD_SIZE_SHIFT);
263 275
264 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", 276 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
265 eeprom->type, eeprom->word_size); 277 eeprom->type, eeprom->word_size);
266 } 278 }
267 279
268 return 0; 280 return 0;
@@ -278,7 +290,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
278{ 290{
279 s32 status; 291 s32 status;
280 292
281 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) 293 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
282 status = ixgbe_read_eerd_generic(hw, offset, data); 294 status = ixgbe_read_eerd_generic(hw, offset, data);
283 else 295 else
284 status = IXGBE_ERR_SWFW_SYNC; 296 status = IXGBE_ERR_SWFW_SYNC;
@@ -311,7 +323,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
311 (data << IXGBE_EEPROM_RW_REG_DATA) | 323 (data << IXGBE_EEPROM_RW_REG_DATA) |
312 IXGBE_EEPROM_RW_REG_START; 324 IXGBE_EEPROM_RW_REG_START;
313 325
314 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) { 326 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
315 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 327 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
316 if (status != 0) { 328 if (status != 0) {
317 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 329 hw_dbg(hw, "Eeprom write EEWR timed out\n");
@@ -676,7 +688,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
676 .set_vmdq = &ixgbe_set_vmdq_generic, 688 .set_vmdq = &ixgbe_set_vmdq_generic,
677 .clear_vmdq = &ixgbe_clear_vmdq_generic, 689 .clear_vmdq = &ixgbe_clear_vmdq_generic,
678 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 690 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
679 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
680 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 691 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
681 .enable_mc = &ixgbe_enable_mc_generic, 692 .enable_mc = &ixgbe_enable_mc_generic,
682 .disable_mc = &ixgbe_disable_mc_generic, 693 .disable_mc = &ixgbe_disable_mc_generic,
@@ -687,6 +698,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
687 .setup_sfp = NULL, 698 .setup_sfp = NULL,
688 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 699 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
689 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 700 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
701 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
702 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
690}; 703};
691 704
692static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 705static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
@@ -702,7 +715,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
702 .identify = &ixgbe_identify_phy_generic, 715 .identify = &ixgbe_identify_phy_generic,
703 .identify_sfp = &ixgbe_identify_sfp_module_generic, 716 .identify_sfp = &ixgbe_identify_sfp_module_generic,
704 .init = NULL, 717 .init = NULL,
705 .reset = &ixgbe_reset_phy_generic, 718 .reset = NULL,
706 .read_reg = &ixgbe_read_phy_reg_generic, 719 .read_reg = &ixgbe_read_phy_reg_generic,
707 .write_reg = &ixgbe_write_phy_reg_generic, 720 .write_reg = &ixgbe_write_phy_reg_generic,
708 .setup_link = &ixgbe_setup_phy_link_generic, 721 .setup_link = &ixgbe_setup_phy_link_generic,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index de643eb2ada..78abb6f1a86 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -65,6 +65,8 @@ typedef u32 ixgbe_link_speed;
65#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ 65#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
66#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 66#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
67#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ 67#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
68#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
69#define IXGBE_RXDCTL_RLPML_EN 0x00008000
68 70
69/* DCA Control */ 71/* DCA Control */
70#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 72#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index fa29b3c8c46..0563ab29264 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -172,7 +172,7 @@ static char *ixgbevf_reg_names[] = {
172 "IXGBE_VFSTATUS", 172 "IXGBE_VFSTATUS",
173 "IXGBE_VFLINKS", 173 "IXGBE_VFLINKS",
174 "IXGBE_VFRXMEMWRAP", 174 "IXGBE_VFRXMEMWRAP",
175 "IXGBE_VFRTIMER", 175 "IXGBE_VFFRTIMER",
176 "IXGBE_VTEICR", 176 "IXGBE_VTEICR",
177 "IXGBE_VTEICS", 177 "IXGBE_VTEICS",
178 "IXGBE_VTEIMS", 178 "IXGBE_VTEIMS",
@@ -240,7 +240,7 @@ static void ixgbevf_get_regs(struct net_device *netdev,
240 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); 240 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
241 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 241 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
242 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); 242 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
243 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER); 243 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
244 244
245 /* Interrupt */ 245 /* Interrupt */
246 /* don't read EICR because it can clear interrupt causes, instead 246 /* don't read EICR because it can clear interrupt causes, instead
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index a63efcb2cf1..b703f60be3b 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -207,7 +207,6 @@ struct ixgbevf_adapter {
207 u64 hw_tso_ctxt; 207 u64 hw_tso_ctxt;
208 u64 hw_tso6_ctxt; 208 u64 hw_tso6_ctxt;
209 u32 tx_timeout_count; 209 u32 tx_timeout_count;
210 bool detect_tx_hung;
211 210
212 /* RX */ 211 /* RX */
213 struct ixgbevf_ring *rx_ring; /* One per active queue */ 212 struct ixgbevf_ring *rx_ring; /* One per active queue */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 464e6c9d3fc..054ab05b7c6 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -49,9 +49,9 @@
49 49
50char ixgbevf_driver_name[] = "ixgbevf"; 50char ixgbevf_driver_name[] = "ixgbevf";
51static const char ixgbevf_driver_string[] = 51static const char ixgbevf_driver_string[] =
52 "Intel(R) 82599 Virtual Function"; 52 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
53 53
54#define DRV_VERSION "1.0.19-k0" 54#define DRV_VERSION "2.0.0-k2"
55const char ixgbevf_driver_version[] = DRV_VERSION; 55const char ixgbevf_driver_version[] = DRV_VERSION;
56static char ixgbevf_copyright[] = 56static char ixgbevf_copyright[] =
57 "Copyright (c) 2009 - 2010 Intel Corporation."; 57 "Copyright (c) 2009 - 2010 Intel Corporation.";
@@ -107,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
107} 107}
108 108
109/* 109/*
110 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors 110 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
111 * @adapter: pointer to adapter struct 111 * @adapter: pointer to adapter struct
112 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 112 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
113 * @queue: queue to map the corresponding interrupt to 113 * @queue: queue to map the corresponding interrupt to
@@ -162,42 +162,6 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
162 /* tx_buffer_info must be completely set up in the transmit path */ 162 /* tx_buffer_info must be completely set up in the transmit path */
163} 163}
164 164
165static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
166 struct ixgbevf_ring *tx_ring,
167 unsigned int eop)
168{
169 struct ixgbe_hw *hw = &adapter->hw;
170 u32 head, tail;
171
172 /* Detect a transmit hang in hardware, this serializes the
173 * check with the clearing of time_stamp and movement of eop */
174 head = readl(hw->hw_addr + tx_ring->head);
175 tail = readl(hw->hw_addr + tx_ring->tail);
176 adapter->detect_tx_hung = false;
177 if ((head != tail) &&
178 tx_ring->tx_buffer_info[eop].time_stamp &&
179 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
180 /* detected Tx unit hang */
181 union ixgbe_adv_tx_desc *tx_desc;
182 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
183 printk(KERN_ERR "Detected Tx Unit Hang\n"
184 " Tx Queue <%d>\n"
185 " TDH, TDT <%x>, <%x>\n"
186 " next_to_use <%x>\n"
187 " next_to_clean <%x>\n"
188 "tx_buffer_info[next_to_clean]\n"
189 " time_stamp <%lx>\n"
190 " jiffies <%lx>\n",
191 tx_ring->queue_index,
192 head, tail,
193 tx_ring->next_to_use, eop,
194 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
195 return true;
196 }
197
198 return false;
199}
200
201#define IXGBE_MAX_TXD_PWR 14 165#define IXGBE_MAX_TXD_PWR 14
202#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 166#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
203 167
@@ -293,16 +257,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
293#endif 257#endif
294 } 258 }
295 259
296 if (adapter->detect_tx_hung) {
297 if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
298 /* schedule immediate reset if we believe we hung */
299 printk(KERN_INFO
300 "tx hang %d detected, resetting adapter\n",
301 adapter->tx_timeout_count + 1);
302 ixgbevf_tx_timeout(adapter->netdev);
303 }
304 }
305
306 /* re-arm the interrupt */ 260 /* re-arm the interrupt */
307 if ((count >= tx_ring->work_limit) && 261 if ((count >= tx_ring->work_limit) &&
308 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) { 262 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
@@ -334,7 +288,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
334 struct ixgbevf_adapter *adapter = q_vector->adapter; 288 struct ixgbevf_adapter *adapter = q_vector->adapter;
335 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 289 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
336 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 290 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
337 int ret;
338 291
339 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 292 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
340 if (adapter->vlgrp && is_vlan) 293 if (adapter->vlgrp && is_vlan)
@@ -345,9 +298,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
345 napi_gro_receive(&q_vector->napi, skb); 298 napi_gro_receive(&q_vector->napi, skb);
346 } else { 299 } else {
347 if (adapter->vlgrp && is_vlan) 300 if (adapter->vlgrp && is_vlan)
348 ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag); 301 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
349 else 302 else
350 ret = netif_rx(skb); 303 netif_rx(skb);
351 } 304 }
352} 305}
353 306
@@ -1017,7 +970,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
1017} 970}
1018 971
1019/** 972/**
1020 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) 973 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
1021 * @irq: unused 974 * @irq: unused
1022 * @data: pointer to our q_vector struct for this interrupt vector 975 * @data: pointer to our q_vector struct for this interrupt vector
1023 **/ 976 **/
@@ -1665,6 +1618,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1665 j = adapter->rx_ring[i].reg_idx; 1618 j = adapter->rx_ring[i].reg_idx;
1666 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1619 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1667 rxdctl |= IXGBE_RXDCTL_ENABLE; 1620 rxdctl |= IXGBE_RXDCTL_ENABLE;
1621 if (hw->mac.type == ixgbe_mac_X540_vf) {
1622 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1623 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1624 IXGBE_RXDCTL_RLPML_EN);
1625 }
1668 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1626 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1669 ixgbevf_rx_desc_queue_enable(adapter, i); 1627 ixgbevf_rx_desc_queue_enable(adapter, i);
1670 } 1628 }
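
On X540 VFs the hunk above reprograms the 14-bit receive-packet-length field inside RXDCTL. A standalone sketch of the mask-and-set, with values mirroring the IXGBE_RXDCTL_* defines added in this series:

    #include <stdint.h>
    #include <stdio.h>

    #define RXDCTL_RLPMLMASK    0x00003FFF
    #define RXDCTL_RLPML_EN     0x00008000
    #define RXDCTL_ENABLE       0x02000000

    int main(void)
    {
        uint32_t rxdctl = RXDCTL_ENABLE | 0x5EA;  /* stale RLPML bits */
        uint32_t max_frame = 1500 + 14 + 4;       /* MTU + ETH_HLEN + ETH_FCS_LEN */

        rxdctl &= ~RXDCTL_RLPMLMASK;
        rxdctl |= max_frame | RXDCTL_RLPML_EN;

        printf("RXDCTL = 0x%08x (RLPML = %u)\n",
               rxdctl, rxdctl & RXDCTL_RLPMLMASK);
        return 0;
    }
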
@@ -1967,7 +1925,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1967} 1925}
1968 1926
1969/* 1927/*
1970 * ixgbe_set_num_queues: Allocate queues for device, feature dependent 1928 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
1971 * @adapter: board private structure to initialize 1929 * @adapter: board private structure to initialize
1972 * 1930 *
1973 * This is the top level queue allocation routine. The order here is very 1931 * This is the top level queue allocation routine. The order here is very
@@ -2216,7 +2174,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2216 2174
2217 hw->vendor_id = pdev->vendor; 2175 hw->vendor_id = pdev->vendor;
2218 hw->device_id = pdev->device; 2176 hw->device_id = pdev->device;
2219 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 2177 hw->revision_id = pdev->revision;
2220 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2178 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2221 hw->subsystem_device_id = pdev->subsystem_device; 2179 hw->subsystem_device_id = pdev->subsystem_device;
2222 2180
@@ -2410,9 +2368,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2410 10 : 1); 2368 10 : 1);
2411 netif_carrier_on(netdev); 2369 netif_carrier_on(netdev);
2412 netif_tx_wake_all_queues(netdev); 2370 netif_tx_wake_all_queues(netdev);
2413 } else {
2414 /* Force detection of hung controller */
2415 adapter->detect_tx_hung = true;
2416 } 2371 }
2417 } else { 2372 } else {
2418 adapter->link_up = false; 2373 adapter->link_up = false;
@@ -2427,9 +2382,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2427 ixgbevf_update_stats(adapter); 2382 ixgbevf_update_stats(adapter);
2428 2383
2429pf_has_reset: 2384pf_has_reset:
2430 /* Force detection of hung controller every watchdog period */
2431 adapter->detect_tx_hung = true;
2432
2433 /* Reset the timer */ 2385 /* Reset the timer */
2434 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2386 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2435 mod_timer(&adapter->watchdog_timer, 2387 mod_timer(&adapter->watchdog_timer,
@@ -3217,10 +3169,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3217static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3169static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3218{ 3170{
3219 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3171 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3172 struct ixgbe_hw *hw = &adapter->hw;
3220 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3173 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3174 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3175 u32 msg[2];
3176
3177 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3178 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3221 3179
3222 /* MTU < 68 is an error and causes problems on some kernels */ 3180 /* MTU < 68 is an error and causes problems on some kernels */
3223 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) 3181 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3224 return -EINVAL; 3182 return -EINVAL;
3225 3183
3226 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3184 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3228,6 +3186,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3228 /* must set new MTU before calling down or up */ 3186 /* must set new MTU before calling down or up */
3229 netdev->mtu = new_mtu; 3187 netdev->mtu = new_mtu;
3230 3188
3189 msg[0] = IXGBE_VF_SET_LPE;
3190 msg[1] = max_frame;
3191 hw->mbx.ops.write_posted(hw, msg, 2);
3192
3231 if (netif_running(netdev)) 3193 if (netif_running(netdev))
3232 ixgbevf_reinit_locked(adapter); 3194 ixgbevf_reinit_locked(adapter);
3233 3195
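
The VF forwards an MTU change to the PF as a two-word mailbox message whose second word carries the new frame size; the PF handler, ixgbe_set_vf_lpe earlier in this series, reads it back out of msgbuf[1]. A sketch with an assumed opcode value:

    #include <stdint.h>
    #include <stdio.h>

    #define VF_SET_LPE 0x05     /* illustrative opcode value */

    int main(void)
    {
        uint32_t new_mtu = 9000;
        uint32_t msg[2];

        msg[0] = VF_SET_LPE;            /* command word */
        msg[1] = new_mtu + 14 + 4;      /* max_frame: MTU + ETH_HLEN + ETH_FCS_LEN */

        printf("mbx: cmd 0x%02x, max_frame %u\n",
               (unsigned int)msg[0], (unsigned int)msg[1]);
        return 0;
    }
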
@@ -3272,8 +3234,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
3272 3234
3273static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3235static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3274{ 3236{
3275 struct ixgbevf_adapter *adapter;
3276 adapter = netdev_priv(dev);
3277 dev->netdev_ops = &ixgbe_netdev_ops; 3237 dev->netdev_ops = &ixgbe_netdev_ops;
3278 ixgbevf_set_ethtool_ops(dev); 3238 ixgbevf_set_ethtool_ops(dev);
3279 dev->watchdog_timeo = 5 * HZ; 3239 dev->watchdog_timeo = 5 * HZ;
@@ -3519,9 +3479,9 @@ static struct pci_driver ixgbevf_driver = {
3519}; 3479};
3520 3480
3521/** 3481/**
3522 * ixgbe_init_module - Driver Registration Routine 3482 * ixgbevf_init_module - Driver Registration Routine
3523 * 3483 *
3524 * ixgbe_init_module is the first routine called when the driver is 3484 * ixgbevf_init_module is the first routine called when the driver is
3525 * loaded. All it does is register with the PCI subsystem. 3485 * loaded. All it does is register with the PCI subsystem.
3526 **/ 3486 **/
3527static int __init ixgbevf_init_module(void) 3487static int __init ixgbevf_init_module(void)
@@ -3539,9 +3499,9 @@ static int __init ixgbevf_init_module(void)
3539module_init(ixgbevf_init_module); 3499module_init(ixgbevf_init_module);
3540 3500
3541/** 3501/**
3542 * ixgbe_exit_module - Driver Exit Cleanup Routine 3502 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3543 * 3503 *
3544 * ixgbe_exit_module is called just before the driver is removed 3504 * ixgbevf_exit_module is called just before the driver is removed
3545 * from memory. 3505 * from memory.
3546 **/ 3506 **/
3547static void __exit ixgbevf_exit_module(void) 3507static void __exit ixgbevf_exit_module(void)
@@ -3551,7 +3511,7 @@ static void __exit ixgbevf_exit_module(void)
3551 3511
3552#ifdef DEBUG 3512#ifdef DEBUG
3553/** 3513/**
3554 * ixgbe_get_hw_dev_name - return device name string 3514 * ixgbevf_get_hw_dev_name - return device name string
3555 * used by hardware layer to print debugging information 3515 * used by hardware layer to print debugging information
3556 **/ 3516 **/
3557char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3517char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index fb80ca1bcc9..189200eeca2 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -31,7 +31,7 @@
31#define IXGBE_VFCTRL 0x00000 31#define IXGBE_VFCTRL 0x00000
32#define IXGBE_VFSTATUS 0x00008 32#define IXGBE_VFSTATUS 0x00008
33#define IXGBE_VFLINKS 0x00010 33#define IXGBE_VFLINKS 0x00010
34#define IXGBE_VFRTIMER 0x00048 34#define IXGBE_VFFRTIMER 0x00048
35#define IXGBE_VFRXMEMWRAP 0x03190 35#define IXGBE_VFRXMEMWRAP 0x03190
36#define IXGBE_VTEICR 0x00100 36#define IXGBE_VTEICR 0x00100
37#define IXGBE_VTEICS 0x00104 37#define IXGBE_VTEICS 0x00104
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index e97ebef3cf4..994c80939c7 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -161,6 +161,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
161} 161}
162 162
163static inline void 163static inline void
164jme_mac_rxclk_off(struct jme_adapter *jme)
165{
166 jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
167 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
168}
169
170static inline void
171jme_mac_rxclk_on(struct jme_adapter *jme)
172{
173 jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
174 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
175}
176
177static inline void
178jme_mac_txclk_off(struct jme_adapter *jme)
179{
180 jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
181 jwrite32f(jme, JME_GHC, jme->reg_ghc);
182}
183
184static inline void
185jme_mac_txclk_on(struct jme_adapter *jme)
186{
187 u32 speed = jme->reg_ghc & GHC_SPEED;
188 if (speed == GHC_SPEED_1000M)
189 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
190 else
191 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
192 jwrite32f(jme, JME_GHC, jme->reg_ghc);
193}
194
195static inline void
196jme_reset_ghc_speed(struct jme_adapter *jme)
197{
198 jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
199 jwrite32f(jme, JME_GHC, jme->reg_ghc);
200}
201
202static inline void
203jme_reset_250A2_workaround(struct jme_adapter *jme)
204{
205 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
206 GPREG1_RSSPATCH);
207 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
208}
209
210static inline void
211jme_assert_ghc_reset(struct jme_adapter *jme)
212{
213 jme->reg_ghc |= GHC_SWRST;
214 jwrite32f(jme, JME_GHC, jme->reg_ghc);
215}
216
217static inline void
218jme_clear_ghc_reset(struct jme_adapter *jme)
219{
220 jme->reg_ghc &= ~GHC_SWRST;
221 jwrite32f(jme, JME_GHC, jme->reg_ghc);
222}
223
224static inline void
164jme_reset_mac_processor(struct jme_adapter *jme) 225jme_reset_mac_processor(struct jme_adapter *jme)
165{ 226{
166 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 227 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
@@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme)
168 u32 gpreg0; 229 u32 gpreg0;
169 int i; 230 int i;
170 231
171 jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); 232 jme_reset_ghc_speed(jme);
172 udelay(2); 233 jme_reset_250A2_workaround(jme);
173 jwrite32(jme, JME_GHC, jme->reg_ghc); 234
235 jme_mac_rxclk_on(jme);
236 jme_mac_txclk_on(jme);
237 udelay(1);
238 jme_assert_ghc_reset(jme);
239 udelay(1);
240 jme_mac_rxclk_off(jme);
241 jme_mac_txclk_off(jme);
242 udelay(1);
243 jme_clear_ghc_reset(jme);
244 udelay(1);
245 jme_mac_rxclk_on(jme);
246 jme_mac_txclk_on(jme);
247 udelay(1);
248 jme_mac_rxclk_off(jme);
249 jme_mac_txclk_off(jme);
174 250
175 jwrite32(jme, JME_RXDBA_LO, 0x00000000); 251 jwrite32(jme, JME_RXDBA_LO, 0x00000000);
176 jwrite32(jme, JME_RXDBA_HI, 0x00000000); 252 jwrite32(jme, JME_RXDBA_HI, 0x00000000);
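
The rewritten jme_reset_mac_processor() above brackets the GHC_SWRST pulse with explicit RX/TX MAC clock toggles: clocks on, assert reset, clocks off, deassert reset, then one more on/off cycle, with a 1 us settle between steps. Every helper it calls follows the same shadow-register discipline, sketched generically below (jme_update_ghc is a hypothetical name; jwrite32f is the driver's flushing register write):

    static inline void
    jme_update_ghc(struct jme_adapter *jme, u32 set, u32 clear)
    {
            /* mutate the cached copy, then flush it to the device so
             * the write completes before the next step of the sequence */
            jme->reg_ghc = (jme->reg_ghc & ~clear) | set;
            jwrite32f(jme, JME_GHC, jme->reg_ghc);
    }
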
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
190 else 266 else
191 gpreg0 = GPREG0_DEFAULT; 267 gpreg0 = GPREG0_DEFAULT;
192 jwrite32(jme, JME_GPREG0, gpreg0); 268 jwrite32(jme, JME_GPREG0, gpreg0);
193 jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
194}
195
196static inline void
197jme_reset_ghc_speed(struct jme_adapter *jme)
198{
199 jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
200 jwrite32(jme, JME_GHC, jme->reg_ghc);
201} 269}
202 270
203static inline void 271static inline void
@@ -205,7 +273,7 @@ jme_clear_pm(struct jme_adapter *jme)
205{ 273{
206 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); 274 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
207 pci_set_power_state(jme->pdev, PCI_D0); 275 pci_set_power_state(jme->pdev, PCI_D0);
208 pci_enable_wake(jme->pdev, PCI_D0, false); 276 device_set_wakeup_enable(&jme->pdev->dev, false);
209} 277}
210 278
211static int 279static int
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
336} 404}
337 405
338static inline void 406static inline void
339jme_set_phyfifoa(struct jme_adapter *jme) 407jme_set_phyfifo_5level(struct jme_adapter *jme)
340{ 408{
341 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); 409 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
342} 410}
343 411
344static inline void 412static inline void
345jme_set_phyfifob(struct jme_adapter *jme) 413jme_set_phyfifo_8level(struct jme_adapter *jme)
346{ 414{
347 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); 415 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
348} 416}
@@ -351,7 +419,7 @@ static int
351jme_check_link(struct net_device *netdev, int testonly) 419jme_check_link(struct net_device *netdev, int testonly)
352{ 420{
353 struct jme_adapter *jme = netdev_priv(netdev); 421 struct jme_adapter *jme = netdev_priv(netdev);
354 u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1; 422 u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
355 char linkmsg[64]; 423 char linkmsg[64];
356 int rc = 0; 424 int rc = 0;
357 425
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
414 482
415 jme->phylink = phylink; 483 jme->phylink = phylink;
416 484
417 ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX | 485 /*
418 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE | 486 * The speed/duplex bits of jme->reg_ghc have already been
419 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY); 487 * cleared by jme_reset_mac_processor()
488 */
420 switch (phylink & PHY_LINK_SPEED_MASK) { 489 switch (phylink & PHY_LINK_SPEED_MASK) {
421 case PHY_LINK_SPEED_10M: 490 case PHY_LINK_SPEED_10M:
422 ghc |= GHC_SPEED_10M | 491 jme->reg_ghc |= GHC_SPEED_10M;
423 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
424 strcat(linkmsg, "10 Mbps, "); 492 strcat(linkmsg, "10 Mbps, ");
425 break; 493 break;
426 case PHY_LINK_SPEED_100M: 494 case PHY_LINK_SPEED_100M:
427 ghc |= GHC_SPEED_100M | 495 jme->reg_ghc |= GHC_SPEED_100M;
428 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
429 strcat(linkmsg, "100 Mbps, "); 496 strcat(linkmsg, "100 Mbps, ");
430 break; 497 break;
431 case PHY_LINK_SPEED_1000M: 498 case PHY_LINK_SPEED_1000M:
432 ghc |= GHC_SPEED_1000M | 499 jme->reg_ghc |= GHC_SPEED_1000M;
433 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
434 strcat(linkmsg, "1000 Mbps, "); 500 strcat(linkmsg, "1000 Mbps, ");
435 break; 501 break;
436 default: 502 default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
439 505
440 if (phylink & PHY_LINK_DUPLEX) { 506 if (phylink & PHY_LINK_DUPLEX) {
441 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); 507 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
442 ghc |= GHC_DPX; 508 jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
509 jme->reg_ghc |= GHC_DPX;
443 } else { 510 } else {
444 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | 511 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
445 TXMCS_BACKOFF | 512 TXMCS_BACKOFF |
446 TXMCS_CARRIERSENSE | 513 TXMCS_CARRIERSENSE |
447 TXMCS_COLLISION); 514 TXMCS_COLLISION);
448 jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | 515 jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
449 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
450 TXTRHD_TXREN |
451 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
452 } 516 }
453 517
454 gpreg1 = GPREG1_DEFAULT; 518 jwrite32(jme, JME_GHC, jme->reg_ghc);
519
455 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 520 if (is_buggy250(jme->pdev->device, jme->chiprev)) {
521 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
522 GPREG1_RSSPATCH);
456 if (!(phylink & PHY_LINK_DUPLEX)) 523 if (!(phylink & PHY_LINK_DUPLEX))
457 gpreg1 |= GPREG1_HALFMODEPATCH; 524 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
458 switch (phylink & PHY_LINK_SPEED_MASK) { 525 switch (phylink & PHY_LINK_SPEED_MASK) {
459 case PHY_LINK_SPEED_10M: 526 case PHY_LINK_SPEED_10M:
460 jme_set_phyfifoa(jme); 527 jme_set_phyfifo_8level(jme);
461 gpreg1 |= GPREG1_RSSPATCH; 528 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
462 break; 529 break;
463 case PHY_LINK_SPEED_100M: 530 case PHY_LINK_SPEED_100M:
464 jme_set_phyfifob(jme); 531 jme_set_phyfifo_5level(jme);
465 gpreg1 |= GPREG1_RSSPATCH; 532 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
466 break; 533 break;
467 case PHY_LINK_SPEED_1000M: 534 case PHY_LINK_SPEED_1000M:
468 jme_set_phyfifoa(jme); 535 jme_set_phyfifo_8level(jme);
469 break; 536 break;
470 default: 537 default:
471 break; 538 break;
472 } 539 }
473 } 540 }
474 541 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
475 jwrite32(jme, JME_GPREG1, gpreg1);
476 jwrite32(jme, JME_GHC, ghc);
477 jme->reg_ghc = ghc;
478 542
479 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 543 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
480 "Full-Duplex, " : 544 "Full-Duplex, " :
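
Because jme_reset_mac_processor() now leaves GHC_SPEED and GHC_DPX cleared, jme_check_link() only has to OR in the bits for the negotiated link; the clock-source bits are handled separately by jme_mac_txclk_on(). The mapping reduces to a small pure function, restated here as a sketch (helper name hypothetical):

    static u32 jme_speed_to_ghc(u32 phylink)
    {
            switch (phylink & PHY_LINK_SPEED_MASK) {
            case PHY_LINK_SPEED_10M:   return GHC_SPEED_10M;
            case PHY_LINK_SPEED_100M:  return GHC_SPEED_100M;
            case PHY_LINK_SPEED_1000M: return GHC_SPEED_1000M;
            default:                   return 0; /* link still resolving */
            }
    }
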
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
613 * Enable TX Engine 677 * Enable TX Engine
614 */ 678 */
615 wmb(); 679 wmb();
616 jwrite32(jme, JME_TXCS, jme->reg_txcs | 680 jwrite32f(jme, JME_TXCS, jme->reg_txcs |
617 TXCS_SELECT_QUEUE0 | 681 TXCS_SELECT_QUEUE0 |
618 TXCS_ENABLE); 682 TXCS_ENABLE);
619 683
684 /*
685 * Start clock for TX MAC Processor
686 */
687 jme_mac_txclk_on(jme);
620} 688}
621 689
622static inline void 690static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
651 719
652 if (!i) 720 if (!i)
653 pr_err("Disable TX engine timeout\n"); 721 pr_err("Disable TX engine timeout\n");
722
723 /*
724 * Stop clock for TX MAC Processor
725 */
726 jme_mac_txclk_off(jme);
654} 727}
655 728
656static void 729static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
825 /* 898 /*
826 * Setup Unicast Filter 899 * Setup Unicast Filter
827 */ 900 */
901 jme_set_unicastaddr(jme->dev);
828 jme_set_multi(jme->dev); 902 jme_set_multi(jme->dev);
829 903
830 /* 904 /*
831 * Enable RX Engine 905 * Enable RX Engine
832 */ 906 */
833 wmb(); 907 wmb();
834 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 908 jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
835 RXCS_QUEUESEL_Q0 | 909 RXCS_QUEUESEL_Q0 |
836 RXCS_ENABLE | 910 RXCS_ENABLE |
837 RXCS_QST); 911 RXCS_QST);
912
913 /*
914 * Start clock for RX MAC Processor
915 */
916 jme_mac_rxclk_on(jme);
838} 917}
839 918
840static inline void 919static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
871 if (!i) 950 if (!i)
872 pr_err("Disable RX engine timeout\n"); 951 pr_err("Disable RX engine timeout\n");
873 952
953 /*
954 * Stop clock for RX MAC Processor
955 */
956 jme_mac_rxclk_off(jme);
957}
958
959static u16
960jme_udpsum(struct sk_buff *skb)
961{
962 u16 csum = 0xFFFFu;
963
964 if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
965 return csum;
966 if (skb->protocol != htons(ETH_P_IP))
967 return csum;
968 skb_set_network_header(skb, ETH_HLEN);
969 if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
970 (skb->len < (ETH_HLEN +
971 (ip_hdr(skb)->ihl << 2) +
972 sizeof(struct udphdr)))) {
973 skb_reset_network_header(skb);
974 return csum;
975 }
976 skb_set_transport_header(skb,
977 ETH_HLEN + (ip_hdr(skb)->ihl << 2));
978 csum = udp_hdr(skb)->check;
979 skb_reset_transport_header(skb);
980 skb_reset_network_header(skb);
981
982 return csum;
874} 983}
875 984
876static int 985static int
877jme_rxsum_ok(struct jme_adapter *jme, u16 flags) 986jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
878{ 987{
879 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) 988 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
880 return false; 989 return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
887 } 996 }
888 997
889 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 998 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
890 == RXWBFLAG_UDPON)) { 999 == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
891 if (flags & RXWBFLAG_IPV4) 1000 if (flags & RXWBFLAG_IPV4)
892 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); 1001 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
893 return false; 1002 return false;
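
jme_udpsum() exists because a UDP/IPv4 sender may legitimately transmit a zero checksum field (checksum not computed), which this hardware reports as a checksum error; jme_rxsum_ok() therefore only honours the UDP error flags when the checksum field in the frame is actually non-zero. A standalone sketch of the same parse, with raw offsets instead of the skb helpers (assumes an untagged Ethernet frame):

    #include <stdint.h>
    #include <stddef.h>

    /* Return the UDP checksum field of an IPv4/UDP frame, or 0xFFFF if
     * the frame is not a complete IPv4/UDP packet. */
    static uint16_t frame_udp_csum(const uint8_t *f, size_t len)
    {
            size_t ihl;

            if (len < 14 + 20 || f[12] != 0x08 || f[13] != 0x00)
                    return 0xFFFF;          /* short, or not ETH_P_IP */
            ihl = (f[14] & 0x0F) * 4;       /* IPv4 header length */
            if (f[23] != 17 || len < 14 + ihl + 8)
                    return 0xFFFF;          /* not UDP, or truncated */
            /* checksum is bytes 6-7 of the UDP header, big-endian */
            return (uint16_t)(f[14 + ihl + 6] << 8 | f[14 + ihl + 7]);
    }
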
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
935 skb_put(skb, framesize); 1044 skb_put(skb, framesize);
936 skb->protocol = eth_type_trans(skb, jme->dev); 1045 skb->protocol = eth_type_trans(skb, jme->dev);
937 1046
938 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 1047 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
939 skb->ip_summed = CHECKSUM_UNNECESSARY; 1048 skb->ip_summed = CHECKSUM_UNNECESSARY;
940 else 1049 else
941 skb_checksum_none_assert(skb); 1050 skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
1207 tasklet_disable(&jme->rxempty_task); 1316 tasklet_disable(&jme->rxempty_task);
1208 1317
1209 if (netif_carrier_ok(netdev)) { 1318 if (netif_carrier_ok(netdev)) {
1210 jme_reset_ghc_speed(jme);
1211 jme_disable_rx_engine(jme); 1319 jme_disable_rx_engine(jme);
1212 jme_disable_tx_engine(jme); 1320 jme_disable_tx_engine(jme);
1213 jme_reset_mac_processor(jme); 1321 jme_reset_mac_processor(jme);
@@ -1577,6 +1685,38 @@ jme_free_irq(struct jme_adapter *jme)
1577} 1685}
1578 1686
1579static inline void 1687static inline void
1688jme_new_phy_on(struct jme_adapter *jme)
1689{
1690 u32 reg;
1691
1692 reg = jread32(jme, JME_PHY_PWR);
1693 reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1694 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1695 jwrite32(jme, JME_PHY_PWR, reg);
1696
1697 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1698 reg &= ~PE1_GPREG0_PBG;
1699 reg |= PE1_GPREG0_ENBG;
1700 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1701}
1702
1703static inline void
1704jme_new_phy_off(struct jme_adapter *jme)
1705{
1706 u32 reg;
1707
1708 reg = jread32(jme, JME_PHY_PWR);
1709 reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1710 PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1711 jwrite32(jme, JME_PHY_PWR, reg);
1712
1713 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1714 reg &= ~PE1_GPREG0_PBG;
1715 reg |= PE1_GPREG0_PDD3COLD;
1716 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1717}
1718
1719static inline void
1580jme_phy_on(struct jme_adapter *jme) 1720jme_phy_on(struct jme_adapter *jme)
1581{ 1721{
1582 u32 bmcr; 1722 u32 bmcr;
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
1584 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1724 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1585 bmcr &= ~BMCR_PDOWN; 1725 bmcr &= ~BMCR_PDOWN;
1586 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); 1726 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1727
1728 if (new_phy_power_ctrl(jme->chip_main_rev))
1729 jme_new_phy_on(jme);
1730}
1731
1732static inline void
1733jme_phy_off(struct jme_adapter *jme)
1734{
1735 u32 bmcr;
1736
1737 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1738 bmcr |= BMCR_PDOWN;
1739 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1740
1741 if (new_phy_power_ctrl(jme->chip_main_rev))
1742 jme_new_phy_off(jme);
1587} 1743}
1588 1744
1589static int 1745static int
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
1606 1762
1607 jme_start_irq(jme); 1763 jme_start_irq(jme);
1608 1764
1609 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 1765 jme_phy_on(jme);
1610 jme_phy_on(jme); 1766 if (test_bit(JME_FLAG_SSET, &jme->flags))
1611 jme_set_settings(netdev, &jme->old_ecmd); 1767 jme_set_settings(netdev, &jme->old_ecmd);
1612 } else { 1768 else
1613 jme_reset_phy_processor(jme); 1769 jme_reset_phy_processor(jme);
1614 }
1615 1770
1616 jme_reset_link(jme); 1771 jme_reset_link(jme);
1617 1772
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
1657 } 1812 }
1658} 1813}
1659 1814
1660static inline void
1661jme_phy_off(struct jme_adapter *jme)
1662{
1663 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
1664}
1665
1666static void 1815static void
1667jme_powersave_phy(struct jme_adapter *jme) 1816jme_powersave_phy(struct jme_adapter *jme)
1668{ 1817{
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
1696 tasklet_disable(&jme->rxclean_task); 1845 tasklet_disable(&jme->rxclean_task);
1697 tasklet_disable(&jme->rxempty_task); 1846 tasklet_disable(&jme->rxempty_task);
1698 1847
1699 jme_reset_ghc_speed(jme);
1700 jme_disable_rx_engine(jme); 1848 jme_disable_rx_engine(jme);
1701 jme_disable_tx_engine(jme); 1849 jme_disable_tx_engine(jme);
1702 jme_reset_mac_processor(jme); 1850 jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1993 return NETDEV_TX_OK; 2141 return NETDEV_TX_OK;
1994} 2142}
1995 2143
2144static void
2145jme_set_unicastaddr(struct net_device *netdev)
2146{
2147 struct jme_adapter *jme = netdev_priv(netdev);
2148 u32 val;
2149
2150 val = (netdev->dev_addr[3] & 0xff) << 24 |
2151 (netdev->dev_addr[2] & 0xff) << 16 |
2152 (netdev->dev_addr[1] & 0xff) << 8 |
2153 (netdev->dev_addr[0] & 0xff);
2154 jwrite32(jme, JME_RXUMA_LO, val);
2155 val = (netdev->dev_addr[5] & 0xff) << 8 |
2156 (netdev->dev_addr[4] & 0xff);
2157 jwrite32(jme, JME_RXUMA_HI, val);
2158}
2159
1996static int 2160static int
1997jme_set_macaddr(struct net_device *netdev, void *p) 2161jme_set_macaddr(struct net_device *netdev, void *p)
1998{ 2162{
1999 struct jme_adapter *jme = netdev_priv(netdev); 2163 struct jme_adapter *jme = netdev_priv(netdev);
2000 struct sockaddr *addr = p; 2164 struct sockaddr *addr = p;
2001 u32 val;
2002 2165
2003 if (netif_running(netdev)) 2166 if (netif_running(netdev))
2004 return -EBUSY; 2167 return -EBUSY;
2005 2168
2006 spin_lock_bh(&jme->macaddr_lock); 2169 spin_lock_bh(&jme->macaddr_lock);
2007 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2170 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2008 2171 jme_set_unicastaddr(netdev);
2009 val = (addr->sa_data[3] & 0xff) << 24 |
2010 (addr->sa_data[2] & 0xff) << 16 |
2011 (addr->sa_data[1] & 0xff) << 8 |
2012 (addr->sa_data[0] & 0xff);
2013 jwrite32(jme, JME_RXUMA_LO, val);
2014 val = (addr->sa_data[5] & 0xff) << 8 |
2015 (addr->sa_data[4] & 0xff);
2016 jwrite32(jme, JME_RXUMA_HI, val);
2017 spin_unlock_bh(&jme->macaddr_lock); 2172 spin_unlock_bh(&jme->macaddr_lock);
2018 2173
2019 return 0; 2174 return 0;
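
Splitting jme_set_unicastaddr() out of jme_set_macaddr() lets jme_enable_rx_engine() re-program the unicast filter as well, so the configured MAC address survives a resume that resets the MAC. The register packing itself is plain byte shuffling, sketched standalone:

    #include <stdint.h>

    /* RXUMA packing as in jme_set_unicastaddr(): addr[0..3] fill the
     * low register LSB-first, addr[4..5] the high register. */
    static void pack_rxuma(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)addr[3] << 24 | (uint32_t)addr[2] << 16 |
                  (uint32_t)addr[1] << 8  | addr[0];
            *hi = (uint32_t)addr[5] << 8  | addr[4];
    }
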
@@ -2383,6 +2538,8 @@ jme_set_wol(struct net_device *netdev,
2383 2538
2384 jwrite32(jme, JME_PMCS, jme->reg_pmcs); 2539 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2385 2540
2541 device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs);
2542
2386 return 0; 2543 return 0;
2387} 2544}
2388 2545
@@ -2731,6 +2888,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
2731 2888
2732 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; 2889 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2733 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; 2890 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2891 jme->chip_main_rev = jme->chiprev & 0xF;
2892 jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
2734} 2893}
2735 2894
2736static const struct net_device_ops jme_netdev_ops = { 2895static const struct net_device_ops jme_netdev_ops = {
@@ -2880,6 +3039,7 @@ jme_init_one(struct pci_dev *pdev,
2880 jme->reg_rxmcs = RXMCS_DEFAULT; 3039 jme->reg_rxmcs = RXMCS_DEFAULT;
2881 jme->reg_txpfc = 0; 3040 jme->reg_txpfc = 0;
2882 jme->reg_pmcs = PMCS_MFEN; 3041 jme->reg_pmcs = PMCS_MFEN;
3042 jme->reg_gpreg1 = GPREG1_DEFAULT;
2883 set_bit(JME_FLAG_TXCSUM, &jme->flags); 3043 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2884 set_bit(JME_FLAG_TSO, &jme->flags); 3044 set_bit(JME_FLAG_TSO, &jme->flags);
2885 3045
@@ -2936,8 +3096,8 @@ jme_init_one(struct pci_dev *pdev,
2936 jme->mii_if.mdio_write = jme_mdio_write; 3096 jme->mii_if.mdio_write = jme_mdio_write;
2937 3097
2938 jme_clear_pm(jme); 3098 jme_clear_pm(jme);
2939 jme_set_phyfifoa(jme); 3099 jme_set_phyfifo_5level(jme);
2940 pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev); 3100 jme->pcirev = pdev->revision;
2941 if (!jme->fpgaver) 3101 if (!jme->fpgaver)
2942 jme_phy_init(jme); 3102 jme_phy_init(jme);
2943 jme_phy_off(jme); 3103 jme_phy_off(jme);
@@ -2964,14 +3124,14 @@ jme_init_one(struct pci_dev *pdev,
2964 goto err_out_unmap; 3124 goto err_out_unmap;
2965 } 3125 }
2966 3126
2967 netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n", 3127 netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
2968 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? 3128 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
2969 "JMC250 Gigabit Ethernet" : 3129 "JMC250 Gigabit Ethernet" :
2970 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? 3130 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
2971 "JMC260 Fast Ethernet" : "Unknown", 3131 "JMC260 Fast Ethernet" : "Unknown",
2972 (jme->fpgaver != 0) ? " (FPGA)" : "", 3132 (jme->fpgaver != 0) ? " (FPGA)" : "",
2973 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, 3133 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
2974 jme->rev, netdev->dev_addr); 3134 jme->pcirev, netdev->dev_addr);
2975 3135
2976 return 0; 3136 return 0;
2977 3137
@@ -3014,9 +3174,9 @@ jme_shutdown(struct pci_dev *pdev)
3014} 3174}
3015 3175
3016#ifdef CONFIG_PM 3176#ifdef CONFIG_PM
3017static int 3177static int jme_suspend(struct device *dev)
3018jme_suspend(struct pci_dev *pdev, pm_message_t state)
3019{ 3178{
3179 struct pci_dev *pdev = to_pci_dev(dev);
3020 struct net_device *netdev = pci_get_drvdata(pdev); 3180 struct net_device *netdev = pci_get_drvdata(pdev);
3021 struct jme_adapter *jme = netdev_priv(netdev); 3181 struct jme_adapter *jme = netdev_priv(netdev);
3022 3182
@@ -3035,7 +3195,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
3035 jme_polling_mode(jme); 3195 jme_polling_mode(jme);
3036 3196
3037 jme_stop_pcc_timer(jme); 3197 jme_stop_pcc_timer(jme);
3038 jme_reset_ghc_speed(jme);
3039 jme_disable_rx_engine(jme); 3198 jme_disable_rx_engine(jme);
3040 jme_disable_tx_engine(jme); 3199 jme_disable_tx_engine(jme);
3041 jme_reset_mac_processor(jme); 3200 jme_reset_mac_processor(jme);
@@ -3049,29 +3208,24 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
3049 tasklet_hi_enable(&jme->rxclean_task); 3208 tasklet_hi_enable(&jme->rxclean_task);
3050 tasklet_hi_enable(&jme->rxempty_task); 3209 tasklet_hi_enable(&jme->rxempty_task);
3051 3210
3052 pci_save_state(pdev);
3053 jme_powersave_phy(jme); 3211 jme_powersave_phy(jme);
3054 pci_enable_wake(jme->pdev, PCI_D3hot, true);
3055 pci_set_power_state(pdev, PCI_D3hot);
3056 3212
3057 return 0; 3213 return 0;
3058} 3214}
3059 3215
3060static int 3216static int jme_resume(struct device *dev)
3061jme_resume(struct pci_dev *pdev)
3062{ 3217{
3218 struct pci_dev *pdev = to_pci_dev(dev);
3063 struct net_device *netdev = pci_get_drvdata(pdev); 3219 struct net_device *netdev = pci_get_drvdata(pdev);
3064 struct jme_adapter *jme = netdev_priv(netdev); 3220 struct jme_adapter *jme = netdev_priv(netdev);
3065 3221
3066 jme_clear_pm(jme); 3222 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
3067 pci_restore_state(pdev);
3068 3223
3069 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 3224 jme_phy_on(jme);
3070 jme_phy_on(jme); 3225 if (test_bit(JME_FLAG_SSET, &jme->flags))
3071 jme_set_settings(netdev, &jme->old_ecmd); 3226 jme_set_settings(netdev, &jme->old_ecmd);
3072 } else { 3227 else
3073 jme_reset_phy_processor(jme); 3228 jme_reset_phy_processor(jme);
3074 }
3075 3229
3076 jme_start_irq(jme); 3230 jme_start_irq(jme);
3077 netif_device_attach(netdev); 3231 netif_device_attach(netdev);
@@ -3082,6 +3236,13 @@ jme_resume(struct pci_dev *pdev)
3082 3236
3083 return 0; 3237 return 0;
3084} 3238}
3239
3240static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
3241#define JME_PM_OPS (&jme_pm_ops)
3242
3243#else
3244
3245#define JME_PM_OPS NULL
3085#endif 3246#endif
3086 3247
3087static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { 3248static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
@@ -3095,11 +3256,8 @@ static struct pci_driver jme_driver = {
3095 .id_table = jme_pci_tbl, 3256 .id_table = jme_pci_tbl,
3096 .probe = jme_init_one, 3257 .probe = jme_init_one,
3097 .remove = __devexit_p(jme_remove_one), 3258 .remove = __devexit_p(jme_remove_one),
3098#ifdef CONFIG_PM
3099 .suspend = jme_suspend,
3100 .resume = jme_resume,
3101#endif /* CONFIG_PM */
3102 .shutdown = jme_shutdown, 3259 .shutdown = jme_shutdown,
3260 .driver.pm = JME_PM_OPS,
3103}; 3261};
3104 3262
3105static int __init 3263static int __init
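
The suspend/resume rework drops the legacy pci_driver .suspend/.resume methods for dev_pm_ops: the callbacks now take a struct device, SIMPLE_DEV_PM_OPS fills the suspend/resume (and hibernation) slots, and the PCI core takes over the pci_save_state()/power-state handling the old callbacks did by hand. The pattern, reduced to a sketch for any PCI network driver (foo_* names hypothetical):

    #include <linux/pci.h>
    #include <linux/pm.h>
    #include <linux/netdevice.h>

    static int foo_suspend(struct device *dev)
    {
            struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));

            netif_device_detach(netdev);
            /* quiesce the hardware; state save is the PCI core's job */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));

            /* re-initialize the hardware */
            netif_device_attach(netdev);
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct pci_driver foo_driver = {
            /* .name, .id_table, .probe, .remove ... */
            .driver.pm = &foo_pm_ops,
    };
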
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index eac09264bf2..8bf30451e82 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -26,7 +26,7 @@
26#define __JME_H_INCLUDED__ 26#define __JME_H_INCLUDED__
27 27
28#define DRV_NAME "jme" 28#define DRV_NAME "jme"
29#define DRV_VERSION "1.0.7" 29#define DRV_VERSION "1.0.8"
30#define PFX DRV_NAME ": " 30#define PFX DRV_NAME ": "
31 31
32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
103#define HALF_US 500 /* 500 ns */ 103#define HALF_US 500 /* 500 ns */
104#define JMESPIIOCTL SIOCDEVPRIVATE 104#define JMESPIIOCTL SIOCDEVPRIVATE
105 105
106#define PCI_PRIV_PE1 0xE4
107
108enum pci_priv_pe1_bit_masks {
109 PE1_ASPMSUPRT = 0x00000003, /*
110 * RW:
111 * Aspm_support[1:0]
112 * (R/W Port of 5C[11:10])
113 */
114 PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */
115 PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */
116 PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */
117 PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
118 PE1_GPREG0 = 0x0000FF00, /*
119 * SRW:
120 * Cfg_gp_reg0
121 * [7:6] phy_giga BG control
122 * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
123 * [4:0] Reserved
124 */
125 PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */
126 PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */
127 PE1_REVID = 0xFF000000, /* RO: Rev ID */
128};
129
130enum pci_priv_pe1_values {
131 PE1_GPREG0_ENBG = 0x00000000, /* en BG */
132 PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */
133 PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */
134 PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */
135};
136
106/* 137/*
107 * Dynamic(adaptive)/Static PCC values 138 * Dynamic(adaptive)/Static PCC values
108 */ 139 */
@@ -403,6 +434,7 @@ struct jme_adapter {
403 u32 reg_rxmcs; 434 u32 reg_rxmcs;
404 u32 reg_ghc; 435 u32 reg_ghc;
405 u32 reg_pmcs; 436 u32 reg_pmcs;
437 u32 reg_gpreg1;
406 u32 phylink; 438 u32 phylink;
407 u32 tx_ring_size; 439 u32 tx_ring_size;
408 u32 tx_ring_mask; 440 u32 tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
411 u32 rx_ring_mask; 443 u32 rx_ring_mask;
412 u8 mrrs; 444 u8 mrrs;
413 unsigned int fpgaver; 445 unsigned int fpgaver;
414 unsigned int chiprev; 446 u8 chiprev;
415 u8 rev; 447 u8 chip_main_rev;
448 u8 chip_sub_rev;
449 u8 pcirev;
416 u32 msg_enable; 450 u32 msg_enable;
417 struct ethtool_cmd old_ecmd; 451 struct ethtool_cmd old_ecmd;
418 unsigned int old_mtu; 452 unsigned int old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
497 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */ 531 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
498 532
499 533
534 JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
500 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */ 535 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
501 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */ 536 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
502 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */ 537 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
624 TXTRHD_TXRL_SHIFT = 0, 659 TXTRHD_TXRL_SHIFT = 0,
625}; 660};
626 661
662enum jme_txtrhd_values {
663 TXTRHD_FULLDUPLEX = 0x00000000,
664 TXTRHD_HALFDUPLEX = TXTRHD_TXPEN |
665 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
666 TXTRHD_TXREN |
667 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
668};
669
627/* 670/*
628 * RX Control/Status Bits 671 * RX Control/Status Bits
629 */ 672 */
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
779 */ 822 */
780enum jme_ghc_bit_mask { 823enum jme_ghc_bit_mask {
781 GHC_SWRST = 0x40000000, 824 GHC_SWRST = 0x40000000,
825 GHC_TO_CLK_SRC = 0x00C00000,
826 GHC_TXMAC_CLK_SRC = 0x00300000,
782 GHC_DPX = 0x00000040, 827 GHC_DPX = 0x00000040,
783 GHC_SPEED = 0x00000030, 828 GHC_SPEED = 0x00000030,
784 GHC_LINK_POLL = 0x00000001, 829 GHC_LINK_POLL = 0x00000001,
@@ -833,6 +878,21 @@ enum jme_pmcs_bit_masks {
833}; 878};
834 879
835/* 880/*
881 * New PHY Power Control Register
882 */
883enum jme_phy_pwr_bit_masks {
884 PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
885 PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
886 PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
887 PHY_PWR_CLKSEL = 0x08000000, /*
888 * XTL_OUT Clock select
889 * (an internal free-running clock)
890 * 0: xtl_out = phy_giga.A_XTL25_O
891 * 1: xtl_out = phy_giga.PD_OSC
892 */
893};
894
895/*
836 * Giga PHY Status Registers 896 * Giga PHY Status Registers
837 */ 897 */
838enum jme_phy_link_bit_mask { 898enum jme_phy_link_bit_mask {
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
942 1002
943/* 1003/*
944 * General Purpose REG-1 1004 * General Purpose REG-1
945 * Note: All theses bits defined here are for
946 * Chip mode revision 0x11 only
947 */ 1005 */
948enum jme_gpreg1_masks { 1006enum jme_gpreg1_bit_masks {
1007 GPREG1_RXCLKOFF = 0x04000000,
1008 GPREG1_PCREQN = 0x00020000,
1009 GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */
1010 GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */
949 GPREG1_INTRDELAYUNIT = 0x00000018, 1011 GPREG1_INTRDELAYUNIT = 0x00000018,
950 GPREG1_INTRDELAYENABLE = 0x00000007, 1012 GPREG1_INTRDELAYENABLE = 0x00000007,
951}; 1013};
952 1014
953enum jme_gpreg1_vals { 1015enum jme_gpreg1_vals {
954 GPREG1_RSSPATCH = 0x00000040,
955 GPREG1_HALFMODEPATCH = 0x00000020,
956
957 GPREG1_INTDLYUNIT_16NS = 0x00000000, 1016 GPREG1_INTDLYUNIT_16NS = 0x00000000,
958 GPREG1_INTDLYUNIT_256NS = 0x00000008, 1017 GPREG1_INTDLYUNIT_256NS = 0x00000008,
959 GPREG1_INTDLYUNIT_1US = 0x00000010, 1018 GPREG1_INTDLYUNIT_1US = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
967 GPREG1_INTDLYEN_6U = 0x00000006, 1026 GPREG1_INTDLYEN_6U = 0x00000006,
968 GPREG1_INTDLYEN_7U = 0x00000007, 1027 GPREG1_INTDLYEN_7U = 0x00000007,
969 1028
970 GPREG1_DEFAULT = 0x00000000, 1029 GPREG1_DEFAULT = GPREG1_PCREQN,
971}; 1030};
972 1031
973/* 1032/*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
1184/* 1243/*
1185 * Workaround 1244 * Workaround
1186 */ 1245 */
1187static inline int is_buggy250(unsigned short device, unsigned int chiprev) 1246static inline int is_buggy250(unsigned short device, u8 chiprev)
1188{ 1247{
1189 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11; 1248 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
1190} 1249}
1191 1250
1251static inline int new_phy_power_ctrl(u8 chip_main_rev)
1252{
1253 return chip_main_rev >= 5;
1254}
1255
1192/* 1256/*
1193 * Function prototypes 1257 * Function prototypes
1194 */ 1258 */
1195static int jme_set_settings(struct net_device *netdev, 1259static int jme_set_settings(struct net_device *netdev,
1196 struct ethtool_cmd *ecmd); 1260 struct ethtool_cmd *ecmd);
1261static void jme_set_unicastaddr(struct net_device *netdev);
1197static void jme_set_multi(struct net_device *netdev); 1262static void jme_set_multi(struct net_device *netdev);
1198 1263
1199#endif 1264#endif
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 928b2b83cef..efd44afeae8 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/mfd/core.h>
29#include <linux/netdevice.h> 30#include <linux/netdevice.h>
30#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
31#include <linux/ethtool.h> 32#include <linux/ethtool.h>
@@ -1145,7 +1146,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
1145 struct resource *iomem; 1146 struct resource *iomem;
1146 struct net_device *netdev; 1147 struct net_device *netdev;
1147 struct ks8842_adapter *adapter; 1148 struct ks8842_adapter *adapter;
1148 struct ks8842_platform_data *pdata = pdev->dev.platform_data; 1149 struct ks8842_platform_data *pdata = mfd_get_data(pdev);
1149 u16 id; 1150 u16 id;
1150 unsigned i; 1151 unsigned i;
1151 1152
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 540a8dcbcc4..7f7d5708a65 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -4898,7 +4898,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
4898 goto unlock; 4898 goto unlock;
4899 } 4899 }
4900 skb_copy_and_csum_dev(org_skb, skb->data); 4900 skb_copy_and_csum_dev(org_skb, skb->data);
4901 org_skb->ip_summed = 0; 4901 org_skb->ip_summed = CHECKSUM_NONE;
4902 skb->len = org_skb->len; 4902 skb->len = org_skb->len;
4903 copy_old_skb(org_skb, skb); 4903 copy_old_skb(org_skb, skb);
4904 } 4904 }
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index f35554d1144..b7948ccfcf7 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -952,8 +952,7 @@ static const struct attribute_group temac_attr_group = {
952 .attrs = temac_device_attrs, 952 .attrs = temac_device_attrs,
953}; 953};
954 954
955static int __devinit 955static int __devinit temac_of_probe(struct platform_device *op)
956temac_of_probe(struct platform_device *op, const struct of_device_id *match)
957{ 956{
958 struct device_node *np; 957 struct device_node *np;
959 struct temac_local *lp; 958 struct temac_local *lp;
@@ -1123,7 +1122,7 @@ static struct of_device_id temac_of_match[] __devinitdata = {
1123}; 1122};
1124MODULE_DEVICE_TABLE(of, temac_of_match); 1123MODULE_DEVICE_TABLE(of, temac_of_match);
1125 1124
1126static struct of_platform_driver temac_of_driver = { 1125static struct platform_driver temac_of_driver = {
1127 .probe = temac_of_probe, 1126 .probe = temac_of_probe,
1128 .remove = __devexit_p(temac_of_remove), 1127 .remove = __devexit_p(temac_of_remove),
1129 .driver = { 1128 .driver = {
@@ -1135,13 +1134,13 @@ static struct of_platform_driver temac_of_driver = {
1135 1134
1136static int __init temac_init(void) 1135static int __init temac_init(void)
1137{ 1136{
1138 return of_register_platform_driver(&temac_of_driver); 1137 return platform_driver_register(&temac_of_driver);
1139} 1138}
1140module_init(temac_init); 1139module_init(temac_init);
1141 1140
1142static void __exit temac_exit(void) 1141static void __exit temac_exit(void)
1143{ 1142{
1144 of_unregister_platform_driver(&temac_of_driver); 1143 platform_driver_unregister(&temac_of_driver);
1145} 1144}
1146module_exit(temac_exit); 1145module_exit(temac_exit);
1147 1146
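
This is the tree-wide of_platform_driver removal: an OF driver becomes an ordinary platform_driver, probe loses the of_device_id argument, and the match table lives in driver.of_match_table (recoverable in probe via of_match_device() when the driver needs the matched entry). A minimal sketch of the converted shape (bar_* names and the compatible string are hypothetical):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/of_device.h>

    static const struct of_device_id bar_of_match[] = {
            { .compatible = "acme,bar" },
            {},
    };
    MODULE_DEVICE_TABLE(of, bar_of_match);

    static int bar_probe(struct platform_device *op)
    {
            const struct of_device_id *match =
                    of_match_device(bar_of_match, &op->dev);

            if (!match)
                    return -EINVAL;
            /* ... resource and device setup ... */
            return 0;
    }

    static struct platform_driver bar_driver = {
            .probe = bar_probe,
            .driver = {
                    .name = "bar",
                    .of_match_table = bar_of_match,
            },
    };
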
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2d9663a1c54..ea0dc451da9 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
129 129
130static const struct ethtool_ops loopback_ethtool_ops = { 130static const struct ethtool_ops loopback_ethtool_ops = {
131 .get_link = always_on, 131 .get_link = always_on,
132 .set_tso = ethtool_op_set_tso,
133 .get_tx_csum = always_on,
134 .get_sg = always_on,
135 .get_rx_csum = always_on,
136}; 132};
137 133
138static int loopback_dev_init(struct net_device *dev) 134static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
169 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 165 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
170 dev->flags = IFF_LOOPBACK; 166 dev->flags = IFF_LOOPBACK;
171 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 167 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
168 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
172 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 169 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
173 | NETIF_F_TSO 170 | NETIF_F_ALL_TSO
171 | NETIF_F_UFO
174 | NETIF_F_NO_CSUM 172 | NETIF_F_NO_CSUM
173 | NETIF_F_RXCSUM
175 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
176 | NETIF_F_LLTX 175 | NETIF_F_LLTX
177 | NETIF_F_NETNS_LOCAL; 176 | NETIF_F_NETNS_LOCAL;
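
This is the new ethtool offload model: instead of per-offload get/set callbacks, a driver declares the user-togglable feature set in dev->hw_features and the default-on set in dev->features, and the generic ethtool code performs the toggling. Sketched as a setup helper (foo_setup is hypothetical):

    #include <linux/netdevice.h>

    static void foo_setup(struct net_device *dev)
    {
            /* togglable via ethtool -K */
            dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
            /* everything in features starts enabled */
            dev->features |= dev->hw_features | NETIF_F_RXCSUM;
    }
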
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e2191..79ccb54ab00 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
260 for (i = 0; i < PHY_MAX_ADDR; i++) 260 for (i = 0; i < PHY_MAX_ADDR; i++)
261 bp->mii_bus->irq[i] = PHY_POLL; 261 bp->mii_bus->irq[i] = PHY_POLL;
262 262
263 platform_set_drvdata(bp->dev, bp->mii_bus); 263 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
264 264
265 if (mdiobus_register(bp->mii_bus)) 265 if (mdiobus_register(bp->mii_bus))
266 goto err_out_free_mdio_irq; 266 goto err_out_free_mdio_irq;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6ed577b065d..78e34e9e4f0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,8 +39,11 @@ struct macvlan_port {
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu; 40 struct rcu_head rcu;
41 bool passthru; 41 bool passthru;
42 int count;
42}; 43};
43 44
45static void macvlan_port_destroy(struct net_device *dev);
46
44#define macvlan_port_get_rcu(dev) \ 47#define macvlan_port_get_rcu(dev) \
45 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) 48 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
46#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) 49#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
@@ -152,9 +155,10 @@ static void macvlan_broadcast(struct sk_buff *skb,
152} 155}
153 156
154/* called under rcu_read_lock() from netif_receive_skb */ 157/* called under rcu_read_lock() from netif_receive_skb */
155static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) 158static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
156{ 159{
157 struct macvlan_port *port; 160 struct macvlan_port *port;
161 struct sk_buff *skb = *pskb;
158 const struct ethhdr *eth = eth_hdr(skb); 162 const struct ethhdr *eth = eth_hdr(skb);
159 const struct macvlan_dev *vlan; 163 const struct macvlan_dev *vlan;
160 const struct macvlan_dev *src; 164 const struct macvlan_dev *src;
@@ -184,7 +188,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
184 */ 188 */
185 macvlan_broadcast(skb, port, src->dev, 189 macvlan_broadcast(skb, port, src->dev,
186 MACVLAN_MODE_VEPA); 190 MACVLAN_MODE_VEPA);
187 return skb; 191 return RX_HANDLER_PASS;
188 } 192 }
189 193
190 if (port->passthru) 194 if (port->passthru)
@@ -192,12 +196,12 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
192 else 196 else
193 vlan = macvlan_hash_lookup(port, eth->h_dest); 197 vlan = macvlan_hash_lookup(port, eth->h_dest);
194 if (vlan == NULL) 198 if (vlan == NULL)
195 return skb; 199 return RX_HANDLER_PASS;
196 200
197 dev = vlan->dev; 201 dev = vlan->dev;
198 if (unlikely(!(dev->flags & IFF_UP))) { 202 if (unlikely(!(dev->flags & IFF_UP))) {
199 kfree_skb(skb); 203 kfree_skb(skb);
200 return NULL; 204 return RX_HANDLER_CONSUMED;
201 } 205 }
202 len = skb->len + ETH_HLEN; 206 len = skb->len + ETH_HLEN;
203 skb = skb_share_check(skb, GFP_ATOMIC); 207 skb = skb_share_check(skb, GFP_ATOMIC);
@@ -211,7 +215,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
211 215
212out: 216out:
213 macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); 217 macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
214 return NULL; 218 return RX_HANDLER_CONSUMED;
215} 219}
216 220
217static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 221static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
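
The rx-handler conversion above replaces the old convention (return the skb to pass it on, NULL to signal consumption) with an explicit rx_handler_result_t, and passes struct sk_buff ** so a handler can substitute the skb, for example after skb_share_check(). The contract, sketched with hypothetical helpers:

    #include <linux/netdevice.h>

    static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
    {
            struct sk_buff *skb = *pskb;

            if (!frame_is_for_us(skb))          /* hypothetical test */
                    return RX_HANDLER_PASS;     /* stack continues */

            skb = skb_share_check(skb, GFP_ATOMIC);
            if (!skb)
                    return RX_HANDLER_CONSUMED; /* dropped */
            *pskb = skb;                        /* may be a clone now */

            deliver_locally(skb);               /* hypothetical consumer */
            return RX_HANDLER_CONSUMED;
    }
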
@@ -219,9 +223,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
219 const struct macvlan_dev *vlan = netdev_priv(dev); 223 const struct macvlan_dev *vlan = netdev_priv(dev);
220 const struct macvlan_port *port = vlan->port; 224 const struct macvlan_port *port = vlan->port;
221 const struct macvlan_dev *dest; 225 const struct macvlan_dev *dest;
226 __u8 ip_summed = skb->ip_summed;
222 227
223 if (vlan->mode == MACVLAN_MODE_BRIDGE) { 228 if (vlan->mode == MACVLAN_MODE_BRIDGE) {
224 const struct ethhdr *eth = (void *)skb->data; 229 const struct ethhdr *eth = (void *)skb->data;
230 skb->ip_summed = CHECKSUM_UNNECESSARY;
225 231
226 /* send to other bridge ports directly */ 232 /* send to other bridge ports directly */
227 if (is_multicast_ether_addr(eth->h_dest)) { 233 if (is_multicast_ether_addr(eth->h_dest)) {
@@ -241,6 +247,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
241 } 247 }
242 248
243xmit_world: 249xmit_world:
250 skb->ip_summed = ip_summed;
244 skb_set_dev(skb, vlan->lowerdev); 251 skb_set_dev(skb, vlan->lowerdev);
245 return dev_queue_xmit(skb); 252 return dev_queue_xmit(skb);
246} 253}
@@ -453,8 +460,13 @@ static int macvlan_init(struct net_device *dev)
453static void macvlan_uninit(struct net_device *dev) 460static void macvlan_uninit(struct net_device *dev)
454{ 461{
455 struct macvlan_dev *vlan = netdev_priv(dev); 462 struct macvlan_dev *vlan = netdev_priv(dev);
463 struct macvlan_port *port = vlan->port;
456 464
457 free_percpu(vlan->pcpu_stats); 465 free_percpu(vlan->pcpu_stats);
466
467 port->count -= 1;
468 if (!port->count)
469 macvlan_port_destroy(port->dev);
458} 470}
459 471
460static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, 472static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -687,12 +699,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
687 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 699 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
688 700
689 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 701 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
690 if (!list_empty(&port->vlans)) 702 if (port->count)
691 return -EINVAL; 703 return -EINVAL;
692 port->passthru = true; 704 port->passthru = true;
693 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); 705 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
694 } 706 }
695 707
708 port->count += 1;
696 err = register_netdevice(dev); 709 err = register_netdevice(dev);
697 if (err < 0) 710 if (err < 0)
698 goto destroy_port; 711 goto destroy_port;
@@ -703,7 +716,8 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
703 return 0; 716 return 0;
704 717
705destroy_port: 718destroy_port:
706 if (list_empty(&port->vlans)) 719 port->count -= 1;
720 if (!port->count)
707 macvlan_port_destroy(lowerdev); 721 macvlan_port_destroy(lowerdev);
708 722
709 return err; 723 return err;
@@ -721,13 +735,9 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
721void macvlan_dellink(struct net_device *dev, struct list_head *head) 735void macvlan_dellink(struct net_device *dev, struct list_head *head)
722{ 736{
723 struct macvlan_dev *vlan = netdev_priv(dev); 737 struct macvlan_dev *vlan = netdev_priv(dev);
724 struct macvlan_port *port = vlan->port;
725 738
726 list_del(&vlan->list); 739 list_del(&vlan->list);
727 unregister_netdevice_queue(dev, head); 740 unregister_netdevice_queue(dev, head);
728
729 if (list_empty(&port->vlans))
730 macvlan_port_destroy(port->dev);
731} 741}
732EXPORT_SYMBOL_GPL(macvlan_dellink); 742EXPORT_SYMBOL_GPL(macvlan_dellink);
733 743
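
The refcount change fixes port lifetime: previously the port was destroyed when the vlan list emptied in dellink, which could happen while the last device's teardown was still pending; now creation takes a count before register_netdevice() (dropping it on failure) and the final put in macvlan_uninit() destroys the port. The discipline in miniature, as a standalone sketch:

    #include <stdlib.h>

    struct port { int count; /* ... shared state ... */ };

    static struct port *port_get(struct port **pp)
    {
            if (!*pp)
                    *pp = calloc(1, sizeof(**pp));
            if (*pp)
                    (*pp)->count++;
            return *pp;
    }

    static void port_put(struct port **pp)
    {
            /* destroy only when the last user's teardown drops it */
            if (*pp && --(*pp)->count == 0) {
                    free(*pp);
                    *pp = NULL;
            }
    }
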
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621ac3f..6696e56e632 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@ struct macvtap_queue {
39 struct socket sock; 39 struct socket sock;
40 struct socket_wq wq; 40 struct socket_wq wq;
41 int vnet_hdr_sz; 41 int vnet_hdr_sz;
42 struct macvlan_dev *vlan; 42 struct macvlan_dev __rcu *vlan;
43 struct file *file; 43 struct file *file;
44 unsigned int flags; 44 unsigned int flags;
45}; 45};
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
141 struct macvlan_dev *vlan; 141 struct macvlan_dev *vlan;
142 142
143 spin_lock(&macvtap_lock); 143 spin_lock(&macvtap_lock);
144 vlan = rcu_dereference(q->vlan); 144 vlan = rcu_dereference_protected(q->vlan,
145 lockdep_is_held(&macvtap_lock));
145 if (vlan) { 146 if (vlan) {
146 int index = get_slot(vlan, q); 147 int index = get_slot(vlan, q);
147 148
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
219 /* macvtap_put_queue can free some slots, so go through all slots */ 220 /* macvtap_put_queue can free some slots, so go through all slots */
220 spin_lock(&macvtap_lock); 221 spin_lock(&macvtap_lock);
221 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) { 222 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
222 q = rcu_dereference(vlan->taps[i]); 223 q = rcu_dereference_protected(vlan->taps[i],
224 lockdep_is_held(&macvtap_lock));
223 if (q) { 225 if (q) {
224 qlist[j++] = q; 226 qlist[j++] = q;
225 rcu_assign_pointer(vlan->taps[i], NULL); 227 rcu_assign_pointer(vlan->taps[i], NULL);
@@ -528,8 +530,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
528 vnet_hdr_len = q->vnet_hdr_sz; 530 vnet_hdr_len = q->vnet_hdr_sz;
529 531
530 err = -EINVAL; 532 err = -EINVAL;
531 if ((len -= vnet_hdr_len) < 0) 533 if (len < vnet_hdr_len)
532 goto err; 534 goto err;
535 len -= vnet_hdr_len;
533 536
534 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, 537 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
535 sizeof(vnet_hdr)); 538 sizeof(vnet_hdr));
@@ -569,7 +572,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
569 } 572 }
570 573
571 rcu_read_lock_bh(); 574 rcu_read_lock_bh();
572 vlan = rcu_dereference(q->vlan); 575 vlan = rcu_dereference_bh(q->vlan);
573 if (vlan) 576 if (vlan)
574 macvlan_start_xmit(skb, vlan->dev); 577 macvlan_start_xmit(skb, vlan->dev);
575 else 578 else
@@ -583,7 +586,7 @@ err_kfree:
583 586
584err: 587err:
585 rcu_read_lock_bh(); 588 rcu_read_lock_bh();
586 vlan = rcu_dereference(q->vlan); 589 vlan = rcu_dereference_bh(q->vlan);
587 if (vlan) 590 if (vlan)
588 vlan->dev->stats.tx_dropped++; 591 vlan->dev->stats.tx_dropped++;
589 rcu_read_unlock_bh(); 592 rcu_read_unlock_bh();
@@ -631,7 +634,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
631 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len); 634 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
632 635
633 rcu_read_lock_bh(); 636 rcu_read_lock_bh();
634 vlan = rcu_dereference(q->vlan); 637 vlan = rcu_dereference_bh(q->vlan);
635 if (vlan) 638 if (vlan)
636 macvlan_count_rx(vlan, len, ret == 0, 0); 639 macvlan_count_rx(vlan, len, ret == 0, 0);
637 rcu_read_unlock_bh(); 640 rcu_read_unlock_bh();
@@ -727,7 +730,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
727 730
728 case TUNGETIFF: 731 case TUNGETIFF:
729 rcu_read_lock_bh(); 732 rcu_read_lock_bh();
730 vlan = rcu_dereference(q->vlan); 733 vlan = rcu_dereference_bh(q->vlan);
731 if (vlan) 734 if (vlan)
732 dev_hold(vlan->dev); 735 dev_hold(vlan->dev);
733 rcu_read_unlock_bh(); 736 rcu_read_unlock_bh();
@@ -736,7 +739,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
736 return -ENOLINK; 739 return -ENOLINK;
737 740
738 ret = 0; 741 ret = 0;
739 if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) || 742 if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
740 put_user(q->flags, &ifr->ifr_flags)) 743 put_user(q->flags, &ifr->ifr_flags))
741 ret = -EFAULT; 744 ret = -EFAULT;
742 dev_put(vlan->dev); 745 dev_put(vlan->dev);
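
With q->vlan now __rcu-annotated, each access must state its protection: rcu_dereference_protected() plus a lockdep expression where macvtap_lock is held, rcu_dereference_bh() inside rcu_read_lock_bh() sections. sparse (make C=1) checks the annotation and lockdep validates the claim at runtime. The access patterns, sketched generically (obj_* names hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct obj { int x; };
    static DEFINE_SPINLOCK(obj_lock);
    static struct obj __rcu *cur;

    /* writer: returns the old object for the caller to free
     * after a grace period */
    static struct obj *obj_publish(struct obj *newobj)
    {
            struct obj *old;

            spin_lock(&obj_lock);
            old = rcu_dereference_protected(cur,
                            lockdep_is_held(&obj_lock));
            rcu_assign_pointer(cur, newobj);
            spin_unlock(&obj_lock);
            return old;
    }

    static int obj_read(void)                    /* BH reader */
    {
            struct obj *o;
            int val = -1;

            rcu_read_lock_bh();
            o = rcu_dereference_bh(cur);
            if (o)
                    val = o->x;
            rcu_read_unlock_bh();
            return val;
    }
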
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 210b2b164b3..0a6c6a2e755 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -354,7 +354,7 @@ unsigned int mii_check_media (struct mii_if_info *mii,
354 if (!new_carrier) { 354 if (!new_carrier) {
355 netif_carrier_off(mii->dev); 355 netif_carrier_off(mii->dev);
356 if (ok_to_print) 356 if (ok_to_print)
357 printk(KERN_INFO "%s: link down\n", mii->dev->name); 357 netdev_info(mii->dev, "link down\n");
358 return 0; /* duplex did not change */ 358 return 0; /* duplex did not change */
359 } 359 }
360 360
@@ -381,12 +381,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
381 duplex = 1; 381 duplex = 1;
382 382
383 if (ok_to_print) 383 if (ok_to_print)
384 printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n", 384 netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
385 mii->dev->name, 385 lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
386 lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" : 386 media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
387 media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10", 387 100 : 10,
388 duplex ? "full" : "half", 388 duplex ? "full" : "half",
389 lpa); 389 lpa);
390 390
391 if ((init_media) || (mii->full_duplex != duplex)) { 391 if ((init_media) || (mii->full_duplex != duplex)) {
392 mii->full_duplex = duplex; 392 mii->full_duplex = duplex;
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 3a4277f6fac..116cae334da 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -62,6 +62,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
62 } else 62 } else
63 obj = -1; 63 obj = -1;
64 64
65 if (obj != -1)
66 --bitmap->avail;
67
65 spin_unlock(&bitmap->lock); 68 spin_unlock(&bitmap->lock);
66 69
67 return obj; 70 return obj;
@@ -101,11 +104,19 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
101 } else 104 } else
102 obj = -1; 105 obj = -1;
103 106
107 if (obj != -1)
108 bitmap->avail -= cnt;
109
104 spin_unlock(&bitmap->lock); 110 spin_unlock(&bitmap->lock);
105 111
106 return obj; 112 return obj;
107} 113}
108 114
115u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
116{
117 return bitmap->avail;
118}
119
109void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) 120void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
110{ 121{
111 obj &= bitmap->max + bitmap->reserved_top - 1; 122 obj &= bitmap->max + bitmap->reserved_top - 1;
@@ -115,6 +126,7 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
115 bitmap->last = min(bitmap->last, obj); 126 bitmap->last = min(bitmap->last, obj);
116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 127 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
117 & bitmap->mask; 128 & bitmap->mask;
129 bitmap->avail += cnt;
118 spin_unlock(&bitmap->lock); 130 spin_unlock(&bitmap->lock);
119} 131}
120 132
@@ -130,6 +142,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
130 bitmap->max = num - reserved_top; 142 bitmap->max = num - reserved_top;
131 bitmap->mask = mask; 143 bitmap->mask = mask;
132 bitmap->reserved_top = reserved_top; 144 bitmap->reserved_top = reserved_top;
145 bitmap->avail = num - reserved_top - reserved_bot;
133 spin_lock_init(&bitmap->lock); 146 spin_lock_init(&bitmap->lock);
134 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * 147 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
135 sizeof (long), GFP_KERNEL); 148 sizeof (long), GFP_KERNEL);
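
mlx4_bitmap gains an O(1) avail counter so callers (notably the new MSI-X vector pool code) can ask how many entries remain without scanning the bitmap; the driver adjusts it under bitmap->lock, decrementing only after a successful search and adding back on free. A simplified standalone mirror of the bookkeeping:

    #include <stdint.h>

    struct bitmap_alloc {
            uint32_t avail; /* entries neither allocated nor reserved */
            /* ... bitmap words, cursor, lock ... */
    };

    /* caller holds the bitmap lock; returns 0 on success */
    static int bitmap_take(struct bitmap_alloc *b, uint32_t cnt)
    {
            if (b->avail < cnt)
                    return -1;  /* the bitmap search would fail anyway */
            b->avail -= cnt;
            return 0;
    }

    static void bitmap_put(struct bitmap_alloc *b, uint32_t cnt)
    {
            b->avail += cnt;
    }
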
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 7cd34e9c7c7..bd8ef9f2fa7 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -198,7 +198,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
198 u64 mtt_addr; 198 u64 mtt_addr;
199 int err; 199 int err;
200 200
201 if (vector >= dev->caps.num_comp_vectors) 201 if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
202 return -EINVAL; 202 return -EINVAL;
203 203
204 cq->vector = vector; 204 cq->vector = vector;
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 21786ad4455..ec4b6d047fe 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -51,13 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
51 int err; 51 int err;
52 52
53 cq->size = entries; 53 cq->size = entries;
54 if (mode == RX) { 54 if (mode == RX)
55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe); 55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
56 cq->vector = ring % mdev->dev->caps.num_comp_vectors; 56 else
57 } else {
58 cq->buf_size = sizeof(struct mlx4_cqe); 57 cq->buf_size = sizeof(struct mlx4_cqe);
59 cq->vector = 0;
60 }
61 58
62 cq->ring = ring; 59 cq->ring = ring;
63 cq->is_tx = mode; 60 cq->is_tx = mode;
@@ -80,7 +77,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
80int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 77int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
81{ 78{
82 struct mlx4_en_dev *mdev = priv->mdev; 79 struct mlx4_en_dev *mdev = priv->mdev;
83 int err; 80 int err = 0;
81 char name[25];
84 82
85 cq->dev = mdev->pndev[priv->port]; 83 cq->dev = mdev->pndev[priv->port];
86 cq->mcq.set_ci_db = cq->wqres.db.db; 84 cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -89,6 +87,29 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
89 *cq->mcq.arm_db = 0; 87 *cq->mcq.arm_db = 0;
90 memset(cq->buf, 0, cq->buf_size); 88 memset(cq->buf, 0, cq->buf_size);
91 89
90 if (cq->is_tx == RX) {
91 if (mdev->dev->caps.comp_pool) {
92 if (!cq->vector) {
93 sprintf(name, "%s-rx-%d", priv->dev->name, cq->ring);

94 if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
95 cq->vector = (cq->ring + 1 + priv->port) %
96 mdev->dev->caps.num_comp_vectors;
97 mlx4_warn(mdev, "Failed assigning an EQ to "
98 "%s-rx-%d, falling back to legacy EQs\n",
99 priv->dev->name, cq->ring);
100 }
101 }
102 } else {
103 cq->vector = (cq->ring + 1 + priv->port) %
104 mdev->dev->caps.num_comp_vectors;
105 }
106 } else {
107 if (!cq->vector || !mdev->dev->caps.comp_pool) {
108 /* Fall back to the legacy pool in case of error */
109 cq->vector = 0;
110 }
111 }
112
92 if (!cq->is_tx) 113 if (!cq->is_tx)
93 cq->size = priv->rx_ring[cq->ring].actual_size; 114 cq->size = priv->rx_ring[cq->ring].actual_size;
94 115
@@ -112,12 +133,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
112 return 0; 133 return 0;
113} 134}
114 135
115void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 136void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
137 bool reserve_vectors)
116{ 138{
117 struct mlx4_en_dev *mdev = priv->mdev; 139 struct mlx4_en_dev *mdev = priv->mdev;
118 140
119 mlx4_en_unmap_buffer(&cq->wqres.buf); 141 mlx4_en_unmap_buffer(&cq->wqres.buf);
120 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 142 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
143 if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
144 mlx4_release_eq(priv->mdev->dev, cq->vector);
121 cq->buf_size = 0; 145 cq->buf_size = 0;
122 cq->buf = NULL; 146 cq->buf = NULL;
123} 147}
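
RX completion queues now prefer a dedicated interrupt vector from the new comp_pool (mlx4_assign_eq()), falling back to the legacy round-robin spread over num_comp_vectors when the pool is exhausted; TX CQs keep vector 0. mlx4_en_destroy_cq() releases the pool vector unless reserve_vectors is set, which a ring-size change uses to keep the assignment across the re-create. The selection policy, reduced to a sketch:

    /* simplified: pool_vector is 0 when no dedicated EQ was assigned */
    static int pick_rx_vector(int ring, int port,
                              int num_comp_vectors, int pool_vector)
    {
            if (pool_vector)
                    return pool_vector;
            return (ring + 1 + port) % num_comp_vectors;
    }
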
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 056152b3ff5..d54b7abf022 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -45,7 +45,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
45 struct mlx4_en_priv *priv = netdev_priv(dev); 45 struct mlx4_en_priv *priv = netdev_priv(dev);
46 struct mlx4_en_dev *mdev = priv->mdev; 46 struct mlx4_en_dev *mdev = priv->mdev;
47 47
48 sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id); 48 strncpy(drvinfo->driver, DRV_NAME, 32);
49 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); 49 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
50 sprintf(drvinfo->fw_version, "%d.%d.%d", 50 sprintf(drvinfo->fw_version, "%d.%d.%d",
51 (u16) (mdev->dev->caps.fw_ver >> 32), 51 (u16) (mdev->dev->caps.fw_ver >> 32),
@@ -131,8 +131,65 @@ static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
131static void mlx4_en_get_wol(struct net_device *netdev, 131static void mlx4_en_get_wol(struct net_device *netdev,
132 struct ethtool_wolinfo *wol) 132 struct ethtool_wolinfo *wol)
133{ 133{
134 wol->supported = 0; 134 struct mlx4_en_priv *priv = netdev_priv(netdev);
135 wol->wolopts = 0; 135 int err = 0;
136 u64 config = 0;
137
138 if (!priv->mdev->dev->caps.wol) {
139 wol->supported = 0;
140 wol->wolopts = 0;
141 return;
142 }
143
144 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
145 if (err) {
146 en_err(priv, "Failed to get WoL information\n");
147 return;
148 }
149
150 if (config & MLX4_EN_WOL_MAGIC)
151 wol->supported = WAKE_MAGIC;
152 else
153 wol->supported = 0;
154
155 if (config & MLX4_EN_WOL_ENABLED)
156 wol->wolopts = WAKE_MAGIC;
157 else
158 wol->wolopts = 0;
159}
160
161static int mlx4_en_set_wol(struct net_device *netdev,
162 struct ethtool_wolinfo *wol)
163{
164 struct mlx4_en_priv *priv = netdev_priv(netdev);
165 u64 config = 0;
166 int err = 0;
167
168 if (!priv->mdev->dev->caps.wol)
169 return -EOPNOTSUPP;
170
171 if (wol->supported & ~WAKE_MAGIC)
172 return -EINVAL;
173
174 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
175 if (err) {
176 en_err(priv, "Failed to get WoL info, unable to modify\n");
177 return err;
178 }
179
180 if (wol->wolopts & WAKE_MAGIC) {
181 config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
182 MLX4_EN_WOL_MAGIC;
183 } else {
184 config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
185 config |= MLX4_EN_WOL_DO_MODIFY;
186 }
187
188 err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
189 if (err)
190 en_err(priv, "Failed to set WoL information\n");
191
192 return err;
136} 193}
137 194
138static int mlx4_en_get_sset_count(struct net_device *dev, int sset) 195static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
@@ -388,7 +445,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
388 mlx4_en_stop_port(dev); 445 mlx4_en_stop_port(dev);
389 } 446 }
390 447
391 mlx4_en_free_resources(priv); 448 mlx4_en_free_resources(priv, true);
392 449
393 priv->prof->tx_ring_size = tx_size; 450 priv->prof->tx_ring_size = tx_size;
394 priv->prof->rx_ring_size = rx_size; 451 priv->prof->rx_ring_size = rx_size;
@@ -442,6 +499,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
442 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 499 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
443 .self_test = mlx4_en_self_test, 500 .self_test = mlx4_en_self_test,
444 .get_wol = mlx4_en_get_wol, 501 .get_wol = mlx4_en_get_wol,
502 .set_wol = mlx4_en_set_wol,
445 .get_msglevel = mlx4_en_get_msglevel, 503 .get_msglevel = mlx4_en_get_msglevel,
446 .set_msglevel = mlx4_en_set_msglevel, 504 .set_msglevel = mlx4_en_set_msglevel,
447 .get_coalesce = mlx4_en_get_coalesce, 505 .get_coalesce = mlx4_en_get_coalesce,
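
The set_wol path above is a read-modify-write on a 64-bit firmware configuration word. A self-contained sketch of just the bit manipulation follows; the bit positions are placeholders standing in for the driver's MLX4_EN_WOL_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions; the real MLX4_EN_WOL_* values live in the
 * driver headers and may differ. */
#define WOL_MAGIC     (1ULL << 61)
#define WOL_ENABLED   (1ULL << 62)
#define WOL_DO_MODIFY (1ULL << 63)

static uint64_t wol_set(uint64_t config, int enable_magic)
{
        if (enable_magic) {
                config |= WOL_DO_MODIFY | WOL_ENABLED | WOL_MAGIC;
        } else {
                config &= ~(WOL_ENABLED | WOL_MAGIC);
                config |= WOL_DO_MODIFY; /* tell FW this is an update */
        }
        return config;
}

int main(void)
{
        uint64_t config = 0;

        config = wol_set(config, 1);
        printf("enabled:  %#llx\n", (unsigned long long)config);
        config = wol_set(config, 0);
        printf("disabled: %#llx\n", (unsigned long long)config);
        return 0;
}
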
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 1ff6ca6466e..9317b61a75b 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -241,16 +241,18 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
241 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) 241 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
242 mdev->port_cnt++; 242 mdev->port_cnt++;
243 243
244 /* If we did not receive an explicit number of Rx rings, default to
245 * the number of completion vectors populated by the mlx4_core */
246 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 244 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
247 mlx4_info(mdev, "Using %d tx rings for port:%d\n", 245 if (!dev->caps.comp_pool) {
248 mdev->profile.prof[i].tx_ring_num, i); 246 mdev->profile.prof[i].rx_ring_num =
249 mdev->profile.prof[i].rx_ring_num = min_t(int, 247 rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
250 roundup_pow_of_two(dev->caps.num_comp_vectors), 248 min_t(int,
251 MAX_RX_RINGS); 249 dev->caps.num_comp_vectors,
252 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", 250 MAX_RX_RINGS)));
253 mdev->profile.prof[i].rx_ring_num, i); 251 } else {
252 mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
 253 min_t(int, dev->caps.comp_pool /
 254 dev->caps.num_ports - 1, MAX_MSIX_P_PORT - 1));
255 }
254 } 256 }
255 257
256 /* Create our own workqueue for reset/multicast tasks 258 /* Create our own workqueue for reset/multicast tasks
@@ -294,7 +296,7 @@ static struct mlx4_interface mlx4_en_interface = {
294 .remove = mlx4_en_remove, 296 .remove = mlx4_en_remove,
295 .event = mlx4_en_event, 297 .event = mlx4_en_event,
296 .get_dev = mlx4_en_get_netdev, 298 .get_dev = mlx4_en_get_netdev,
297 .protocol = MLX4_PROTOCOL_EN, 299 .protocol = MLX4_PROT_ETH,
298}; 300};
299 301
300static int __init mlx4_en_init(void) 302static int __init mlx4_en_init(void)
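
The default-ring computation above is easy to get wrong by a power of two; a compact sketch of both branches (legacy shared vectors vs. a per-port slice of the vector pool) is given below, with illustrative constants standing in for the driver's MIN_RX_RINGS/MAX_RX_RINGS/MAX_MSIX_P_PORT:

#include <stdio.h>

/* Illustrative values; the real constants come from mlx4_en.h. */
#define MIN_RX_RINGS    4
#define MAX_RX_RINGS    16
#define MAX_MSIX_P_PORT 17

static int rounddown_pow_of_two(int n)
{
        int p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

static int default_rx_rings(int num_comp_vectors, int comp_pool, int num_ports)
{
        if (!comp_pool)         /* legacy: share the global vectors */
                return rounddown_pow_of_two(max_int(MIN_RX_RINGS,
                                min_int(num_comp_vectors, MAX_RX_RINGS)));
        /* pooled: carve a per-port slice, keeping one vector spare */
        return rounddown_pow_of_two(min_int(comp_pool / num_ports - 1,
                                            MAX_MSIX_P_PORT - 1));
}

int main(void)
{
        printf("legacy, 6 vectors:  %d rings\n", default_rx_rings(6, 0, 2));
        printf("pooled, 32/2 ports: %d rings\n", default_rx_rings(4, 32, 2));
        return 0;
}
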
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 897f576b8b1..4f158baa024 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -156,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
156 mutex_lock(&mdev->state_lock); 156 mutex_lock(&mdev->state_lock);
157 if (priv->port_up) { 157 if (priv->port_up) {
158 /* Remove old MAC and insert the new one */ 158 /* Remove old MAC and insert the new one */
159 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 159 err = mlx4_replace_mac(mdev->dev, priv->port,
160 err = mlx4_register_mac(mdev->dev, priv->port, 160 priv->base_qpn, priv->mac, 0);
161 priv->mac, &priv->mac_index);
162 if (err) 161 if (err)
163 en_err(priv, "Failed changing HW MAC address\n"); 162 en_err(priv, "Failed changing HW MAC address\n");
164 } else 163 } else
@@ -214,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
214 struct mlx4_en_dev *mdev = priv->mdev; 213 struct mlx4_en_dev *mdev = priv->mdev;
215 struct net_device *dev = priv->dev; 214 struct net_device *dev = priv->dev;
216 u64 mcast_addr = 0; 215 u64 mcast_addr = 0;
216 u8 mc_list[16] = {0};
217 int err; 217 int err;
218 218
219 mutex_lock(&mdev->state_lock); 219 mutex_lock(&mdev->state_lock);
@@ -239,8 +239,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
239 priv->flags |= MLX4_EN_FLAG_PROMISC; 239 priv->flags |= MLX4_EN_FLAG_PROMISC;
240 240
 241 /* Enable promiscuous mode */ 241
242 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 242 if (!mdev->dev->caps.vep_uc_steering)
243 priv->base_qpn, 1); 243 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
244 priv->base_qpn, 1);
245 else
246 err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
247 priv->port);
244 if (err) 248 if (err)
245 en_err(priv, "Failed enabling " 249 en_err(priv, "Failed enabling "
246 "promiscous mode\n"); 250 "promiscous mode\n");
@@ -252,10 +256,21 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
252 en_err(priv, "Failed disabling " 256 en_err(priv, "Failed disabling "
253 "multicast filter\n"); 257 "multicast filter\n");
254 258
255 /* Disable port VLAN filter */ 259 /* Add the default qp number as multicast promisc */
256 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); 260 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
257 if (err) 261 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
258 en_err(priv, "Failed disabling VLAN filter\n"); 262 priv->port);
263 if (err)
264 en_err(priv, "Failed entering multicast promisc mode\n");
265 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
266 }
267
268 if (priv->vlgrp) {
269 /* Disable port VLAN filter */
270 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
271 if (err)
272 en_err(priv, "Failed disabling VLAN filter\n");
273 }
259 } 274 }
260 goto out; 275 goto out;
261 } 276 }
@@ -270,11 +285,24 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
270 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 285 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
271 286
 272 /* Disable promiscuous mode */ 287 /* Disable promiscuous mode */
273 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 288 if (!mdev->dev->caps.vep_uc_steering)
274 priv->base_qpn, 0); 289 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
290 priv->base_qpn, 0);
291 else
292 err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
293 priv->port);
275 if (err) 294 if (err)
 276 en_err(priv, "Failed disabling promiscuous mode\n"); 295 en_err(priv, "Failed disabling promiscuous mode\n");
277 296
297 /* Disable Multicast promisc */
298 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
299 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
300 priv->port);
301 if (err)
 302 en_err(priv, "Failed disabling multicast promiscuous mode\n");
303 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
304 }
305
278 /* Enable port VLAN filter */ 306 /* Enable port VLAN filter */
279 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 307 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
280 if (err) 308 if (err)
@@ -287,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
287 0, MLX4_MCAST_DISABLE); 315 0, MLX4_MCAST_DISABLE);
288 if (err) 316 if (err)
289 en_err(priv, "Failed disabling multicast filter\n"); 317 en_err(priv, "Failed disabling multicast filter\n");
318
319 /* Add the default qp number as multicast promisc */
320 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
321 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
322 priv->port);
323 if (err)
324 en_err(priv, "Failed entering multicast promisc mode\n");
325 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
326 }
290 } else { 327 } else {
291 int i; 328 int i;
329 /* Disable Multicast promisc */
330 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
331 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
332 priv->port);
333 if (err)
 334 en_err(priv, "Failed disabling multicast promiscuous mode\n");
335 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
336 }
292 337
293 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 338 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
294 0, MLX4_MCAST_DISABLE); 339 0, MLX4_MCAST_DISABLE);
295 if (err) 340 if (err)
296 en_err(priv, "Failed disabling multicast filter\n"); 341 en_err(priv, "Failed disabling multicast filter\n");
297 342
343 /* Detach our qp from all the multicast addresses */
344 for (i = 0; i < priv->mc_addrs_cnt; i++) {
345 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
346 mc_list[5] = priv->port;
347 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
348 mc_list, MLX4_PROT_ETH);
349 }
298 /* Flush mcast filter and init it with broadcast address */ 350 /* Flush mcast filter and init it with broadcast address */
299 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 351 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
300 1, MLX4_MCAST_CONFIG); 352 1, MLX4_MCAST_CONFIG);
@@ -307,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
307 for (i = 0; i < priv->mc_addrs_cnt; i++) { 359 for (i = 0; i < priv->mc_addrs_cnt; i++) {
308 mcast_addr = 360 mcast_addr =
309 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); 361 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
362 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
363 mc_list[5] = priv->port;
364 mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
365 mc_list, 0, MLX4_PROT_ETH);
310 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 366 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
311 mcast_addr, 0, MLX4_MCAST_CONFIG); 367 mcast_addr, 0, MLX4_MCAST_CONFIG);
312 } 368 }
@@ -314,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
314 0, MLX4_MCAST_ENABLE); 370 0, MLX4_MCAST_ENABLE);
315 if (err) 371 if (err)
316 en_err(priv, "Failed enabling multicast filter\n"); 372 en_err(priv, "Failed enabling multicast filter\n");
317
318 mlx4_en_clear_list(dev);
319 } 373 }
320out: 374out:
321 mutex_unlock(&mdev->state_lock); 375 mutex_unlock(&mdev->state_lock);
@@ -417,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
417 unsigned long avg_pkt_size; 471 unsigned long avg_pkt_size;
418 unsigned long rx_packets; 472 unsigned long rx_packets;
419 unsigned long rx_bytes; 473 unsigned long rx_bytes;
420 unsigned long rx_byte_diff;
421 unsigned long tx_packets; 474 unsigned long tx_packets;
422 unsigned long tx_pkt_diff; 475 unsigned long tx_pkt_diff;
423 unsigned long rx_pkt_diff; 476 unsigned long rx_pkt_diff;
@@ -441,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
441 rx_pkt_diff = ((unsigned long) (rx_packets - 494 rx_pkt_diff = ((unsigned long) (rx_packets -
442 priv->last_moder_packets)); 495 priv->last_moder_packets));
443 packets = max(tx_pkt_diff, rx_pkt_diff); 496 packets = max(tx_pkt_diff, rx_pkt_diff);
444 rx_byte_diff = rx_bytes - priv->last_moder_bytes;
445 rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
446 rate = packets * HZ / period; 497 rate = packets * HZ / period;
447 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 498 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
448 priv->last_moder_bytes)) / packets : 0; 499 priv->last_moder_bytes)) / packets : 0;
449 500
450 /* Apply auto-moderation only when packet rate exceeds a rate that 501 /* Apply auto-moderation only when packet rate exceeds a rate that
451 * it matters */ 502 * it matters */
452 if (rate > MLX4_EN_RX_RATE_THRESH) { 503 if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
453 /* If tx and rx packet rates are not balanced, assume that 504 /* If tx and rx packet rates are not balanced, assume that
454 * traffic is mainly BW bound and apply maximum moderation. 505 * traffic is mainly BW bound and apply maximum moderation.
455 * Otherwise, moderate according to packet rate */ 506 * Otherwise, moderate according to packet rate */
456 if (2 * tx_pkt_diff > 3 * rx_pkt_diff && 507 if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
457 rx_pkt_diff / rx_byte_diff < 508 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
458 MLX4_EN_SMALL_PKT_SIZE)
459 moder_time = priv->rx_usecs_low;
460 else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
461 moder_time = priv->rx_usecs_high; 509 moder_time = priv->rx_usecs_high;
462 else { 510 } else {
463 if (rate < priv->pkt_rate_low) 511 if (rate < priv->pkt_rate_low)
464 moder_time = priv->rx_usecs_low; 512 moder_time = priv->rx_usecs_low;
465 else if (rate > priv->pkt_rate_high) 513 else if (rate > priv->pkt_rate_high)
@@ -471,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
471 priv->rx_usecs_low; 519 priv->rx_usecs_low;
472 } 520 }
473 } else { 521 } else {
474 /* When packet rate is low, use default moderation rather than 522 moder_time = priv->rx_usecs_low;
475 * 0 to prevent interrupt storms if traffic suddenly increases */
476 moder_time = priv->rx_usecs;
477 } 523 }
478 524
479 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", 525 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
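
The simplified moderation policy above keeps two regimes: maximum moderation when TX/RX packet rates are unbalanced, and otherwise a value derived from the rate, with the in-between case written as a linear interpolation (the exact expression lives in the unchanged driver lines). A sketch of that decision, with placeholder tuning values:

#include <stdio.h>

/* Placeholder tuning values; the driver reads the real ones from
 * priv->pkt_rate_low/high and priv->rx_usecs_low/high. */
static unsigned long moder_time(unsigned long rate,
                                unsigned long tx_pkts, unsigned long rx_pkts,
                                unsigned long rate_lo, unsigned long rate_hi,
                                unsigned long usecs_lo, unsigned long usecs_hi)
{
        /* Unbalanced traffic is assumed BW-bound: moderate maximally. */
        if (2 * tx_pkts > 3 * rx_pkts || 2 * rx_pkts > 3 * tx_pkts)
                return usecs_hi;
        if (rate < rate_lo)
                return usecs_lo;
        if (rate > rate_hi)
                return usecs_hi;
        /* Linear interpolation between the two endpoints. */
        return (rate - rate_lo) * (usecs_hi - usecs_lo) /
               (rate_hi - rate_lo) + usecs_lo;
}

int main(void)
{
        printf("balanced, mid rate: %lu usecs\n",
               moder_time(50000, 100, 110, 10000, 100000, 16, 128));
        printf("unbalanced:         %lu usecs\n",
               moder_time(50000, 1000, 10, 10000, 100000, 16, 128));
        return 0;
}
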
@@ -565,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev)
565 int err = 0; 611 int err = 0;
566 int i; 612 int i;
567 int j; 613 int j;
614 u8 mc_list[16] = {0};
615 char name[32];
568 616
569 if (priv->port_up) { 617 if (priv->port_up) {
570 en_dbg(DRV, priv, "start port called while port already up\n"); 618 en_dbg(DRV, priv, "start port called while port already up\n");
@@ -603,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev)
603 ++rx_index; 651 ++rx_index;
604 } 652 }
605 653
654 /* Set port mac number */
655 en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
656 err = mlx4_register_mac(mdev->dev, priv->port,
657 priv->mac, &priv->base_qpn, 0);
658 if (err) {
659 en_err(priv, "Failed setting port mac\n");
660 goto cq_err;
661 }
662 mdev->mac_removed[priv->port] = 0;
663
606 err = mlx4_en_config_rss_steer(priv); 664 err = mlx4_en_config_rss_steer(priv);
607 if (err) { 665 if (err) {
608 en_err(priv, "Failed configuring rss steering\n"); 666 en_err(priv, "Failed configuring rss steering\n");
609 goto cq_err; 667 goto mac_err;
610 } 668 }
611 669
670 if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
 671 sprintf(name, "%s-tx", priv->dev->name);
 672 if (mlx4_assign_eq(mdev->dev, name, &priv->tx_vector)) {
 673 mlx4_warn(mdev, "Failed assigning an EQ to "
 674 "%s-tx, falling back to legacy "
 675 "EQs\n", priv->dev->name);
676 }
677 }
612 /* Configure tx cq's and rings */ 678 /* Configure tx cq's and rings */
613 for (i = 0; i < priv->tx_ring_num; i++) { 679 for (i = 0; i < priv->tx_ring_num; i++) {
614 /* Configure cq */ 680 /* Configure cq */
615 cq = &priv->tx_cq[i]; 681 cq = &priv->tx_cq[i];
682 cq->vector = priv->tx_vector;
616 err = mlx4_en_activate_cq(priv, cq); 683 err = mlx4_en_activate_cq(priv, cq);
617 if (err) { 684 if (err) {
618 en_err(priv, "Failed allocating Tx CQ\n"); 685 en_err(priv, "Failed allocating Tx CQ\n");
@@ -659,24 +726,25 @@ int mlx4_en_start_port(struct net_device *dev)
659 en_err(priv, "Failed setting default qp numbers\n"); 726 en_err(priv, "Failed setting default qp numbers\n");
660 goto tx_err; 727 goto tx_err;
661 } 728 }
662 /* Set port mac number */
663 en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
664 err = mlx4_register_mac(mdev->dev, priv->port,
665 priv->mac, &priv->mac_index);
666 if (err) {
667 en_err(priv, "Failed setting port mac\n");
668 goto tx_err;
669 }
670 mdev->mac_removed[priv->port] = 0;
671 729
672 /* Init port */ 730 /* Init port */
673 en_dbg(HW, priv, "Initializing port\n"); 731 en_dbg(HW, priv, "Initializing port\n");
674 err = mlx4_INIT_PORT(mdev->dev, priv->port); 732 err = mlx4_INIT_PORT(mdev->dev, priv->port);
675 if (err) { 733 if (err) {
676 en_err(priv, "Failed Initializing port\n"); 734 en_err(priv, "Failed Initializing port\n");
677 goto mac_err; 735 goto tx_err;
678 } 736 }
679 737
 738 /* Attach RX QP to broadcast address */
739 memset(&mc_list[10], 0xff, ETH_ALEN);
740 mc_list[5] = priv->port;
741 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
742 0, MLX4_PROT_ETH))
 743 mlx4_warn(mdev, "Failed attaching broadcast\n");
744
745 /* Must redo promiscuous mode setup. */
746 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
747
680 /* Schedule multicast task to populate multicast list */ 748 /* Schedule multicast task to populate multicast list */
681 queue_work(mdev->workqueue, &priv->mcast_task); 749 queue_work(mdev->workqueue, &priv->mcast_task);
682 750
@@ -684,8 +752,6 @@ int mlx4_en_start_port(struct net_device *dev)
684 netif_tx_start_all_queues(dev); 752 netif_tx_start_all_queues(dev);
685 return 0; 753 return 0;
686 754
687mac_err:
688 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
689tx_err: 755tx_err:
690 while (tx_index--) { 756 while (tx_index--) {
691 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); 757 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -693,6 +759,8 @@ tx_err:
693 } 759 }
694 760
695 mlx4_en_release_rss_steer(priv); 761 mlx4_en_release_rss_steer(priv);
762mac_err:
763 mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
696cq_err: 764cq_err:
697 while (rx_index--) 765 while (rx_index--)
698 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); 766 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -708,6 +776,7 @@ void mlx4_en_stop_port(struct net_device *dev)
708 struct mlx4_en_priv *priv = netdev_priv(dev); 776 struct mlx4_en_priv *priv = netdev_priv(dev);
709 struct mlx4_en_dev *mdev = priv->mdev; 777 struct mlx4_en_dev *mdev = priv->mdev;
710 int i; 778 int i;
779 u8 mc_list[16] = {0};
711 780
712 if (!priv->port_up) { 781 if (!priv->port_up) {
713 en_dbg(DRV, priv, "stop port called while port already down\n"); 782 en_dbg(DRV, priv, "stop port called while port already down\n");
@@ -722,8 +791,23 @@ void mlx4_en_stop_port(struct net_device *dev)
722 /* Set port as not active */ 791 /* Set port as not active */
723 priv->port_up = false; 792 priv->port_up = false;
724 793
 794 /* Detach all multicast addresses */
795 memset(&mc_list[10], 0xff, ETH_ALEN);
796 mc_list[5] = priv->port;
797 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
798 MLX4_PROT_ETH);
799 for (i = 0; i < priv->mc_addrs_cnt; i++) {
800 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
801 mc_list[5] = priv->port;
802 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
803 mc_list, MLX4_PROT_ETH);
804 }
805 mlx4_en_clear_list(dev);
806 /* Flush multicast filter */
807 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
808
725 /* Unregister Mac address for the port */ 809 /* Unregister Mac address for the port */
726 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 810 mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
727 mdev->mac_removed[priv->port] = 1; 811 mdev->mac_removed[priv->port] = 1;
728 812
729 /* Free TX Rings */ 813 /* Free TX Rings */
@@ -801,7 +885,6 @@ static int mlx4_en_open(struct net_device *dev)
801 priv->rx_ring[i].packets = 0; 885 priv->rx_ring[i].packets = 0;
802 } 886 }
803 887
804 mlx4_en_set_default_moderation(priv);
805 err = mlx4_en_start_port(dev); 888 err = mlx4_en_start_port(dev);
806 if (err) 889 if (err)
807 en_err(priv, "Failed starting port:%d\n", priv->port); 890 en_err(priv, "Failed starting port:%d\n", priv->port);
@@ -828,7 +911,7 @@ static int mlx4_en_close(struct net_device *dev)
828 return 0; 911 return 0;
829} 912}
830 913
831void mlx4_en_free_resources(struct mlx4_en_priv *priv) 914void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
832{ 915{
833 int i; 916 int i;
834 917
@@ -836,14 +919,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
836 if (priv->tx_ring[i].tx_info) 919 if (priv->tx_ring[i].tx_info)
837 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 920 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
838 if (priv->tx_cq[i].buf) 921 if (priv->tx_cq[i].buf)
839 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 922 mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
840 } 923 }
841 924
842 for (i = 0; i < priv->rx_ring_num; i++) { 925 for (i = 0; i < priv->rx_ring_num; i++) {
843 if (priv->rx_ring[i].rx_info) 926 if (priv->rx_ring[i].rx_info)
844 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); 927 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
845 if (priv->rx_cq[i].buf) 928 if (priv->rx_cq[i].buf)
846 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 929 mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
847 } 930 }
848} 931}
849 932
@@ -851,6 +934,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
851{ 934{
852 struct mlx4_en_port_profile *prof = priv->prof; 935 struct mlx4_en_port_profile *prof = priv->prof;
853 int i; 936 int i;
937 int base_tx_qpn, err;
938
939 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
940 if (err) {
941 en_err(priv, "failed reserving range for TX rings\n");
942 return err;
943 }
854 944
855 /* Create tx Rings */ 945 /* Create tx Rings */
856 for (i = 0; i < priv->tx_ring_num; i++) { 946 for (i = 0; i < priv->tx_ring_num; i++) {
@@ -858,7 +948,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
858 prof->tx_ring_size, i, TX)) 948 prof->tx_ring_size, i, TX))
859 goto err; 949 goto err;
860 950
861 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 951 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
862 prof->tx_ring_size, TXBB_SIZE)) 952 prof->tx_ring_size, TXBB_SIZE))
863 goto err; 953 goto err;
864 } 954 }
@@ -878,6 +968,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
878 968
879err: 969err:
880 en_err(priv, "Failed to allocate NIC resources\n"); 970 en_err(priv, "Failed to allocate NIC resources\n");
971 mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
881 return -ENOMEM; 972 return -ENOMEM;
882} 973}
883 974
@@ -905,7 +996,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
905 mdev->pndev[priv->port] = NULL; 996 mdev->pndev[priv->port] = NULL;
906 mutex_unlock(&mdev->state_lock); 997 mutex_unlock(&mdev->state_lock);
907 998
908 mlx4_en_free_resources(priv); 999 mlx4_en_free_resources(priv, false);
909 free_netdev(dev); 1000 free_netdev(dev);
910} 1001}
911 1002
@@ -932,7 +1023,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
932 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 1023 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
933 } else { 1024 } else {
934 mlx4_en_stop_port(dev); 1025 mlx4_en_stop_port(dev);
935 mlx4_en_set_default_moderation(priv);
936 err = mlx4_en_start_port(dev); 1026 err = mlx4_en_start_port(dev);
937 if (err) { 1027 if (err) {
938 en_err(priv, "Failed restarting port:%d\n", 1028 en_err(priv, "Failed restarting port:%d\n",
@@ -1079,7 +1169,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1079 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 1169 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
1080 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 1170 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
1081 1171
1172 /* Configure port */
1173 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1174 MLX4_EN_MIN_MTU,
1175 0, 0, 0, 0);
1176 if (err) {
1177 en_err(priv, "Failed setting port general configurations "
1178 "for port %d, with error %d\n", priv->port, err);
1179 goto out;
1180 }
1181
1182 /* Init port */
1183 en_warn(priv, "Initializing port\n");
1184 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1185 if (err) {
1186 en_err(priv, "Failed Initializing port\n");
1187 goto out;
1188 }
1082 priv->registered = 1; 1189 priv->registered = 1;
1190 mlx4_en_set_default_moderation(priv);
1083 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1191 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1084 return 0; 1192 return 0;
1085 1193
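
The multicast attach/detach calls introduced above build a 16-byte GID by hand: the Ethernet MAC lands in bytes 10..15 and the port number in byte 5. A standalone sketch of that layout (helper name is illustrative):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Build the 16-byte GID the steering code expects: MAC in bytes 10..15,
 * port number in byte 5, everything else zero. */
static void build_mc_gid(unsigned char gid[16],
                         const unsigned char mac[ETH_ALEN], int port)
{
        memset(gid, 0, 16);
        memcpy(&gid[10], mac, ETH_ALEN);
        gid[5] = (unsigned char)port;
}

int main(void)
{
        unsigned char bcast[ETH_ALEN];
        unsigned char gid[16];

        memset(bcast, 0xff, ETH_ALEN);      /* broadcast address */
        build_mc_gid(gid, bcast, 1);

        for (int i = 0; i < 16; i++)
                printf("%02x%s", gid[i], i == 15 ? "\n" : ":");
        return 0;
}
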
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 7f5a3221e0c..f2a4f5dd313 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -119,6 +119,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
119 struct mlx4_set_port_rqp_calc_context *context; 119 struct mlx4_set_port_rqp_calc_context *context;
120 int err; 120 int err;
121 u32 in_mod; 121 u32 in_mod;
122 u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
123
124 if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
125 return 0;
122 126
123 mailbox = mlx4_alloc_cmd_mailbox(dev); 127 mailbox = mlx4_alloc_cmd_mailbox(dev);
124 if (IS_ERR(mailbox)) 128 if (IS_ERR(mailbox))
@@ -127,8 +131,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
127 memset(context, 0, sizeof *context); 131 memset(context, 0, sizeof *context);
128 132
129 context->base_qpn = cpu_to_be32(base_qpn); 133 context->base_qpn = cpu_to_be32(base_qpn);
130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn); 134 context->n_mac = 0x7;
131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn); 135 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
136 base_qpn);
137 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
138 base_qpn);
132 context->intra_no_vlan = 0; 139 context->intra_no_vlan = 0;
133 context->no_vlan = MLX4_NO_VLAN_IDX; 140 context->no_vlan = MLX4_NO_VLAN_IDX;
134 context->intra_vlan_miss = 0; 141 context->intra_vlan_miss = 0;
@@ -206,7 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
206 } 213 }
207 stats->tx_packets = 0; 214 stats->tx_packets = 0;
208 stats->tx_bytes = 0; 215 stats->tx_bytes = 0;
209 for (i = 0; i <= priv->tx_ring_num; i++) { 216 for (i = 0; i < priv->tx_ring_num; i++) {
210 stats->tx_packets += priv->tx_ring[i].packets; 217 stats->tx_packets += priv->tx_ring[i].packets;
211 stats->tx_bytes += priv->tx_ring[i].bytes; 218 stats->tx_bytes += priv->tx_ring[i].bytes;
212 } 219 }
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index 092e814b198..e3d73e41c56 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,8 +36,8 @@
36 36
37 37
38#define SET_PORT_GEN_ALL_VALID 0x7 38#define SET_PORT_GEN_ALL_VALID 0x7
39#define SET_PORT_PROMISC_EN_SHIFT 31 39#define SET_PORT_PROMISC_SHIFT 31
40#define SET_PORT_PROMISC_MODE_SHIFT 30 40#define SET_PORT_MC_PROMISC_SHIFT 30
41 41
42enum { 42enum {
43 MLX4_CMD_SET_VLAN_FLTR = 0x47, 43 MLX4_CMD_SET_VLAN_FLTR = 0x47,
@@ -45,6 +45,12 @@ enum {
45 MLX4_CMD_DUMP_ETH_STATS = 0x49, 45 MLX4_CMD_DUMP_ETH_STATS = 0x49,
46}; 46};
47 47
48enum {
49 MCAST_DIRECT_ONLY = 0,
50 MCAST_DIRECT = 1,
51 MCAST_DEFAULT = 2
52};
53
48struct mlx4_set_port_general_context { 54struct mlx4_set_port_general_context {
49 u8 reserved[3]; 55 u8 reserved[3];
50 u8 flags; 56 u8 flags;
@@ -60,14 +66,17 @@ struct mlx4_set_port_general_context {
60 66
61struct mlx4_set_port_rqp_calc_context { 67struct mlx4_set_port_rqp_calc_context {
62 __be32 base_qpn; 68 __be32 base_qpn;
 63 __be32 flags; 69 u8 reserved;
64 u8 reserved[3]; 70 u8 n_mac;
71 u8 n_vlan;
72 u8 n_prio;
73 u8 reserved2[3];
65 u8 mac_miss; 74 u8 mac_miss;
66 u8 intra_no_vlan; 75 u8 intra_no_vlan;
67 u8 no_vlan; 76 u8 no_vlan;
68 u8 intra_vlan_miss; 77 u8 intra_vlan_miss;
69 u8 vlan_miss; 78 u8 vlan_miss;
70 u8 reserved2[3]; 79 u8 reserved3[3];
71 u8 no_vlan_prio; 80 u8 no_vlan_prio;
72 __be32 promisc; 81 __be32 promisc;
73 __be32 mcast; 82 __be32 mcast;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 570f2508fb3..05998ee297c 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -845,16 +845,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
845 } 845 }
846 846
847 /* Configure RSS indirection qp */ 847 /* Configure RSS indirection qp */
848 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
849 if (err) {
850 en_err(priv, "Failed to reserve range for RSS "
851 "indirection qp\n");
852 goto rss_err;
853 }
854 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); 848 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
855 if (err) { 849 if (err) {
856 en_err(priv, "Failed to allocate RSS indirection QP\n"); 850 en_err(priv, "Failed to allocate RSS indirection QP\n");
857 goto reserve_err; 851 goto rss_err;
858 } 852 }
859 rss_map->indir_qp.event = mlx4_en_sqp_event; 853 rss_map->indir_qp.event = mlx4_en_sqp_event;
860 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 854 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
@@ -881,8 +875,6 @@ indir_err:
881 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 875 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
882 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 876 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
883 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 877 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
884reserve_err:
885 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
886rss_err: 878rss_err:
887 for (i = 0; i < good_qps; i++) { 879 for (i = 0; i < good_qps; i++) {
888 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], 880 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -904,7 +896,6 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
904 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 896 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
905 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 897 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
906 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 898 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
907 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
908 899
909 for (i = 0; i < priv->rx_ring_num; i++) { 900 for (i = 0; i < priv->rx_ring_num; i++) {
910 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], 901 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index a680cd4a5ab..01feb8fd42a 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -44,6 +44,7 @@
44 44
45enum { 45enum {
46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ 46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
47 MAX_BF = 256,
47}; 48};
48 49
49static int inline_thold __read_mostly = MAX_INLINE; 50static int inline_thold __read_mostly = MAX_INLINE;
@@ -52,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444);
52MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); 53MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
53 54
54int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 55int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
55 struct mlx4_en_tx_ring *ring, u32 size, 56 struct mlx4_en_tx_ring *ring, int qpn, u32 size,
56 u16 stride) 57 u16 stride)
57{ 58{
58 struct mlx4_en_dev *mdev = priv->mdev; 59 struct mlx4_en_dev *mdev = priv->mdev;
@@ -103,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
103 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 104 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
104 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 105 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
105 106
106 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); 107 ring->qpn = qpn;
107 if (err) {
108 en_err(priv, "Failed reserving qp for tx ring.\n");
109 goto err_map;
110 }
111
112 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 108 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
113 if (err) { 109 if (err) {
114 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 110 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
115 goto err_reserve; 111 goto err_map;
116 } 112 }
117 ring->qp.event = mlx4_en_sqp_event; 113 ring->qp.event = mlx4_en_sqp_event;
118 114
115 err = mlx4_bf_alloc(mdev->dev, &ring->bf);
116 if (err) {
117 en_dbg(DRV, priv, "working without blueflame (%d)", err);
118 ring->bf.uar = &mdev->priv_uar;
119 ring->bf.uar->map = mdev->uar_map;
120 ring->bf_enabled = false;
121 } else
122 ring->bf_enabled = true;
123
119 return 0; 124 return 0;
120 125
121err_reserve:
122 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
123err_map: 126err_map:
124 mlx4_en_unmap_buffer(&ring->wqres.buf); 127 mlx4_en_unmap_buffer(&ring->wqres.buf);
125err_hwq_res: 128err_hwq_res:
@@ -139,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
139 struct mlx4_en_dev *mdev = priv->mdev; 142 struct mlx4_en_dev *mdev = priv->mdev;
140 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 143 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
141 144
145 if (ring->bf_enabled)
146 mlx4_bf_free(mdev->dev, &ring->bf);
142 mlx4_qp_remove(mdev->dev, &ring->qp); 147 mlx4_qp_remove(mdev->dev, &ring->qp);
143 mlx4_qp_free(mdev->dev, &ring->qp); 148 mlx4_qp_free(mdev->dev, &ring->qp);
144 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); 149 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
@@ -171,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
171 176
172 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 177 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
173 ring->cqn, &ring->context); 178 ring->cqn, &ring->context);
179 if (ring->bf_enabled)
180 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
174 181
175 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 182 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
176 &ring->qp, &ring->qp_state); 183 &ring->qp, &ring->qp_state);
@@ -591,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
591 return skb_tx_hash(dev, skb); 598 return skb_tx_hash(dev, skb);
592} 599}
593 600
601static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
602{
603 __iowrite64_copy(dst, src, bytecnt / 8);
604}
605
594netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 606netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
595{ 607{
596 struct mlx4_en_priv *priv = netdev_priv(dev); 608 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -609,12 +621,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
609 int desc_size; 621 int desc_size;
610 int real_size; 622 int real_size;
611 dma_addr_t dma; 623 dma_addr_t dma;
612 u32 index; 624 u32 index, bf_index;
613 __be32 op_own; 625 __be32 op_own;
614 u16 vlan_tag = 0; 626 u16 vlan_tag = 0;
615 int i; 627 int i;
616 int lso_header_size; 628 int lso_header_size;
617 void *fragptr; 629 void *fragptr;
630 bool bounce = false;
618 631
619 if (!priv->port_up) 632 if (!priv->port_up)
620 goto tx_drop; 633 goto tx_drop;
@@ -657,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
657 670
658 /* Packet is good - grab an index and transmit it */ 671 /* Packet is good - grab an index and transmit it */
659 index = ring->prod & ring->size_mask; 672 index = ring->prod & ring->size_mask;
673 bf_index = ring->prod;
660 674
661 /* See if we have enough space for whole descriptor TXBB for setting 675 /* See if we have enough space for whole descriptor TXBB for setting
662 * SW ownership on next descriptor; if not, use a bounce buffer. */ 676 * SW ownership on next descriptor; if not, use a bounce buffer. */
663 if (likely(index + nr_txbb <= ring->size)) 677 if (likely(index + nr_txbb <= ring->size))
664 tx_desc = ring->buf + index * TXBB_SIZE; 678 tx_desc = ring->buf + index * TXBB_SIZE;
665 else 679 else {
666 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; 680 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
681 bounce = true;
682 }
667 683
668 /* Save skb in tx_info ring */ 684 /* Save skb in tx_info ring */
669 tx_info = &ring->tx_info[index]; 685 tx_info = &ring->tx_info[index];
@@ -768,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
768 ring->prod += nr_txbb; 784 ring->prod += nr_txbb;
769 785
770 /* If we used a bounce buffer then copy descriptor back into place */ 786 /* If we used a bounce buffer then copy descriptor back into place */
771 if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) 787 if (bounce)
772 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 788 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
773 789
774 /* Run destructor before passing skb to HW */ 790 /* Run destructor before passing skb to HW */
775 if (likely(!skb_shared(skb))) 791 if (likely(!skb_shared(skb)))
776 skb_orphan(skb); 792 skb_orphan(skb);
777 793
 778 /* Ensure new descriptor hits memory 794 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
779 * before setting ownership of this descriptor to HW */ 795 *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
780 wmb(); 796 op_own |= htonl((bf_index & 0xffff) << 8);
 781 tx_desc->ctrl.owner_opcode = op_own; 797 /* Ensure new descriptor hits memory
798 * before setting ownership of this descriptor to HW */
799 wmb();
800 tx_desc->ctrl.owner_opcode = op_own;
782 801
783 /* Ring doorbell! */ 802 wmb();
784 wmb(); 803
785 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); 804 mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
805 desc_size);
806
807 wmb();
808
809 ring->bf.offset ^= ring->bf.buf_size;
810 } else {
 811 /* Ensure new descriptor hits memory
812 * before setting ownership of this descriptor to HW */
813 wmb();
814 tx_desc->ctrl.owner_opcode = op_own;
815 wmb();
816 writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
817 }
786 818
787 /* Poll CQ here */ 819 /* Poll CQ here */
788 mlx4_en_xmit_poll(priv, tx_ind); 820 mlx4_en_xmit_poll(priv, tx_ind);
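
The transmit path now has two doorbell flavors: a BlueFlame copy of the whole descriptor into a write-combining register when it is small enough, untagged, and not bounced, or the classic doorbell write. A userspace sketch of just the decision and the double-buffer offset toggle; the real copy is __iowrite64_copy() into mapped device memory, modeled here with memcpy:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BF 256

struct bf_state {
        unsigned offset;    /* toggles between the two BF buffers */
        unsigned buf_size;
};

static bool use_blueflame(bool bf_enabled, int desc_size,
                          bool bounce, uint16_t vlan_tag)
{
        return bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag;
}

/* Stand-in for __iowrite64_copy(): copy the descriptor in 8-byte chunks. */
static void bf_copy(uint64_t *dst, const uint64_t *src, unsigned bytecnt)
{
        memcpy(dst, src, bytecnt & ~7u);
}

int main(void)
{
        struct bf_state bf = { .offset = 0, .buf_size = 512 };
        uint64_t reg[128] = {0}, desc[16] = { 0x1ULL };

        if (use_blueflame(true, (int)sizeof(desc), false, 0)) {
                bf_copy(reg + bf.offset / 8, desc, sizeof(desc));
                bf.offset ^= bf.buf_size;   /* alternate buffers */
        }
        printf("next BF offset: %u\n", bf.offset);
        return 0;
}
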
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 552d0fce6f6..506cfd0372e 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -42,7 +42,7 @@
42#include "fw.h" 42#include "fw.h"
43 43
44enum { 44enum {
45 MLX4_IRQNAME_SIZE = 64 45 MLX4_IRQNAME_SIZE = 32
46}; 46};
47 47
48enum { 48enum {
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
317 * we need to map, take the difference of highest index and 317 * we need to map, take the difference of highest index and
318 * the lowest index we'll use and add 1. 318 * the lowest index we'll use and add 1.
319 */ 319 */
320 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - 320 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
321 dev->caps.reserved_eqs / 4 + 1; 321 dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
322} 322}
323 323
324static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) 324static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
496static void mlx4_free_irqs(struct mlx4_dev *dev) 496static void mlx4_free_irqs(struct mlx4_dev *dev)
497{ 497{
498 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; 498 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
499 int i; 499 struct mlx4_priv *priv = mlx4_priv(dev);
500 int i, vec;
500 501
501 if (eq_table->have_irq) 502 if (eq_table->have_irq)
502 free_irq(dev->pdev->irq, dev); 503 free_irq(dev->pdev->irq, dev);
504
503 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 505 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
504 if (eq_table->eq[i].have_irq) { 506 if (eq_table->eq[i].have_irq) {
505 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 507 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
506 eq_table->eq[i].have_irq = 0; 508 eq_table->eq[i].have_irq = 0;
507 } 509 }
508 510
511 for (i = 0; i < dev->caps.comp_pool; i++) {
512 /*
 513 * Free the assigned IRQs;
 514 * all bits should be 0, but we need to validate
515 */
516 if (priv->msix_ctl.pool_bm & 1ULL << i) {
 517 /* No need for locking here */
518 vec = dev->caps.num_comp_vectors + 1 + i;
519 free_irq(priv->eq_table.eq[vec].irq,
520 &priv->eq_table.eq[vec]);
521 }
522 }
523
524
509 kfree(eq_table->irq_names); 525 kfree(eq_table->irq_names);
510} 526}
511 527
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
578 (priv->eq_table.inta_pin < 32 ? 4 : 0); 594 (priv->eq_table.inta_pin < 32 ? 4 : 0);
579 595
580 priv->eq_table.irq_names = 596 priv->eq_table.irq_names =
581 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), 597 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
598 dev->caps.comp_pool),
582 GFP_KERNEL); 599 GFP_KERNEL);
583 if (!priv->eq_table.irq_names) { 600 if (!priv->eq_table.irq_names) {
584 err = -ENOMEM; 601 err = -ENOMEM;
@@ -601,6 +618,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
601 if (err) 618 if (err)
602 goto err_out_comp; 619 goto err_out_comp;
603 620
 621 /* If the additional completion vector pool size is 0, this loop will not run */
622 for (i = dev->caps.num_comp_vectors + 1;
623 i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
624
625 err = mlx4_create_eq(dev, dev->caps.num_cqs -
626 dev->caps.reserved_cqs +
627 MLX4_NUM_SPARE_EQE,
628 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
629 &priv->eq_table.eq[i]);
630 if (err) {
631 --i;
632 goto err_out_unmap;
633 }
634 }
635
636
604 if (dev->flags & MLX4_FLAG_MSI_X) { 637 if (dev->flags & MLX4_FLAG_MSI_X) {
605 const char *eq_name; 638 const char *eq_name;
606 639
@@ -686,7 +719,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
686 719
687 mlx4_free_irqs(dev); 720 mlx4_free_irqs(dev);
688 721
689 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 722 for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
690 mlx4_free_eq(dev, &priv->eq_table.eq[i]); 723 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
691 724
692 mlx4_unmap_clr_int(dev); 725 mlx4_unmap_clr_int(dev);
@@ -743,3 +776,65 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
743 return err; 776 return err;
744} 777}
745EXPORT_SYMBOL(mlx4_test_interrupts); 778EXPORT_SYMBOL(mlx4_test_interrupts);
779
 780int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
781{
782
783 struct mlx4_priv *priv = mlx4_priv(dev);
784 int vec = 0, err = 0, i;
785
786 spin_lock(&priv->msix_ctl.pool_lock);
787 for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
788 if (~priv->msix_ctl.pool_bm & 1ULL << i) {
789 priv->msix_ctl.pool_bm |= 1ULL << i;
790 vec = dev->caps.num_comp_vectors + 1 + i;
791 snprintf(priv->eq_table.irq_names +
792 vec * MLX4_IRQNAME_SIZE,
793 MLX4_IRQNAME_SIZE, "%s", name);
794 err = request_irq(priv->eq_table.eq[vec].irq,
795 mlx4_msi_x_interrupt, 0,
796 &priv->eq_table.irq_names[vec<<5],
797 priv->eq_table.eq + vec);
798 if (err) {
 799 /* Zero out the bit by flipping it */
 800 priv->msix_ctl.pool_bm ^= 1ULL << i;
 801 vec = 0;
 802 continue;
 803 /* We don't want to break here */
804 }
805 eq_set_ci(&priv->eq_table.eq[vec], 1);
806 }
807 }
808 spin_unlock(&priv->msix_ctl.pool_lock);
809
810 if (vec) {
811 *vector = vec;
812 } else {
813 *vector = 0;
814 err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
815 }
816 return err;
817}
818EXPORT_SYMBOL(mlx4_assign_eq);
819
820void mlx4_release_eq(struct mlx4_dev *dev, int vec)
821{
822 struct mlx4_priv *priv = mlx4_priv(dev);
 823 /* Bitmap index */
824 int i = vec - dev->caps.num_comp_vectors - 1;
825
826 if (likely(i >= 0)) {
 827 /* Sanity check, making sure we're not trying to free IRQs
 828 belonging to a legacy EQ */
829 spin_lock(&priv->msix_ctl.pool_lock);
830 if (priv->msix_ctl.pool_bm & 1ULL << i) {
831 free_irq(priv->eq_table.eq[vec].irq,
832 &priv->eq_table.eq[vec]);
833 priv->msix_ctl.pool_bm &= ~(1ULL << i);
834 }
835 spin_unlock(&priv->msix_ctl.pool_lock);
836 }
837
838}
839EXPORT_SYMBOL(mlx4_release_eq);
840
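
mlx4_assign_eq()/mlx4_release_eq() above manage the extra vectors with a 64-bit bitmap under a spinlock. The allocation idea in isolation, as a small sketch (a plain variable stands in for the locked pool_bm, and the IRQ request/free is elided):

#include <stdint.h>
#include <stdio.h>

/* Find a free slot in a 64-bit bitmap, mark it used, and translate it to
 * a vector index past the legacy vectors; returns -1 when the pool is
 * exhausted. */
static int assign_vec(uint64_t *pool_bm, int comp_pool, int num_comp_vectors)
{
        for (int i = 0; i < comp_pool && i < 64; i++) {
                if (!(*pool_bm & (1ULL << i))) {
                        *pool_bm |= 1ULL << i;
                        return num_comp_vectors + 1 + i;
                }
        }
        return -1;
}

static void release_vec(uint64_t *pool_bm, int vec, int num_comp_vectors)
{
        int i = vec - num_comp_vectors - 1;

        if (i >= 0 && i < 64)
                *pool_bm &= ~(1ULL << i);
}

int main(void)
{
        uint64_t bm = 0;
        int v1 = assign_vec(&bm, 4, 8);
        int v2 = assign_vec(&bm, 4, 8);

        printf("got vectors %d and %d, bm=%#llx\n",
               v1, v2, (unsigned long long)bm);
        release_vec(&bm, v1, 8);
        printf("after release, bm=%#llx\n", (unsigned long long)bm);
        return 0;
}
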
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 5de1db89783..67a209ba939 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -274,8 +274,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
274 dev_cap->stat_rate_support = stat_rate; 274 dev_cap->stat_rate_support = stat_rate;
275 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); 275 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
276 dev_cap->udp_rss = field & 0x1; 276 dev_cap->udp_rss = field & 0x1;
277 dev_cap->vep_uc_steering = field & 0x2;
278 dev_cap->vep_mc_steering = field & 0x4;
277 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); 279 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
278 dev_cap->loopback_support = field & 0x1; 280 dev_cap->loopback_support = field & 0x1;
281 dev_cap->wol = field & 0x40;
279 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 282 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
280 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 283 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
281 dev_cap->reserved_uars = field >> 4; 284 dev_cap->reserved_uars = field >> 4;
@@ -737,6 +740,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
737#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 740#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
738#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 741#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
739#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 742#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
743#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
740#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 744#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
741#define INIT_HCA_TPT_OFFSET 0x0f0 745#define INIT_HCA_TPT_OFFSET 0x0f0
742#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 746#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
@@ -797,6 +801,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
797 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 801 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
798 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 802 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
799 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 803 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
804 if (dev->caps.vep_mc_steering)
805 MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
800 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 806 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
801 807
802 /* TPT attributes */ 808 /* TPT attributes */
@@ -908,3 +914,22 @@ int mlx4_NOP(struct mlx4_dev *dev)
908 /* Input modifier of 0x1f means "finish as soon as possible." */ 914 /* Input modifier of 0x1f means "finish as soon as possible." */
909 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); 915 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
910} 916}
917
918#define MLX4_WOL_SETUP_MODE (5 << 28)
919int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
920{
921 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
922
923 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
924 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
925}
926EXPORT_SYMBOL_GPL(mlx4_wol_read);
927
928int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
929{
930 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
931
932 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
933 MLX4_CMD_TIME_CLASS_A);
934}
935EXPORT_SYMBOL_GPL(mlx4_wol_write);
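
The two WoL helpers above differ only in direction; both encode the target in the command's input modifier, with the opcode modifier (0x3 vs. 0x1 in the code above) selecting read or write. A sketch of the in_mod encoding:

#include <stdint.h>
#include <stdio.h>

#define MLX4_WOL_SETUP_MODE (5u << 28)

/* Input modifier layout used by mlx4_wol_read()/mlx4_wol_write():
 * setup mode in the top bits, port number shifted into bits 8..15. */
static uint32_t wol_in_mod(int port)
{
        return MLX4_WOL_SETUP_MODE | (uint32_t)port << 8;
}

int main(void)
{
        for (int port = 1; port <= 2; port++)
                printf("port %d -> in_mod %#x\n", port, wol_in_mod(port));
        return 0;
}
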
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 65cc72eb899..88003ebc618 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -80,6 +80,9 @@ struct mlx4_dev_cap {
80 u16 stat_rate_support; 80 u16 stat_rate_support;
81 int udp_rss; 81 int udp_rss;
82 int loopback_support; 82 int loopback_support;
83 int vep_uc_steering;
84 int vep_mc_steering;
85 int wol;
83 u32 flags; 86 u32 flags;
84 int reserved_uars; 87 int reserved_uars;
85 int uar_size; 88 int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 4ffdc18fcb8..62fa7eec5f0 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -39,6 +39,7 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/io-mapping.h>
42 43
43#include <linux/mlx4/device.h> 44#include <linux/mlx4/device.h>
44#include <linux/mlx4/doorbell.h> 45#include <linux/mlx4/doorbell.h>
@@ -227,6 +228,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
227 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 228 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
228 dev->caps.udp_rss = dev_cap->udp_rss; 229 dev->caps.udp_rss = dev_cap->udp_rss;
229 dev->caps.loopback_support = dev_cap->loopback_support; 230 dev->caps.loopback_support = dev_cap->loopback_support;
231 dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
232 dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
233 dev->caps.wol = dev_cap->wol;
230 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 234 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
231 235
232 dev->caps.log_num_macs = log_num_mac; 236 dev->caps.log_num_macs = log_num_mac;
@@ -718,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
718 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 722 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
719} 723}
720 724
725static int map_bf_area(struct mlx4_dev *dev)
726{
727 struct mlx4_priv *priv = mlx4_priv(dev);
728 resource_size_t bf_start;
729 resource_size_t bf_len;
730 int err = 0;
731
732 bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
733 bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
734 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
735 if (!priv->bf_mapping)
736 err = -ENOMEM;
737
738 return err;
739}
740
741static void unmap_bf_area(struct mlx4_dev *dev)
742{
743 if (mlx4_priv(dev)->bf_mapping)
744 io_mapping_free(mlx4_priv(dev)->bf_mapping);
745}
746
721static void mlx4_close_hca(struct mlx4_dev *dev) 747static void mlx4_close_hca(struct mlx4_dev *dev)
722{ 748{
749 unmap_bf_area(dev);
723 mlx4_CLOSE_HCA(dev, 0); 750 mlx4_CLOSE_HCA(dev, 0);
724 mlx4_free_icms(dev); 751 mlx4_free_icms(dev);
725 mlx4_UNMAP_FA(dev); 752 mlx4_UNMAP_FA(dev);
@@ -772,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
772 goto err_stop_fw; 799 goto err_stop_fw;
773 } 800 }
774 801
802 if (map_bf_area(dev))
803 mlx4_dbg(dev, "Failed to map blue flame area\n");
804
775 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 805 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
776 806
777 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 807 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
@@ -802,6 +832,7 @@ err_free_icm:
802 mlx4_free_icms(dev); 832 mlx4_free_icms(dev);
803 833
804err_stop_fw: 834err_stop_fw:
835 unmap_bf_area(dev);
805 mlx4_UNMAP_FA(dev); 836 mlx4_UNMAP_FA(dev);
806 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 837 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
807 838
@@ -969,13 +1000,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
969{ 1000{
970 struct mlx4_priv *priv = mlx4_priv(dev); 1001 struct mlx4_priv *priv = mlx4_priv(dev);
971 struct msix_entry *entries; 1002 struct msix_entry *entries;
972 int nreq; 1003 int nreq = min_t(int, dev->caps.num_ports *
1004 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
1005 + MSIX_LEGACY_SZ, MAX_MSIX);
973 int err; 1006 int err;
974 int i; 1007 int i;
975 1008
976 if (msi_x) { 1009 if (msi_x) {
977 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 1010 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
978 num_possible_cpus() + 1); 1011 nreq);
979 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1012 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
980 if (!entries) 1013 if (!entries)
981 goto no_msi; 1014 goto no_msi;
@@ -998,7 +1031,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
998 goto no_msi; 1031 goto no_msi;
999 } 1032 }
1000 1033
1001 dev->caps.num_comp_vectors = nreq - 1; 1034 if (nreq <
1035 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
 1036 /* Working in legacy mode, all EQs shared */
1037 dev->caps.comp_pool = 0;
1038 dev->caps.num_comp_vectors = nreq - 1;
1039 } else {
1040 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
1041 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
1042 }
1002 for (i = 0; i < nreq; ++i) 1043 for (i = 0; i < nreq; ++i)
1003 priv->eq_table.eq[i].irq = entries[i].vector; 1044 priv->eq_table.eq[i].irq = entries[i].vector;
1004 1045
@@ -1010,6 +1051,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1010 1051
1011no_msi: 1052no_msi:
1012 dev->caps.num_comp_vectors = 1; 1053 dev->caps.num_comp_vectors = 1;
1054 dev->caps.comp_pool = 0;
1013 1055
1014 for (i = 0; i < 2; ++i) 1056 for (i = 0; i < 2; ++i)
1015 priv->eq_table.eq[i].irq = dev->pdev->irq; 1057 priv->eq_table.eq[i].irq = dev->pdev->irq;
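
The new request size and its split into legacy vectors plus a pool can be traced with a few lines: below MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT everything stays shared, otherwise the remainder past the legacy block becomes the pool. A sketch with illustrative constants (the real ones come from the mlx4 headers):

#include <stdio.h>

/* Illustrative values only. */
#define MSIX_LEGACY_SZ   4
#define MIN_MSIX_P_PORT  2
#define MAX_MSIX_P_PORT  17
#define MAX_MSIX         64

static int min_int(int a, int b) { return a < b ? a : b; }

static void split_vectors(int nreq, int num_ports,
                          int *comp_pool, int *num_comp_vectors)
{
        if (nreq < MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT) {
                *comp_pool = 0;             /* legacy mode, all EQs shared */
                *num_comp_vectors = nreq - 1;
        } else {
                *comp_pool = nreq - MSIX_LEGACY_SZ;
                *num_comp_vectors = MSIX_LEGACY_SZ - 1;
        }
}

int main(void)
{
        int cpus = 8, ports = 2;
        int nreq = min_int(ports * min_int(cpus + 1, MAX_MSIX_P_PORT)
                           + MSIX_LEGACY_SZ, MAX_MSIX);
        int pool, vecs;

        split_vectors(nreq, ports, &pool, &vecs);
        printf("nreq=%d pool=%d legacy_vectors=%d\n", nreq, pool, vecs);
        return 0;
}
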
@@ -1049,6 +1091,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
1049 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1091 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
1050} 1092}
1051 1093
1094static int mlx4_init_steering(struct mlx4_dev *dev)
1095{
1096 struct mlx4_priv *priv = mlx4_priv(dev);
1097 int num_entries = dev->caps.num_ports;
1098 int i, j;
1099
1100 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
1101 if (!priv->steer)
1102 return -ENOMEM;
1103
1104 for (i = 0; i < num_entries; i++) {
1105 for (j = 0; j < MLX4_NUM_STEERS; j++) {
1106 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
1107 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
1108 }
1109 INIT_LIST_HEAD(&priv->steer[i].high_prios);
1110 }
1111 return 0;
1112}
1113
1114static void mlx4_clear_steering(struct mlx4_dev *dev)
1115{
1116 struct mlx4_priv *priv = mlx4_priv(dev);
1117 struct mlx4_steer_index *entry, *tmp_entry;
1118 struct mlx4_promisc_qp *pqp, *tmp_pqp;
1119 int num_entries = dev->caps.num_ports;
1120 int i, j;
1121
1122 for (i = 0; i < num_entries; i++) {
1123 for (j = 0; j < MLX4_NUM_STEERS; j++) {
1124 list_for_each_entry_safe(pqp, tmp_pqp,
1125 &priv->steer[i].promisc_qps[j],
1126 list) {
1127 list_del(&pqp->list);
1128 kfree(pqp);
1129 }
1130 list_for_each_entry_safe(entry, tmp_entry,
1131 &priv->steer[i].steer_entries[j],
1132 list) {
1133 list_del(&entry->list);
1134 list_for_each_entry_safe(pqp, tmp_pqp,
1135 &entry->duplicates,
1136 list) {
1137 list_del(&pqp->list);
1138 kfree(pqp);
1139 }
1140 kfree(entry);
1141 }
1142 }
1143 }
1144 kfree(priv->steer);
1145}
1146
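
The teardown above frees each steering entry's duplicates list before the entry itself. A self-contained user-space analogue of that ownership pattern (names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct dup { struct dup *next; unsigned qpn; };
struct entry { struct entry *next; unsigned index; struct dup *dups; };

static void clear_entries(struct entry *head)
{
	while (head) {
		struct entry *e = head;

		head = head->next;
		while (e->dups) {        /* free duplicates first */
			struct dup *d = e->dups;

			e->dups = d->next;
			free(d);
		}
		free(e);                 /* then the entry itself */
	}
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));
	struct dup *d = calloc(1, sizeof(*d));

	if (!e || !d)
		return 1;
	d->qpn = 0x42;
	e->index = 7;
	e->dups = d;
	clear_entries(e);
	puts("steering lists torn down");
	return 0;
}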
1052static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1147static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1053{ 1148{
1054 struct mlx4_priv *priv; 1149 struct mlx4_priv *priv;
@@ -1109,6 +1204,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1109 } 1204 }
1110 } 1205 }
1111 1206
1207 /* Allow large DMA segments, up to the firmware limit of 1 GB */
1208 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
1209
1112 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1210 priv = kzalloc(sizeof *priv, GFP_KERNEL);
1113 if (!priv) { 1211 if (!priv) {
1114 dev_err(&pdev->dev, "Device struct alloc failed, " 1212 dev_err(&pdev->dev, "Device struct alloc failed, "
@@ -1127,6 +1225,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1127 INIT_LIST_HEAD(&priv->pgdir_list); 1225 INIT_LIST_HEAD(&priv->pgdir_list);
1128 mutex_init(&priv->pgdir_mutex); 1226 mutex_init(&priv->pgdir_mutex);
1129 1227
1228 pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);
1229
1230 INIT_LIST_HEAD(&priv->bf_list);
1231 mutex_init(&priv->bf_mutex);
1232
1130 /* 1233 /*
1131 * Now reset the HCA before we touch the PCI capabilities or 1234 * Now reset the HCA before we touch the PCI capabilities or
1132 * attempt a firmware command, since a boot ROM may have left 1235 * attempt a firmware command, since a boot ROM may have left
@@ -1151,8 +1254,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1151 if (err) 1254 if (err)
1152 goto err_close; 1255 goto err_close;
1153 1256
1257 priv->msix_ctl.pool_bm = 0;
1258 spin_lock_init(&priv->msix_ctl.pool_lock);
1259
1154 mlx4_enable_msi_x(dev); 1260 mlx4_enable_msi_x(dev);
1155 1261
1262 err = mlx4_init_steering(dev);
1263 if (err)
1264 goto err_free_eq;
1265
1156 err = mlx4_setup_hca(dev); 1266 err = mlx4_setup_hca(dev);
1157 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { 1267 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
1158 dev->flags &= ~MLX4_FLAG_MSI_X; 1268 dev->flags &= ~MLX4_FLAG_MSI_X;
@@ -1161,7 +1271,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1161 } 1271 }
1162 1272
1163 if (err) 1273 if (err)
1164 goto err_free_eq; 1274 goto err_steer;
1165 1275
1166 for (port = 1; port <= dev->caps.num_ports; port++) { 1276 for (port = 1; port <= dev->caps.num_ports; port++) {
1167 err = mlx4_init_port_info(dev, port); 1277 err = mlx4_init_port_info(dev, port);
@@ -1194,6 +1304,9 @@ err_port:
1194 mlx4_cleanup_pd_table(dev); 1304 mlx4_cleanup_pd_table(dev);
1195 mlx4_cleanup_uar_table(dev); 1305 mlx4_cleanup_uar_table(dev);
1196 1306
1307err_steer:
1308 mlx4_clear_steering(dev);
1309
1197err_free_eq: 1310err_free_eq:
1198 mlx4_free_eq_table(dev); 1311 mlx4_free_eq_table(dev);
1199 1312
@@ -1253,6 +1366,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1253 iounmap(priv->kar); 1366 iounmap(priv->kar);
1254 mlx4_uar_free(dev, &priv->driver_uar); 1367 mlx4_uar_free(dev, &priv->driver_uar);
1255 mlx4_cleanup_uar_table(dev); 1368 mlx4_cleanup_uar_table(dev);
1369 mlx4_clear_steering(dev);
1256 mlx4_free_eq_table(dev); 1370 mlx4_free_eq_table(dev);
1257 mlx4_close_hca(dev); 1371 mlx4_close_hca(dev);
1258 mlx4_cmd_cleanup(dev); 1372 mlx4_cmd_cleanup(dev);
@@ -1286,6 +1400,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1400 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 1401 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1402 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1403 { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
1404 { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
1405 { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
1406 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
1407 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
1408 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
1409 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
1410 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
1411 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
1412 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
1413 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
1414 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
1415 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
1416 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
1417 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
1289 { 0, } 1418 { 0, }
1290}; 1419};
1291 1420
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 79cf42db2ea..e71372aa9cc 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/etherdevice.h>
35 36
36#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
37 38
@@ -40,38 +41,40 @@
40#define MGM_QPN_MASK 0x00FFFFFF 41#define MGM_QPN_MASK 0x00FFFFFF
41#define MGM_BLCK_LB_BIT 30 42#define MGM_BLCK_LB_BIT 30
42 43
43struct mlx4_mgm {
44 __be32 next_gid_index;
45 __be32 members_count;
46 u32 reserved[2];
47 u8 gid[16];
48 __be32 qp[MLX4_QP_PER_MGM];
49};
50
51static const u8 zero_gid[16]; /* automatically initialized to 0 */ 44static const u8 zero_gid[16]; /* automatically initialized to 0 */
52 45
53static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, 46static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
54 struct mlx4_cmd_mailbox *mailbox) 47 struct mlx4_cmd_mailbox *mailbox)
55{ 48{
56 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 49 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
57 MLX4_CMD_TIME_CLASS_A); 50 MLX4_CMD_TIME_CLASS_A);
58} 51}
59 52
60static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, 53static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
61 struct mlx4_cmd_mailbox *mailbox) 54 struct mlx4_cmd_mailbox *mailbox)
62{ 55{
63 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 56 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
64 MLX4_CMD_TIME_CLASS_A); 57 MLX4_CMD_TIME_CLASS_A);
65} 58}
66 59
67static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 60static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
68 u16 *hash) 61 struct mlx4_cmd_mailbox *mailbox)
62{
63 u32 in_mod;
64
65 in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
66 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
67 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
68}
69
70static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
71 u16 *hash, u8 op_mod)
69{ 72{
70 u64 imm; 73 u64 imm;
71 int err; 74 int err;
72 75
73 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, 76 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
74 MLX4_CMD_TIME_CLASS_A); 77 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
75 78
76 if (!err) 79 if (!err)
77 *hash = imm; 80 *hash = imm;
@@ -79,6 +82,457 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
79 return err; 82 return err;
80} 83}
81 84
85static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
86 enum mlx4_steer_type steer,
87 u32 qpn)
88{
89 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
90 struct mlx4_promisc_qp *pqp;
91
92 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
93 if (pqp->qpn == qpn)
94 return pqp;
95 }
96 /* not found */
97 return NULL;
98}
99
100/*
101 * Add new entry to steering data structure.
102 * All promisc QPs should be added as well
103 */
104static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
105 enum mlx4_steer_type steer,
106 unsigned int index, u32 qpn)
107{
108 struct mlx4_steer *s_steer;
109 struct mlx4_cmd_mailbox *mailbox;
110 struct mlx4_mgm *mgm;
111 u32 members_count;
112 struct mlx4_steer_index *new_entry;
113 struct mlx4_promisc_qp *pqp;
 114	struct mlx4_promisc_qp *dqp = NULL;
115 u32 prot;
116 int err;
117 u8 pf_num;
118
119 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
120 s_steer = &mlx4_priv(dev)->steer[pf_num];
121 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
122 if (!new_entry)
123 return -ENOMEM;
124
125 INIT_LIST_HEAD(&new_entry->duplicates);
126 new_entry->index = index;
127 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
128
129 /* If the given qpn is also a promisc qp,
 130	 * it should be inserted into the duplicates list
131 */
132 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
133 if (pqp) {
134 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
135 if (!dqp) {
136 err = -ENOMEM;
137 goto out_alloc;
138 }
139 dqp->qpn = qpn;
140 list_add_tail(&dqp->list, &new_entry->duplicates);
141 }
142
143 /* if no promisc qps for this vep, we are done */
144 if (list_empty(&s_steer->promisc_qps[steer]))
145 return 0;
146
147 /* now need to add all the promisc qps to the new
148 * steering entry, as they should also receive the packets
 149	 * destined for this address */
150 mailbox = mlx4_alloc_cmd_mailbox(dev);
151 if (IS_ERR(mailbox)) {
152 err = -ENOMEM;
153 goto out_alloc;
154 }
155 mgm = mailbox->buf;
156
157 err = mlx4_READ_ENTRY(dev, index, mailbox);
158 if (err)
159 goto out_mailbox;
160
161 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
162 prot = be32_to_cpu(mgm->members_count) >> 30;
163 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
164 /* don't add already existing qpn */
165 if (pqp->qpn == qpn)
166 continue;
167 if (members_count == MLX4_QP_PER_MGM) {
168 /* out of space */
169 err = -ENOMEM;
170 goto out_mailbox;
171 }
172
173 /* add the qpn */
174 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
175 }
 176	/* update the qp count and write the entry back with all the promisc qps */
177 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
178 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
179
180out_mailbox:
181 mlx4_free_cmd_mailbox(dev, mailbox);
182 if (!err)
183 return 0;
184out_alloc:
185 if (dqp) {
186 list_del(&dqp->list);
 187		kfree(dqp);
188 }
189 list_del(&new_entry->list);
190 kfree(new_entry);
191 return err;
192}
193
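
These helpers key the per-device steering array by a packed pf_num: on a single-port device it is just the vep number, otherwise the vep number and the zero-based port share one index. A small sketch of that packing with illustrative values:

#include <stdio.h>

static unsigned pf_num(unsigned num_ports, unsigned vep, unsigned port)
{
	return (num_ports == 1) ? vep : (vep << 1) | (port - 1);
}

int main(void)
{
	printf("vep 0 port 1 -> %u\n", pf_num(2, 0, 1));  /* 0 */
	printf("vep 0 port 2 -> %u\n", pf_num(2, 0, 2));  /* 1 */
	printf("vep 1 port 2 -> %u\n", pf_num(2, 1, 2));  /* 3 */
	return 0;
}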
 194/* update the data structures with an existing steering entry */
195static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
196 enum mlx4_steer_type steer,
197 unsigned int index, u32 qpn)
198{
199 struct mlx4_steer *s_steer;
200 struct mlx4_steer_index *tmp_entry, *entry = NULL;
201 struct mlx4_promisc_qp *pqp;
202 struct mlx4_promisc_qp *dqp;
203 u8 pf_num;
204
205 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
206 s_steer = &mlx4_priv(dev)->steer[pf_num];
207
208 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
209 if (!pqp)
210 return 0; /* nothing to do */
211
212 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
213 if (tmp_entry->index == index) {
214 entry = tmp_entry;
215 break;
216 }
217 }
218 if (unlikely(!entry)) {
219 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
220 return -EINVAL;
221 }
222
 223	/* the given qpn is listed as a promisc qp;
 224	 * we need to add it as a duplicate to this entry
 225	 * for future reference */
226 list_for_each_entry(dqp, &entry->duplicates, list) {
227 if (qpn == dqp->qpn)
228 return 0; /* qp is already duplicated */
229 }
230
231 /* add the qp as a duplicate on this index */
232 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
233 if (!dqp)
234 return -ENOMEM;
235 dqp->qpn = qpn;
236 list_add_tail(&dqp->list, &entry->duplicates);
237
238 return 0;
239}
240
 241/* Check whether a qpn is a duplicate on a steering entry;
 242 * if so, it should not be removed from the mgm */
243static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
244 enum mlx4_steer_type steer,
245 unsigned int index, u32 qpn)
246{
247 struct mlx4_steer *s_steer;
248 struct mlx4_steer_index *tmp_entry, *entry = NULL;
249 struct mlx4_promisc_qp *dqp, *tmp_dqp;
250 u8 pf_num;
251
252 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
253 s_steer = &mlx4_priv(dev)->steer[pf_num];
254
255 /* if qp is not promisc, it cannot be duplicated */
256 if (!get_promisc_qp(dev, pf_num, steer, qpn))
257 return false;
258
 259	/* The qp is a promisc qp, so it is a duplicate on this index.
260 * Find the index entry, and remove the duplicate */
261 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
262 if (tmp_entry->index == index) {
263 entry = tmp_entry;
264 break;
265 }
266 }
267 if (unlikely(!entry)) {
268 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
269 return false;
270 }
271 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
272 if (dqp->qpn == qpn) {
273 list_del(&dqp->list);
274 kfree(dqp);
275 }
276 }
277 return true;
278}
279
 280/* If a steering entry contains only promisc QPs, it can be removed. */
281static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
282 enum mlx4_steer_type steer,
283 unsigned int index, u32 tqpn)
284{
285 struct mlx4_steer *s_steer;
286 struct mlx4_cmd_mailbox *mailbox;
287 struct mlx4_mgm *mgm;
288 struct mlx4_steer_index *entry = NULL, *tmp_entry;
289 u32 qpn;
290 u32 members_count;
291 bool ret = false;
292 int i;
293 u8 pf_num;
294
295 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
296 s_steer = &mlx4_priv(dev)->steer[pf_num];
297
298 mailbox = mlx4_alloc_cmd_mailbox(dev);
299 if (IS_ERR(mailbox))
300 return false;
301 mgm = mailbox->buf;
302
303 if (mlx4_READ_ENTRY(dev, index, mailbox))
304 goto out;
305 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
306 for (i = 0; i < members_count; i++) {
307 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
308 if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
309 /* the qp is not promisc, the entry can't be removed */
310 goto out;
311 }
312 }
 313	/* All the qps currently registered for this entry are promiscuous;
 314	 * check for duplicates */
315 ret = true;
316 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
317 if (entry->index == index) {
318 if (list_empty(&entry->duplicates)) {
319 list_del(&entry->list);
320 kfree(entry);
321 } else {
322 /* This entry contains duplicates so it shouldn't be removed */
323 ret = false;
324 goto out;
325 }
326 }
327 }
328
329out:
330 mlx4_free_cmd_mailbox(dev, mailbox);
331 return ret;
332}
333
334static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
335 enum mlx4_steer_type steer, u32 qpn)
336{
337 struct mlx4_steer *s_steer;
338 struct mlx4_cmd_mailbox *mailbox;
339 struct mlx4_mgm *mgm;
340 struct mlx4_steer_index *entry;
341 struct mlx4_promisc_qp *pqp;
342 struct mlx4_promisc_qp *dqp;
343 u32 members_count;
344 u32 prot;
345 int i;
346 bool found;
347 int last_index;
348 int err;
349 u8 pf_num;
350 struct mlx4_priv *priv = mlx4_priv(dev);
351 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
352 s_steer = &mlx4_priv(dev)->steer[pf_num];
353
354 mutex_lock(&priv->mcg_table.mutex);
355
356 if (get_promisc_qp(dev, pf_num, steer, qpn)) {
 357		err = 0;  /* Nothing to do, already exists */
358 goto out_mutex;
359 }
360
361 pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
362 if (!pqp) {
363 err = -ENOMEM;
364 goto out_mutex;
365 }
366 pqp->qpn = qpn;
367
368 mailbox = mlx4_alloc_cmd_mailbox(dev);
369 if (IS_ERR(mailbox)) {
370 err = -ENOMEM;
371 goto out_alloc;
372 }
373 mgm = mailbox->buf;
374
 375	/* the promisc qp needs to be added to each of the steering
 376	 * entries; if it already exists there, it is added as a
 377	 * duplicate for this entry instead */
378 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
379 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
380 if (err)
381 goto out_mailbox;
382
383 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
384 prot = be32_to_cpu(mgm->members_count) >> 30;
385 found = false;
386 for (i = 0; i < members_count; i++) {
387 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
388 /* Entry already exists, add to duplicates */
389 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
 390				if (!dqp) {
 391					err = -ENOMEM;
 392					goto out_mailbox;
 393				}
392 dqp->qpn = qpn;
393 list_add_tail(&dqp->list, &entry->duplicates);
394 found = true;
395 }
396 }
397 if (!found) {
398 /* Need to add the qpn to mgm */
399 if (members_count == MLX4_QP_PER_MGM) {
400 /* entry is full */
401 err = -ENOMEM;
402 goto out_mailbox;
403 }
404 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
405 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
406 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
407 if (err)
408 goto out_mailbox;
409 }
410 last_index = entry->index;
411 }
412
413 /* add the new qpn to list of promisc qps */
414 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
 415	/* now add all the promisc qps to the default entry */
416 memset(mgm, 0, sizeof *mgm);
417 members_count = 0;
418 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
419 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
420 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
421
422 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
423 if (err)
424 goto out_list;
425
426 mlx4_free_cmd_mailbox(dev, mailbox);
427 mutex_unlock(&priv->mcg_table.mutex);
428 return 0;
429
430out_list:
431 list_del(&pqp->list);
432out_mailbox:
433 mlx4_free_cmd_mailbox(dev, mailbox);
434out_alloc:
435 kfree(pqp);
436out_mutex:
437 mutex_unlock(&priv->mcg_table.mutex);
438 return err;
439}
440
441static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
442 enum mlx4_steer_type steer, u32 qpn)
443{
444 struct mlx4_priv *priv = mlx4_priv(dev);
445 struct mlx4_steer *s_steer;
446 struct mlx4_cmd_mailbox *mailbox;
447 struct mlx4_mgm *mgm;
448 struct mlx4_steer_index *entry;
449 struct mlx4_promisc_qp *pqp;
450 struct mlx4_promisc_qp *dqp;
451 u32 members_count;
452 bool found;
453 bool back_to_list = false;
454 int loc, i;
455 int err;
456 u8 pf_num;
457
458 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
459 s_steer = &mlx4_priv(dev)->steer[pf_num];
460 mutex_lock(&priv->mcg_table.mutex);
461
462 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
463 if (unlikely(!pqp)) {
 464		mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
465 /* nothing to do */
466 err = 0;
467 goto out_mutex;
468 }
469
 470	/* remove from the list of promisc qps */
 471	list_del(&pqp->list);
473
474 /* set the default entry not to include the removed one */
475 mailbox = mlx4_alloc_cmd_mailbox(dev);
476 if (IS_ERR(mailbox)) {
477 err = -ENOMEM;
478 back_to_list = true;
479 goto out_list;
480 }
481 mgm = mailbox->buf;
482 members_count = 0;
483 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
484 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
485 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
486
487 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
488 if (err)
489 goto out_mailbox;
490
 491	/* remove the qp from all the steering entries */
492 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
493 found = false;
494 list_for_each_entry(dqp, &entry->duplicates, list) {
495 if (dqp->qpn == qpn) {
496 found = true;
497 break;
498 }
499 }
500 if (found) {
501 /* a duplicate, no need to change the mgm,
502 * only update the duplicates list */
503 list_del(&dqp->list);
504 kfree(dqp);
505 } else {
506 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
507 if (err)
508 goto out_mailbox;
509 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
510 for (loc = -1, i = 0; i < members_count; ++i)
511 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
512 loc = i;
513
514 mgm->members_count = cpu_to_be32(--members_count |
515 (MLX4_PROT_ETH << 30));
516 mgm->qp[loc] = mgm->qp[i - 1];
517 mgm->qp[i - 1] = 0;
518
519 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
520 if (err)
521 goto out_mailbox;
522 }
523
524 }
525
526out_mailbox:
527 mlx4_free_cmd_mailbox(dev, mailbox);
 528out_list:
 529	if (back_to_list)
 530		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
 531	else
 532		kfree(pqp);
531out_mutex:
532 mutex_unlock(&priv->mcg_table.mutex);
533 return err;
534}
535
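
The MGM members_count word, tested and rewritten all through the hunks above, packs the protocol into its top two bits and the member count into the low 24; find_entry() now treats a zero count as an unused entry. A minimal encode/decode sketch (endianness handling omitted; the MLX4_PROT_ETH value of 1 is an assumption):

#include <stdio.h>

#define MGM_COUNT_MASK 0x00ffffffu

static unsigned pack_members(unsigned prot, unsigned count)
{
	return (prot << 30) | (count & MGM_COUNT_MASK);
}

int main(void)
{
	unsigned w = pack_members(1 /* assumed MLX4_PROT_ETH */, 5);

	printf("count=%u prot=%u\n", w & MGM_COUNT_MASK, w >> 30);
	/* an entry whose count decodes to 0 is treated as unused */
	return 0;
}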
82/* 536/*
83 * Caller must hold MCG table semaphore. gid and mgm parameters must 537 * Caller must hold MCG table semaphore. gid and mgm parameters must
84 * be properly aligned for command interface. 538 * be properly aligned for command interface.
@@ -94,15 +548,17 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
94 * If no AMGM exists for given gid, *index = -1, *prev = index of last 548 * If no AMGM exists for given gid, *index = -1, *prev = index of last
95 * entry in hash chain and *mgm holds end of hash chain. 549 * entry in hash chain and *mgm holds end of hash chain.
96 */ 550 */
97static int find_mgm(struct mlx4_dev *dev, 551static int find_entry(struct mlx4_dev *dev, u8 port,
98 u8 *gid, enum mlx4_protocol protocol, 552 u8 *gid, enum mlx4_protocol prot,
99 struct mlx4_cmd_mailbox *mgm_mailbox, 553 enum mlx4_steer_type steer,
100 u16 *hash, int *prev, int *index) 554 struct mlx4_cmd_mailbox *mgm_mailbox,
555 u16 *hash, int *prev, int *index)
101{ 556{
102 struct mlx4_cmd_mailbox *mailbox; 557 struct mlx4_cmd_mailbox *mailbox;
103 struct mlx4_mgm *mgm = mgm_mailbox->buf; 558 struct mlx4_mgm *mgm = mgm_mailbox->buf;
104 u8 *mgid; 559 u8 *mgid;
105 int err; 560 int err;
561 u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
106 562
107 mailbox = mlx4_alloc_cmd_mailbox(dev); 563 mailbox = mlx4_alloc_cmd_mailbox(dev);
108 if (IS_ERR(mailbox)) 564 if (IS_ERR(mailbox))
@@ -111,7 +567,7 @@ static int find_mgm(struct mlx4_dev *dev,
111 567
112 memcpy(mgid, gid, 16); 568 memcpy(mgid, gid, 16);
113 569
114 err = mlx4_MGID_HASH(dev, mailbox, hash); 570 err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
115 mlx4_free_cmd_mailbox(dev, mailbox); 571 mlx4_free_cmd_mailbox(dev, mailbox);
116 if (err) 572 if (err)
117 return err; 573 return err;
@@ -123,11 +579,11 @@ static int find_mgm(struct mlx4_dev *dev,
123 *prev = -1; 579 *prev = -1;
124 580
125 do { 581 do {
126 err = mlx4_READ_MCG(dev, *index, mgm_mailbox); 582 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
127 if (err) 583 if (err)
128 return err; 584 return err;
129 585
130 if (!memcmp(mgm->gid, zero_gid, 16)) { 586 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
131 if (*index != *hash) { 587 if (*index != *hash) {
132 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 588 mlx4_err(dev, "Found zero MGID in AMGM.\n");
133 err = -EINVAL; 589 err = -EINVAL;
@@ -136,7 +592,7 @@ static int find_mgm(struct mlx4_dev *dev,
136 } 592 }
137 593
138 if (!memcmp(mgm->gid, gid, 16) && 594 if (!memcmp(mgm->gid, gid, 16) &&
139 be32_to_cpu(mgm->members_count) >> 30 == protocol) 595 be32_to_cpu(mgm->members_count) >> 30 == prot)
140 return err; 596 return err;
141 597
142 *prev = *index; 598 *prev = *index;
@@ -147,8 +603,9 @@ static int find_mgm(struct mlx4_dev *dev,
147 return err; 603 return err;
148} 604}
149 605
150int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 606int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
151 int block_mcast_loopback, enum mlx4_protocol protocol) 607 int block_mcast_loopback, enum mlx4_protocol prot,
608 enum mlx4_steer_type steer)
152{ 609{
153 struct mlx4_priv *priv = mlx4_priv(dev); 610 struct mlx4_priv *priv = mlx4_priv(dev);
154 struct mlx4_cmd_mailbox *mailbox; 611 struct mlx4_cmd_mailbox *mailbox;
@@ -159,6 +616,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
159 int link = 0; 616 int link = 0;
160 int i; 617 int i;
161 int err; 618 int err;
619 u8 port = gid[5];
620 u8 new_entry = 0;
162 621
163 mailbox = mlx4_alloc_cmd_mailbox(dev); 622 mailbox = mlx4_alloc_cmd_mailbox(dev);
164 if (IS_ERR(mailbox)) 623 if (IS_ERR(mailbox))
@@ -166,14 +625,16 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
166 mgm = mailbox->buf; 625 mgm = mailbox->buf;
167 626
168 mutex_lock(&priv->mcg_table.mutex); 627 mutex_lock(&priv->mcg_table.mutex);
169 628 err = find_entry(dev, port, gid, prot, steer,
170 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 629 mailbox, &hash, &prev, &index);
171 if (err) 630 if (err)
172 goto out; 631 goto out;
173 632
174 if (index != -1) { 633 if (index != -1) {
175 if (!memcmp(mgm->gid, zero_gid, 16)) 634 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
635 new_entry = 1;
176 memcpy(mgm->gid, gid, 16); 636 memcpy(mgm->gid, gid, 16);
637 }
177 } else { 638 } else {
178 link = 1; 639 link = 1;
179 640
@@ -209,26 +670,34 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
209 else 670 else
210 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 671 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
211 672
212 mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30); 673 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
213 674
214 err = mlx4_WRITE_MCG(dev, index, mailbox); 675 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
215 if (err) 676 if (err)
216 goto out; 677 goto out;
217 678
218 if (!link) 679 if (!link)
219 goto out; 680 goto out;
220 681
221 err = mlx4_READ_MCG(dev, prev, mailbox); 682 err = mlx4_READ_ENTRY(dev, prev, mailbox);
222 if (err) 683 if (err)
223 goto out; 684 goto out;
224 685
225 mgm->next_gid_index = cpu_to_be32(index << 6); 686 mgm->next_gid_index = cpu_to_be32(index << 6);
226 687
227 err = mlx4_WRITE_MCG(dev, prev, mailbox); 688 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
228 if (err) 689 if (err)
229 goto out; 690 goto out;
230 691
231out: 692out:
693 if (prot == MLX4_PROT_ETH) {
694 /* manage the steering entry for promisc mode */
695 if (new_entry)
696 new_steering_entry(dev, 0, port, steer, index, qp->qpn);
697 else
698 existing_steering_entry(dev, 0, port, steer,
699 index, qp->qpn);
700 }
232 if (err && link && index != -1) { 701 if (err && link && index != -1) {
233 if (index < dev->caps.num_mgms) 702 if (index < dev->caps.num_mgms)
234 mlx4_warn(dev, "Got AMGM index %d < %d", 703 mlx4_warn(dev, "Got AMGM index %d < %d",
@@ -242,10 +711,9 @@ out:
242 mlx4_free_cmd_mailbox(dev, mailbox); 711 mlx4_free_cmd_mailbox(dev, mailbox);
243 return err; 712 return err;
244} 713}
245EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
246 714
247int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 715int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
248 enum mlx4_protocol protocol) 716 enum mlx4_protocol prot, enum mlx4_steer_type steer)
249{ 717{
250 struct mlx4_priv *priv = mlx4_priv(dev); 718 struct mlx4_priv *priv = mlx4_priv(dev);
251 struct mlx4_cmd_mailbox *mailbox; 719 struct mlx4_cmd_mailbox *mailbox;
@@ -255,6 +723,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
255 int prev, index; 723 int prev, index;
256 int i, loc; 724 int i, loc;
257 int err; 725 int err;
726 u8 port = gid[5];
727 bool removed_entry = false;
258 728
259 mailbox = mlx4_alloc_cmd_mailbox(dev); 729 mailbox = mlx4_alloc_cmd_mailbox(dev);
260 if (IS_ERR(mailbox)) 730 if (IS_ERR(mailbox))
@@ -263,7 +733,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
263 733
264 mutex_lock(&priv->mcg_table.mutex); 734 mutex_lock(&priv->mcg_table.mutex);
265 735
266 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 736 err = find_entry(dev, port, gid, prot, steer,
737 mailbox, &hash, &prev, &index);
267 if (err) 738 if (err)
268 goto out; 739 goto out;
269 740
@@ -273,6 +744,11 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
273 goto out; 744 goto out;
274 } 745 }
275 746
 747	/* if this qp is also a promisc qp, it shouldn't be removed */
748 if (prot == MLX4_PROT_ETH &&
749 check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
750 goto out;
751
276 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 752 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
277 for (loc = -1, i = 0; i < members_count; ++i) 753 for (loc = -1, i = 0; i < members_count; ++i)
278 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 754 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
@@ -285,26 +761,31 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
285 } 761 }
286 762
287 763
288 mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30); 764 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
289 mgm->qp[loc] = mgm->qp[i - 1]; 765 mgm->qp[loc] = mgm->qp[i - 1];
290 mgm->qp[i - 1] = 0; 766 mgm->qp[i - 1] = 0;
291 767
292 if (i != 1) { 768 if (prot == MLX4_PROT_ETH)
293 err = mlx4_WRITE_MCG(dev, index, mailbox); 769 removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
770 if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
771 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
294 goto out; 772 goto out;
295 } 773 }
296 774
775 /* We are going to delete the entry, members count should be 0 */
776 mgm->members_count = cpu_to_be32((u32) prot << 30);
777
297 if (prev == -1) { 778 if (prev == -1) {
298 /* Remove entry from MGM */ 779 /* Remove entry from MGM */
299 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 780 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
300 if (amgm_index) { 781 if (amgm_index) {
301 err = mlx4_READ_MCG(dev, amgm_index, mailbox); 782 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
302 if (err) 783 if (err)
303 goto out; 784 goto out;
304 } else 785 } else
305 memset(mgm->gid, 0, 16); 786 memset(mgm->gid, 0, 16);
306 787
307 err = mlx4_WRITE_MCG(dev, index, mailbox); 788 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
308 if (err) 789 if (err)
309 goto out; 790 goto out;
310 791
@@ -319,13 +800,13 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
319 } else { 800 } else {
320 /* Remove entry from AMGM */ 801 /* Remove entry from AMGM */
321 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 802 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
322 err = mlx4_READ_MCG(dev, prev, mailbox); 803 err = mlx4_READ_ENTRY(dev, prev, mailbox);
323 if (err) 804 if (err)
324 goto out; 805 goto out;
325 806
326 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 807 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
327 808
328 err = mlx4_WRITE_MCG(dev, prev, mailbox); 809 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
329 if (err) 810 if (err)
330 goto out; 811 goto out;
331 812
@@ -343,8 +824,85 @@ out:
343 mlx4_free_cmd_mailbox(dev, mailbox); 824 mlx4_free_cmd_mailbox(dev, mailbox);
344 return err; 825 return err;
345} 826}
827
828
829int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
830 int block_mcast_loopback, enum mlx4_protocol prot)
831{
832 enum mlx4_steer_type steer;
833
834 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
835
836 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
837 return 0;
838
839 if (prot == MLX4_PROT_ETH)
840 gid[7] |= (steer << 1);
841
842 return mlx4_qp_attach_common(dev, qp, gid,
843 block_mcast_loopback, prot,
844 steer);
845}
846EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
847
848int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
849 enum mlx4_protocol prot)
850{
851 enum mlx4_steer_type steer;
852
853 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
854
855 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
856 return 0;
857
858 if (prot == MLX4_PROT_ETH) {
859 gid[7] |= (steer << 1);
860 }
861
862 return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
863}
346EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 864EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
347 865
866
867int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
868{
869 if (!dev->caps.vep_mc_steering)
870 return 0;
871
872
873 return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
874}
875EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
876
877int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
878{
879 if (!dev->caps.vep_mc_steering)
880 return 0;
881
882
883 return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
884}
885EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
886
887int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
888{
889 if (!dev->caps.vep_mc_steering)
890 return 0;
891
892
893 return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
894}
895EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
896
897int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
898{
899 if (!dev->caps.vep_mc_steering)
900 return 0;
901
902 return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
903}
904EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
905
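
The wrappers above encode the steering decision into the 16-byte "gid" itself: byte 5 carries the port, byte 7 the steer type shifted left by one, and bytes 10..15 the MAC (which is also how is_valid_ether_addr(&gid[10]) distinguishes unicast from multicast). A self-contained sketch of that layout; the steer enum values here are assumptions:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

enum { UC_STEER = 0, MC_STEER = 1 };   /* assumed enum values */

static void build_eth_gid(uint8_t gid[16], uint8_t port, int steer,
			  const uint8_t mac[6])
{
	memset(gid, 0, 16);
	gid[5] = port;
	gid[7] = steer << 1;
	memcpy(&gid[10], mac, 6);
}

int main(void)
{
	uint8_t gid[16];
	const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x01, 0x02, 0x03 };
	int i;

	build_eth_gid(gid, 1, UC_STEER, mac);
	for (i = 0; i < 16; i++)
		printf("%02x", gid[i]);
	putchar('\n');
	return 0;
}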
348int mlx4_init_mcg_table(struct mlx4_dev *dev) 906int mlx4_init_mcg_table(struct mlx4_dev *dev)
349{ 907{
350 struct mlx4_priv *priv = mlx4_priv(dev); 908 struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 0da5bb7285b..c1e0e5f1bcd 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -105,6 +105,7 @@ struct mlx4_bitmap {
105 u32 max; 105 u32 max;
106 u32 reserved_top; 106 u32 reserved_top;
107 u32 mask; 107 u32 mask;
108 u32 avail;
108 spinlock_t lock; 109 spinlock_t lock;
109 unsigned long *table; 110 unsigned long *table;
110}; 111};
@@ -162,6 +163,27 @@ struct mlx4_fw {
162 u8 catas_bar; 163 u8 catas_bar;
163}; 164};
164 165
166#define MGM_QPN_MASK 0x00FFFFFF
167#define MGM_BLCK_LB_BIT 30
168
169struct mlx4_promisc_qp {
170 struct list_head list;
171 u32 qpn;
172};
173
174struct mlx4_steer_index {
175 struct list_head list;
176 unsigned int index;
177 struct list_head duplicates;
178};
179
180struct mlx4_mgm {
181 __be32 next_gid_index;
182 __be32 members_count;
183 u32 reserved[2];
184 u8 gid[16];
185 __be32 qp[MLX4_QP_PER_MGM];
186};
165struct mlx4_cmd { 187struct mlx4_cmd {
166 struct pci_pool *pool; 188 struct pci_pool *pool;
167 void __iomem *hcr; 189 void __iomem *hcr;
@@ -265,6 +287,10 @@ struct mlx4_vlan_table {
265 int max; 287 int max;
266}; 288};
267 289
290struct mlx4_mac_entry {
291 u64 mac;
292};
293
268struct mlx4_port_info { 294struct mlx4_port_info {
269 struct mlx4_dev *dev; 295 struct mlx4_dev *dev;
270 int port; 296 int port;
@@ -272,7 +298,9 @@ struct mlx4_port_info {
272 struct device_attribute port_attr; 298 struct device_attribute port_attr;
273 enum mlx4_port_type tmp_type; 299 enum mlx4_port_type tmp_type;
274 struct mlx4_mac_table mac_table; 300 struct mlx4_mac_table mac_table;
301 struct radix_tree_root mac_tree;
275 struct mlx4_vlan_table vlan_table; 302 struct mlx4_vlan_table vlan_table;
303 int base_qpn;
276}; 304};
277 305
278struct mlx4_sense { 306struct mlx4_sense {
@@ -282,6 +310,17 @@ struct mlx4_sense {
282 struct delayed_work sense_poll; 310 struct delayed_work sense_poll;
283}; 311};
284 312
313struct mlx4_msix_ctl {
314 u64 pool_bm;
315 spinlock_t pool_lock;
316};
317
318struct mlx4_steer {
319 struct list_head promisc_qps[MLX4_NUM_STEERS];
320 struct list_head steer_entries[MLX4_NUM_STEERS];
321 struct list_head high_prios;
322};
323
285struct mlx4_priv { 324struct mlx4_priv {
286 struct mlx4_dev dev; 325 struct mlx4_dev dev;
287 326
@@ -313,6 +352,11 @@ struct mlx4_priv {
313 struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; 352 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
314 struct mlx4_sense sense; 353 struct mlx4_sense sense;
315 struct mutex port_mutex; 354 struct mutex port_mutex;
355 struct mlx4_msix_ctl msix_ctl;
356 struct mlx4_steer *steer;
357 struct list_head bf_list;
358 struct mutex bf_mutex;
359 struct io_mapping *bf_mapping;
316}; 360};
317 361
318static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 362static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -328,6 +372,7 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
328void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 372void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
329u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 373u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
330void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); 374void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
375u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
331int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 376int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 332 u32 reserved_bot, u32 reserved_top); 377 u32 reserved_bot, u32 reserved_top);
333void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); 378void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
@@ -403,4 +448,9 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
403int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); 448int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
404int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); 449int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
405 450
451int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
452 enum mlx4_protocol prot, enum mlx4_steer_type steer);
453int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
454 int block_mcast_loopback, enum mlx4_protocol prot,
455 enum mlx4_steer_type steer);
406#endif /* MLX4_H */ 456#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index dfed6a07c2d..e30f6099c0d 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -49,8 +49,8 @@
49#include "en_port.h" 49#include "en_port.h"
50 50
51#define DRV_NAME "mlx4_en" 51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.5.1.6" 52#define DRV_VERSION "1.5.4.1"
53#define DRV_RELDATE "August 2010" 53#define DRV_RELDATE "March 2011"
54 54
55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
56 56
@@ -62,6 +62,7 @@
62#define MLX4_EN_PAGE_SHIFT 12 62#define MLX4_EN_PAGE_SHIFT 12
63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
64#define MAX_RX_RINGS 16 64#define MAX_RX_RINGS 16
65#define MIN_RX_RINGS 4
65#define TXBB_SIZE 64 66#define TXBB_SIZE 64
66#define HEADROOM (2048 / TXBB_SIZE + 1) 67#define HEADROOM (2048 / TXBB_SIZE + 1)
67#define STAMP_STRIDE 64 68#define STAMP_STRIDE 64
@@ -124,6 +125,7 @@ enum {
124#define MLX4_EN_RX_SIZE_THRESH 1024 125#define MLX4_EN_RX_SIZE_THRESH 1024
125#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) 126#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
126#define MLX4_EN_SAMPLE_INTERVAL 0 127#define MLX4_EN_SAMPLE_INTERVAL 0
128#define MLX4_EN_AVG_PKT_SMALL 256
127 129
128#define MLX4_EN_AUTO_CONF 0xffff 130#define MLX4_EN_AUTO_CONF 0xffff
129 131
@@ -214,6 +216,9 @@ struct mlx4_en_tx_desc {
214 216
215#define MLX4_EN_USE_SRQ 0x01000000 217#define MLX4_EN_USE_SRQ 0x01000000
216 218
219#define MLX4_EN_CX3_LOW_ID 0x1000
220#define MLX4_EN_CX3_HIGH_ID 0x1005
221
217struct mlx4_en_rx_alloc { 222struct mlx4_en_rx_alloc {
218 struct page *page; 223 struct page *page;
219 u16 offset; 224 u16 offset;
@@ -243,6 +248,8 @@ struct mlx4_en_tx_ring {
243 unsigned long bytes; 248 unsigned long bytes;
244 unsigned long packets; 249 unsigned long packets;
245 spinlock_t comp_lock; 250 spinlock_t comp_lock;
251 struct mlx4_bf bf;
252 bool bf_enabled;
246}; 253};
247 254
248struct mlx4_en_rx_desc { 255struct mlx4_en_rx_desc {
@@ -453,6 +460,7 @@ struct mlx4_en_priv {
453 struct mlx4_en_rss_map rss_map; 460 struct mlx4_en_rss_map rss_map;
454 u32 flags; 461 u32 flags;
455#define MLX4_EN_FLAG_PROMISC 0x1 462#define MLX4_EN_FLAG_PROMISC 0x1
463#define MLX4_EN_FLAG_MC_PROMISC 0x2
456 u32 tx_ring_num; 464 u32 tx_ring_num;
457 u32 rx_ring_num; 465 u32 rx_ring_num;
458 u32 rx_skb_size; 466 u32 rx_skb_size;
@@ -461,6 +469,7 @@ struct mlx4_en_priv {
461 u16 log_rx_info; 469 u16 log_rx_info;
462 470
463 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; 471 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
472 int tx_vector;
464 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 473 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
465 struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; 474 struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
466 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 475 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
@@ -476,6 +485,13 @@ struct mlx4_en_priv {
476 int mc_addrs_cnt; 485 int mc_addrs_cnt;
477 struct mlx4_en_stat_out_mbox hw_stats; 486 struct mlx4_en_stat_out_mbox hw_stats;
478 int vids[128]; 487 int vids[128];
488 bool wol;
489};
490
491enum mlx4_en_wol {
492 MLX4_EN_WOL_MAGIC = (1ULL << 61),
493 MLX4_EN_WOL_ENABLED = (1ULL << 62),
494 MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
479}; 495};
480 496
481 497
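
The WOL enum added above reserves the top bits of a 64-bit configuration word. A brief sketch of setting and testing them, mirroring the values in the hunk:

#include <stdio.h>
#include <stdint.h>

#define WOL_MAGIC      (1ULL << 61)
#define WOL_ENABLED    (1ULL << 62)
#define WOL_DO_MODIFY  (1ULL << 63)

int main(void)
{
	uint64_t cfg = 0;

	cfg |= WOL_DO_MODIFY | WOL_ENABLED | WOL_MAGIC;
	printf("magic WOL %s\n", (cfg & WOL_MAGIC) ? "armed" : "off");
	return 0;
}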
@@ -486,12 +502,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
486int mlx4_en_start_port(struct net_device *dev); 502int mlx4_en_start_port(struct net_device *dev);
487void mlx4_en_stop_port(struct net_device *dev); 503void mlx4_en_stop_port(struct net_device *dev);
488 504
489void mlx4_en_free_resources(struct mlx4_en_priv *priv); 505void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
490int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 506int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
491 507
492int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 508int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
493 int entries, int ring, enum cq_type mode); 509 int entries, int ring, enum cq_type mode);
494void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 510void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
511 bool reserve_vectors);
495int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 512int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
496void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 513void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
497int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 514int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -503,7 +520,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
503netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 520netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
504 521
505int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 522int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
506 u32 size, u16 stride); 523 int qpn, u32 size, u16 stride);
507void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 524void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
508int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 525int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
509 struct mlx4_en_tx_ring *ring, 526 struct mlx4_en_tx_ring *ring,
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index c4988d6bd5b..1286b886dce 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -32,12 +32,17 @@
32 */ 32 */
33 33
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/io-mapping.h>
35 36
36#include <asm/page.h> 37#include <asm/page.h>
37 38
38#include "mlx4.h" 39#include "mlx4.h"
39#include "icm.h" 40#include "icm.h"
40 41
42enum {
43 MLX4_NUM_RESERVED_UARS = 8
44};
45
41int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) 46int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
42{ 47{
43 struct mlx4_priv *priv = mlx4_priv(dev); 48 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -77,6 +82,7 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
77 return -ENOMEM; 82 return -ENOMEM;
78 83
79 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; 84 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
85 uar->map = NULL;
80 86
81 return 0; 87 return 0;
82} 88}
@@ -88,6 +94,102 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
88} 94}
89EXPORT_SYMBOL_GPL(mlx4_uar_free); 95EXPORT_SYMBOL_GPL(mlx4_uar_free);
90 96
97int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
98{
99 struct mlx4_priv *priv = mlx4_priv(dev);
100 struct mlx4_uar *uar;
101 int err = 0;
102 int idx;
103
104 if (!priv->bf_mapping)
105 return -ENOMEM;
106
107 mutex_lock(&priv->bf_mutex);
108 if (!list_empty(&priv->bf_list))
109 uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
110 else {
111 if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
112 err = -ENOMEM;
113 goto out;
114 }
115 uar = kmalloc(sizeof *uar, GFP_KERNEL);
116 if (!uar) {
117 err = -ENOMEM;
118 goto out;
119 }
120 err = mlx4_uar_alloc(dev, uar);
121 if (err)
122 goto free_kmalloc;
123
124 uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
125 if (!uar->map) {
126 err = -ENOMEM;
127 goto free_uar;
128 }
129
130 uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
131 if (!uar->bf_map) {
132 err = -ENOMEM;
 133			goto unmap_uar;
134 }
135 uar->free_bf_bmap = 0;
136 list_add(&uar->bf_list, &priv->bf_list);
137 }
138
139 bf->uar = uar;
140 idx = ffz(uar->free_bf_bmap);
141 uar->free_bf_bmap |= 1 << idx;
143 bf->offset = 0;
144 bf->buf_size = dev->caps.bf_reg_size / 2;
145 bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
146 if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
147 list_del_init(&uar->bf_list);
148
149 goto out;
150
 151unmap_uar:
152 bf->uar = NULL;
153 iounmap(uar->map);
154
155free_uar:
156 mlx4_uar_free(dev, uar);
157
158free_kmalloc:
159 kfree(uar);
160
161out:
162 mutex_unlock(&priv->bf_mutex);
163 return err;
164}
165EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
166
167void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
168{
169 struct mlx4_priv *priv = mlx4_priv(dev);
170 int idx;
171
172 if (!bf->uar || !bf->uar->bf_map)
173 return;
174
175 mutex_lock(&priv->bf_mutex);
176 idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
177 bf->uar->free_bf_bmap &= ~(1 << idx);
178 if (!bf->uar->free_bf_bmap) {
179 if (!list_empty(&bf->uar->bf_list))
180 list_del(&bf->uar->bf_list);
181
182 io_mapping_unmap(bf->uar->bf_map);
183 iounmap(bf->uar->map);
184 mlx4_uar_free(dev, bf->uar);
185 kfree(bf->uar);
186 } else if (list_empty(&bf->uar->bf_list))
187 list_add(&bf->uar->bf_list, &priv->bf_list);
188
189 mutex_unlock(&priv->bf_mutex);
190}
191EXPORT_SYMBOL_GPL(mlx4_bf_free);
192
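
mlx4_bf_alloc() hands out BlueFlame register slots within a shared UAR page via a small per-page bitmap: ffz() finds the first free slot and the slot index scales into a byte offset. A user-space sketch of the same slot math; the sizes below are illustrative assumptions, not values read from dev->caps.

#include <stdio.h>

#define BF_REG_SIZE       512   /* assumed dev->caps.bf_reg_size */
#define BF_REGS_PER_PAGE    8   /* assumed dev->caps.bf_regs_per_page */

static int first_zero_bit(unsigned long bm)
{
	int i;

	for (i = 0; i < BF_REGS_PER_PAGE; i++)
		if (!(bm & (1UL << i)))
			return i;
	return -1;
}

int main(void)
{
	unsigned long free_bf_bmap = 0;
	int idx = first_zero_bit(free_bf_bmap);

	free_bf_bmap |= 1UL << idx;
	printf("slot %d at offset %d, page %s\n", idx, idx * BF_REG_SIZE,
	       free_bf_bmap == (1UL << BF_REGS_PER_PAGE) - 1 ?
	       "full" : "has room");
	return 0;
}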
91int mlx4_init_uar_table(struct mlx4_dev *dev) 193int mlx4_init_uar_table(struct mlx4_dev *dev)
92{ 194{
93 if (dev->caps.num_uars <= 128) { 195 if (dev->caps.num_uars <= 128) {
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 451339559bd..eca7d8596f8 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -90,12 +90,79 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
90 return err; 90 return err;
91} 91}
92 92
93int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) 93static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
94 u64 mac, int *qpn, u8 reserve)
94{ 95{
95 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 96 struct mlx4_qp qp;
97 u8 gid[16] = {0};
98 int err;
99
100 if (reserve) {
101 err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
102 if (err) {
103 mlx4_err(dev, "Failed to reserve qp for mac registration\n");
104 return err;
105 }
106 }
107 qp.qpn = *qpn;
108
109 mac &= 0xffffffffffffULL;
110 mac = cpu_to_be64(mac << 16);
111 memcpy(&gid[10], &mac, ETH_ALEN);
112 gid[5] = port;
113 gid[7] = MLX4_UC_STEER << 1;
114
115 err = mlx4_qp_attach_common(dev, &qp, gid, 0,
116 MLX4_PROT_ETH, MLX4_UC_STEER);
117 if (err && reserve)
118 mlx4_qp_release_range(dev, *qpn, 1);
119
120 return err;
121}
122
123static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
124 u64 mac, int qpn, u8 free)
125{
126 struct mlx4_qp qp;
127 u8 gid[16] = {0};
128
129 qp.qpn = qpn;
130 mac &= 0xffffffffffffULL;
131 mac = cpu_to_be64(mac << 16);
132 memcpy(&gid[10], &mac, ETH_ALEN);
133 gid[5] = port;
134 gid[7] = MLX4_UC_STEER << 1;
135
136 mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
137 if (free)
138 mlx4_qp_release_range(dev, qpn, 1);
139}
140
141int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
142{
143 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
144 struct mlx4_mac_table *table = &info->mac_table;
145 struct mlx4_mac_entry *entry;
96 int i, err = 0; 146 int i, err = 0;
97 int free = -1; 147 int free = -1;
98 148
149 if (dev->caps.vep_uc_steering) {
150 err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
151 if (!err) {
152 entry = kmalloc(sizeof *entry, GFP_KERNEL);
153 if (!entry) {
154 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
155 return -ENOMEM;
156 }
157 entry->mac = mac;
158 err = radix_tree_insert(&info->mac_tree, *qpn, entry);
159 if (err) {
160 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
161 return err;
162 }
163 } else
164 return err;
165 }
99 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); 166 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
100 mutex_lock(&table->mutex); 167 mutex_lock(&table->mutex);
101 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { 168 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
@@ -106,7 +173,6 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
106 173
107 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 174 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
108 /* MAC already registered, increase reference count */ 175 /* MAC already registered, increase reference count */
109 *index = i;
110 ++table->refs[i]; 176 ++table->refs[i];
111 goto out; 177 goto out;
112 } 178 }
@@ -137,7 +203,8 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
137 goto out; 203 goto out;
138 } 204 }
139 205
140 *index = free; 206 if (!dev->caps.vep_uc_steering)
207 *qpn = info->base_qpn + free;
141 ++table->total; 208 ++table->total;
142out: 209out:
143 mutex_unlock(&table->mutex); 210 mutex_unlock(&table->mutex);
@@ -145,20 +212,52 @@ out:
145} 212}
146EXPORT_SYMBOL_GPL(mlx4_register_mac); 213EXPORT_SYMBOL_GPL(mlx4_register_mac);
147 214
148void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) 215static int validate_index(struct mlx4_dev *dev,
216 struct mlx4_mac_table *table, int index)
149{ 217{
150 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 218 int err = 0;
151 219
152 mutex_lock(&table->mutex); 220 if (index < 0 || index >= table->max || !table->entries[index]) {
153	if (!table->refs[index]) { 221		mlx4_warn(dev, "No valid MAC entry for the given index\n");
154 mlx4_warn(dev, "No MAC entry for index %d\n", index); 222 err = -EINVAL;
155 goto out;
156 } 223 }
157 if (--table->refs[index]) { 224 return err;
158 mlx4_warn(dev, "Have more references for index %d," 225}
159 "no need to modify MAC table\n", index); 226
160 goto out; 227static int find_index(struct mlx4_dev *dev,
228 struct mlx4_mac_table *table, u64 mac)
229{
230 int i;
231 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
232 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
233 return i;
161 } 234 }
 235	/* MAC not found */
236 return -EINVAL;
237}
238
239void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
240{
241 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
242 struct mlx4_mac_table *table = &info->mac_table;
243 int index = qpn - info->base_qpn;
244 struct mlx4_mac_entry *entry;
245
246 if (dev->caps.vep_uc_steering) {
247 entry = radix_tree_lookup(&info->mac_tree, qpn);
248 if (entry) {
249 mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
250 radix_tree_delete(&info->mac_tree, qpn);
251 index = find_index(dev, table, entry->mac);
252 kfree(entry);
253 }
254 }
255
256 mutex_lock(&table->mutex);
257
258 if (validate_index(dev, table, index))
259 goto out;
260
162 table->entries[index] = 0; 261 table->entries[index] = 0;
163 mlx4_set_port_mac_table(dev, port, table->entries); 262 mlx4_set_port_mac_table(dev, port, table->entries);
164 --table->total; 263 --table->total;
@@ -167,6 +266,44 @@ out:
167} 266}
168EXPORT_SYMBOL_GPL(mlx4_unregister_mac); 267EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
169 268
269int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
270{
271 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
272 struct mlx4_mac_table *table = &info->mac_table;
273 int index = qpn - info->base_qpn;
274 struct mlx4_mac_entry *entry;
275 int err;
276
277 if (dev->caps.vep_uc_steering) {
278 entry = radix_tree_lookup(&info->mac_tree, qpn);
279 if (!entry)
280 return -EINVAL;
281 index = find_index(dev, table, entry->mac);
282 mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
283 entry->mac = new_mac;
284 err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
285 if (err || index < 0)
286 return err;
287 }
288
289 mutex_lock(&table->mutex);
290
291 err = validate_index(dev, table, index);
292 if (err)
293 goto out;
294
295 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
296
297 err = mlx4_set_port_mac_table(dev, port, table->entries);
298 if (unlikely(err)) {
299 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
300 table->entries[index] = 0;
301 }
302out:
303 mutex_unlock(&table->mutex);
304 return err;
305}
306EXPORT_SYMBOL_GPL(mlx4_replace_mac);
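
With vep_uc_steering enabled, registered MACs are now looked up by qpn (via the radix tree) rather than by table index. A toy model of the register/replace/unregister lifecycle above, with a flat array standing in for the radix tree; all names here are illustrative:

#include <stdio.h>
#include <stdint.h>

#define MAX_QPN 16
static uint64_t mac_by_qpn[MAX_QPN];   /* 0 = empty slot */

static void reg_mac(int qpn, uint64_t mac)     { mac_by_qpn[qpn] = mac; }
static void replace_mac(int qpn, uint64_t mac) { if (mac_by_qpn[qpn]) mac_by_qpn[qpn] = mac; }
static void unreg_mac(int qpn)                 { mac_by_qpn[qpn] = 0; }

int main(void)
{
	reg_mac(3, 0x0002c9010203ULL);
	replace_mac(3, 0x0002c9010204ULL);
	printf("qpn 3 -> %012llx\n", (unsigned long long)mac_by_qpn[3]);
	unreg_mac(3);
	return 0;
}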
170static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, 307static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
171 __be32 *entries) 308 __be32 *entries)
172{ 309{
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index e749f82865f..b967647d0c7 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,9 +107,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
107 profile[MLX4_RES_AUXC].num = request->num_qp; 107 profile[MLX4_RES_AUXC].num = request->num_qp;
108 profile[MLX4_RES_SRQ].num = request->num_srq; 108 profile[MLX4_RES_SRQ].num = request->num_srq;
109 profile[MLX4_RES_CQ].num = request->num_cq; 109 profile[MLX4_RES_CQ].num = request->num_cq;
110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, 110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
111 dev_cap->reserved_eqs +
112 num_possible_cpus() + 1);
113 profile[MLX4_RES_DMPT].num = request->num_mpt; 111 profile[MLX4_RES_DMPT].num = request->num_mpt;
114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
115 profile[MLX4_RES_MTT].num = request->num_mtt; 113 profile[MLX4_RES_MTT].num = request->num_mtt;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 02076e16542..34425b94452 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -35,6 +35,8 @@
35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 */ 36 */
37 37
38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
38#include <linux/init.h> 40#include <linux/init.h>
39#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
40#include <linux/in.h> 42#include <linux/in.h>
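
The pr_fmt define added above is what lets the pr_warn() conversions later in this file drop their hand-written "mv643xx_eth: " prefixes: every pr_*() format string is prefixed with the module name at compile time via string-literal concatenation. A minimal user-space illustration of the same idea (pr_warn here is a stand-in, not the kernel macro):

#include <stdio.h>

#define KBUILD_MODNAME "mv643xx_eth"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_warn(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_warn("SMI bus busy timeout\n"); /* prints "mv643xx_eth: SMI bus busy timeout" */
	return 0;
}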
@@ -627,9 +629,8 @@ err:
627 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 629 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
628 (RX_FIRST_DESC | RX_LAST_DESC)) { 630 (RX_FIRST_DESC | RX_LAST_DESC)) {
629 if (net_ratelimit()) 631 if (net_ratelimit())
630 dev_printk(KERN_ERR, &mp->dev->dev, 632 netdev_err(mp->dev,
631 "received packet spanning " 633 "received packet spanning multiple descriptors\n");
632 "multiple descriptors\n");
633 } 634 }
634 635
635 if (cmd_sts & ERROR_SUMMARY) 636 if (cmd_sts & ERROR_SUMMARY)
@@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
868 869
869 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 870 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
870 txq->tx_dropped++; 871 txq->tx_dropped++;
871 dev_printk(KERN_DEBUG, &dev->dev, 872 netdev_printk(KERN_DEBUG, dev,
872 "failed to linearize skb with tiny " 873 "failed to linearize skb with tiny unaligned fragment\n");
873 "unaligned fragment\n");
874 return NETDEV_TX_BUSY; 874 return NETDEV_TX_BUSY;
875 } 875 }
876 876
877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
878 if (net_ratelimit()) 878 if (net_ratelimit())
879 dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n"); 879 netdev_err(dev, "tx queue full?!\n");
880 kfree_skb(skb); 880 kfree_skb(skb);
881 return NETDEV_TX_OK; 881 return NETDEV_TX_OK;
882 } 882 }
@@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
959 skb = __skb_dequeue(&txq->tx_skb); 959 skb = __skb_dequeue(&txq->tx_skb);
960 960
961 if (cmd_sts & ERROR_SUMMARY) { 961 if (cmd_sts & ERROR_SUMMARY) {
962 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 962 netdev_info(mp->dev, "tx error\n");
963 mp->dev->stats.tx_errors++; 963 mp->dev->stats.tx_errors++;
964 } 964 }
965 965
@@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1122 int ret; 1122 int ret;
1123 1123
1124 if (smi_wait_ready(msp)) { 1124 if (smi_wait_ready(msp)) {
1125 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1125 pr_warn("SMI bus busy timeout\n");
1126 return -ETIMEDOUT; 1126 return -ETIMEDOUT;
1127 } 1127 }
1128 1128
1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1130 1130
1131 if (smi_wait_ready(msp)) { 1131 if (smi_wait_ready(msp)) {
1132 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1132 pr_warn("SMI bus busy timeout\n");
1133 return -ETIMEDOUT; 1133 return -ETIMEDOUT;
1134 } 1134 }
1135 1135
1136 ret = readl(smi_reg); 1136 ret = readl(smi_reg);
1137 if (!(ret & SMI_READ_VALID)) { 1137 if (!(ret & SMI_READ_VALID)) {
1138 printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n"); 1138 pr_warn("SMI bus read not valid\n");
1139 return -ENODEV; 1139 return -ENODEV;
1140 } 1140 }
1141 1141
@@ -1148,7 +1148,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1148 void __iomem *smi_reg = msp->base + SMI_REG; 1148 void __iomem *smi_reg = msp->base + SMI_REG;
1149 1149
1150 if (smi_wait_ready(msp)) { 1150 if (smi_wait_ready(msp)) {
1151 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1151 pr_warn("SMI bus busy timeout\n");
1152 return -ETIMEDOUT; 1152 return -ETIMEDOUT;
1153 } 1153 }
1154 1154
@@ -1156,7 +1156,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1156 (addr << 16) | (val & 0xffff), smi_reg); 1156 (addr << 16) | (val & 0xffff), smi_reg);
1157 1157
1158 if (smi_wait_ready(msp)) { 1158 if (smi_wait_ready(msp)) {
1159 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1159 pr_warn("SMI bus busy timeout\n");
1160 return -ETIMEDOUT; 1160 return -ETIMEDOUT;
1161 } 1161 }
1162 1162
@@ -1566,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1566 if (netif_running(dev)) { 1566 if (netif_running(dev)) {
1567 mv643xx_eth_stop(dev); 1567 mv643xx_eth_stop(dev);
1568 if (mv643xx_eth_open(dev)) { 1568 if (mv643xx_eth_open(dev)) {
1569 dev_printk(KERN_ERR, &dev->dev, 1569 netdev_err(dev,
1570 "fatal error on re-opening device after " 1570 "fatal error on re-opening device after ring param change\n");
1571 "ring param change\n");
1572 return -ENOMEM; 1571 return -ENOMEM;
1573 } 1572 }
1574 } 1573 }
@@ -1874,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1874 } 1873 }
1875 1874
1876 if (rxq->rx_desc_area == NULL) { 1875 if (rxq->rx_desc_area == NULL) {
1877 dev_printk(KERN_ERR, &mp->dev->dev, 1876 netdev_err(mp->dev,
1878 "can't allocate rx ring (%d bytes)\n", size); 1877 "can't allocate rx ring (%d bytes)\n", size);
1879 goto out; 1878 goto out;
1880 } 1879 }
@@ -1884,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1884 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), 1883 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
1885 GFP_KERNEL); 1884 GFP_KERNEL);
1886 if (rxq->rx_skb == NULL) { 1885 if (rxq->rx_skb == NULL) {
1887 dev_printk(KERN_ERR, &mp->dev->dev, 1886 netdev_err(mp->dev, "can't allocate rx skb ring\n");
1888 "can't allocate rx skb ring\n");
1889 goto out_free; 1887 goto out_free;
1890 } 1888 }
1891 1889
@@ -1944,8 +1942,7 @@ static void rxq_deinit(struct rx_queue *rxq)
1944 } 1942 }
1945 1943
1946 if (rxq->rx_desc_count) { 1944 if (rxq->rx_desc_count) {
1947 dev_printk(KERN_ERR, &mp->dev->dev, 1945 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
1948 "error freeing rx ring -- %d skbs stuck\n",
1949 rxq->rx_desc_count); 1946 rxq->rx_desc_count);
1950 } 1947 }
1951 1948
@@ -1987,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1987 } 1984 }
1988 1985
1989 if (txq->tx_desc_area == NULL) { 1986 if (txq->tx_desc_area == NULL) {
1990 dev_printk(KERN_ERR, &mp->dev->dev, 1987 netdev_err(mp->dev,
1991 "can't allocate tx ring (%d bytes)\n", size); 1988 "can't allocate tx ring (%d bytes)\n", size);
1992 return -ENOMEM; 1989 return -ENOMEM;
1993 } 1990 }
@@ -2093,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
2093 if (netif_carrier_ok(dev)) { 2090 if (netif_carrier_ok(dev)) {
2094 int i; 2091 int i;
2095 2092
2096 printk(KERN_INFO "%s: link down\n", dev->name); 2093 netdev_info(dev, "link down\n");
2097 2094
2098 netif_carrier_off(dev); 2095 netif_carrier_off(dev);
2099 2096
@@ -2124,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
2124 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2121 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2125 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2122 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2126 2123
2127 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " 2124 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2128 "flow control %sabled\n", dev->name, 2125 speed, duplex ? "full" : "half", fc ? "en" : "dis");
2129 speed, duplex ? "full" : "half",
2130 fc ? "en" : "dis");
2131 2126
2132 if (!netif_carrier_ok(dev)) 2127 if (!netif_carrier_ok(dev))
2133 netif_carrier_on(dev); 2128 netif_carrier_on(dev);
@@ -2337,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2337 err = request_irq(dev->irq, mv643xx_eth_irq, 2332 err = request_irq(dev->irq, mv643xx_eth_irq,
2338 IRQF_SHARED, dev->name, dev); 2333 IRQF_SHARED, dev->name, dev);
2339 if (err) { 2334 if (err) {
2340 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2335 netdev_err(dev, "can't assign irq\n");
2341 return -EAGAIN; 2336 return -EAGAIN;
2342 } 2337 }
2343 2338
@@ -2483,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2483 */ 2478 */
2484 mv643xx_eth_stop(dev); 2479 mv643xx_eth_stop(dev);
2485 if (mv643xx_eth_open(dev)) { 2480 if (mv643xx_eth_open(dev)) {
2486 dev_printk(KERN_ERR, &dev->dev, 2481 netdev_err(dev,
2487 "fatal error on re-opening device after " 2482 "fatal error on re-opening device after MTU change\n");
2488 "MTU change\n");
2489 } 2483 }
2490 2484
2491 return 0; 2485 return 0;
@@ -2508,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
2508{ 2502{
2509 struct mv643xx_eth_private *mp = netdev_priv(dev); 2503 struct mv643xx_eth_private *mp = netdev_priv(dev);
2510 2504
2511 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); 2505 netdev_info(dev, "tx timeout\n");
2512 2506
2513 schedule_work(&mp->tx_timeout_task); 2507 schedule_work(&mp->tx_timeout_task);
2514} 2508}
@@ -2603,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2603 int ret; 2597 int ret;
2604 2598
2605 if (!mv643xx_eth_version_printed++) 2599 if (!mv643xx_eth_version_printed++)
2606 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " 2600 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2607 "driver version %s\n", mv643xx_eth_driver_version); 2601 mv643xx_eth_driver_version);
2608 2602
2609 ret = -EINVAL; 2603 ret = -EINVAL;
2610 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2604 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2871,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2871 2865
2872 pd = pdev->dev.platform_data; 2866 pd = pdev->dev.platform_data;
2873 if (pd == NULL) { 2867 if (pd == NULL) {
2874 dev_printk(KERN_ERR, &pdev->dev, 2868 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
2875 "no mv643xx_eth_platform_data\n");
2876 return -ENODEV; 2869 return -ENODEV;
2877 } 2870 }
2878 2871
2879 if (pd->shared == NULL) { 2872 if (pd->shared == NULL) {
2880 dev_printk(KERN_ERR, &pdev->dev, 2873 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
2881 "no mv643xx_eth_platform_data->shared\n");
2882 return -ENODEV; 2874 return -ENODEV;
2883 } 2875 }
2884 2876
@@ -2957,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2957 if (err) 2949 if (err)
2958 goto out; 2950 goto out;
2959 2951
2960 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", 2952 netdev_notice(dev, "port %d with MAC address %pM\n",
2961 mp->port_num, dev->dev_addr); 2953 mp->port_num, dev->dev_addr);
2962 2954
2963 if (mp->tx_desc_sram_size > 0) 2955 if (mp->tx_desc_sram_size > 0)
2964 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2956 netdev_notice(dev, "configured with sram\n");
2965 2957
2966 return 0; 2958 return 0;
2967 2959
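Nearly all of the mv643xx_eth changes above are a mechanical logging conversion: dev_printk(KERN_*, &dev->dev, ...) and bare printk() become netdev_err()/netdev_info()/netdev_notice() and pr_warn()/pr_notice(), which prefix the interface or module name automatically. The pr_fmt definition added at the top of the file is what feeds the pr_*() helpers; a minimal sketch of the pattern (the function is illustrative):

	/* Must be defined before the printk-related headers are pulled in. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void smi_timeout_example(void)
	{
		/* Emits e.g. "mv643xx_eth: SMI bus busy timeout" without the
		 * driver name being repeated in every format string. */
		pr_warn("SMI bus busy timeout\n");
	}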
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ea5cfe2c3a0..673dc600c89 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@ struct myri10ge_priv {
 	unsigned long serial_number;
 	int vendor_specific_offset;
 	int fw_multicast_support;
-	unsigned long features;
+	u32 features;
 	u32 max_tso6;
 	u32 read_dma;
 	u32 write_dma;
@@ -1312,17 +1312,26 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
  * page into an skb */
 
 static inline int
-myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
-		 int bytes, int len, __wsum csum)
+myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
+		 int lro_enabled)
 {
 	struct myri10ge_priv *mgp = ss->mgp;
 	struct sk_buff *skb;
 	struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
-	int i, idx, hlen, remainder;
+	struct myri10ge_rx_buf *rx;
+	int i, idx, hlen, remainder, bytes;
 	struct pci_dev *pdev = mgp->pdev;
 	struct net_device *dev = mgp->dev;
 	u8 *va;
 
+	if (len <= mgp->small_bytes) {
+		rx = &ss->rx_small;
+		bytes = mgp->small_bytes;
+	} else {
+		rx = &ss->rx_big;
+		bytes = mgp->big_bytes;
+	}
+
 	len += MXGEFW_PAD;
 	idx = rx->cnt & rx->mask;
 	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
@@ -1341,7 +1350,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
 		remainder -= MYRI10GE_ALLOC_SIZE;
 	}
 
-	if (dev->features & NETIF_F_LRO) {
+	if (lro_enabled) {
 		rx_frags[0].page_offset += MXGEFW_PAD;
 		rx_frags[0].size -= MXGEFW_PAD;
 		len -= MXGEFW_PAD;
@@ -1463,7 +1472,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
 {
 	struct myri10ge_rx_done *rx_done = &ss->rx_done;
 	struct myri10ge_priv *mgp = ss->mgp;
-	struct net_device *netdev = mgp->dev;
+
 	unsigned long rx_bytes = 0;
 	unsigned long rx_packets = 0;
 	unsigned long rx_ok;
@@ -1474,18 +1483,18 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
 	u16 length;
 	__wsum checksum;
 
+	/*
+	 * Prevent compiler from generating more than one ->features memory
+	 * access to avoid theoretical race condition with functions that
+	 * change NETIF_F_LRO flag at runtime.
+	 */
+	bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+
 	while (rx_done->entry[idx].length != 0 && work_done < budget) {
 		length = ntohs(rx_done->entry[idx].length);
 		rx_done->entry[idx].length = 0;
 		checksum = csum_unfold(rx_done->entry[idx].checksum);
-		if (length <= mgp->small_bytes)
-			rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
-						 mgp->small_bytes,
-						 length, checksum);
-		else
-			rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
-						 mgp->big_bytes,
-						 length, checksum);
+		rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
 		rx_packets += rx_ok;
 		rx_bytes += rx_ok * (unsigned long)length;
 		cnt++;
@@ -1497,7 +1506,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
 	ss->stats.rx_packets += rx_packets;
 	ss->stats.rx_bytes += rx_bytes;
 
-	if (netdev->features & NETIF_F_LRO)
+	if (lro_enabled)
 		lro_flush_all(&rx_done->lro_mgr);
 
 	/* restock receive rings if needed */
@@ -1776,7 +1785,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
 static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
-	unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+	u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
 
 	if (tso_enabled)
 		netdev->features |= flags;
@@ -3645,6 +3654,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
 			dma_free_coherent(&pdev->dev, bytes,
 					  ss->fw_stats, ss->fw_stats_bus);
 			ss->fw_stats = NULL;
+			netif_napi_del(&ss->napi);
 		}
 	}
 	kfree(mgp->ss);
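The lro_enabled flag introduced above reads dev->features exactly once per myri10ge_clean_rx_done() pass, so the per-frame test and the final lro_flush_all() decision cannot disagree when NETIF_F_LRO is toggled concurrently. The same single-read idea in a self-contained sketch (the type and flag are illustrative, not driver definitions):

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
	#define F_LRO (1u << 0)

	struct fake_dev { unsigned int features; };

	static void rx_poll(struct fake_dev *d)
	{
		/* One memory read; both tests below use the same snapshot
		 * even if another context flips d->features in between. */
		unsigned int lro = ACCESS_ONCE(d->features) & F_LRO;

		if (lro)
			; /* aggregate the frame into an LRO session */
		/* ... per-frame work ... */
		if (lro)
			; /* flush the sessions opened under the same snapshot */
	}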
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 4846e131a04..a761076b69c 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -926,7 +926,7 @@ static const struct net_device_ops myri_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-static int __devinit myri_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit myri_sbus_probe(struct platform_device *op)
 {
 	struct device_node *dp = op->dev.of_node;
 	static unsigned version_printed;
@@ -1160,7 +1160,7 @@ static const struct of_device_id myri_sbus_match[] = {
 
 MODULE_DEVICE_TABLE(of, myri_sbus_match);
 
-static struct of_platform_driver myri_sbus_driver = {
+static struct platform_driver myri_sbus_driver = {
 	.driver = {
 		.name = "myri",
 		.owner = THIS_MODULE,
@@ -1172,12 +1172,12 @@ static struct of_platform_driver myri_sbus_driver = {
 
 static int __init myri_sbus_init(void)
 {
-	return of_register_platform_driver(&myri_sbus_driver);
+	return platform_driver_register(&myri_sbus_driver);
 }
 
 static void __exit myri_sbus_exit(void)
 {
-	of_unregister_platform_driver(&myri_sbus_driver);
+	platform_driver_unregister(&myri_sbus_driver);
 }
 
 module_init(myri_sbus_init);
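myri_sbus.c picks up the tree-wide retirement of of_platform_driver: OF devices now probe as plain platform devices, so the probe callback drops its of_device_id argument and registration goes through platform_driver_register(). A hedged sketch of the resulting driver shape (the match-table entry and probe function are illustrative):

	static const struct of_device_id example_match[] = {
		{ .name = "myri", },			/* illustrative entry */
		{},
	};

	static struct platform_driver example_driver = {
		.driver = {
			.name		= "myri",
			.owner		= THIS_MODULE,
			.of_match_table	= example_match, /* replaces the old .match_table */
		},
		.probe	= example_probe,	/* now int (*)(struct platform_device *) */
	};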
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index a11380544e6..d7299f1a494 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -739,7 +739,8 @@ struct netxen_recv_context {
 #define NX_CDRP_CMD_READ_PEXQ_PARAMETERS	0x0000001c
 #define NX_CDRP_CMD_GET_LIC_CAPABILITIES	0x0000001d
 #define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD	0x0000001e
-#define NX_CDRP_CMD_MAX				0x0000001f
+#define NX_CDRP_CMD_CONFIG_GBE_PORT		0x0000001f
+#define NX_CDRP_CMD_MAX				0x00000020
 
 #define NX_RCODE_SUCCESS		0
 #define NX_RCODE_NO_HOST_MEM		1
@@ -1054,6 +1055,7 @@ typedef struct {
 #define NX_FW_CAPABILITY_BDG		(1 << 8)
 #define NX_FW_CAPABILITY_FVLANTX	(1 << 9)
 #define NX_FW_CAPABILITY_HW_LRO		(1 << 10)
+#define NX_FW_CAPABILITY_GBE_LINK_CFG	(1 << 11)
 
 /* module types */
 #define LINKEVENT_MODULE_NOT_PRESENT		1
@@ -1349,6 +1351,8 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
 void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
 void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
 
+int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+			   u32 speed, u32 duplex, u32 autoneg);
 int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
 int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
 int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index f7d06cbc70a..f16966afa64 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -112,6 +112,21 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
 	return 0;
 }
 
+int
+nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+		       u32 speed, u32 duplex, u32 autoneg)
+{
+
+	return netxen_issue_cmd(adapter,
+				adapter->ahw.pci_func,
+				NXHAL_VERSION,
+				speed,
+				duplex,
+				autoneg,
+				NX_CDRP_CMD_CONFIG_GBE_PORT);
+
+}
+
 static int
 nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
 {
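nx_fw_cmd_set_gbe_port() is deliberately thin: speed, duplex and autoneg ride in the three generic argument slots of the existing netxen_issue_cmd() mailbox call, and only the new NX_CDRP_CMD_CONFIG_GBE_PORT opcode distinguishes the request. Sketched with stand-in names, the convention looks like:

	/* Hedged sketch of the mailbox convention, not driver code:
	 * fixed argument slots, with the opcode selecting the operation. */
	static int fw_set_link(struct fw_adapter *ad, u32 speed, u32 duplex, u32 aneg)
	{
		return issue_cmd(ad, ad->pci_func, FW_API_VERSION,
				 speed, duplex, aneg, CMD_CONFIG_GBE_PORT);
	}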
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 587498e140b..3bdcc803ec6 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -214,7 +214,6 @@ skip:
 		check_sfp_module = netif_running(dev) &&
 			adapter->has_link_events;
 	} else {
-		ecmd->autoneg = AUTONEG_ENABLE;
 		ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
 		ecmd->advertising |=
 			(ADVERTISED_TP | ADVERTISED_Autoneg);
@@ -252,53 +251,24 @@ static int
 netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 {
 	struct netxen_adapter *adapter = netdev_priv(dev);
-	__u32 status;
+	int ret;
 
-	/* read which mode */
-	if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
-		/* autonegotiation */
-		if (adapter->phy_write &&
-		    adapter->phy_write(adapter,
-				       NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-				       ecmd->autoneg) != 0)
-			return -EIO;
-		else
-			adapter->link_autoneg = ecmd->autoneg;
+	if (adapter->ahw.port_type != NETXEN_NIC_GBE)
+		return -EOPNOTSUPP;
 
-		if (adapter->phy_read &&
-		    adapter->phy_read(adapter,
-				      NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-				      &status) != 0)
-			return -EIO;
+	if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
+		return -EOPNOTSUPP;
 
-		/* speed */
-		switch (ecmd->speed) {
-		case SPEED_10:
-			netxen_set_phy_speed(status, 0);
-			break;
-		case SPEED_100:
-			netxen_set_phy_speed(status, 1);
-			break;
-		case SPEED_1000:
-			netxen_set_phy_speed(status, 2);
-			break;
-		}
-		/* set duplex mode */
-		if (ecmd->duplex == DUPLEX_HALF)
-			netxen_clear_phy_duplex(status);
-		if (ecmd->duplex == DUPLEX_FULL)
-			netxen_set_phy_duplex(status);
-		if (adapter->phy_write &&
-		    adapter->phy_write(adapter,
-				       NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-				       *((int *)&status)) != 0)
-			return -EIO;
-		else {
-			adapter->link_speed = ecmd->speed;
-			adapter->link_duplex = ecmd->duplex;
-		}
-	} else
+	ret = nx_fw_cmd_set_gbe_port(adapter, ecmd->speed, ecmd->duplex,
+				     ecmd->autoneg);
+	if (ret == NX_RCODE_NOT_SUPPORTED)
 		return -EOPNOTSUPP;
+	else if (ret)
+		return -EIO;
+
+	adapter->link_speed = ecmd->speed;
+	adapter->link_duplex = ecmd->duplex;
+	adapter->link_autoneg = ecmd->autoneg;
 
 	if (!netif_running(dev))
 		return 0;
@@ -901,7 +871,7 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
 	struct netxen_adapter *adapter = netdev_priv(netdev);
 	int hw_lro;
 
-	if (data & ~ETH_FLAG_LRO)
+	if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
 		return -EINVAL;
 
 	if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
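The rewritten netxen_nic_set_settings() stops poking PHY registers directly and instead refuses early unless the port is GbE and the firmware advertises NX_FW_CAPABILITY_GBE_LINK_CFG, then translates the firmware return code into an errno and caches the settings only on success. The gate-then-translate shape, sketched with illustrative names:

	static int set_settings(struct nic *n, u32 speed, u32 duplex, u32 aneg)
	{
		int ret;

		if (n->port_type != PORT_GBE || !(n->caps & CAP_GBE_LINK_CFG))
			return -EOPNOTSUPP;	/* bail before touching hardware */

		ret = fw_set_link(n, speed, duplex, aneg);
		if (ret == FW_NOT_SUPPORTED)
			return -EOPNOTSUPP;
		if (ret)
			return -EIO;

		/* cache only after the firmware accepted the request */
		n->link_speed = speed;
		n->link_duplex = duplex;
		n->link_autoneg = aneg;
		return 0;
	}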
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 33fac32e0d9..83348dc4b18 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1032,6 +1032,9 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
+	if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+		netxen_linkevent_request(adapter, 0);
+
 	if (adapter->stop_port)
 		adapter->stop_port(adapter);
 
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2541321bad8..32678b6c6b3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
 {
 	struct niu_parent *parent = np->parent;
 	int first_rx_channel, first_tx_channel;
+	int num_rx_rings, num_tx_rings;
+	struct rx_ring_info *rx_rings;
+	struct tx_ring_info *tx_rings;
 	int i, port, err;
 
 	port = np->port;
@@ -4498,18 +4501,21 @@
 		first_tx_channel += parent->txchan_per_port[i];
 	}
 
-	np->num_rx_rings = parent->rxchan_per_port[port];
-	np->num_tx_rings = parent->txchan_per_port[port];
+	num_rx_rings = parent->rxchan_per_port[port];
+	num_tx_rings = parent->txchan_per_port[port];
 
-	netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
-	netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
-
-	np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
-			       GFP_KERNEL);
+	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
+			   GFP_KERNEL);
 	err = -ENOMEM;
-	if (!np->rx_rings)
+	if (!rx_rings)
 		goto out_err;
 
+	np->num_rx_rings = num_rx_rings;
+	smp_wmb();
+	np->rx_rings = rx_rings;
+
+	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
+
 	for (i = 0; i < np->num_rx_rings; i++) {
 		struct rx_ring_info *rp = &np->rx_rings[i];
 
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
 		return err;
 	}
 
-	np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
-			       GFP_KERNEL);
+	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+			   GFP_KERNEL);
 	err = -ENOMEM;
-	if (!np->tx_rings)
+	if (!tx_rings)
 		goto out_err;
 
+	np->num_tx_rings = num_tx_rings;
+	smp_wmb();
+	np->tx_rings = tx_rings;
+
+	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
+
 	for (i = 0; i < np->num_tx_rings; i++) {
 		struct tx_ring_info *rp = &np->tx_rings[i];
 
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
 static void niu_get_rx_stats(struct niu *np)
 {
 	unsigned long pkts, dropped, errors, bytes;
+	struct rx_ring_info *rx_rings;
 	int i;
 
 	pkts = dropped = errors = bytes = 0;
+
+	rx_rings = ACCESS_ONCE(np->rx_rings);
+	if (!rx_rings)
+		goto no_rings;
+
 	for (i = 0; i < np->num_rx_rings; i++) {
-		struct rx_ring_info *rp = &np->rx_rings[i];
+		struct rx_ring_info *rp = &rx_rings[i];
 
 		niu_sync_rx_discard_stats(np, rp, 0);
 
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
 		dropped += rp->rx_dropped;
 		errors += rp->rx_errors;
 	}
+
+no_rings:
 	np->dev->stats.rx_packets = pkts;
 	np->dev->stats.rx_bytes = bytes;
 	np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
 static void niu_get_tx_stats(struct niu *np)
 {
 	unsigned long pkts, errors, bytes;
+	struct tx_ring_info *tx_rings;
 	int i;
 
 	pkts = errors = bytes = 0;
+
+	tx_rings = ACCESS_ONCE(np->tx_rings);
+	if (!tx_rings)
+		goto no_rings;
+
 	for (i = 0; i < np->num_tx_rings; i++) {
-		struct tx_ring_info *rp = &np->tx_rings[i];
+		struct tx_ring_info *rp = &tx_rings[i];
 
 		pkts += rp->tx_packets;
 		bytes += rp->tx_bytes;
 		errors += rp->tx_errors;
 	}
+
+no_rings:
 	np->dev->stats.tx_packets = pkts;
 	np->dev->stats.tx_bytes = bytes;
 	np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
 {
 	struct niu *np = netdev_priv(dev);
 
-	niu_get_rx_stats(np);
-	niu_get_tx_stats(np);
-
+	if (netif_running(dev)) {
+		niu_get_rx_stats(np);
+		niu_get_tx_stats(np);
+	}
 	return &dev->stats;
 }
 
@@ -9472,7 +9501,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
 	struct niu_parent *p;
 	int i;
 
-	plat_dev = platform_device_register_simple("niu", niu_parent_index,
+	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
 						   NULL, 0);
 	if (IS_ERR(plat_dev))
 		return NULL;
@@ -10033,8 +10062,7 @@ static const struct niu_ops niu_phys_ops = {
 	.unmap_single	= niu_phys_unmap_single,
 };
 
-static int __devinit niu_of_probe(struct platform_device *op,
-				  const struct of_device_id *match)
+static int __devinit niu_of_probe(struct platform_device *op)
 {
 	union niu_parent_id parent_id;
 	struct net_device *dev;
@@ -10194,7 +10222,7 @@ static const struct of_device_id niu_match[] = {
 };
 MODULE_DEVICE_TABLE(of, niu_match);
 
-static struct of_platform_driver niu_of_driver = {
+static struct platform_driver niu_of_driver = {
 	.driver = {
 		.name = "niu",
 		.owner = THIS_MODULE,
@@ -10215,14 +10243,14 @@ static int __init niu_init(void)
 	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
 
 #ifdef CONFIG_SPARC64
-	err = of_register_platform_driver(&niu_of_driver);
+	err = platform_driver_register(&niu_of_driver);
 #endif
 
 	if (!err) {
 		err = pci_register_driver(&niu_pci_driver);
 #ifdef CONFIG_SPARC64
 		if (err)
-			of_unregister_platform_driver(&niu_of_driver);
+			platform_driver_unregister(&niu_of_driver);
 #endif
 	}
 
@@ -10233,7 +10261,7 @@ static void __exit niu_exit(void)
 {
 	pci_unregister_driver(&niu_pci_driver);
 #ifdef CONFIG_SPARC64
-	of_unregister_platform_driver(&niu_of_driver);
+	platform_driver_unregister(&niu_of_driver);
 #endif
 }
 
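The niu ring-allocation rework is a publish-order fix: the ring count is stored first, smp_wmb() orders it before the pointer store, and the lock-free stats readers load the pointer with ACCESS_ONCE() and treat NULL as "no rings published yet". Both halves of the pattern, condensed into a sketch (collect() is illustrative):

	/* Writer side (niu_alloc_channels): */
	np->num_rx_rings = num_rx_rings;
	smp_wmb();				/* count visible before the pointer */
	np->rx_rings = rx_rings;

	/* Reader side (niu_get_rx_stats), possibly racing with the writer: */
	struct rx_ring_info *rings = ACCESS_ONCE(np->rx_rings);
	if (!rings)
		goto no_rings;			/* allocation not published yet */
	for (i = 0; i < np->num_rx_rings; i++)
		collect(&rings[i]);		/* illustrative accumulation */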
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c766f3..a41b2cf4d91 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
 	}
 
 	ndev = alloc_etherdev(sizeof(struct ns83820));
-	dev = PRIV(ndev);
-
 	err = -ENOMEM;
-	if (!dev)
+	if (!ndev)
 		goto out;
 
+	dev = PRIV(ndev);
 	dev->ndev = ndev;
 
 	spin_lock_init(&dev->rx_info.lock);
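The ns83820 hunk fixes an inverted allocation check: PRIV(ndev) was computed before testing whether alloc_etherdev() succeeded, and the NULL test was then applied to the derived pointer rather than to the allocation. The safe ordering, as a generic sketch (example_priv is illustrative):

	struct net_device *ndev = alloc_etherdev(sizeof(struct example_priv));
	if (!ndev)
		return -ENOMEM;			/* test the allocation itself */

	struct example_priv *p = netdev_priv(ndev);	/* safe: ndev is non-NULL here */
	p->ndev = ndev;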
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a99520..e1e33c80fb2 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
 	struct pch_gbe_regs_mac_adr mac_adr[16];
 	u32 ADDR_MASK;
 	u32 MIIM;
-	u32 reserve2;
+	u32 MAC_ADDR_LOAD;
 	u32 RGMII_ST;
 	u32 RGMII_CTRL;
 	u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index d7355306a73..50986840c99 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_SHORT_PKT		64
 #define DSC_INIT16			0xC000
 #define PCH_GBE_DMA_ALIGN		0
+#define PCH_GBE_DMA_PADDING		2
 #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT	256
 #define PCH_GBE_PCI_BAR			1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 			       int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw:	            Pointer to the HW structure
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
 	struct pch_gbe_adapter *adapter;
 	adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 
+	rtnl_lock();
 	pch_gbe_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
  */
 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
-	rtnl_lock();
-	if (netif_running(netdev)) {
-		pch_gbe_down(adapter);
-		pch_gbe_up(adapter);
-	}
-	rtnl_unlock();
+	pch_gbe_down(adapter);
+	pch_gbe_up(adapter);
 }
 
 /**
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 	struct pch_gbe_buffer *buffer_info;
 	struct pch_gbe_rx_desc *rx_desc;
 	u32 length;
-	unsigned char tmp_packet[ETH_HLEN];
 	unsigned int i;
 	unsigned int cleaned_count = 0;
 	bool cleaned = false;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *new_skb;
 	u8 dma_status;
 	u16 gbec_status;
 	u32 tcp_ip_status;
-	u8 skb_copy_flag = 0;
-	u8 skb_padding_flag = 0;
 
 	i = rx_ring->next_to_clean;
 
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 			pr_err("Receive CRC Error\n");
 		} else {
 			/* get receive length */
-			/* length convert[-3], padding[-2] */
-			length = (rx_desc->rx_words_eob) - 3 - 2;
+			/* length convert[-3] */
+			length = (rx_desc->rx_words_eob) - 3;
 
 			/* Decide the data conversion method */
 			if (!adapter->rx_csum) {
 				/* [Header:14][payload] */
-				skb_padding_flag = 0;
-				skb_copy_flag = 1;
+				if (NET_IP_ALIGN) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.*/
+					new_skb = netdev_alloc_skb(netdev,
+							length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
+					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
+					memcpy(new_skb->data, skb->data,
+						length);
+					skb = new_skb;
+				} else {
+					/* DMA buffer is used as SKB as it is.*/
+					buffer_info->skb = NULL;
+				}
 			} else {
 				/* [Header:14][padding:2][payload] */
-				skb_padding_flag = 1;
-				if (length < copybreak)
-					skb_copy_flag = 1;
-				else
-					skb_copy_flag = 0;
-			}
-
-			/* Data conversion */
-			if (skb_copy_flag) {	/* recycle  skb */
-				struct sk_buff *new_skb;
-				new_skb =
-				    netdev_alloc_skb(netdev,
-						     length + NET_IP_ALIGN);
-				if (new_skb) {
-					if (!skb_padding_flag) {
-						skb_reserve(new_skb,
-							    NET_IP_ALIGN);
+				/* The length includes padding length */
+				length = length - PCH_GBE_DMA_PADDING;
+				if ((length < copybreak) ||
+				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.
+					 * Padding data is deleted
+					 * at the time of a copy.*/
+					new_skb = netdev_alloc_skb(netdev,
+							length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
 					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
 					memcpy(new_skb->data, skb->data,
-						length);
-					/* save the skb
-					 * in buffer_info as good */
+						ETH_HLEN);
+					memcpy(&new_skb->data[ETH_HLEN],
+						&skb->data[ETH_HLEN +
+							PCH_GBE_DMA_PADDING],
+						length - ETH_HLEN);
 					skb = new_skb;
-				} else if (!skb_padding_flag) {
-					/* dorrop error */
-					pr_err("New skb allocation Error\n");
-					goto dorrop;
+				} else {
+					/* Padding data is deleted
+					 * by moving header data.*/
+					memmove(&skb->data[PCH_GBE_DMA_PADDING],
+						&skb->data[0], ETH_HLEN);
+					skb_reserve(skb, NET_IP_ALIGN);
+					buffer_info->skb = NULL;
 				}
-			} else {
-				buffer_info->skb = NULL;
-			}
-			if (skb_padding_flag) {
-				memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-				memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-					ETH_HLEN);
-				skb_reserve(skb, NET_IP_ALIGN);
-
 			}
-
+			/* The length includes FCS length */
+			length = length - ETH_FCS_LEN;
 			/* update status of driver */
 			adapter->stats.rx_bytes += length;
 			adapter->stats.rx_packets++;
@@ -2247,7 +2262,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
 	unregister_netdev(netdev);
 
 	pch_gbe_hal_phy_hw_reset(&adapter->hw);
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
 	pch_gbe_set_ethtool_ops(netdev);
 
+	pch_gbe_mac_load_mac_addr(&adapter->hw);
 	pch_gbe_mac_reset_hw(&adapter->hw);
 
 	/* setup the private structure */
@@ -2425,12 +2441,12 @@ static struct pci_error_handlers pch_gbe_err_handler = {
 	.resume = pch_gbe_io_resume
 };
 
-static struct pci_driver pch_gbe_pcidev = {
+static struct pci_driver pch_gbe_driver = {
 	.name = KBUILD_MODNAME,
 	.id_table = pch_gbe_pcidev_id,
 	.probe = pch_gbe_probe,
 	.remove = pch_gbe_remove,
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
 	.driver.pm = &pch_gbe_pm_ops,
 #endif
 	.shutdown = pch_gbe_shutdown,
@@ -2442,7 +2458,7 @@ static int __init pch_gbe_init_module(void)
 {
 	int ret;
 
-	ret = pci_register_driver(&pch_gbe_pcidev);
+	ret = pci_register_driver(&pch_gbe_driver);
 	if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
 		if (copybreak == 0) {
 			pr_info("copybreak disabled\n");
@@ -2456,7 +2472,7 @@ static int __init pch_gbe_init_module(void)
 
 static void __exit pch_gbe_exit_module(void)
 {
-	pci_unregister_driver(&pch_gbe_pcidev);
+	pci_unregister_driver(&pch_gbe_driver);
 }
 
 module_init(pch_gbe_init_module);
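Two distinct fixes sit in the pch_gbe diff above. The reset worker now takes rtnl_lock() itself, so pch_gbe_reinit_locked() finally matches its name. And the receive path stops mis-handling the controller's 2-byte pad between the Ethernet header and the payload: the copy paths skip the pad while copying, and the in-place path slides the 14-byte header up over the pad so skb_reserve() can realign the data pointer. The in-place variant in isolation, as a sketch:

	/* DMA buffer layout: [eth header:14][pad:2][payload].
	 * Moving the header up by the pad size yields [gap:2][eth:14][payload];
	 * skb_reserve(skb, NET_IP_ALIGN) then points data at the header and the
	 * IP header that follows lands on a 4-byte boundary. */
	memmove(&skb->data[PCH_GBE_DMA_PADDING], &skb->data[0], ETH_HLEN);
	skb_reserve(skb, NET_IP_ALIGN);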
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1f42f6ac855..d3cb7720586 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
 
 	/*
 	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
-	 * Early datasheets said to poll the reset bit, but now they say that
-	 * it "is not a reliable indicator and subsequently should be ignored."
-	 * We wait at least 10ms.
+	 * We wait at least 2ms.
 	 */
 
-	mdelay(10);
+	mdelay(2);
 
 	/*
 	 * Reset RBCR[01] back to zero as per magic incantation.
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda4d05..530ab5a10bd 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
 	PCMCIA_DEVICE_NULL,
 };
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 35fda5ac812..392a6c4b72e 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -77,7 +77,6 @@ config NATIONAL_PHY
 	  Currently supports the DP83865 PHY.
 
 config STE10XP
-	depends on PHYLIB
 	tristate "Driver for STMicroelectronics STe10Xp PHYs"
 	---help---
 	  This is the driver for the STe100p and STe101p PHYs.
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index f62c7b717bc..47c8339a035 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -188,8 +188,7 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
 
 #ifdef CONFIG_OF_GPIO
 
-static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev,
-                                       const struct of_device_id *match)
+static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev)
 {
 	struct mdio_gpio_platform_data *pdata;
 	struct mii_bus *new_bus;
@@ -240,7 +239,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
 };
 MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
 
-static struct of_platform_driver mdio_ofgpio_driver = {
+static struct platform_driver mdio_ofgpio_driver = {
 	.driver = {
 		.name = "mdio-gpio",
 		.owner = THIS_MODULE,
@@ -252,12 +251,12 @@ static struct of_platform_driver mdio_ofgpio_driver = {
 
 static inline int __init mdio_ofgpio_init(void)
 {
-	return of_register_platform_driver(&mdio_ofgpio_driver);
+	return platform_driver_register(&mdio_ofgpio_driver);
 }
 
 static inline void __exit mdio_ofgpio_exit(void)
 {
-	of_unregister_platform_driver(&mdio_ofgpio_driver);
+	platform_driver_unregister(&mdio_ofgpio_driver);
 }
 #else
 static inline int __init mdio_ofgpio_init(void) { return 0; }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0fd1678bc5a..590f902deb6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,13 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/phy.h>
-
-#define PHY_ID_KSZ9021			0x00221611
-#define PHY_ID_KS8737			0x00221720
-#define PHY_ID_KS8041			0x00221510
-#define PHY_ID_KS8051			0x00221550
-/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define PHY_ID_KS8001			0x0022161A
+#include <linux/micrel_phy.h>
 
 /* general Interrupt control/status reg in vendor specific block. */
 #define MII_KSZPHY_INTCS			0x1B
@@ -46,6 +40,7 @@
 #define KSZPHY_CTRL_INT_ACTIVE_HIGH		(1 << 9)
 #define KSZ9021_CTRL_INT_ACTIVE_HIGH		(1 << 14)
 #define KS8737_CTRL_INT_ACTIVE_HIGH		(1 << 14)
+#define KSZ8051_RMII_50MHZ_CLK			(1 << 7)
 
 static int kszphy_ack_interrupt(struct phy_device *phydev)
 {
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
 	return 0;
 }
 
+static int ks8051_config_init(struct phy_device *phydev)
+{
+	int regval;
+
+	if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+		regval = phy_read(phydev, MII_KSZPHY_CTRL);
+		regval |= KSZ8051_RMII_50MHZ_CLK;
+		phy_write(phydev, MII_KSZPHY_CTRL, regval);
+	}
+
+	return 0;
+}
+
 static struct phy_driver ks8737_driver = {
 	.phy_id		= PHY_ID_KS8737,
 	.phy_id_mask	= 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
 				| SUPPORTED_Asym_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-	.config_init	= kszphy_config_init,
+	.config_init	= ks8051_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a8445c72fc1..f7670330f98 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -319,7 +319,8 @@ int phy_mii_ioctl(struct phy_device *phydev,
 		/* fall through */
 
 	case SIOCGMIIREG:
-		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
+		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
+						 mii_data->reg_num);
 		break;
 
 	case SIOCSMIIREG:
@@ -350,8 +351,9 @@
 			}
 		}
 
-		phy_write(phydev, mii_data->reg_num, val);
-
+		mdiobus_write(phydev->bus, mii_data->phy_id,
+			      mii_data->reg_num, val);
+
 		if (mii_data->reg_num == MII_BMCR &&
 		    val & BMCR_RESET &&
 		    phydev->drv->config_init) {
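The phy.c change makes the MII ioctls honor the caller-supplied address: instead of phy_read()/phy_write(), which always target the PHY bound to the net device, SIOCGMIIREG/SIOCSMIIREG now go through mdiobus_read()/mdiobus_write() with mii_data->phy_id, so tools like mii-tool can address any device on the MDIO bus. The call shapes side by side:

	/* Before: fixed to the attached PHY's own address. */
	val = phy_read(phydev, regnum);

	/* After: explicit bus + address taken from the ioctl request. */
	val = mdiobus_read(phydev->bus, mii_data->phy_id, regnum);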
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 43583309a65..31e9407a073 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -129,7 +129,7 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
 
 	state->strm.next_in   = NULL;
 	state->w_size         = w_size;
-	state->strm.workspace = vmalloc(zlib_deflate_workspacesize());
+	state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
 	if (state->strm.workspace == NULL)
 		goto out_free;
 
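zlib_deflate_workspacesize() now takes (windowBits, memLevel) so a caller compressing with a reduced window can allocate a correspondingly smaller workspace; ppp_deflate passes -w_size because raw deflate (no zlib header) is conventionally requested with negative windowBits, and 8 is zlib's default memLevel. A user-space analogue of the same parameter convention, using zlib's deflateInit2():

	#include <zlib.h>

	static int init_raw_deflate(z_stream *zs, int w_size)
	{
		/* Negative windowBits selects raw deflate; memLevel 8 is the
		 * default, mirroring zlib_deflate_workspacesize(-w_size, 8). */
		return deflateInit2(zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
				    -w_size, 8, Z_DEFAULT_STRATEGY);
	}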
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index c7a6c446697..9f6d670748d 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			ppp_release(NULL, file);
 			err = 0;
 		} else
-			printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
-			       atomic_long_read(&file->f_count));
+			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+				atomic_long_read(&file->f_count));
 		mutex_unlock(&ppp_mutex);
 		return err;
 	}
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 	if (pf->kind != INTERFACE) {
 		/* can't happen */
-		printk(KERN_ERR "PPP: not interface or channel??\n");
+		pr_err("PPP: not interface or channel??\n");
 		return -EINVAL;
 	}
 
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		}
 		vj = slhc_init(val2+1, val+1);
 		if (!vj) {
-			printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+			netdev_err(ppp->dev,
+				   "PPP: no memory (VJ compressor)\n");
 			err = -ENOMEM;
 			break;
 		}
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
 {
 	int err;
 
-	printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+	pr_info("PPP generic driver version " PPP_VERSION "\n");
 
 	err = register_pernet_device(&ppp_net_ops);
 	if (err) {
-		printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+		pr_err("failed to register PPP pernet device (%d)\n", err);
 		goto out;
 	}
 
 	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
 	if (err) {
-		printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+		pr_err("failed to register PPP device (%d)\n", err);
 		goto out_net;
 	}
 
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
 	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
 	if (!new_skb) {
 		if (net_ratelimit())
-			printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
 		return NULL;
 	}
 	if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
 	 * the same number.
 	 */
 	if (net_ratelimit())
-		printk(KERN_ERR "ppp: compressor dropped pkt\n");
+		netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
 	kfree_skb(skb);
 	kfree_skb(new_skb);
 	new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	if (ppp->pass_filter &&
 	    sk_run_filter(skb, ppp->pass_filter) == 0) {
 		if (ppp->debug & 1)
-			printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+			netdev_printk(KERN_DEBUG, ppp->dev,
+				      "PPP: outbound frame "
+				      "not passed\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
 				    GFP_ATOMIC);
 		if (!new_skb) {
-			printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
 			goto drop;
 		}
 		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	    proto != PPP_LCP && proto != PPP_CCP) {
 		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
 			if (net_ratelimit())
-				printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
+				netdev_err(ppp->dev,
+					   "ppp: compression required but "
+					   "down - pkt dropped.\n");
 			goto drop;
 		}
 		skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
  noskb:
 	spin_unlock_bh(&pch->downl);
 	if (ppp->debug & 1)
-		printk(KERN_ERR "PPP: no memory (fragment)\n");
+		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
 	++ppp->dev->stats.tx_errors;
 	++ppp->nxseq;
 	return 1;	/* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 			/* copy to a new sk_buff with more tailroom */
 			ns = dev_alloc_skb(skb->len + 128);
 			if (!ns) {
-				printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+				netdev_err(ppp->dev, "PPP: no memory "
+					   "(VJ decomp)\n");
 				goto err;
 			}
 			skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 
 		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
 		if (len <= 0) {
-			printk(KERN_DEBUG "PPP: VJ decompression error\n");
+			netdev_printk(KERN_DEBUG, ppp->dev,
+				      "PPP: VJ decompression error\n");
 			goto err;
 		}
 		len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 			goto err;
 
 		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
-			printk(KERN_ERR "PPP: VJ uncompressed error\n");
+			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
 			goto err;
 		}
 		proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 	if (ppp->pass_filter &&
 	    sk_run_filter(skb, ppp->pass_filter) == 0) {
 		if (ppp->debug & 1)
-			printk(KERN_DEBUG "PPP: inbound frame "
-			       "not passed\n");
+			netdev_printk(KERN_DEBUG, ppp->dev,
+				      "PPP: inbound frame "
+				      "not passed\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
 
 		ns = dev_alloc_skb(obuff_size);
 		if (!ns) {
-			printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+			netdev_err(ppp->dev, "ppp_decompress_frame: "
+				   "no memory\n");
 			goto err;
 		}
 		/* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
 	u32 seq = ppp->nextseq;
 	u32 minseq = ppp->minseq;
 	struct sk_buff_head *list = &ppp->mrq;
-	struct sk_buff *p, *next;
+	struct sk_buff *p, *tmp;
 	struct sk_buff *head, *tail;
 	struct sk_buff *skb = NULL;
 	int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@
 		return NULL;
 	head = list->next;
 	tail = NULL;
-	for (p = head; p != (struct sk_buff *) list; p = next) {
-		next = p->next;
+	skb_queue_walk_safe(list, p, tmp) {
+	again:
 		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
 			/* this can't happen, anyway ignore the skb */
-			printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
+			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2006 PPP_MP_CB(p)->sequence, seq); 2015 "seq %u < %u\n",
2007 head = next; 2016 PPP_MP_CB(p)->sequence, seq);
2017 __skb_unlink(p, list);
2018 kfree_skb(p);
2008 continue; 2019 continue;
2009 } 2020 }
2010 if (PPP_MP_CB(p)->sequence != seq) { 2021 if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
2016 lost = 1; 2027 lost = 1;
2017 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? 2028 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
2018 minseq + 1: PPP_MP_CB(p)->sequence; 2029 minseq + 1: PPP_MP_CB(p)->sequence;
2019 next = p; 2030 goto again;
2020 continue;
2021 } 2031 }
2022 2032
2023 /* 2033 /*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
2042 (PPP_MP_CB(head)->BEbits & B)) { 2052 (PPP_MP_CB(head)->BEbits & B)) {
2043 if (len > ppp->mrru + 2) { 2053 if (len > ppp->mrru + 2) {
2044 ++ppp->dev->stats.rx_length_errors; 2054 ++ppp->dev->stats.rx_length_errors;
2045 printk(KERN_DEBUG "PPP: reconstructed packet" 2055 netdev_printk(KERN_DEBUG, ppp->dev,
2046 " is too long (%d)\n", len); 2056 "PPP: reconstructed packet"
2047 } else if (p == head) { 2057 " is too long (%d)\n", len);
2048 /* fragment is complete packet - reuse skb */
2049 tail = p;
2050 skb = skb_get(p);
2051 break;
2052 } else if ((skb = dev_alloc_skb(len)) == NULL) {
2053 ++ppp->dev->stats.rx_missed_errors;
2054 printk(KERN_DEBUG "PPP: no memory for "
2055 "reconstructed packet");
2056 } else { 2058 } else {
2057 tail = p; 2059 tail = p;
2058 break; 2060 break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
2065 * and we haven't found a complete valid packet yet, 2067 * and we haven't found a complete valid packet yet,
2066 * we can discard up to and including this fragment. 2068 * we can discard up to and including this fragment.
2067 */ 2069 */
2068 if (PPP_MP_CB(p)->BEbits & E) 2070 if (PPP_MP_CB(p)->BEbits & E) {
2069 head = next; 2071 struct sk_buff *tmp2;
2070 2072
2073 skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2074 __skb_unlink(p, list);
2075 kfree_skb(p);
2076 }
2077 head = skb_peek(list);
2078 if (!head)
2079 break;
2080 }
2071 ++seq; 2081 ++seq;
2072 } 2082 }
2073 2083
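The rewritten loop discards fragments with the list helpers instead of juggling head/next pointers by hand: skb_queue_walk_safe() tolerates unlinking the skb it is currently visiting, and skb_queue_reverse_walk_from_safe() walks back from the current fragment so everything up to and including it can be freed. A self-contained sketch of the forward-walking discard idiom; drop_through() and its lock-free context are hypothetical:

	#include <linux/skbuff.h>

	/* Free every skb from the head of @list up to and including @stop. */
	static void drop_through(struct sk_buff_head *list, struct sk_buff *stop)
	{
		struct sk_buff *p, *tmp;

		skb_queue_walk_safe(list, p, tmp) {
			bool last = (p == stop);

			__skb_unlink(p, list);	/* safe: tmp already points past p */
			kfree_skb(p);
			if (last)
				break;
		}
	}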
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
2077 signal a receive error. */ 2087 signal a receive error. */
2078 if (PPP_MP_CB(head)->sequence != ppp->nextseq) { 2088 if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2079 if (ppp->debug & 1) 2089 if (ppp->debug & 1)
2080 printk(KERN_DEBUG " missed pkts %u..%u\n", 2090 netdev_printk(KERN_DEBUG, ppp->dev,
2081 ppp->nextseq, 2091 " missed pkts %u..%u\n",
2082 PPP_MP_CB(head)->sequence-1); 2092 ppp->nextseq,
2093 PPP_MP_CB(head)->sequence-1);
2083 ++ppp->dev->stats.rx_dropped; 2094 ++ppp->dev->stats.rx_dropped;
2084 ppp_receive_error(ppp); 2095 ppp_receive_error(ppp);
2085 } 2096 }
2086 2097
2087 if (head != tail) 2098 skb = head;
2088 /* copy to a single skb */ 2099 if (head != tail) {
2089 for (p = head; p != tail->next; p = p->next) 2100 struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2090 skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); 2101 p = skb_queue_next(list, head);
2091 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; 2102 __skb_unlink(skb, list);
2092 head = tail->next; 2103 skb_queue_walk_from_safe(list, p, tmp) {
2093 } 2104 __skb_unlink(p, list);
2105 *fragpp = p;
2106 p->next = NULL;
2107 fragpp = &p->next;
2108
2109 skb->len += p->len;
2110 skb->data_len += p->len;
2111 skb->truesize += p->len;
2112
2113 if (p == tail)
2114 break;
2115 }
2116 } else {
2117 __skb_unlink(skb, list);
2118 }
2094 2119
2095 /* Discard all the skbuffs that we have copied the data out of 2120 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2096 or that we can't use. */
2097 while ((p = list->next) != head) {
2098 __skb_unlink(p, list);
2099 kfree_skb(p);
2100 } 2121 }
2101 2122
2102 return skb; 2123 return skb;
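Instead of allocating one large skb and skb_copy_bits()-ing every fragment into it (the code removed on the left), the reconstructed packet now keeps the head skb's linear data and chains the remaining fragments onto its frag_list, updating len, data_len and truesize as it goes. A sketch of that zero-copy chaining, assuming the fragment has already been unlinked from any queue; chain_frag() is hypothetical:

	#include <linux/skbuff.h>

	/* Append @frag to @head's frag_list without copying its payload. */
	static void chain_frag(struct sk_buff *head, struct sk_buff *frag)
	{
		struct sk_buff **fragpp = &skb_shinfo(head)->frag_list;

		while (*fragpp)			/* find the tail of the chain */
			fragpp = &(*fragpp)->next;

		frag->next = NULL;
		*fragpp = frag;

		head->len      += frag->len;	/* total length seen by callers */
		head->data_len += frag->len;	/* bytes outside the linear area */
		head->truesize += frag->len;	/* memory accounting */
	}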
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2617 ret = register_netdev(dev); 2638 ret = register_netdev(dev);
2618 if (ret != 0) { 2639 if (ret != 0) {
2619 unit_put(&pn->units_idr, unit); 2640 unit_put(&pn->units_idr, unit);
2620 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2641 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
2621 dev->name, ret); 2642 dev->name, ret);
2622 goto out2; 2643 goto out2;
2623 } 2644 }
2624 2645
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
2690 2711
2691 if (!ppp->file.dead || ppp->n_channels) { 2712 if (!ppp->file.dead || ppp->n_channels) {
2692 /* "can't happen" */ 2713 /* "can't happen" */
2693 printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " 2714 netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
2694 "n_channels=%d !\n", ppp, ppp->file.dead, 2715 "but dead=%d n_channels=%d !\n",
2695 ppp->n_channels); 2716 ppp, ppp->file.dead, ppp->n_channels);
2696 return; 2717 return;
2697 } 2718 }
2698 2719
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
2834 2855
2835 if (!pch->file.dead) { 2856 if (!pch->file.dead) {
2836 /* "can't happen" */ 2857 /* "can't happen" */
2837 printk(KERN_ERR "ppp: destroying undead channel %p !\n", 2858 pr_err("ppp: destroying undead channel %p !\n", pch);
2838 pch);
2839 return; 2859 return;
2840 } 2860 }
2841 skb_queue_purge(&pch->file.xq); 2861 skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
2847{ 2867{
2848 /* should never happen */ 2868 /* should never happen */
2849 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 2869 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2850 printk(KERN_ERR "PPP: removing module but units remain!\n"); 2870 pr_err("PPP: removing module but units remain!\n");
2851 unregister_chrdev(PPP_MAJOR, "ppp"); 2871 unregister_chrdev(PPP_MAJOR, "ppp");
2852 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2872 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2853 class_destroy(ppp_class); 2873 class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
2865 2885
2866again: 2886again:
2867 if (!idr_pre_get(p, GFP_KERNEL)) { 2887 if (!idr_pre_get(p, GFP_KERNEL)) {
2868 printk(KERN_ERR "PPP: No free memory for idr\n"); 2888 pr_err("PPP: No free memory for idr\n");
2869 return -ENOMEM; 2889 return -ENOMEM;
2870 } 2890 }
2871 2891
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index 164cfad6ce7..51dfcf8023c 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -175,7 +175,6 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
175 struct pptp_opt *opt = &po->proto.pptp; 175 struct pptp_opt *opt = &po->proto.pptp;
176 struct pptp_gre_header *hdr; 176 struct pptp_gre_header *hdr;
177 unsigned int header_len = sizeof(*hdr); 177 unsigned int header_len = sizeof(*hdr);
178 int err = 0;
179 int islcp; 178 int islcp;
180 int len; 179 int len;
181 unsigned char *data; 180 unsigned char *data;
@@ -190,18 +189,14 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
190 if (sk_pppox(po)->sk_state & PPPOX_DEAD) 189 if (sk_pppox(po)->sk_state & PPPOX_DEAD)
191 goto tx_error; 190 goto tx_error;
192 191
193 { 192 rt = ip_route_output_ports(&init_net, NULL,
194 struct flowi fl = { .oif = 0, 193 opt->dst_addr.sin_addr.s_addr,
195 .nl_u = { 194 opt->src_addr.sin_addr.s_addr,
196 .ip4_u = { 195 0, 0, IPPROTO_GRE,
197 .daddr = opt->dst_addr.sin_addr.s_addr, 196 RT_TOS(0), 0);
198 .saddr = opt->src_addr.sin_addr.s_addr, 197 if (IS_ERR(rt))
199 .tos = RT_TOS(0) } }, 198 goto tx_error;
200 .proto = IPPROTO_GRE }; 199
201 err = ip_route_output_key(&init_net, &rt, &fl);
202 if (err)
203 goto tx_error;
204 }
205 tdev = rt->dst.dev; 200 tdev = rt->dst.dev;
206 201
207 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2; 202 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
@@ -468,21 +463,17 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
468 po->chan.private = sk; 463 po->chan.private = sk;
469 po->chan.ops = &pptp_chan_ops; 464 po->chan.ops = &pptp_chan_ops;
470 465
471 { 466 rt = ip_route_output_ports(&init_net, sk,
472 struct flowi fl = { 467 opt->dst_addr.sin_addr.s_addr,
473 .nl_u = { 468 opt->src_addr.sin_addr.s_addr,
474 .ip4_u = { 469 0, 0,
475 .daddr = opt->dst_addr.sin_addr.s_addr, 470 IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
476 .saddr = opt->src_addr.sin_addr.s_addr, 471 if (IS_ERR(rt)) {
477 .tos = RT_CONN_FLAGS(sk) } }, 472 error = -EHOSTUNREACH;
478 .proto = IPPROTO_GRE }; 473 goto end;
479 security_sk_classify_flow(sk, &fl);
480 if (ip_route_output_key(&init_net, &rt, &fl)) {
481 error = -EHOSTUNREACH;
482 goto end;
483 }
484 sk_setup_caps(sk, &rt->dst);
485 } 474 }
475 sk_setup_caps(sk, &rt->dst);
476
486 po->chan.mtu = dst_mtu(&rt->dst); 477 po->chan.mtu = dst_mtu(&rt->dst);
487 if (!po->chan.mtu) 478 if (!po->chan.mtu)
488 po->chan.mtu = PPP_MTU; 479 po->chan.mtu = PPP_MTU;
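Both pptp call sites collapse a hand-built struct flowi plus ip_route_output_key() into the ip_route_output_ports() helper, which returns the route (or an ERR_PTR) rather than filling a pointer argument. A sketch of the new calling convention for a port-less protocol like GRE, assuming the 2.6.39-era helper signature; demo_route() is hypothetical:

	#include <net/route.h>
	#include <linux/err.h>

	static int demo_route(__be32 daddr, __be32 saddr, struct rtable **prt)
	{
		struct rtable *rt;

		rt = ip_route_output_ports(&init_net, NULL,	/* net, sk */
					   daddr, saddr,
					   0, 0,	/* dport, sport: none for GRE */
					   IPPROTO_GRE,
					   RT_TOS(0), 0);	/* tos, oif */
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		*prt = rt;
		return 0;
	}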
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 1a3584edd79..348b4f1367c 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
379{ 379{
380 struct ql3xxx_port_registers __iomem *port_regs = 380 struct ql3xxx_port_registers __iomem *port_regs =
381 qdev->mem_map_registers; 381 qdev->mem_map_registers;
382 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 382 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
383 383
384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
398 u32 previousBit; 398 u32 previousBit;
399 struct ql3xxx_port_registers __iomem *port_regs = 399 struct ql3xxx_port_registers __iomem *port_regs =
400 qdev->mem_map_registers; 400 qdev->mem_map_registers;
401 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 401 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
402 402
403 /* Clock in a zero, then do the start bit */ 403 /* Clock in a zero, then do the start bit */
404 ql_write_nvram_reg(qdev, spir, 404 ql_write_nvram_reg(qdev, spir,
@@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
467{ 467{
468 struct ql3xxx_port_registers __iomem *port_regs = 468 struct ql3xxx_port_registers __iomem *port_regs =
469 qdev->mem_map_registers; 469 qdev->mem_map_registers;
470 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 470 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
471 471
472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
483 u32 dataBit; 483 u32 dataBit;
484 struct ql3xxx_port_registers __iomem *port_regs = 484 struct ql3xxx_port_registers __iomem *port_regs =
485 qdev->mem_map_registers; 485 qdev->mem_map_registers;
486 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 486 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
487 487
488 /* Read the data bits */ 488 /* Read the data bits */
489 /* The first bit is a dummy. Clock right over it. */ 489 /* The first bit is a dummy. Clock right over it. */
@@ -2460,7 +2460,7 @@ map_error:
2460 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2460 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2461 * in the IOCB plus a chain of outbound address lists (OAL) that 2461 * in the IOCB plus a chain of outbound address lists (OAL) that
2462 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2462 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2463 * will used to point to an OAL when more ALP entries are required. 2463 * will be used to point to an OAL when more ALP entries are required.
2464 * The IOCB is always the top of the chain followed by one or more 2464 * The IOCB is always the top of the chain followed by one or more
2465 * OALs (when necessary). 2465 * OALs (when necessary).
2466 */ 2466 */
@@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3011 u32 value; 3011 u32 value;
3012 struct ql3xxx_port_registers __iomem *port_regs = 3012 struct ql3xxx_port_registers __iomem *port_regs =
3013 qdev->mem_map_registers; 3013 qdev->mem_map_registers;
3014 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3014 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3016 (void __iomem *)port_regs; 3016 (void __iomem *)port_regs;
3017 u32 delay = 10; 3017 u32 delay = 10;
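Each local pointer into the memory-mapped register block gains the __iomem address-space annotation, so sparse can flag any plain dereference; access still goes exclusively through the I/O accessors (here via ql_write_nvram_reg()). The idiom in isolation, with demo_regs as a stand-in register layout:

	#include <linux/io.h>
	#include <linux/types.h>

	struct demo_regs {
		u32 ctrl;
		u32 status;
	};

	static u32 demo_read_status(struct demo_regs __iomem *regs)
	{
		u32 __iomem *status = &regs->status; /* annotated, never dereferenced */

		return ioread32(status);	/* MMIO only through accessors */
	}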
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 44e316fd67b..dc44564ef6f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -867,7 +867,6 @@ struct qlcnic_nic_intr_coalesce {
867#define LINKEVENT_LINKSPEED_MBPS 0 867#define LINKEVENT_LINKSPEED_MBPS 0
868#define LINKEVENT_LINKSPEED_ENCODED 1 868#define LINKEVENT_LINKSPEED_ENCODED 1
869 869
870#define AUTO_FW_RESET_ENABLED 0x01
871/* firmware response header: 870/* firmware response header:
872 * 63:58 - message type 871 * 63:58 - message type
873 * 57:56 - owner 872 * 57:56 - owner
@@ -1133,14 +1132,10 @@ struct qlcnic_eswitch {
1133#define MAX_BW 100 /* % of link speed */ 1132#define MAX_BW 100 /* % of link speed */
1134#define MAX_VLAN_ID 4095 1133#define MAX_VLAN_ID 4095
1135#define MIN_VLAN_ID 2 1134#define MIN_VLAN_ID 2
1136#define MAX_TX_QUEUES 1
1137#define MAX_RX_QUEUES 4
1138#define DEFAULT_MAC_LEARN 1 1135#define DEFAULT_MAC_LEARN 1
1139 1136
1140#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) 1137#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
1141#define IS_VALID_BW(bw) (bw <= MAX_BW) 1138#define IS_VALID_BW(bw) (bw <= MAX_BW)
1142#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1143#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1144 1139
1145struct qlcnic_pci_func_cfg { 1140struct qlcnic_pci_func_cfg {
1146 u16 func_type; 1141 u16 func_type;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 4c14510e2a8..45b2755d6cb 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -1003,7 +1003,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
1003 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1003 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1004 int hw_lro; 1004 int hw_lro;
1005 1005
1006 if (data & ~ETH_FLAG_LRO) 1006 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
1007 return -EINVAL; 1007 return -EINVAL;
1008 1008
1009 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) 1009 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
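The open-coded test of data & ~ETH_FLAG_LRO becomes ethtool_invalid_flags(), which also rejects flag bits the device does not support. A sketch of a .set_flags handler built on the helper; demo_set_flags() is hypothetical, not qlcnic's actual handler:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static int demo_set_flags(struct net_device *netdev, u32 data)
	{
		/* Rejects bits outside ETH_FLAG_LRO and unsupported ones. */
		if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
			return -EINVAL;

		if (data & ETH_FLAG_LRO)
			netdev->features |= NETIF_F_LRO;
		else
			netdev->features &= ~NETIF_F_LRO;

		return 0;
	}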
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 37c04b4fade..cd88c7e1bfa 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -42,7 +42,7 @@ static int use_msi_x = 1;
42module_param(use_msi_x, int, 0444); 42module_param(use_msi_x, int, 0444);
43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); 43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
44 44
45static int auto_fw_reset = AUTO_FW_RESET_ENABLED; 45static int auto_fw_reset = 1;
46module_param(auto_fw_reset, int, 0644); 46module_param(auto_fw_reset, int, 0644);
47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); 47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
48 48
@@ -2959,8 +2959,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2959 if (adapter->need_fw_reset) 2959 if (adapter->need_fw_reset)
2960 goto detach; 2960 goto detach;
2961 2961
2962 if (adapter->reset_context && 2962 if (adapter->reset_context && auto_fw_reset) {
2963 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
2964 qlcnic_reset_hw_context(adapter); 2963 qlcnic_reset_hw_context(adapter);
2965 adapter->netdev->trans_start = jiffies; 2964 adapter->netdev->trans_start = jiffies;
2966 } 2965 }
@@ -2973,7 +2972,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2973 2972
2974 qlcnic_dev_request_reset(adapter); 2973 qlcnic_dev_request_reset(adapter);
2975 2974
2976 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED)) 2975 if (auto_fw_reset)
2977 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); 2976 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2978 2977
2979 dev_info(&netdev->dev, "firmware hang detected\n"); 2978 dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2982,7 +2981,7 @@ detach:
2982 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : 2981 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2983 QLCNIC_DEV_NEED_RESET; 2982 QLCNIC_DEV_NEED_RESET;
2984 2983
2985 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && 2984 if (auto_fw_reset &&
2986 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { 2985 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2987 2986
2988 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); 2987 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
@@ -3654,10 +3653,8 @@ validate_npar_config(struct qlcnic_adapter *adapter,
3654 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) 3653 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3655 return QL_STATUS_INVALID_PARAM; 3654 return QL_STATUS_INVALID_PARAM;
3656 3655
3657 if (!IS_VALID_BW(np_cfg[i].min_bw) 3656 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3658 || !IS_VALID_BW(np_cfg[i].max_bw) 3657 !IS_VALID_BW(np_cfg[i].max_bw))
3659 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3660 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3661 return QL_STATUS_INVALID_PARAM; 3658 return QL_STATUS_INVALID_PARAM;
3662 } 3659 }
3663 return 0; 3660 return 0;
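Besides dropping the queue-count checks made meaningless by the removed MAX_TX_QUEUES/MAX_RX_QUEUES macros, the hunk reflows the condition into the kernel's preferred continuation style, with the boolean operator trailing the broken line:

	/* Preferred: operator at the end of the first line,
	 * continuation aligned under the opening parenthesis. */
	if (!IS_VALID_BW(np_cfg[i].min_bw) ||
	    !IS_VALID_BW(np_cfg[i].max_bw))
		return QL_STATUS_INVALID_PARAM;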
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 27e6f6d43ca..e3ebd90ae65 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
49#include <asm/processor.h> 49#include <asm/processor.h>
50 50
51#define DRV_NAME "r6040" 51#define DRV_NAME "r6040"
52#define DRV_VERSION "0.26" 52#define DRV_VERSION "0.27"
53#define DRV_RELDATE "30May2010" 53#define DRV_RELDATE "23Feb2011"
54 54
55/* PHY CHIP Address */ 55/* PHY CHIP Address */
56#define PHY1_ADDR 1 /* For MAC1 */ 56#define PHY1_ADDR 1 /* For MAC1 */
@@ -69,6 +69,8 @@
69 69
70/* MAC registers */ 70/* MAC registers */
71#define MCR0 0x00 /* Control register 0 */ 71#define MCR0 0x00 /* Control register 0 */
72#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
73#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
72#define MCR1 0x04 /* Control register 1 */ 74#define MCR1 0x04 /* Control register 1 */
73#define MAC_RST 0x0001 /* Reset the MAC */ 75#define MAC_RST 0x0001 /* Reset the MAC */
74#define MBCR 0x08 /* Bus control */ 76#define MBCR 0x08 /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
851{ 853{
852 struct r6040_private *lp = netdev_priv(dev); 854 struct r6040_private *lp = netdev_priv(dev);
853 void __iomem *ioaddr = lp->base; 855 void __iomem *ioaddr = lp->base;
854 u16 *adrp;
855 u16 reg;
856 unsigned long flags; 856 unsigned long flags;
857 struct netdev_hw_addr *ha; 857 struct netdev_hw_addr *ha;
858 int i; 858 int i;
859 u16 *adrp;
860 u16 hash_table[4] = { 0 };
861
862 spin_lock_irqsave(&lp->lock, flags);
859 863
860 /* MAC Address */ 864 /* Keep our MAC Address */
861 adrp = (u16 *)dev->dev_addr; 865 adrp = (u16 *)dev->dev_addr;
862 iowrite16(adrp[0], ioaddr + MID_0L); 866 iowrite16(adrp[0], ioaddr + MID_0L);
863 iowrite16(adrp[1], ioaddr + MID_0M); 867 iowrite16(adrp[1], ioaddr + MID_0M);
864 iowrite16(adrp[2], ioaddr + MID_0H); 868 iowrite16(adrp[2], ioaddr + MID_0H);
865 869
866 /* Promiscous Mode */
867 spin_lock_irqsave(&lp->lock, flags);
868
869 /* Clear AMCP & PROM bits */ 870 /* Clear AMCP & PROM bits */
870 reg = ioread16(ioaddr) & ~0x0120; 871 lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
871 if (dev->flags & IFF_PROMISC) {
872 reg |= 0x0020;
873 lp->mcr0 |= 0x0020;
874 }
875 /* Too many multicast addresses
876 * accept all traffic */
877 else if ((netdev_mc_count(dev) > MCAST_MAX) ||
878 (dev->flags & IFF_ALLMULTI))
879 reg |= 0x0020;
880 872
881 iowrite16(reg, ioaddr); 873 /* Promiscuous mode */
882 spin_unlock_irqrestore(&lp->lock, flags); 874 if (dev->flags & IFF_PROMISC)
875 lp->mcr0 |= MCR0_PROMISC;
883 876
884 /* Build the hash table */ 877 /* Enable multicast hash table function to
885 if (netdev_mc_count(dev) > MCAST_MAX) { 878 * receive all multicast packets. */
886 u16 hash_table[4]; 879 else if (dev->flags & IFF_ALLMULTI) {
887 u32 crc; 880 lp->mcr0 |= MCR0_HASH_EN;
888 881
889 for (i = 0; i < 4; i++) 882 for (i = 0; i < MCAST_MAX ; i++) {
890 hash_table[i] = 0; 883 iowrite16(0, ioaddr + MID_1L + 8 * i);
884 iowrite16(0, ioaddr + MID_1M + 8 * i);
885 iowrite16(0, ioaddr + MID_1H + 8 * i);
886 }
891 887
888 for (i = 0; i < 4; i++)
889 hash_table[i] = 0xffff;
890 }
891 /* Use internal multicast address registers if the number of
892 * multicast addresses is not greater than MCAST_MAX. */
893 else if (netdev_mc_count(dev) <= MCAST_MAX) {
894 i = 0;
892 netdev_for_each_mc_addr(ha, dev) { 895 netdev_for_each_mc_addr(ha, dev) {
893 char *addrs = ha->addr; 896 u16 *adrp = (u16 *) ha->addr;
897 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
898 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
899 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
900 i++;
901 }
902 while (i < MCAST_MAX) {
903 iowrite16(0, ioaddr + MID_1L + 8 * i);
904 iowrite16(0, ioaddr + MID_1M + 8 * i);
905 iowrite16(0, ioaddr + MID_1H + 8 * i);
906 i++;
907 }
908 }
909 /* Otherwise, Enable multicast hash table function. */
910 else {
911 u32 crc;
894 912
895 if (!(*addrs & 1)) 913 lp->mcr0 |= MCR0_HASH_EN;
896 continue; 914
915 for (i = 0; i < MCAST_MAX ; i++) {
916 iowrite16(0, ioaddr + MID_1L + 8 * i);
917 iowrite16(0, ioaddr + MID_1M + 8 * i);
918 iowrite16(0, ioaddr + MID_1H + 8 * i);
919 }
897 920
898 crc = ether_crc_le(6, addrs); 921 /* Build multicast hash table */
922 netdev_for_each_mc_addr(ha, dev) {
923 u8 *addrs = ha->addr;
924
925 crc = ether_crc(ETH_ALEN, addrs);
899 crc >>= 26; 926 crc >>= 26;
900 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 927 hash_table[crc >> 4] |= 1 << (crc & 0xf);
901 } 928 }
902 /* Fill the MAC hash tables with their values */ 929 }
930
931 iowrite16(lp->mcr0, ioaddr + MCR0);
932
933 /* Fill the MAC hash tables with their values */
934 if (lp->mcr0 & MCR0_HASH_EN) {
903 iowrite16(hash_table[0], ioaddr + MAR0); 935 iowrite16(hash_table[0], ioaddr + MAR0);
904 iowrite16(hash_table[1], ioaddr + MAR1); 936 iowrite16(hash_table[1], ioaddr + MAR1);
905 iowrite16(hash_table[2], ioaddr + MAR2); 937 iowrite16(hash_table[2], ioaddr + MAR2);
906 iowrite16(hash_table[3], ioaddr + MAR3); 938 iowrite16(hash_table[3], ioaddr + MAR3);
907 } 939 }
908 /* Multicast Address 1~4 case */ 940
909 i = 0; 941 spin_unlock_irqrestore(&lp->lock, flags);
910 netdev_for_each_mc_addr(ha, dev) {
911 if (i >= MCAST_MAX)
912 break;
913 adrp = (u16 *) ha->addr;
914 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
915 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
916 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
917 i++;
918 }
919 while (i < MCAST_MAX) {
920 iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
921 iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
922 iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
923 i++;
924 }
925} 942}
926 943
927static void netdev_get_drvinfo(struct net_device *dev, 944static void netdev_get_drvinfo(struct net_device *dev,
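The rewritten r6040_multicast_list() now selects one mode under a single lock hold: promiscuous, accept-all multicast via an all-ones hash table, exact MID_1x match registers for up to MCAST_MAX addresses, or a CRC-derived hash table otherwise. The bucket index comes from the top six bits of the big-endian Ethernet CRC of the address; the computation in isolation, with demo_hash_addr() as a hypothetical helper:

	#include <linux/etherdevice.h>
	#include <linux/if_ether.h>

	/* Set the filter bit for one multicast address in a 4 x 16-bit table. */
	static void demo_hash_addr(u16 hash_table[4], unsigned char *addr)
	{
		u32 crc = ether_crc(ETH_ALEN, addr);	/* big-endian CRC-32 */

		crc >>= 26;				/* keep the top 6 bits: 0..63 */
		hash_table[crc >> 4] |= 1 << (crc & 0xf); /* word 0..3, bit 0..15 */
	}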
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bde7d61f193..493b0de3848 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -25,6 +25,7 @@
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28#include <linux/pci-aspm.h>
28 29
29#include <asm/system.h> 30#include <asm/system.h>
30#include <asm/io.h> 31#include <asm/io.h>
@@ -36,6 +37,7 @@
36 37
37#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" 38#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
38#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" 39#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
40#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
39 41
40#ifdef RTL8169_DEBUG 42#ifdef RTL8169_DEBUG
41#define assert(expr) \ 43#define assert(expr) \
@@ -123,6 +125,8 @@ enum mac_version {
123 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D 125 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
124 RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP 126 RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP
125 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP 127 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
128 RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E
129 RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E
126}; 130};
127 131
128#define _R(NAME,MAC,MASK) \ 132#define _R(NAME,MAC,MASK) \
@@ -160,7 +164,9 @@ static const struct {
160 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E 164 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
161 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E 165 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
162 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E 166 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
163 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880) // PCI-E 167 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
168 _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
169 _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880) // PCI-E
164}; 170};
165#undef _R 171#undef _R
166 172
@@ -267,9 +273,15 @@ enum rtl8168_8101_registers {
267#define EPHYAR_REG_MASK 0x1f 273#define EPHYAR_REG_MASK 0x1f
268#define EPHYAR_REG_SHIFT 16 274#define EPHYAR_REG_SHIFT 16
269#define EPHYAR_DATA_MASK 0xffff 275#define EPHYAR_DATA_MASK 0xffff
276 DLLPR = 0xd0,
277#define PM_SWITCH (1 << 6)
270 DBG_REG = 0xd1, 278 DBG_REG = 0xd1,
271#define FIX_NAK_1 (1 << 4) 279#define FIX_NAK_1 (1 << 4)
272#define FIX_NAK_2 (1 << 3) 280#define FIX_NAK_2 (1 << 3)
281 TWSI = 0xd2,
282 MCU = 0xd3,
283#define EN_NDP (1 << 3)
284#define EN_OOB_RESET (1 << 2)
273 EFUSEAR = 0xdc, 285 EFUSEAR = 0xdc,
274#define EFUSEAR_FLAG 0x80000000 286#define EFUSEAR_FLAG 0x80000000
275#define EFUSEAR_WRITE_CMD 0x80000000 287#define EFUSEAR_WRITE_CMD 0x80000000
@@ -526,9 +538,6 @@ struct rtl8169_private {
526 u16 napi_event; 538 u16 napi_event;
527 u16 intr_mask; 539 u16 intr_mask;
528 int phy_1000_ctrl_reg; 540 int phy_1000_ctrl_reg;
529#ifdef CONFIG_R8169_VLAN
530 struct vlan_group *vlgrp;
531#endif
532 541
533 struct mdio_ops { 542 struct mdio_ops {
534 void (*write)(void __iomem *, int, int); 543 void (*write)(void __iomem *, int, int);
@@ -540,7 +549,7 @@ struct rtl8169_private {
540 void (*up)(struct rtl8169_private *); 549 void (*up)(struct rtl8169_private *);
541 } pll_power_ops; 550 } pll_power_ops;
542 551
543 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); 552 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
544 int (*get_settings)(struct net_device *, struct ethtool_cmd *); 553 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
545 void (*phy_reset_enable)(struct rtl8169_private *tp); 554 void (*phy_reset_enable)(struct rtl8169_private *tp);
546 void (*hw_start)(struct net_device *); 555 void (*hw_start)(struct net_device *);
@@ -568,6 +577,7 @@ MODULE_LICENSE("GPL");
568MODULE_VERSION(RTL8169_VERSION); 577MODULE_VERSION(RTL8169_VERSION);
569MODULE_FIRMWARE(FIRMWARE_8168D_1); 578MODULE_FIRMWARE(FIRMWARE_8168D_1);
570MODULE_FIRMWARE(FIRMWARE_8168D_2); 579MODULE_FIRMWARE(FIRMWARE_8168D_2);
580MODULE_FIRMWARE(FIRMWARE_8105E_1);
571 581
572static int rtl8169_open(struct net_device *dev); 582static int rtl8169_open(struct net_device *dev);
573static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, 583static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -617,8 +627,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
617 } 627 }
618} 628}
619 629
620static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) 630static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
621{ 631{
632 void __iomem *ioaddr = tp->mmio_addr;
622 int i; 633 int i;
623 634
624 RTL_W8(ERIDR, cmd); 635 RTL_W8(ERIDR, cmd);
@@ -630,7 +641,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
630 break; 641 break;
631 } 642 }
632 643
633 ocp_write(ioaddr, 0x1, 0x30, 0x00000001); 644 ocp_write(tp, 0x1, 0x30, 0x00000001);
634} 645}
635 646
636#define OOB_CMD_RESET 0x00 647#define OOB_CMD_RESET 0x00
@@ -973,7 +984,8 @@ static void __rtl8169_check_link_status(struct net_device *dev,
973 if (pm) 984 if (pm)
974 pm_request_resume(&tp->pci_dev->dev); 985 pm_request_resume(&tp->pci_dev->dev);
975 netif_carrier_on(dev); 986 netif_carrier_on(dev);
976 netif_info(tp, ifup, dev, "link up\n"); 987 if (net_ratelimit())
988 netif_info(tp, ifup, dev, "link up\n");
977 } else { 989 } else {
978 netif_carrier_off(dev); 990 netif_carrier_off(dev);
979 netif_info(tp, ifdown, dev, "link down\n"); 991 netif_info(tp, ifdown, dev, "link down\n");
@@ -1095,7 +1107,7 @@ static int rtl8169_get_regs_len(struct net_device *dev)
1095} 1107}
1096 1108
1097static int rtl8169_set_speed_tbi(struct net_device *dev, 1109static int rtl8169_set_speed_tbi(struct net_device *dev,
1098 u8 autoneg, u16 speed, u8 duplex) 1110 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1099{ 1111{
1100 struct rtl8169_private *tp = netdev_priv(dev); 1112 struct rtl8169_private *tp = netdev_priv(dev);
1101 void __iomem *ioaddr = tp->mmio_addr; 1113 void __iomem *ioaddr = tp->mmio_addr;
@@ -1118,17 +1130,30 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
1118} 1130}
1119 1131
1120static int rtl8169_set_speed_xmii(struct net_device *dev, 1132static int rtl8169_set_speed_xmii(struct net_device *dev,
1121 u8 autoneg, u16 speed, u8 duplex) 1133 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1122{ 1134{
1123 struct rtl8169_private *tp = netdev_priv(dev); 1135 struct rtl8169_private *tp = netdev_priv(dev);
1124 int giga_ctrl, bmcr; 1136 int giga_ctrl, bmcr;
1137 int rc = -EINVAL;
1138
1139 rtl_writephy(tp, 0x1f, 0x0000);
1125 1140
1126 if (autoneg == AUTONEG_ENABLE) { 1141 if (autoneg == AUTONEG_ENABLE) {
1127 int auto_nego; 1142 int auto_nego;
1128 1143
1129 auto_nego = rtl_readphy(tp, MII_ADVERTISE); 1144 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1130 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL | 1145 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1131 ADVERTISE_100HALF | ADVERTISE_100FULL); 1146 ADVERTISE_100HALF | ADVERTISE_100FULL);
1147
1148 if (adv & ADVERTISED_10baseT_Half)
1149 auto_nego |= ADVERTISE_10HALF;
1150 if (adv & ADVERTISED_10baseT_Full)
1151 auto_nego |= ADVERTISE_10FULL;
1152 if (adv & ADVERTISED_100baseT_Half)
1153 auto_nego |= ADVERTISE_100HALF;
1154 if (adv & ADVERTISED_100baseT_Full)
1155 auto_nego |= ADVERTISE_100FULL;
1156
1132 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1157 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1133 1158
1134 giga_ctrl = rtl_readphy(tp, MII_CTRL1000); 1159 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
@@ -1142,27 +1167,22 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1142 (tp->mac_version != RTL_GIGA_MAC_VER_13) && 1167 (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
1143 (tp->mac_version != RTL_GIGA_MAC_VER_14) && 1168 (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
1144 (tp->mac_version != RTL_GIGA_MAC_VER_15) && 1169 (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
1145 (tp->mac_version != RTL_GIGA_MAC_VER_16)) { 1170 (tp->mac_version != RTL_GIGA_MAC_VER_16) &&
1146 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; 1171 (tp->mac_version != RTL_GIGA_MAC_VER_29) &&
1147 } else { 1172 (tp->mac_version != RTL_GIGA_MAC_VER_30)) {
1173 if (adv & ADVERTISED_1000baseT_Half)
1174 giga_ctrl |= ADVERTISE_1000HALF;
1175 if (adv & ADVERTISED_1000baseT_Full)
1176 giga_ctrl |= ADVERTISE_1000FULL;
1177 } else if (adv & (ADVERTISED_1000baseT_Half |
1178 ADVERTISED_1000baseT_Full)) {
1148 netif_info(tp, link, dev, 1179 netif_info(tp, link, dev,
1149 "PHY does not support 1000Mbps\n"); 1180 "PHY does not support 1000Mbps\n");
1181 goto out;
1150 } 1182 }
1151 1183
1152 bmcr = BMCR_ANENABLE | BMCR_ANRESTART; 1184 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1153 1185
1154 if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
1155 (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
1156 (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
1157 /*
1158 * Wake up the PHY.
1159 * Vendor specific (0x1f) and reserved (0x0e) MII
1160 * registers.
1161 */
1162 rtl_writephy(tp, 0x1f, 0x0000);
1163 rtl_writephy(tp, 0x0e, 0x0000);
1164 }
1165
1166 rtl_writephy(tp, MII_ADVERTISE, auto_nego); 1186 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1167 rtl_writephy(tp, MII_CTRL1000, giga_ctrl); 1187 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1168 } else { 1188 } else {
@@ -1173,12 +1193,10 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1173 else if (speed == SPEED_100) 1193 else if (speed == SPEED_100)
1174 bmcr = BMCR_SPEED100; 1194 bmcr = BMCR_SPEED100;
1175 else 1195 else
1176 return -EINVAL; 1196 goto out;
1177 1197
1178 if (duplex == DUPLEX_FULL) 1198 if (duplex == DUPLEX_FULL)
1179 bmcr |= BMCR_FULLDPLX; 1199 bmcr |= BMCR_FULLDPLX;
1180
1181 rtl_writephy(tp, 0x1f, 0x0000);
1182 } 1200 }
1183 1201
1184 tp->phy_1000_ctrl_reg = giga_ctrl; 1202 tp->phy_1000_ctrl_reg = giga_ctrl;
@@ -1196,16 +1214,18 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1196 } 1214 }
1197 } 1215 }
1198 1216
1199 return 0; 1217 rc = 0;
1218out:
1219 return rc;
1200} 1220}
1201 1221
1202static int rtl8169_set_speed(struct net_device *dev, 1222static int rtl8169_set_speed(struct net_device *dev,
1203 u8 autoneg, u16 speed, u8 duplex) 1223 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1204{ 1224{
1205 struct rtl8169_private *tp = netdev_priv(dev); 1225 struct rtl8169_private *tp = netdev_priv(dev);
1206 int ret; 1226 int ret;
1207 1227
1208 ret = tp->set_speed(dev, autoneg, speed, duplex); 1228 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1209 1229
1210 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) 1230 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1211 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); 1231 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
@@ -1220,7 +1240,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1220 int ret; 1240 int ret;
1221 1241
1222 spin_lock_irqsave(&tp->lock, flags); 1242 spin_lock_irqsave(&tp->lock, flags);
1223 ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex); 1243 ret = rtl8169_set_speed(dev,
1244 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1224 spin_unlock_irqrestore(&tp->lock, flags); 1245 spin_unlock_irqrestore(&tp->lock, flags);
1225 1246
1226 return ret; 1247 return ret;
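set_speed now carries the ethtool advertising mask down to the PHY code instead of unconditionally advertising every 10/100 mode, so ethtool -s ... advertise takes effect. The core of the translation from ethtool ADVERTISED_* bits to MII_ADVERTISE register bits has this shape (pause and 1000BASE-T handling omitted; demo_adv_to_mii() is hypothetical):

	#include <linux/ethtool.h>
	#include <linux/mii.h>

	/* Map ethtool advertising bits onto MII_ADVERTISE register bits. */
	static u16 demo_adv_to_mii(u32 adv)
	{
		u16 reg = 0;

		if (adv & ADVERTISED_10baseT_Half)
			reg |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			reg |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			reg |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			reg |= ADVERTISE_100FULL;

		return reg;
	}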
@@ -1254,8 +1275,6 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
1254 return 0; 1275 return 0;
1255} 1276}
1256 1277
1257#ifdef CONFIG_R8169_VLAN
1258
1259static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, 1278static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1260 struct sk_buff *skb) 1279 struct sk_buff *skb)
1261{ 1280{
@@ -1263,64 +1282,37 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1263 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 1282 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1264} 1283}
1265 1284
1266static void rtl8169_vlan_rx_register(struct net_device *dev, 1285#define NETIF_F_HW_VLAN_TX_RX (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
1267 struct vlan_group *grp) 1286
1287static void rtl8169_vlan_mode(struct net_device *dev)
1268{ 1288{
1269 struct rtl8169_private *tp = netdev_priv(dev); 1289 struct rtl8169_private *tp = netdev_priv(dev);
1270 void __iomem *ioaddr = tp->mmio_addr; 1290 void __iomem *ioaddr = tp->mmio_addr;
1271 unsigned long flags; 1291 unsigned long flags;
1272 1292
1273 spin_lock_irqsave(&tp->lock, flags); 1293 spin_lock_irqsave(&tp->lock, flags);
1274 tp->vlgrp = grp; 1294 if (dev->features & NETIF_F_HW_VLAN_RX)
1275 /*
1276 * Do not disable RxVlan on 8110SCd.
1277 */
1278 if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
1279 tp->cp_cmd |= RxVlan; 1295 tp->cp_cmd |= RxVlan;
1280 else 1296 else
1281 tp->cp_cmd &= ~RxVlan; 1297 tp->cp_cmd &= ~RxVlan;
1282 RTL_W16(CPlusCmd, tp->cp_cmd); 1298 RTL_W16(CPlusCmd, tp->cp_cmd);
1299 /* PCI commit */
1283 RTL_R16(CPlusCmd); 1300 RTL_R16(CPlusCmd);
1284 spin_unlock_irqrestore(&tp->lock, flags); 1301 spin_unlock_irqrestore(&tp->lock, flags);
1302
1303 dev->vlan_features = dev->features &~ NETIF_F_HW_VLAN_TX_RX;
1285} 1304}
1286 1305
1287static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1306static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1288 struct sk_buff *skb, int polling)
1289{ 1307{
1290 u32 opts2 = le32_to_cpu(desc->opts2); 1308 u32 opts2 = le32_to_cpu(desc->opts2);
1291 struct vlan_group *vlgrp = tp->vlgrp;
1292 int ret;
1293 1309
1294 if (vlgrp && (opts2 & RxVlanTag)) { 1310 if (opts2 & RxVlanTag)
1295 u16 vtag = swab16(opts2 & 0xffff); 1311 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1296 1312
1297 if (likely(polling))
1298 vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
1299 else
1300 __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
1301 ret = 0;
1302 } else
1303 ret = -1;
1304 desc->opts2 = 0; 1313 desc->opts2 = 0;
1305 return ret;
1306}
1307
1308#else /* !CONFIG_R8169_VLAN */
1309
1310static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1311 struct sk_buff *skb)
1312{
1313 return 0;
1314}
1315
1316static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1317 struct sk_buff *skb, int polling)
1318{
1319 return -1;
1320} 1314}
1321 1315
1322#endif
1323
1324static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) 1316static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1325{ 1317{
1326 struct rtl8169_private *tp = netdev_priv(dev); 1318 struct rtl8169_private *tp = netdev_priv(dev);
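With the vlan_group plumbing gone, the receive path simply stamps the hardware-extracted tag into the skb via __vlan_hwaccel_put_tag() and hands every skb to napi_gro_receive()/netif_rx(); the core then strips or delivers the tag according to NETIF_F_HW_VLAN_RX. A sketch against the opts2 layout used above, assuming the two-argument 2.6.38-era helper; DEMO_RX_VLAN_TAG is a placeholder for the descriptor's RxVlanTag bit:

	#include <linux/if_vlan.h>
	#include <linux/swab.h>

	#define DEMO_RX_VLAN_TAG	(1u << 16)	/* placeholder flag bit */

	static void demo_rx_vlan(struct sk_buff *skb, u32 opts2)
	{
		if (opts2 & DEMO_RX_VLAN_TAG)
			/* The tag sits byte-swapped in the low 16 bits. */
			__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
	}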
@@ -1491,6 +1483,28 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1491 } 1483 }
1492} 1484}
1493 1485
1486static int rtl8169_set_flags(struct net_device *dev, u32 data)
1487{
1488 struct rtl8169_private *tp = netdev_priv(dev);
1489 unsigned long old_feat = dev->features;
1490 int rc;
1491
1492 if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
1493 !(data & ETH_FLAG_RXVLAN)) {
1494 netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
1495 return -EINVAL;
1496 }
1497
1498 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
1499 if (rc)
1500 return rc;
1501
1502 if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
1503 rtl8169_vlan_mode(dev);
1504
1505 return 0;
1506}
1507
1494static const struct ethtool_ops rtl8169_ethtool_ops = { 1508static const struct ethtool_ops rtl8169_ethtool_ops = {
1495 .get_drvinfo = rtl8169_get_drvinfo, 1509 .get_drvinfo = rtl8169_get_drvinfo,
1496 .get_regs_len = rtl8169_get_regs_len, 1510 .get_regs_len = rtl8169_get_regs_len,
@@ -1510,6 +1524,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1510 .get_strings = rtl8169_get_strings, 1524 .get_strings = rtl8169_get_strings,
1511 .get_sset_count = rtl8169_get_sset_count, 1525 .get_sset_count = rtl8169_get_sset_count,
1512 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1526 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1527 .set_flags = rtl8169_set_flags,
1528 .get_flags = ethtool_op_get_flags,
1513}; 1529};
1514 1530
1515static void rtl8169_get_mac_version(struct rtl8169_private *tp, 1531static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1558,6 +1574,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1558 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1574 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1559 1575
1560 /* 8101 family. */ 1576 /* 8101 family. */
1577 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1578 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
1579 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
1561 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, 1580 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
1562 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, 1581 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
1563 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, 1582 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
@@ -2434,6 +2453,33 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2434 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2453 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2435} 2454}
2436 2455
2456static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
2457{
2458 static const struct phy_reg phy_reg_init[] = {
2459 { 0x1f, 0x0005 },
2460 { 0x1a, 0x0000 },
2461 { 0x1f, 0x0000 },
2462
2463 { 0x1f, 0x0004 },
2464 { 0x1c, 0x0000 },
2465 { 0x1f, 0x0000 },
2466
2467 { 0x1f, 0x0001 },
2468 { 0x15, 0x7701 },
2469 { 0x1f, 0x0000 }
2470 };
2471
2472 /* Disable ALDPS before ram code */
2473 rtl_writephy(tp, 0x1f, 0x0000);
2474 rtl_writephy(tp, 0x18, 0x0310);
2475 msleep(100);
2476
2477 if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
2478 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2479
2480 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2481}
2482
2437static void rtl_hw_phy_config(struct net_device *dev) 2483static void rtl_hw_phy_config(struct net_device *dev)
2438{ 2484{
2439 struct rtl8169_private *tp = netdev_priv(dev); 2485 struct rtl8169_private *tp = netdev_priv(dev);
@@ -2501,6 +2547,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
2501 case RTL_GIGA_MAC_VER_28: 2547 case RTL_GIGA_MAC_VER_28:
2502 rtl8168d_4_hw_phy_config(tp); 2548 rtl8168d_4_hw_phy_config(tp);
2503 break; 2549 break;
2550 case RTL_GIGA_MAC_VER_29:
2551 case RTL_GIGA_MAC_VER_30:
2552 rtl8105e_hw_phy_config(tp);
2553 break;
2504 2554
2505 default: 2555 default:
2506 break; 2556 break;
@@ -2632,11 +2682,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
2632 2682
2633 rtl8169_phy_reset(dev, tp); 2683 rtl8169_phy_reset(dev, tp);
2634 2684
2635 /* 2685 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
2636 * rtl8169_set_speed_xmii takes good care of the Fast Ethernet 2686 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
2637 * only 8101. Don't panic. 2687 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
2638 */ 2688 (tp->mii.supports_gmii ?
2639 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL); 2689 ADVERTISED_1000baseT_Half |
2690 ADVERTISED_1000baseT_Full : 0));
2640 2691
2641 if (RTL_R8(PHYstatus) & TBI_Enable) 2692 if (RTL_R8(PHYstatus) & TBI_Enable)
2642 netif_info(tp, link, dev, "TBI auto-negotiating\n"); 2693 netif_info(tp, link, dev, "TBI auto-negotiating\n");
@@ -2792,9 +2843,6 @@ static const struct net_device_ops rtl8169_netdev_ops = {
2792 .ndo_set_mac_address = rtl_set_mac_address, 2843 .ndo_set_mac_address = rtl_set_mac_address,
2793 .ndo_do_ioctl = rtl8169_ioctl, 2844 .ndo_do_ioctl = rtl8169_ioctl,
2794 .ndo_set_multicast_list = rtl_set_rx_mode, 2845 .ndo_set_multicast_list = rtl_set_rx_mode,
2795#ifdef CONFIG_R8169_VLAN
2796 .ndo_vlan_rx_register = rtl8169_vlan_rx_register,
2797#endif
2798#ifdef CONFIG_NET_POLL_CONTROLLER 2846#ifdef CONFIG_NET_POLL_CONTROLLER
2799 .ndo_poll_controller = rtl8169_netpoll, 2847 .ndo_poll_controller = rtl8169_netpoll,
2800#endif 2848#endif
@@ -2867,8 +2915,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2867{ 2915{
2868 void __iomem *ioaddr = tp->mmio_addr; 2916 void __iomem *ioaddr = tp->mmio_addr;
2869 2917
2870 if (tp->mac_version == RTL_GIGA_MAC_VER_27) 2918 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
2919 (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
2920 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
2871 return; 2921 return;
2922 }
2872 2923
2873 if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || 2924 if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
2874 (tp->mac_version == RTL_GIGA_MAC_VER_24)) && 2925 (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2890,6 +2941,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2890 switch (tp->mac_version) { 2941 switch (tp->mac_version) {
2891 case RTL_GIGA_MAC_VER_25: 2942 case RTL_GIGA_MAC_VER_25:
2892 case RTL_GIGA_MAC_VER_26: 2943 case RTL_GIGA_MAC_VER_26:
2944 case RTL_GIGA_MAC_VER_27:
2945 case RTL_GIGA_MAC_VER_28:
2893 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); 2946 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
2894 break; 2947 break;
2895 } 2948 }
@@ -2899,12 +2952,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
2899{ 2952{
2900 void __iomem *ioaddr = tp->mmio_addr; 2953 void __iomem *ioaddr = tp->mmio_addr;
2901 2954
2902 if (tp->mac_version == RTL_GIGA_MAC_VER_27) 2955 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
2956 (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
2957 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
2903 return; 2958 return;
2959 }
2904 2960
2905 switch (tp->mac_version) { 2961 switch (tp->mac_version) {
2906 case RTL_GIGA_MAC_VER_25: 2962 case RTL_GIGA_MAC_VER_25:
2907 case RTL_GIGA_MAC_VER_26: 2963 case RTL_GIGA_MAC_VER_26:
2964 case RTL_GIGA_MAC_VER_27:
2965 case RTL_GIGA_MAC_VER_28:
2908 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 2966 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
2909 break; 2967 break;
2910 } 2968 }
@@ -2939,6 +2997,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
2939 case RTL_GIGA_MAC_VER_09: 2997 case RTL_GIGA_MAC_VER_09:
2940 case RTL_GIGA_MAC_VER_10: 2998 case RTL_GIGA_MAC_VER_10:
2941 case RTL_GIGA_MAC_VER_16: 2999 case RTL_GIGA_MAC_VER_16:
3000 case RTL_GIGA_MAC_VER_29:
3001 case RTL_GIGA_MAC_VER_30:
2942 ops->down = r810x_pll_power_down; 3002 ops->down = r810x_pll_power_down;
2943 ops->up = r810x_pll_power_up; 3003 ops->up = r810x_pll_power_up;
2944 break; 3004 break;
@@ -3008,6 +3068,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3008 mii->reg_num_mask = 0x1f; 3068 mii->reg_num_mask = 0x1f;
3009 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); 3069 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
3010 3070
 3071 /* disable ASPM completely, as it causes devices to randomly stop working
 3072 * as well as full system hangs for some PCIe users */
3073 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3074 PCIE_LINK_STATE_CLKPM);
3075
3011 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 3076 /* enable device (incl. PCI PM wakeup and hotplug setup) */
3012 rc = pci_enable_device(pdev); 3077 rc = pci_enable_device(pdev);
3013 if (rc < 0) { 3078 if (rc < 0) {
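Opting out of ASPM has to happen before the device is enabled; the probe-time ordering, reduced to a sketch (demo_probe() is hypothetical, error handling elided):

	#include <linux/pci.h>
	#include <linux/pci-aspm.h>

	static int demo_probe(struct pci_dev *pdev)
	{
		/* Drop every ASPM link state before touching the device. */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
					     PCIE_LINK_STATE_L1 |
					     PCIE_LINK_STATE_CLKPM);

		return pci_enable_device(pdev);
	}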
@@ -3041,7 +3106,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3041 goto err_out_mwi_2; 3106 goto err_out_mwi_2;
3042 } 3107 }
3043 3108
3044 tp->cp_cmd = PCIMulRW | RxChkSum; 3109 tp->cp_cmd = RxChkSum;
3045 3110
3046 if ((sizeof(dma_addr_t) > 4) && 3111 if ((sizeof(dma_addr_t) > 4) &&
3047 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { 3112 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3086,6 +3151,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3086 /* Identify chip attached to board */ 3151 /* Identify chip attached to board */
3087 rtl8169_get_mac_version(tp, ioaddr); 3152 rtl8169_get_mac_version(tp, ioaddr);
3088 3153
3154 /*
3155 * Pretend we are using VLANs; This bypasses a nasty bug where
3156 * Interrupts stop flowing on high load on 8110SCd controllers.
3157 */
3158 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3159 tp->cp_cmd |= RxVlan;
3160
3089 rtl_init_mdio_ops(tp); 3161 rtl_init_mdio_ops(tp);
3090 rtl_init_pll_power_ops(tp); 3162 rtl_init_pll_power_ops(tp);
3091 3163
@@ -3154,10 +3226,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3154 3226
3155 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 3227 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
3156 3228
3157#ifdef CONFIG_R8169_VLAN 3229 dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO;
3158 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3159#endif
3160 dev->features |= NETIF_F_GRO;
3161 3230
3162 tp->intr_mask = 0xffff; 3231 tp->intr_mask = 0xffff;
3163 tp->hw_start = cfg->hw_start; 3232 tp->hw_start = cfg->hw_start;
@@ -3189,6 +3258,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3189 if (pci_dev_run_wake(pdev)) 3258 if (pci_dev_run_wake(pdev))
3190 pm_runtime_put_noidle(&pdev->dev); 3259 pm_runtime_put_noidle(&pdev->dev);
3191 3260
3261 netif_carrier_off(dev);
3262
3192out: 3263out:
3193 return rc; 3264 return rc;
3194 3265
@@ -3273,12 +3344,7 @@ static int rtl8169_open(struct net_device *dev)
3273 3344
3274 rtl8169_init_phy(dev, tp); 3345 rtl8169_init_phy(dev, tp);
3275 3346
3276 /* 3347 rtl8169_vlan_mode(dev);
3277 * Pretend we are using VLANs; This bypasses a nasty bug where
3278 * Interrupts stop flowing on high load on 8110SCd controllers.
3279 */
3280 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3281 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3282 3348
3283 rtl_pll_power_up(tp); 3349 rtl_pll_power_up(tp);
3284 3350
@@ -3315,7 +3381,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3315 /* Disable interrupts */ 3381 /* Disable interrupts */
3316 rtl8169_irq_mask_and_ack(ioaddr); 3382 rtl8169_irq_mask_and_ack(ioaddr);
3317 3383
3318 if (tp->mac_version == RTL_GIGA_MAC_VER_28) { 3384 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3385 tp->mac_version == RTL_GIGA_MAC_VER_28) {
3319 while (RTL_R8(TxPoll) & NPQ) 3386 while (RTL_R8(TxPoll) & NPQ)
3320 udelay(20); 3387 udelay(20);
3321 3388
@@ -3757,7 +3824,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
3757 RTL_W16(IntrMitigate, 0x5151); 3824 RTL_W16(IntrMitigate, 0x5151);
3758 3825
3759 /* Work around for RxFIFO overflow. */ 3826 /* Work around for RxFIFO overflow. */
3760 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 3827 if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
3828 tp->mac_version == RTL_GIGA_MAC_VER_22) {
3761 tp->intr_event |= RxFIFOOver | PCSTimeout; 3829 tp->intr_event |= RxFIFOOver | PCSTimeout;
3762 tp->intr_event &= ~RxOverflow; 3830 tp->intr_event &= ~RxOverflow;
3763 } 3831 }
@@ -3843,8 +3911,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
3843 Cxpl_dbg_sel | \ 3911 Cxpl_dbg_sel | \
3844 ASF | \ 3912 ASF | \
3845 PktCntrDisable | \ 3913 PktCntrDisable | \
3846 PCIDAC | \ 3914 Mac_dbgo_sel)
3847 PCIMulRW)
3848 3915
3849static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) 3916static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3850{ 3917{
@@ -3874,8 +3941,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3874 if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) 3941 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
3875 RTL_W8(Config1, cfg1 & ~LEDS0); 3942 RTL_W8(Config1, cfg1 & ~LEDS0);
3876 3943
3877 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
3878
3879 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); 3944 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
3880} 3945}
3881 3946
@@ -3887,8 +3952,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
3887 3952
3888 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); 3953 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
3889 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 3954 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3890
3891 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
3892} 3955}
3893 3956
3894static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) 3957static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3898,6 +3961,37 @@ static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
3898 rtl_ephy_write(ioaddr, 0x03, 0xc2f9); 3961 rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
3899} 3962}
3900 3963
3964static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3965{
3966 static const struct ephy_info e_info_8105e_1[] = {
3967 { 0x07, 0, 0x4000 },
3968 { 0x19, 0, 0x0200 },
3969 { 0x19, 0, 0x0020 },
3970 { 0x1e, 0, 0x2000 },
3971 { 0x03, 0, 0x0001 },
3972 { 0x19, 0, 0x0100 },
3973 { 0x19, 0, 0x0004 },
3974 { 0x0a, 0, 0x0020 }
3975 };
3976
 3977 /* Force LAN exit from ASPM if Rx/Tx are not idle */
3978 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
3979
3980 /* disable Early Tally Counter */
3981 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
3982
3983 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
3984 RTL_W8(DLLPR, RTL_R8(DLLPR) | PM_SWITCH);
3985
3986 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
3987}
3988
3989static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
3990{
3991 rtl_hw_start_8105e_1(ioaddr, pdev);
3992 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
3993}
3994
3901static void rtl_hw_start_8101(struct net_device *dev) 3995static void rtl_hw_start_8101(struct net_device *dev)
3902{ 3996{
3903 struct rtl8169_private *tp = netdev_priv(dev); 3997 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3914,6 +4008,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
3914 } 4008 }
3915 } 4009 }
3916 4010
4011 RTL_W8(Cfg9346, Cfg9346_Unlock);
4012
3917 switch (tp->mac_version) { 4013 switch (tp->mac_version) {
3918 case RTL_GIGA_MAC_VER_07: 4014 case RTL_GIGA_MAC_VER_07:
3919 rtl_hw_start_8102e_1(ioaddr, pdev); 4015 rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3926,16 +4022,22 @@ static void rtl_hw_start_8101(struct net_device *dev)
3926 case RTL_GIGA_MAC_VER_09: 4022 case RTL_GIGA_MAC_VER_09:
3927 rtl_hw_start_8102e_2(ioaddr, pdev); 4023 rtl_hw_start_8102e_2(ioaddr, pdev);
3928 break; 4024 break;
4025
4026 case RTL_GIGA_MAC_VER_29:
4027 rtl_hw_start_8105e_1(ioaddr, pdev);
4028 break;
4029 case RTL_GIGA_MAC_VER_30:
4030 rtl_hw_start_8105e_2(ioaddr, pdev);
4031 break;
3929 } 4032 }
3930 4033
3931 RTL_W8(Cfg9346, Cfg9346_Unlock); 4034 RTL_W8(Cfg9346, Cfg9346_Lock);
3932 4035
3933 RTL_W8(MaxTxPacketSize, TxPacketMax); 4036 RTL_W8(MaxTxPacketSize, TxPacketMax);
3934 4037
3935 rtl_set_rx_max_size(ioaddr, rx_buf_sz); 4038 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3936 4039
3937 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; 4040 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
3938
3939 RTL_W16(CPlusCmd, tp->cp_cmd); 4041 RTL_W16(CPlusCmd, tp->cp_cmd);
3940 4042
3941 RTL_W16(IntrMitigate, 0x0000); 4043 RTL_W16(IntrMitigate, 0x0000);
@@ -3945,14 +4047,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
3945 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 4047 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3946 rtl_set_rx_tx_config_registers(tp); 4048 rtl_set_rx_tx_config_registers(tp);
3947 4049
3948 RTL_W8(Cfg9346, Cfg9346_Lock);
3949
3950 RTL_R8(IntrMask); 4050 RTL_R8(IntrMask);
3951 4051
3952 rtl_set_rx_mode(dev); 4052 rtl_set_rx_mode(dev);
3953 4053
3954 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3955
3956 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); 4054 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
3957 4055
3958 RTL_W16(IntrMask, tp->intr_event); 4056 RTL_W16(IntrMask, tp->intr_event);
@@ -4589,12 +4687,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4589 skb_put(skb, pkt_size); 4687 skb_put(skb, pkt_size);
4590 skb->protocol = eth_type_trans(skb, dev); 4688 skb->protocol = eth_type_trans(skb, dev);
4591 4689
4592 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) { 4690 rtl8169_rx_vlan_tag(desc, skb);
4593 if (likely(polling)) 4691
4594 napi_gro_receive(&tp->napi, skb); 4692 if (likely(polling))
4595 else 4693 napi_gro_receive(&tp->napi, skb);
4596 netif_rx(skb); 4694 else
4597 } 4695 netif_rx(skb);
4598 4696
4599 dev->stats.rx_bytes += pkt_size; 4697 dev->stats.rx_bytes += pkt_size;
4600 dev->stats.rx_packets++; 4698 dev->stats.rx_packets++;
@@ -4639,12 +4737,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4639 break; 4737 break;
4640 } 4738 }
4641 4739
4642 /* Work around for rx fifo overflow */ 4740 if (unlikely(status & RxFIFOOver)) {
4643 if (unlikely(status & RxFIFOOver) && 4741 switch (tp->mac_version) {
4644 (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 4742 /* Work around for rx fifo overflow */
4645 netif_stop_queue(dev); 4743 case RTL_GIGA_MAC_VER_11:
4646 rtl8169_tx_timeout(dev); 4744 case RTL_GIGA_MAC_VER_22:
4647 break; 4745 case RTL_GIGA_MAC_VER_26:
4746 netif_stop_queue(dev);
4747 rtl8169_tx_timeout(dev);
4748 goto done;
4749 /* Testers needed. */
4750 case RTL_GIGA_MAC_VER_17:
4751 case RTL_GIGA_MAC_VER_19:
4752 case RTL_GIGA_MAC_VER_20:
4753 case RTL_GIGA_MAC_VER_21:
4754 case RTL_GIGA_MAC_VER_23:
4755 case RTL_GIGA_MAC_VER_24:
4756 case RTL_GIGA_MAC_VER_27:
4757 case RTL_GIGA_MAC_VER_28:
4758 /* Experimental science. Pktgen proof. */
4759 case RTL_GIGA_MAC_VER_12:
4760 case RTL_GIGA_MAC_VER_25:
4761 if (status == RxFIFOOver)
4762 goto done;
4763 break;
4764 default:
4765 break;
4766 }
4648 } 4767 }
4649 4768
4650 if (unlikely(status & SYSErr)) { 4769 if (unlikely(status & SYSErr)) {
@@ -4680,7 +4799,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4680 (status & RxFIFOOver) ? (status | RxOverflow) : status); 4799 (status & RxFIFOOver) ? (status | RxOverflow) : status);
4681 status = RTL_R16(IntrStatus); 4800 status = RTL_R16(IntrStatus);
4682 } 4801 }
4683 4802done:
4684 return IRQ_RETVAL(handled); 4803 return IRQ_RETVAL(handled);
4685} 4804}
4686 4805
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 44150f2f7bf..26afbaae23f 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -382,7 +382,7 @@ static void rionet_remove(struct rio_dev *rdev)
382 struct rionet_peer *peer, *tmp; 382 struct rionet_peer *peer, *tmp;
383 383
384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
385 __ilog2(sizeof(void *)) + 4 : 0); 385 __fls(sizeof(void *)) + 4 : 0);
386 unregister_netdev(ndev); 386 unregister_netdev(ndev);
387 free_netdev(ndev); 387 free_netdev(ndev);
388 388
@@ -450,7 +450,7 @@ static int rionet_setup_netdev(struct rio_mport *mport)
450 } 450 }
451 451
452 rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL, 452 rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
453 mport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0); 453 mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
454 if (!rionet_active) { 454 if (!rionet_active) {
455 rc = -ENOMEM; 455 rc = -ENOMEM;
456 goto out; 456 goto out;
@@ -571,5 +571,5 @@ static void __exit rionet_exit(void)
571 rio_unregister_driver(&rionet_driver); 571 rio_unregister_driver(&rionet_driver);
572} 572}
573 573
574module_init(rionet_init); 574late_initcall(rionet_init);
575module_exit(rionet_exit); 575module_exit(rionet_exit);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 39c17cecb8b..356e74d20b8 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -6726,7 +6726,7 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6726 int rc = 0; 6726 int rc = 0;
6727 int changed = 0; 6727 int changed = 0;
6728 6728
6729 if (data & ~ETH_FLAG_LRO) 6729 if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO))
6730 return -EINVAL; 6730 return -EINVAL;
6731 6731
6732 if (data & ETH_FLAG_LRO) { 6732 if (data & ETH_FLAG_LRO) {
@@ -7556,7 +7556,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7556 */ 7556 */
7557 skb->ip_summed = CHECKSUM_UNNECESSARY; 7557 skb->ip_summed = CHECKSUM_UNNECESSARY;
7558 if (ring_data->lro) { 7558 if (ring_data->lro) {
7559 u32 tcp_len; 7559 u32 tcp_len = 0;
7560 u8 *tcp; 7560 u8 *tcp;
7561 int ret = 0; 7561 int ret = 0;
7562 7562
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac74384..d890679e4c4 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include <linux/cpu_rmap.h>
24#include "net_driver.h" 25#include "net_driver.h"
25#include "efx.h" 26#include "efx.h"
26#include "nic.h" 27#include "nic.h"
@@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
307 channel->irq_mod_score = 0; 308 channel->irq_mod_score = 0;
308 } 309 }
309 310
311 efx_filter_rfs_expire(channel);
312
310 /* There is no race here; although napi_disable() will 313 /* There is no race here; although napi_disable() will
311 * only wait for napi_complete(), this isn't a problem 314 * only wait for napi_complete(), this isn't a problem
312 * since efx_channel_processed() will have no effect if 315 * since efx_channel_processed() will have no effect if
@@ -673,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx)
673 676
674 efx_for_each_channel_rx_queue(rx_queue, channel) 677 efx_for_each_channel_rx_queue(rx_queue, channel)
675 efx_fini_rx_queue(rx_queue); 678 efx_fini_rx_queue(rx_queue);
676 efx_for_each_channel_tx_queue(tx_queue, channel) 679 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
677 efx_fini_tx_queue(tx_queue); 680 efx_fini_tx_queue(tx_queue);
678 efx_fini_eventq(channel); 681 efx_fini_eventq(channel);
679 } 682 }
@@ -689,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel)
689 692
690 efx_for_each_channel_rx_queue(rx_queue, channel) 693 efx_for_each_channel_rx_queue(rx_queue, channel)
691 efx_remove_rx_queue(rx_queue); 694 efx_remove_rx_queue(rx_queue);
692 efx_for_each_channel_tx_queue(tx_queue, channel) 695 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
693 efx_remove_tx_queue(tx_queue); 696 efx_remove_tx_queue(tx_queue);
694 efx_remove_eventq(channel); 697 efx_remove_eventq(channel);
695} 698}
@@ -1051,6 +1054,7 @@ static int efx_init_io(struct efx_nic *efx)
1051{ 1054{
1052 struct pci_dev *pci_dev = efx->pci_dev; 1055 struct pci_dev *pci_dev = efx->pci_dev;
1053 dma_addr_t dma_mask = efx->type->max_dma_mask; 1056 dma_addr_t dma_mask = efx->type->max_dma_mask;
1057 bool use_wc;
1054 int rc; 1058 int rc;
1055 1059
1056 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 1060 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,8 +1105,21 @@ static int efx_init_io(struct efx_nic *efx)
1101 rc = -EIO; 1105 rc = -EIO;
1102 goto fail3; 1106 goto fail3;
1103 } 1107 }
1104 efx->membase = ioremap_nocache(efx->membase_phys, 1108
1105 efx->type->mem_map_size); 1109 /* bug22643: If SR-IOV is enabled then tx push over a write combined
1110 * mapping is unsafe. We need to disable write combining in this case.
1111 * MSI is unsupported when SR-IOV is enabled, and the firmware will
1112 * have removed the MSI capability. So write combining is safe if
1113 * there is an MSI capability.
1114 */
1115 use_wc = (!EFX_WORKAROUND_22643(efx) ||
1116 pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
1117 if (use_wc)
1118 efx->membase = ioremap_wc(efx->membase_phys,
1119 efx->type->mem_map_size);
1120 else
1121 efx->membase = ioremap_nocache(efx->membase_phys,
1122 efx->type->mem_map_size);
1106 if (!efx->membase) { 1123 if (!efx->membase) {
1107 netif_err(efx, probe, efx->net_dev, 1124 netif_err(efx, probe, efx->net_dev,
1108 "could not map memory BAR at %llx+%x\n", 1125 "could not map memory BAR at %llx+%x\n",
@@ -1175,10 +1192,32 @@ static int efx_wanted_channels(void)
1175 return count; 1192 return count;
1176} 1193}
1177 1194
1195static int
1196efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
1197{
1198#ifdef CONFIG_RFS_ACCEL
1199 int i, rc;
1200
1201 efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
1202 if (!efx->net_dev->rx_cpu_rmap)
1203 return -ENOMEM;
1204 for (i = 0; i < efx->n_rx_channels; i++) {
1205 rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
1206 xentries[i].vector);
1207 if (rc) {
1208 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
1209 efx->net_dev->rx_cpu_rmap = NULL;
1210 return rc;
1211 }
1212 }
1213#endif
1214 return 0;
1215}
1216
1178/* Probe the number and type of interrupts we are able to obtain, and 1217/* Probe the number and type of interrupts we are able to obtain, and
1179 * the resulting numbers of channels and RX queues. 1218 * the resulting numbers of channels and RX queues.
1180 */ 1219 */
1181static void efx_probe_interrupts(struct efx_nic *efx) 1220static int efx_probe_interrupts(struct efx_nic *efx)
1182{ 1221{
1183 int max_channels = 1222 int max_channels =
1184 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); 1223 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1259,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1220 efx->n_tx_channels = efx->n_channels; 1259 efx->n_tx_channels = efx->n_channels;
1221 efx->n_rx_channels = efx->n_channels; 1260 efx->n_rx_channels = efx->n_channels;
1222 } 1261 }
1262 rc = efx_init_rx_cpu_rmap(efx, xentries);
1263 if (rc) {
1264 pci_disable_msix(efx->pci_dev);
1265 return rc;
1266 }
1223 for (i = 0; i < n_channels; i++) 1267 for (i = 0; i < n_channels; i++)
1224 efx_get_channel(efx, i)->irq = 1268 efx_get_channel(efx, i)->irq =
1225 xentries[i].vector; 1269 xentries[i].vector;
@@ -1253,6 +1297,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1253 efx->n_tx_channels = 1; 1297 efx->n_tx_channels = 1;
1254 efx->legacy_irq = efx->pci_dev->irq; 1298 efx->legacy_irq = efx->pci_dev->irq;
1255 } 1299 }
1300
1301 return 0;
1256} 1302}
1257 1303
1258static void efx_remove_interrupts(struct efx_nic *efx) 1304static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1271,21 +1317,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1271 1317
1272static void efx_set_channels(struct efx_nic *efx) 1318static void efx_set_channels(struct efx_nic *efx)
1273{ 1319{
1274 struct efx_channel *channel;
1275 struct efx_tx_queue *tx_queue;
1276
1277 efx->tx_channel_offset = 1320 efx->tx_channel_offset =
1278 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1321 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1279
1280 /* Channel pointers were set in efx_init_struct() but we now
1281 * need to clear them for TX queues in any RX-only channels. */
1282 efx_for_each_channel(channel, efx) {
1283 if (channel->channel - efx->tx_channel_offset >=
1284 efx->n_tx_channels) {
1285 efx_for_each_channel_tx_queue(tx_queue, channel)
1286 tx_queue->channel = NULL;
1287 }
1288 }
1289} 1322}
1290 1323
1291static int efx_probe_nic(struct efx_nic *efx) 1324static int efx_probe_nic(struct efx_nic *efx)
@@ -1302,7 +1335,9 @@ static int efx_probe_nic(struct efx_nic *efx)
1302 1335
1303 /* Determine the number of channels and queues by trying to hook 1336 /* Determine the number of channels and queues by trying to hook
1304 * in MSI-X interrupts. */ 1337 * in MSI-X interrupts. */
1305 efx_probe_interrupts(efx); 1338 rc = efx_probe_interrupts(efx);
1339 if (rc)
1340 goto fail;
1306 1341
1307 if (efx->n_channels > 1) 1342 if (efx->n_channels > 1)
1308 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1343 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1317,6 +1352,10 @@ static int efx_probe_nic(struct efx_nic *efx)
1317 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); 1352 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
1318 1353
1319 return 0; 1354 return 0;
1355
1356fail:
1357 efx->type->remove(efx);
1358 return rc;
1320} 1359}
1321 1360
1322static void efx_remove_nic(struct efx_nic *efx) 1361static void efx_remove_nic(struct efx_nic *efx)
@@ -1531,9 +1570,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1531 efx->irq_rx_adaptive = rx_adaptive; 1570 efx->irq_rx_adaptive = rx_adaptive;
1532 efx->irq_rx_moderation = rx_ticks; 1571 efx->irq_rx_moderation = rx_ticks;
1533 efx_for_each_channel(channel, efx) { 1572 efx_for_each_channel(channel, efx) {
1534 if (efx_channel_get_rx_queue(channel)) 1573 if (efx_channel_has_rx_queue(channel))
1535 channel->irq_moderation = rx_ticks; 1574 channel->irq_moderation = rx_ticks;
1536 else if (efx_channel_get_tx_queue(channel, 0)) 1575 else if (efx_channel_has_tx_queues(channel))
1537 channel->irq_moderation = tx_ticks; 1576 channel->irq_moderation = tx_ticks;
1538 } 1577 }
1539} 1578}
@@ -1849,6 +1888,10 @@ static const struct net_device_ops efx_netdev_ops = {
1849#ifdef CONFIG_NET_POLL_CONTROLLER 1888#ifdef CONFIG_NET_POLL_CONTROLLER
1850 .ndo_poll_controller = efx_netpoll, 1889 .ndo_poll_controller = efx_netpoll,
1851#endif 1890#endif
1891 .ndo_setup_tc = efx_setup_tc,
1892#ifdef CONFIG_RFS_ACCEL
1893 .ndo_rx_flow_steer = efx_filter_rfs,
1894#endif
1852}; 1895};
1853 1896
1854static void efx_update_name(struct efx_nic *efx) 1897static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1953,8 @@ static int efx_register_netdev(struct efx_nic *efx)
1910 1953
1911 efx_for_each_channel(channel, efx) { 1954 efx_for_each_channel(channel, efx) {
1912 struct efx_tx_queue *tx_queue; 1955 struct efx_tx_queue *tx_queue;
1913 efx_for_each_channel_tx_queue(tx_queue, channel) { 1956 efx_for_each_channel_tx_queue(tx_queue, channel)
1914 tx_queue->core_txq = netdev_get_tx_queue( 1957 efx_init_tx_queue_core_txq(tx_queue);
1915 efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
1916 }
1917 } 1958 }
1918 1959
1919 /* Always start with carrier off; PHY events will detect the link */ 1960 /* Always start with carrier off; PHY events will detect the link */
@@ -2288,6 +2329,10 @@ static void efx_fini_struct(struct efx_nic *efx)
2288 */ 2329 */
2289static void efx_pci_remove_main(struct efx_nic *efx) 2330static void efx_pci_remove_main(struct efx_nic *efx)
2290{ 2331{
2332#ifdef CONFIG_RFS_ACCEL
2333 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2334 efx->net_dev->rx_cpu_rmap = NULL;
2335#endif
2291 efx_nic_fini_interrupt(efx); 2336 efx_nic_fini_interrupt(efx);
2292 efx_fini_channels(efx); 2337 efx_fini_channels(efx);
2293 efx_fini_port(efx); 2338 efx_fini_port(efx);
@@ -2401,7 +2446,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2401 int i, rc; 2446 int i, rc;
2402 2447
2403 /* Allocate and initialise a struct net_device and struct efx_nic */ 2448 /* Allocate and initialise a struct net_device and struct efx_nic */
2404 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES); 2449 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
2450 EFX_MAX_RX_QUEUES);
2405 if (!net_dev) 2451 if (!net_dev)
2406 return -ENOMEM; 2452 return -ENOMEM;
2407 net_dev->features |= (type->offload_features | NETIF_F_SG | 2453 net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5212b..3d83a1f74fe 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,7 @@
29extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); 29extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
30extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); 30extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
31extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); 31extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
32extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
32extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 33extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
33extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); 34extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
34extern netdev_tx_t 35extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
36extern netdev_tx_t 37extern netdev_tx_t
37efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 38efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
38extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 39extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
40extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
39 41
40/* RX */ 42/* RX */
41extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 43extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx,
74 struct efx_filter_spec *spec); 76 struct efx_filter_spec *spec);
75extern void efx_filter_clear_rx(struct efx_nic *efx, 77extern void efx_filter_clear_rx(struct efx_nic *efx,
76 enum efx_filter_priority priority); 78 enum efx_filter_priority priority);
79#ifdef CONFIG_RFS_ACCEL
80extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
81 u16 rxq_index, u32 flow_id);
82extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
83static inline void efx_filter_rfs_expire(struct efx_channel *channel)
84{
85 if (channel->rfs_filters_added >= 60 &&
86 __efx_filter_rfs_expire(channel->efx, 100))
87 channel->rfs_filters_added -= 60;
88}
89#define efx_filter_rfs_enabled() 1
90#else
91static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
92#define efx_filter_rfs_enabled() 0
93#endif
77 94
78/* Channels */ 95/* Channels */
79extern void efx_process_channel_now(struct efx_channel *channel); 96extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19ed60..807178ef65a 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -28,7 +28,8 @@ struct efx_ethtool_stat {
28 enum { 28 enum {
29 EFX_ETHTOOL_STAT_SOURCE_mac_stats, 29 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
30 EFX_ETHTOOL_STAT_SOURCE_nic, 30 EFX_ETHTOOL_STAT_SOURCE_nic,
31 EFX_ETHTOOL_STAT_SOURCE_channel 31 EFX_ETHTOOL_STAT_SOURCE_channel,
32 EFX_ETHTOOL_STAT_SOURCE_tx_queue
32 } source; 33 } source;
33 unsigned offset; 34 unsigned offset;
34 u64(*get_stat) (void *field); /* Reader function */ 35 u64(*get_stat) (void *field); /* Reader function */
@@ -86,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field)
86 EFX_ETHTOOL_STAT(field, channel, n_##field, \ 87 EFX_ETHTOOL_STAT(field, channel, n_##field, \
87 unsigned int, efx_get_uint_stat) 88 unsigned int, efx_get_uint_stat)
88 89
90#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
91 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
92 unsigned int, efx_get_uint_stat)
93
89static struct efx_ethtool_stat efx_ethtool_stats[] = { 94static struct efx_ethtool_stat efx_ethtool_stats[] = {
90 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), 95 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
91 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), 96 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
@@ -116,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), 121 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), 122 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), 123 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
124 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
125 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
126 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
127 EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
119 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), 128 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), 129 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), 130 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
@@ -237,8 +246,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
237 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 246 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
238 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 247 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
239 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 248 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
240 siena_print_fwver(efx, info->fw_version, 249 efx_mcdi_print_fwver(efx, info->fw_version,
241 sizeof(info->fw_version)); 250 sizeof(info->fw_version));
242 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 251 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
243} 252}
244 253
@@ -470,6 +479,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
470 struct efx_mac_stats *mac_stats = &efx->mac_stats; 479 struct efx_mac_stats *mac_stats = &efx->mac_stats;
471 struct efx_ethtool_stat *stat; 480 struct efx_ethtool_stat *stat;
472 struct efx_channel *channel; 481 struct efx_channel *channel;
482 struct efx_tx_queue *tx_queue;
473 struct rtnl_link_stats64 temp; 483 struct rtnl_link_stats64 temp;
474 int i; 484 int i;
475 485
@@ -495,6 +505,15 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
495 data[i] += stat->get_stat((void *)channel + 505 data[i] += stat->get_stat((void *)channel +
496 stat->offset); 506 stat->offset);
497 break; 507 break;
508 case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
509 data[i] = 0;
510 efx_for_each_channel(channel, efx) {
511 efx_for_each_channel_tx_queue(tx_queue, channel)
512 data[i] +=
513 stat->get_stat((void *)tx_queue
514 + stat->offset);
515 }
516 break;
498 } 517 }
499 } 518 }
500} 519}
@@ -502,7 +521,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
502static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) 521static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
503{ 522{
504 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); 523 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
505 unsigned long features; 524 u32 features;
506 525
507 features = NETIF_F_TSO; 526 features = NETIF_F_TSO;
508 if (efx->type->offload_features & NETIF_F_V6_CSUM) 527 if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +538,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
519static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) 538static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
520{ 539{
521 struct efx_nic *efx = netdev_priv(net_dev); 540 struct efx_nic *efx = netdev_priv(net_dev);
522 unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; 541 u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
523 542
524 if (enable) 543 if (enable)
525 net_dev->features |= features; 544 net_dev->features |= features;
@@ -569,9 +588,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
569 struct ethtool_test *test, u64 *data) 588 struct ethtool_test *test, u64 *data)
570{ 589{
571 struct efx_nic *efx = netdev_priv(net_dev); 590 struct efx_nic *efx = netdev_priv(net_dev);
572 struct efx_self_tests efx_tests; 591 struct efx_self_tests *efx_tests;
573 int already_up; 592 int already_up;
574 int rc; 593 int rc = -ENOMEM;
594
595 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
596 if (!efx_tests)
597 goto fail;
598
575 599
576 ASSERT_RTNL(); 600 ASSERT_RTNL();
577 if (efx->state != STATE_RUNNING) { 601 if (efx->state != STATE_RUNNING) {
@@ -589,13 +613,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
589 if (rc) { 613 if (rc) {
590 netif_err(efx, drv, efx->net_dev, 614 netif_err(efx, drv, efx->net_dev,
591 "failed opening device.\n"); 615 "failed opening device.\n");
592 goto fail2; 616 goto fail1;
593 } 617 }
594 } 618 }
595 619
596 memset(&efx_tests, 0, sizeof(efx_tests)); 620 rc = efx_selftest(efx, efx_tests, test->flags);
597
598 rc = efx_selftest(efx, &efx_tests, test->flags);
599 621
600 if (!already_up) 622 if (!already_up)
601 dev_close(efx->net_dev); 623 dev_close(efx->net_dev);
@@ -604,10 +626,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
604 rc == 0 ? "passed" : "failed", 626 rc == 0 ? "passed" : "failed",
605 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 627 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
606 628
607 fail2: 629fail1:
608 fail1:
609 /* Fill ethtool results structures */ 630 /* Fill ethtool results structures */
610 efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); 631 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
632 kfree(efx_tests);
633fail:
611 if (rc) 634 if (rc)
612 test->flags |= ETH_TEST_FL_FAILED; 635 test->flags |= ETH_TEST_FL_FAILED;
613} 636}
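
The self-test hunks above move struct efx_self_tests off the stack onto the heap, with a single error label covering the allocation failure. A userspace sketch of that pattern (calloc/free standing in for kzalloc/kfree):

#include <stdio.h>
#include <stdlib.h>

struct self_tests { char results[4096]; };      /* deliberately large */

static int run_selftest(void)
{
        int rc = -1;                            /* -ENOMEM analogue */
        struct self_tests *t = calloc(1, sizeof(*t)); /* kzalloc analogue */

        if (!t)
                goto fail;                      /* nothing to free yet */

        snprintf(t->results, sizeof(t->results), "passed");
        printf("%s\n", t->results);
        rc = 0;
        free(t);                                /* kfree analogue */
fail:
        return rc;
}

int main(void) { return run_selftest(); }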
@@ -631,7 +654,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
631 /* Find lowest IRQ moderation across all used TX queues */ 654 /* Find lowest IRQ moderation across all used TX queues */
632 coalesce->tx_coalesce_usecs_irq = ~((u32) 0); 655 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
633 efx_for_each_channel(channel, efx) { 656 efx_for_each_channel(channel, efx) {
634 if (!efx_channel_get_tx_queue(channel, 0)) 657 if (!efx_channel_has_tx_queues(channel))
635 continue; 658 continue;
636 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { 659 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
637 if (channel->channel < efx->n_rx_channels) 660 if (channel->channel < efx->n_rx_channels)
@@ -676,8 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
676 699
677 /* If the channel is shared only allow RX parameters to be set */ 700 /* If the channel is shared only allow RX parameters to be set */
678 efx_for_each_channel(channel, efx) { 701 efx_for_each_channel(channel, efx) {
679 if (efx_channel_get_rx_queue(channel) && 702 if (efx_channel_has_rx_queue(channel) &&
680 efx_channel_get_tx_queue(channel, 0) && 703 efx_channel_has_tx_queues(channel) &&
681 tx_usecs) { 704 tx_usecs) {
682 netif_err(efx, drv, efx->net_dev, "Channel is shared. " 705 netif_err(efx, drv, efx->net_dev, "Channel is shared. "
683 "Only RX coalescing may be set\n"); 706 "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 61ddd2c6e75..734fcfb52e8 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -1478,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
1478 /* RX control FIFO thresholds (32 entries) */ 1478 /* RX control FIFO thresholds (32 entries) */
1479 const unsigned ctrl_xon_thr = 20; 1479 const unsigned ctrl_xon_thr = 20;
1480 const unsigned ctrl_xoff_thr = 25; 1480 const unsigned ctrl_xoff_thr = 25;
1481 /* RX data FIFO thresholds (256-byte units; size varies) */
1482 int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
1483 int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
1484 efx_oword_t reg; 1481 efx_oword_t reg;
1485 1482
1486 efx_reado(efx, &reg, FR_AZ_RX_CFG); 1483 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1487 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { 1484 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1488 /* Data FIFO size is 5.5K */ 1485 /* Data FIFO size is 5.5K */
1489 if (data_xon_thr < 0)
1490 data_xon_thr = 512 >> 8;
1491 if (data_xoff_thr < 0)
1492 data_xoff_thr = 2048 >> 8;
1493 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); 1486 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
1494 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, 1487 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
1495 huge_buf_size); 1488 huge_buf_size);
1496 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr); 1489 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
1497 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr); 1490 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
1498 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); 1491 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
1499 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); 1492 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
1500 } else { 1493 } else {
1501 /* Data FIFO size is 80K; register fields moved */ 1494 /* Data FIFO size is 80K; register fields moved */
1502 if (data_xon_thr < 0)
1503 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
1504 if (data_xoff_thr < 0)
1505 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
1506 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); 1495 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
1507 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, 1496 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
1508 huge_buf_size); 1497 huge_buf_size);
1509 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr); 1498 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
1510 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr); 1499 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
1500 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
1511 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); 1501 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
1512 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); 1502 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
1513 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); 1503 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 2dd16f0b3ce..b9cc846811d 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b49e8439464..2c9ee5db3bf 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index d4722c41c4c..95a980fd63d 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/in.h> 10#include <linux/in.h>
11#include <net/ip.h>
11#include "efx.h" 12#include "efx.h"
12#include "filter.h" 13#include "filter.h"
13#include "io.h" 14#include "io.h"
@@ -27,6 +28,10 @@
27 */ 28 */
28#define FILTER_CTL_SRCH_MAX 200 29#define FILTER_CTL_SRCH_MAX 200
29 30
31/* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33#define FILTER_CTL_SRCH_HINT_MAX 5
34
30enum efx_filter_table_id { 35enum efx_filter_table_id {
31 EFX_FILTER_TABLE_RX_IP = 0, 36 EFX_FILTER_TABLE_RX_IP = 0,
32 EFX_FILTER_TABLE_RX_MAC, 37 EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@ struct efx_filter_table {
47struct efx_filter_state { 52struct efx_filter_state {
48 spinlock_t lock; 53 spinlock_t lock;
49 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; 54 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
55#ifdef CONFIG_RFS_ACCEL
56 u32 *rps_flow_id;
57 unsigned rps_expire_index;
58#endif
50}; 59};
51 60
52/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 61/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
325 struct efx_filter_spec *spec, u32 key, 334 struct efx_filter_spec *spec, u32 key,
326 bool for_insert, int *depth_required) 335 bool for_insert, int *depth_required)
327{ 336{
328 unsigned hash, incr, filter_idx, depth; 337 unsigned hash, incr, filter_idx, depth, depth_max;
329 struct efx_filter_spec *cmp; 338 struct efx_filter_spec *cmp;
330 339
331 hash = efx_filter_hash(key); 340 hash = efx_filter_hash(key);
332 incr = efx_filter_increment(key); 341 incr = efx_filter_increment(key);
342 depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
343 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
333 344
334 for (depth = 1, filter_idx = hash & (table->size - 1); 345 for (depth = 1, filter_idx = hash & (table->size - 1);
335 depth <= FILTER_CTL_SRCH_MAX && 346 depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
336 test_bit(filter_idx, table->used_bitmap);
337 ++depth) { 347 ++depth) {
338 cmp = &table->spec[filter_idx]; 348 cmp = &table->spec[filter_idx];
339 if (efx_filter_equal(spec, cmp)) 349 if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table,
342 } 352 }
343 if (!for_insert) 353 if (!for_insert)
344 return -ENOENT; 354 return -ENOENT;
345 if (depth > FILTER_CTL_SRCH_MAX) 355 if (depth > depth_max)
346 return -EBUSY; 356 return -EBUSY;
347found: 357found:
348 *depth_required = depth; 358 *depth_required = depth;
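
The two filter.c hunks above cap the open-addressing probe depth at 5 for hint-priority insertions instead of the usual 200. A runnable userspace model of that bounded probe (hash and increment functions are simple stand-ins, not the driver's LFSR):

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE      16      /* power of two, as in the driver */
#define SRCH_MAX        200
#define SRCH_HINT_MAX   5

static unsigned table_key[TABLE_SIZE];
static bool used[TABLE_SIZE];

static unsigned hash32(unsigned key) { return key * 2654435761u; }
static unsigned incr(unsigned key)   { return key | 1; } /* odd step */

/* Returns a slot index, or -1 once the depth budget is exhausted
 * (the driver returns -EBUSY). is_hint mirrors priority <= HINT. */
static int search(unsigned key, bool is_hint)
{
        unsigned depth_max = is_hint ? SRCH_HINT_MAX : SRCH_MAX;
        unsigned idx = hash32(key) & (TABLE_SIZE - 1);
        unsigned step = incr(key);

        for (unsigned depth = 1; depth <= depth_max; depth++) {
                if (!used[idx] || table_key[idx] == key)
                        return idx;     /* free slot or existing entry */
                idx = (idx + step) & (TABLE_SIZE - 1);
        }
        return -1;
}

int main(void)
{
        for (unsigned k = 0; k < 14; k++) {     /* crowd the table */
                int i = search(k, false);
                if (i >= 0) { used[i] = true; table_key[i] = k; }
        }
        printf("hint insert -> %d\n", search(100, true));
        printf("full insert -> %d\n", search(100, false));
        return 0;
}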
@@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx)
562 spin_lock_init(&state->lock); 572 spin_lock_init(&state->lock);
563 573
564 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 574 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
575#ifdef CONFIG_RFS_ACCEL
576 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
577 sizeof(*state->rps_flow_id),
578 GFP_KERNEL);
579 if (!state->rps_flow_id)
580 goto fail;
581#endif
565 table = &state->table[EFX_FILTER_TABLE_RX_IP]; 582 table = &state->table[EFX_FILTER_TABLE_RX_IP];
566 table->id = EFX_FILTER_TABLE_RX_IP; 583 table->id = EFX_FILTER_TABLE_RX_IP;
567 table->offset = FR_BZ_RX_FILTER_TBL0; 584 table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
607 kfree(state->table[table_id].used_bitmap); 624 kfree(state->table[table_id].used_bitmap);
608 vfree(state->table[table_id].spec); 625 vfree(state->table[table_id].spec);
609 } 626 }
627#ifdef CONFIG_RFS_ACCEL
628 kfree(state->rps_flow_id);
629#endif
610 kfree(state); 630 kfree(state);
611} 631}
632
633#ifdef CONFIG_RFS_ACCEL
634
635int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
636 u16 rxq_index, u32 flow_id)
637{
638 struct efx_nic *efx = netdev_priv(net_dev);
639 struct efx_channel *channel;
640 struct efx_filter_state *state = efx->filter_state;
641 struct efx_filter_spec spec;
642 const struct iphdr *ip;
643 const __be16 *ports;
644 int nhoff;
645 int rc;
646
647 nhoff = skb_network_offset(skb);
648
649 if (skb->protocol != htons(ETH_P_IP))
650 return -EPROTONOSUPPORT;
651
652 /* RFS must validate the IP header length before calling us */
653 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
654 ip = (const struct iphdr *)(skb->data + nhoff);
655 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
656 return -EPROTONOSUPPORT;
657 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
658 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
659
660 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
661 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
662 ip->daddr, ports[1], ip->saddr, ports[0]);
663 if (rc)
664 return rc;
665
666 rc = efx_filter_insert_filter(efx, &spec, true);
667 if (rc < 0)
668 return rc;
669
670 /* Remember this so we can check whether to expire the filter later */
671 state->rps_flow_id[rc] = flow_id;
672 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
673 ++channel->rfs_filters_added;
674
675 netif_info(efx, rx_status, efx->net_dev,
676 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
677 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
678 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
679 rxq_index, flow_id, rc);
680
681 return rc;
682}
683
684bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
685{
686 struct efx_filter_state *state = efx->filter_state;
687 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
688 unsigned mask = table->size - 1;
689 unsigned index;
690 unsigned stop;
691
692 if (!spin_trylock_bh(&state->lock))
693 return false;
694
695 index = state->rps_expire_index;
696 stop = (index + quota) & mask;
697
698 while (index != stop) {
699 if (test_bit(index, table->used_bitmap) &&
700 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
701 rps_may_expire_flow(efx->net_dev,
702 table->spec[index].dmaq_id,
703 state->rps_flow_id[index], index)) {
704 netif_info(efx, rx_status, efx->net_dev,
705 "expiring filter %d [flow %u]\n",
706 index, state->rps_flow_id[index]);
707 efx_filter_table_clear_entry(efx, table, index);
708 }
709 index = (index + 1) & mask;
710 }
711
712 state->rps_expire_index = stop;
713 if (table->used == 0)
714 efx_filter_table_reset_search_depth(table);
715
716 spin_unlock_bh(&state->lock);
717 return true;
718}
719
720#endif /* CONFIG_RFS_ACCEL */
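
The expiry scan added above walks a bounded window of a power-of-two ring each call and persists its cursor for the next call. A minimal userspace model of that traversal (the per-entry expiry test is elided; only the cursor arithmetic is modelled):

#include <stdio.h>

#define RING 8                  /* power of two, like table->size */
static unsigned expire_index;   /* models state->rps_expire_index */

static unsigned scan(unsigned quota)
{
        unsigned mask = RING - 1;
        unsigned index = expire_index;
        unsigned stop = (index + quota) & mask;
        unsigned visited = 0;

        while (index != stop) {
                /* driver: used_bitmap + priority + rps_may_expire_flow() */
                visited++;
                index = (index + 1) & mask;
        }
        expire_index = stop;    /* resume here next call */
        return visited;
}

int main(void)
{
        for (int call = 0; call < 4; call++) {
                unsigned n = scan(3);
                printf("call %d: visited %u, cursor now %u\n",
                       call, n, expire_index);
        }
        return 0;
}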
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 6da4ae20a03..d9d8c2ef107 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -48,9 +48,9 @@
48 * replacing the low 96 bits with zero does not affect functionality. 48 * replacing the low 96 bits with zero does not affect functionality.
49 * - If the host writes to the last dword address of such a register 49 * - If the host writes to the last dword address of such a register
50 * (i.e. the high 32 bits) the underlying register will always be 50 * (i.e. the high 32 bits) the underlying register will always be
51 * written. If the collector does not hold values for the low 96 51 * written. If the collector and the current write together do not
52 * bits of the register, they will be written as zero. Writing to 52 * provide values for all 128 bits of the register, the low 96 bits
53 * the last qword does not have this effect and must not be done. 53 * will be written as zero.
54 * - If the host writes to the address of any other part of such a 54 * - If the host writes to the address of any other part of such a
55 * register while the collector already holds values for some other 55 * register while the collector already holds values for some other
56 * register, the write is discarded and the collector maintains its 56 * register, the write is discarded and the collector maintains its
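
The reworded comment above describes the BIU write collector: a 128-bit register commits when its final dword is written, and any dwords the collector does not hold are committed as zero. A userspace model of those semantics for a single register:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Models the collector described above for one 128-bit CSR. */
static uint32_t held[4];
static bool     valid[4];

static void commit(void)
{
        printf("commit:");
        for (int i = 0; i < 4; i++) {
                printf(" %08x", valid[i] ? held[i] : 0); /* missing -> zero */
                valid[i] = false;
        }
        printf("\n");
}

static void write_dword(int idx, uint32_t v)
{
        held[idx] = v;
        valid[idx] = true;
        if (idx == 3)           /* high dword triggers the full write */
                commit();
}

int main(void)
{
        write_dword(0, 0x11111111);     /* collected, not yet written */
        write_dword(3, 0x44444444);     /* commits; dwords 1-2 as zero */
        return 0;
}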
@@ -103,6 +103,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
103 _efx_writed(efx, value->u32[2], reg + 8); 103 _efx_writed(efx, value->u32[2], reg + 8);
104 _efx_writed(efx, value->u32[3], reg + 12); 104 _efx_writed(efx, value->u32[3], reg + 12);
105#endif 105#endif
106 wmb();
106 mmiowb(); 107 mmiowb();
107 spin_unlock_irqrestore(&efx->biu_lock, flags); 108 spin_unlock_irqrestore(&efx->biu_lock, flags);
108} 109}
@@ -125,6 +126,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
125 __raw_writel((__force u32)value->u32[0], membase + addr); 126 __raw_writel((__force u32)value->u32[0], membase + addr);
126 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 127 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
127#endif 128#endif
129 wmb();
128 mmiowb(); 130 mmiowb();
129 spin_unlock_irqrestore(&efx->biu_lock, flags); 131 spin_unlock_irqrestore(&efx->biu_lock, flags);
130} 132}
@@ -139,6 +141,7 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
139 141
140 /* No lock required */ 142 /* No lock required */
141 _efx_writed(efx, value->u32[0], reg); 143 _efx_writed(efx, value->u32[0], reg);
144 wmb();
142} 145}
143 146
144/* Read a 128-bit CSR, locking as appropriate. */ 147/* Read a 128-bit CSR, locking as appropriate. */
@@ -237,12 +240,14 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
237 240
238#ifdef EFX_USE_QWORD_IO 241#ifdef EFX_USE_QWORD_IO
239 _efx_writeq(efx, value->u64[0], reg + 0); 242 _efx_writeq(efx, value->u64[0], reg + 0);
243 _efx_writeq(efx, value->u64[1], reg + 8);
240#else 244#else
241 _efx_writed(efx, value->u32[0], reg + 0); 245 _efx_writed(efx, value->u32[0], reg + 0);
242 _efx_writed(efx, value->u32[1], reg + 4); 246 _efx_writed(efx, value->u32[1], reg + 4);
243#endif
244 _efx_writed(efx, value->u32[2], reg + 8); 247 _efx_writed(efx, value->u32[2], reg + 8);
245 _efx_writed(efx, value->u32[3], reg + 12); 248 _efx_writed(efx, value->u32[3], reg + 12);
249#endif
250 wmb();
246} 251}
247#define efx_writeo_page(efx, value, reg, page) \ 252#define efx_writeo_page(efx, value, reg, page) \
248 _efx_writeo_page(efx, value, \ 253 _efx_writeo_page(efx, value, \
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index b716e827b29..5e118f0d247 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc. 3 * Copyright 2008-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -94,14 +94,15 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
94 94
95 efx_writed(efx, &hdr, pdu); 95 efx_writed(efx, &hdr, pdu);
96 96
97 for (i = 0; i < inlen; i += 4) 97 for (i = 0; i < inlen; i += 4) {
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
99 99 /* use wmb() within loop to inhibit write combining */
100 /* Ensure the payload is written out before the header */ 100 wmb();
101 wmb(); 101 }
102 102
103 /* ring the doorbell with a distinctive value */ 103 /* ring the doorbell with a distinctive value */
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
105 wmb();
105} 106}
106 107
107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 108static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
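
The copyin hunk above puts a wmb() inside the payload loop, so a write-combined mapping cannot merge the dword stores, and adds one more after the doorbell. A userspace sketch of that ordering, with a GCC compiler barrier standing in for wmb() (a model only; real wmb() orders the MMIO stores themselves):

#include <stdint.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory") /* models wmb() */

static volatile uint32_t pdu[8];        /* stand-in for the MCDI PDU window */
static volatile uint32_t doorbell;

static void copyin(const uint32_t *inbuf, unsigned dwords)
{
        for (unsigned i = 0; i < dwords; i++) {
                pdu[i] = inbuf[i];      /* payload dword */
                barrier();              /* per-store: inhibit combining */
        }
        doorbell = 0x45789abc;          /* distinctive doorbell value */
        barrier();                      /* flush the doorbell too */
}

int main(void)
{
        uint32_t buf[3] = { 1, 2, 3 };
        copyin(buf, 3);
        printf("doorbell=%#x last=%u\n", (unsigned)doorbell, (unsigned)pdu[2]);
        return 0;
}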
@@ -602,7 +603,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
602 ************************************************************************** 603 **************************************************************************
603 */ 604 */
604 605
605int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) 606void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
606{ 607{
607 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; 608 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
608 size_t outlength; 609 size_t outlength;
@@ -616,29 +617,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
616 if (rc) 617 if (rc)
617 goto fail; 618 goto fail;
618 619
619 if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
620 *version = 0;
621 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
622 return 0;
623 }
624
625 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { 620 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
626 rc = -EIO; 621 rc = -EIO;
627 goto fail; 622 goto fail;
628 } 623 }
629 624
630 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 625 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
631 *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | 626 snprintf(buf, len, "%u.%u.%u.%u",
632 ((u64)le16_to_cpu(ver_words[1]) << 32) | 627 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
633 ((u64)le16_to_cpu(ver_words[2]) << 16) | 628 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
634 le16_to_cpu(ver_words[3])); 629 return;
635 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
636
637 return 0;
638 630
639fail: 631fail:
640 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 632 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
641 return rc; 633 buf[0] = 0;
642} 634}
643 635
644int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 636int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index c792f1d65e4..aced2a7856f 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc. 3 * Copyright 2008-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
93#define MCDI_EVENT_FIELD(_ev, _field) \ 93#define MCDI_EVENT_FIELD(_ev, _field) \
94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
95 95
96extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build); 96extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
98 bool *was_attached_out); 98 bool *was_attached_out);
99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index f88f4bf986f..33f7294edb4 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 90359e64400..b86a15f221a 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e97eed663c..ec3f740f546 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 56b0266b441..19e68c26d10 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -51,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
 	return spins ? spins : -ETIMEDOUT;
 }
 
-static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
+static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
 {
 	int status;
 
-	if (LOOPBACK_INTERNAL(efx))
-		return 0;
-
 	if (mmd != MDIO_MMD_AN) {
 		/* Read MMD STATUS2 to check it is responding. */
 		status = efx_mdio_read(efx, mmd, MDIO_STAT2);
@@ -68,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
 		}
 	}
 
-	/* Read MMD STATUS 1 to check for fault. */
-	status = efx_mdio_read(efx, mmd, MDIO_STAT1);
-	if (status & MDIO_STAT1_FAULT) {
-		if (fault_fatal) {
-			netif_err(efx, hw, efx->net_dev,
-				  "PHY MMD %d reporting fatal"
-				  " fault: status %x\n", mmd, status);
-			return -EIO;
-		} else {
-			netif_dbg(efx, hw, efx->net_dev,
-				  "PHY MMD %d reporting status"
-				  " %x (expected)\n", mmd, status);
-		}
-	}
 	return 0;
 }
 
@@ -130,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
 	return rc;
 }
 
-int efx_mdio_check_mmds(struct efx_nic *efx,
-			unsigned int mmd_mask, unsigned int fatal_mask)
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
 {
 	int mmd = 0, probe_mmd, devs1, devs2;
 	u32 devices;
@@ -161,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
 
 	/* Check all required MMDs are responding and happy. */
 	while (mmd_mask) {
-		if (mmd_mask & 1) {
-			int fault_fatal = fatal_mask & 1;
-			if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
-				return -EIO;
-		}
+		if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
+			return -EIO;
 		mmd_mask = mmd_mask >> 1;
-		fatal_mask = fatal_mask >> 1;
 		mmd++;
 	}
 
@@ -337,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
337 "no MDIO PHY present with ID %d\n", efx->mdio.prtad); 315 "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
338 rc = -EINVAL; 316 rc = -EINVAL;
339 } else { 317 } else {
340 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); 318 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
341 } 319 }
342 320
343 mutex_unlock(&efx->mac_lock); 321 mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 75791d3d496..df0703940c8 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
 			      int spins, int spintime);
 
 /* As efx_mdio_check_mmd but for multiple MMDs */
-int efx_mdio_check_mmds(struct efx_nic *efx,
-			unsigned int mmd_mask, unsigned int fatal_mask);
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Check the link status of specified mmds in bit mask */
 extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
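
With the fatal_mask parameter gone, callers simply pass the bitmask of MMDs that must respond. An illustrative call, assuming a PHY that requires the PMA/PMD, PCS and PHY XS devices; example_check_phy() is hypothetical, though the MDIO_DEVS_* masks are the standard ones from linux/mdio.h:

	static int example_check_phy(struct efx_nic *efx)
	{
		unsigned int mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
				    MDIO_DEVS_PHYXS;

		/* 0 if every MMD in the mask responds, -EIO otherwise */
		return efx_mdio_check_mmds(efx, mask);
	}
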
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index d38627448c2..e646bfce2d8 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 28df8665256..215d5c51bfa 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -41,7 +41,7 @@
  *
  **************************************************************************/
 
-#define EFX_DRIVER_VERSION	"3.0"
+#define EFX_DRIVER_VERSION	"3.1"
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -63,10 +63,12 @@
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES	EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD	1
-#define EFX_TXQ_TYPES		2
-#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
+#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
+#define EFX_TXQ_TYPES		4
+#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
+	bool initialised;
 	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */
@@ -210,15 +214,17 @@ struct efx_tx_queue {
  *	If both this and page are %NULL, the buffer slot is currently free.
  * @page: The associated page buffer, if any.
  *	If both this and skb are %NULL, the buffer slot is currently free.
- * @data: Pointer to ethernet header
  * @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
  */
 struct efx_rx_buffer {
 	dma_addr_t dma_addr;
-	struct sk_buff *skb;
-	struct page *page;
-	char *data;
+	union {
+		struct sk_buff *skb;
+		struct page *page;
+	} u;
 	unsigned int len;
+	bool is_page;
 };
 
 /**
@@ -358,6 +364,9 @@ struct efx_channel {
 
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int rfs_filters_added;
+#endif
 
 	int rx_alloc_level;
 	int rx_alloc_push_pages;
@@ -377,7 +386,7 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-	struct efx_tx_queue tx_queue[2];
+	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {
@@ -906,7 +915,7 @@ struct efx_nic_type {
 	unsigned int phys_addr_channels;
 	unsigned int tx_dc_base;
 	unsigned int rx_dc_base;
-	unsigned long offload_features;
+	u32 offload_features;
 	u32 reset_world_flags;
 };
 
@@ -938,18 +947,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
 	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+		channel->efx->n_tx_channels;
+}
+
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-	struct efx_tx_queue *tx_queue = channel->tx_queue;
-	EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-	return tx_queue->channel ? tx_queue + type : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+			    type >= EFX_TXQ_TYPES);
+	return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+	return !(tx_queue->efx->net_dev->num_tc < 2 &&
+		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
-	for (_tx_queue = efx_channel_get_tx_queue(channel, 0);		\
-	     _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+	if (!efx_channel_has_tx_queues(_channel))			\
+		;							\
+	else								\
+		for (_tx_queue = (_channel)->tx_queue;			\
+		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+			     efx_tx_queue_used(_tx_queue);		\
+		     _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
+	for (_tx_queue = (_channel)->tx_queue;				\
+	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
 	     _tx_queue++)
 
955static inline struct efx_rx_queue * 986static inline struct efx_rx_queue *
@@ -959,18 +990,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
 	return &efx->channel[index]->rx_queue;
 }
 
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+	return channel->channel < channel->efx->n_rx_channels;
+}
+
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-	return channel->channel < channel->efx->n_rx_channels ?
-		&channel->rx_queue : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+	return &channel->rx_queue;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
-	for (_rx_queue = efx_channel_get_rx_queue(channel);		\
-	     _rx_queue;							\
-	     _rx_queue = NULL)
+	if (!efx_channel_has_rx_queue(_channel))			\
+		;							\
+	else								\
+		for (_rx_queue = &(_channel)->rx_queue;			\
+		     _rx_queue;						\
+		     _rx_queue = NULL)
 
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
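
The new constants encode a TX queue's identity in its number: the low two bits carry the type flags (EFX_TXQ_TYPE_OFFLOAD and EFX_TXQ_TYPE_HIGHPRI) and the remaining bits select the channel. A sketch of the implied decomposition; the example_ helpers are illustrative, not part of the driver:

	static inline unsigned example_txq_channel(unsigned queue)
	{
		return queue / EFX_TXQ_TYPES;	/* EFX_TXQ_TYPES == 4 */
	}

	static inline unsigned example_txq_flags(unsigned queue)
	{
		/* OFFLOAD (bit 0) and HIGHPRI (bit 1) flags */
		return queue & (EFX_TXQ_TYPES - 1);
	}
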
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da386599ab6..e8396614daf 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -41,26 +41,6 @@
 #define RX_DC_ENTRIES 64
 #define RX_DC_ENTRIES_ORDER 3
 
-/* RX FIFO XOFF watermark
- *
- * When the amount of the RX FIFO increases used increases past this
- * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xoff_thresh = -1;
-module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
-
-/* RX FIFO XON watermark
- *
- * When the amount of the RX FIFO used decreases below this
- * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xon_thresh = -1;
-module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
-
 /* If EFX_MAX_INT_ERRORS internal errors occur within
  * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
@@ -445,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
 	tx_queue->flushed = FLUSH_NONE;
 
@@ -454,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
 			      FRF_AZ_TX_DESCQ_EN, 1,
 			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
 				    !csum);
 	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 			 tx_queue->queue);
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -491,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 		set_bit_le(tx_queue->queue, (void *)&reg);
 		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
 	/* Flush all tx queues in parallel */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel)
-			efx_flush_tx_queue(tx_queue);
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
 	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 				++rx_pending;
 			}
 		}
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				++tx_pending;
 		}
 	}
@@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				netif_err(efx, hw, efx->net_dev,
 					  "tx queue %d flush command timed out\n",
 					  tx_queue->queue);
@@ -1682,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index eb0586925b5..d9de1b647d4 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -142,20 +142,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 
 /**
  * struct siena_nic_data - Siena NIC state
- * @fw_version: Management controller firmware version
- * @fw_build: Firmware build number
  * @mcdi: Management-Controller-to-Driver Interface
  * @wol_filter_id: Wake-on-LAN packet filter id
  */
 struct siena_nic_data {
-	u64 fw_version;
-	u32 fw_build;
 	struct efx_mcdi_iface mcdi;
 	int wol_filter_id;
 };
 
-extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-
 extern struct efx_nic_type falcon_a1_nic_type;
 extern struct efx_nic_type falcon_b0_nic_type;
 extern struct efx_nic_type siena_a0_nic_type;
@@ -194,7 +188,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
 extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
 
 /* Interrupts and test events */
 extern int efx_nic_init_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 1dab609757f..b3b79472421 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index ea3ae008931..55f90924247 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed81c3..cc2c86b76a7 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -2907,6 +2907,12 @@
 #define	FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
 #define	FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
 
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define	FFE_BZ_TX_PACE_OFF 0
+#define	FFE_BZ_TX_PACE_RESERVED 21
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define	FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
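
Taken together with the nic.c hunk above, the pace-table policy is simple: a high-priority queue has pacing switched off outright, while every other queue is parked in the fast bin by writing the nominally reserved value. A restatement of that selection, for illustration only (example_pace_value() is not driver code):

	static unsigned example_pace_value(unsigned queue)
	{
		return (queue & EFX_TXQ_TYPE_HIGHPRI) ?
			FFE_BZ_TX_PACE_OFF : FFE_BZ_TX_PACE_RESERVED;
	}
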
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 3925fd62117..c0fdb59030f 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+/* Offset of ethernet header within page */
+static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+					     struct efx_rx_buffer *buf)
 {
 	/* Offset is always within one page, so we don't need to consider
 	 * the page order.
 	 */
-	return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+	return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+		efx->type->rx_buffer_hash_size);
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
 	return PAGE_SIZE << efx->rx_buffer_order;
 }
 
-static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
 {
+	if (buf->is_page)
+		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
+	else
+		return ((u8 *)buf->u.skb->data +
+			efx->type->rx_buffer_hash_size);
+}
+
+static inline u32 efx_rx_buf_hash(const u8 *eh)
+{
+	/* The ethernet header is always directly after any hash. */
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
-	return __le32_to_cpup((const __le32 *)(buf->data - 4));
+	return __le32_to_cpup((const __le32 *)(eh - 4));
 #else
-	const u8 *data = (const u8 *)(buf->data - 4);
+	const u8 *data = eh - 4;
 	return ((u32)data[0] |
 		(u32)data[1] << 8 |
 		(u32)data[2] << 16 |
@@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	struct net_device *net_dev = efx->net_dev;
 	struct efx_rx_buffer *rx_buf;
+	struct sk_buff *skb;
 	int skb_len = efx->rx_buffer_len;
 	unsigned index, count;
 
@@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 
-		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
-		if (unlikely(!rx_buf->skb))
+		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
+		if (unlikely(!skb))
 			return -ENOMEM;
-		rx_buf->page = NULL;
 
 		/* Adjust the SKB for padding and checksum */
-		skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+		skb_reserve(skb, NET_IP_ALIGN);
 		rx_buf->len = skb_len - NET_IP_ALIGN;
-		rx_buf->data = (char *)rx_buf->skb->data;
-		rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+		rx_buf->is_page = false;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
-						  rx_buf->data, rx_buf->len,
+						  skb->data, rx_buf->len,
 						  PCI_DMA_FROMDEVICE);
 		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
 						   rx_buf->dma_addr))) {
-			dev_kfree_skb_any(rx_buf->skb);
-			rx_buf->skb = NULL;
+			dev_kfree_skb_any(skb);
+			rx_buf->u.skb = NULL;
 			return -EIO;
 		}
 
@@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
-		rx_buf->skb = NULL;
-		rx_buf->page = page;
-		rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+		rx_buf->u.page = page;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+		rx_buf->is_page = true;
 		++rx_queue->added_count;
 		++rx_queue->alloc_page_count;
 		++state->refcnt;
@@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				struct efx_rx_buffer *rx_buf)
 {
-	if (rx_buf->page) {
+	if (rx_buf->is_page && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
 
-		EFX_BUG_ON_PARANOID(rx_buf->skb);
-
-		state = page_address(rx_buf->page);
+		state = page_address(rx_buf->u.page);
 		if (--state->refcnt == 0) {
 			pci_unmap_page(efx->pci_dev,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       PCI_DMA_FROMDEVICE);
 		}
-	} else if (likely(rx_buf->skb)) {
+	} else if (!rx_buf->is_page && rx_buf->u.skb) {
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
 				 rx_buf->len, PCI_DMA_FROMDEVICE);
 	}
@@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 static void efx_free_rx_buffer(struct efx_nic *efx,
 			       struct efx_rx_buffer *rx_buf)
 {
-	if (rx_buf->page) {
-		__free_pages(rx_buf->page, efx->rx_buffer_order);
-		rx_buf->page = NULL;
-	} else if (likely(rx_buf->skb)) {
-		dev_kfree_skb_any(rx_buf->skb);
-		rx_buf->skb = NULL;
+	if (rx_buf->is_page && rx_buf->u.page) {
+		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
+		rx_buf->u.page = NULL;
+	} else if (!rx_buf->is_page && rx_buf->u.skb) {
+		dev_kfree_skb_any(rx_buf->u.skb);
+		rx_buf->u.skb = NULL;
 	}
 }
 
@@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 				    struct efx_rx_buffer *rx_buf)
 {
-	struct efx_rx_page_state *state = page_address(rx_buf->page);
+	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
 	struct efx_rx_buffer *new_buf;
 	unsigned fill_level, index;
 
@@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 	}
 
 	++state->refcnt;
-	get_page(rx_buf->page);
+	get_page(rx_buf->u.page);
 
 	index = rx_queue->added_count & rx_queue->ptr_mask;
 	new_buf = efx_rx_buffer(rx_queue, index);
 	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
-	new_buf->skb = NULL;
-	new_buf->page = rx_buf->page;
-	new_buf->data = (void *)
-		((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+	new_buf->u.page = rx_buf->u.page;
 	new_buf->len = rx_buf->len;
+	new_buf->is_page = true;
 	++rx_queue->added_count;
 }
 
@@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 	struct efx_rx_buffer *new_buf;
 	unsigned index;
 
-	if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
-	    page_count(rx_buf->page) == 1)
+	if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+	    page_count(rx_buf->u.page) == 1)
 		efx_resurrect_rx_buffer(rx_queue, rx_buf);
 
 	index = rx_queue->added_count & rx_queue->ptr_mask;
 	new_buf = efx_rx_buffer(rx_queue, index);
 
 	memcpy(new_buf, rx_buf, sizeof(*new_buf));
-	rx_buf->page = NULL;
-	rx_buf->skb = NULL;
+	rx_buf->u.page = NULL;
 	++rx_queue->added_count;
 }
 
@@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 		 * data at the end of the skb will be trashed. So
 		 * we have no choice but to leak the fragment.
 		 */
-		*leak_packet = (rx_buf->skb != NULL);
+		*leak_packet = !rx_buf->is_page;
 		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
 	} else {
 		if (net_ratelimit())
@@ -448,19 +455,18 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  */
 static void efx_rx_packet_gro(struct efx_channel *channel,
 			      struct efx_rx_buffer *rx_buf,
-			      bool checksummed)
+			      const u8 *eh, bool checksummed)
 {
 	struct napi_struct *napi = &channel->napi_str;
 	gro_result_t gro_result;
 
 	/* Pass the skb/page into the GRO engine */
-	if (rx_buf->page) {
+	if (rx_buf->is_page) {
 		struct efx_nic *efx = channel->efx;
-		struct page *page = rx_buf->page;
+		struct page *page = rx_buf->u.page;
 		struct sk_buff *skb;
 
-		EFX_BUG_ON_PARANOID(rx_buf->skb);
-		rx_buf->page = NULL;
+		rx_buf->u.page = NULL;
 
 		skb = napi_get_frags(napi);
 		if (!skb) {
@@ -469,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 		}
 
 		if (efx->net_dev->features & NETIF_F_RXHASH)
-			skb->rxhash = efx_rx_buf_hash(rx_buf);
+			skb->rxhash = efx_rx_buf_hash(eh);
 
 		skb_shinfo(skb)->frags[0].page = page;
 		skb_shinfo(skb)->frags[0].page_offset =
-			efx_rx_buf_offset(rx_buf);
+			efx_rx_buf_offset(efx, rx_buf);
 		skb_shinfo(skb)->frags[0].size = rx_buf->len;
 		skb_shinfo(skb)->nr_frags = 1;
 
@@ -487,11 +493,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 
 		gro_result = napi_gro_frags(napi);
 	} else {
-		struct sk_buff *skb = rx_buf->skb;
+		struct sk_buff *skb = rx_buf->u.skb;
 
-		EFX_BUG_ON_PARANOID(!skb);
 		EFX_BUG_ON_PARANOID(!checksummed);
-		rx_buf->skb = NULL;
+		rx_buf->u.skb = NULL;
 
 		gro_result = napi_gro_receive(napi, skb);
 	}
@@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	bool leak_packet = false;
 
 	rx_buf = efx_rx_buffer(rx_queue, index);
-	EFX_BUG_ON_PARANOID(!rx_buf->data);
-	EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
-	EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
 
 	/* This allows the refill path to post another buffer.
 	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.
 	 */
-	prefetch(rx_buf->data);
+	prefetch(efx_rx_buf_eh(efx, rx_buf));
 
 	/* Pipeline receives so that we give time for packet headers to be
 	 * prefetched into cache.
 	 */
-	rx_buf->len = len;
+	rx_buf->len = len - efx->type->rx_buffer_hash_size;
 out:
 	if (channel->rx_pkt)
 		__efx_rx_packet(channel,
@@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel,
 {
 	struct efx_nic *efx = channel->efx;
 	struct sk_buff *skb;
-
-	rx_buf->data += efx->type->rx_buffer_hash_size;
-	rx_buf->len -= efx->type->rx_buffer_hash_size;
+	u8 *eh = efx_rx_buf_eh(efx, rx_buf);
 
 	/* If we're in loopback test, then pass the packet directly to the
 	 * loopback layer, and free the rx_buf here
 	 */
 	if (unlikely(efx->loopback_selftest)) {
-		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+		efx_loopback_rx_packet(efx, eh, rx_buf->len);
 		efx_free_rx_buffer(efx, rx_buf);
 		return;
 	}
 
-	if (rx_buf->skb) {
-		prefetch(skb_shinfo(rx_buf->skb));
+	if (!rx_buf->is_page) {
+		skb = rx_buf->u.skb;
+
+		prefetch(skb_shinfo(skb));
 
-		skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
-		skb_put(rx_buf->skb, rx_buf->len);
+		skb_reserve(skb, efx->type->rx_buffer_hash_size);
+		skb_put(skb, rx_buf->len);
 
 		if (efx->net_dev->features & NETIF_F_RXHASH)
-			rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+			skb->rxhash = efx_rx_buf_hash(eh);
 
 		/* Move past the ethernet header. rx_buf->data still points
 		 * at the ethernet header */
-		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
-						       efx->net_dev);
+		skb->protocol = eth_type_trans(skb, efx->net_dev);
 
-		skb_record_rx_queue(rx_buf->skb, channel->channel);
+		skb_record_rx_queue(skb, channel->channel);
 	}
 
-	if (likely(checksummed || rx_buf->page)) {
-		efx_rx_packet_gro(channel, rx_buf, checksummed);
+	if (likely(checksummed || rx_buf->is_page)) {
+		efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
 		return;
 	}
 
 	/* We now own the SKB */
-	skb = rx_buf->skb;
-	rx_buf->skb = NULL;
-	EFX_BUG_ON_PARANOID(!skb);
+	skb = rx_buf->u.skb;
+	rx_buf->u.skb = NULL;
 
 	/* Set the SKB flags */
 	skb_checksum_none_assert(skb);
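
The conversion of efx_rx_buffer to a tagged union makes the skb/page discrimination explicit: is_page, not pointer NULL-ness, now says which member may be read. A toy illustration of the discipline, assuming the same rx.c context (example_describe_buf() is not driver code):

	static void example_describe_buf(struct efx_nic *efx,
					 struct efx_rx_buffer *rx_buf)
	{
		/* Always test is_page before touching the union */
		if (rx_buf->is_page)
			netif_dbg(efx, rx_status, efx->net_dev,
				  "page-backed buffer, offset %u\n",
				  efx_rx_buf_offset(efx, rx_buf));
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "skb-backed buffer, len %u\n", rx_buf->len);
	}
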
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99f129..a0f49b348d6 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 		goto out;
 	}
 
-	/* Test both types of TX queue */
+	/* Test all enabled types of TX queue */
 	efx_for_each_channel_tx_queue(tx_queue, channel) {
 		state->offload_csum = (tx_queue->queue &
 				       EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index aed495a4dad..dba5456e70f 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index bf845617644..e4dd8986b1f 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -227,13 +227,6 @@ static int siena_probe_nic(struct efx_nic *efx)
 	if (rc)
 		goto fail1;
 
-	rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
-	if (rc) {
-		netif_err(efx, probe, efx->net_dev,
-			  "Failed to read MCPU firmware version - rc %d\n", rc);
-		goto fail1; /* MCPU absent? */
-	}
-
 	/* Let the BMC know that the driver is now in charge of link and
 	 * filter settings. We must do this before we reset the NIC */
 	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
@@ -348,11 +341,6 @@ static int siena_init_nic(struct efx_nic *efx)
 		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
 	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
 
-	if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
-		/* No MCDI operation has been defined to set thresholds */
-		netif_err(efx, hw, efx->net_dev,
-			  "ignoring RX flow control thresholds\n");
-
 	/* Enable event logging */
 	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
 	if (rc)
@@ -514,16 +502,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx)
 	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
 }
 
-void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
-{
-	struct siena_nic_data *nic_data = efx->nic_data;
-	snprintf(buf, len, "%u.%u.%u.%u",
-		 (unsigned int)(nic_data->fw_version >> 48),
-		 (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
-		 (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
-		 (unsigned int)(nic_data->fw_version & 0xffff));
-}
-
 /**************************************************************************
  *
  * Wake on LAN
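
The deleted helper documents the layout of the 64-bit MCDI firmware version: four 16-bit fields packed most-significant first, printed as "a.b.c.d". An equivalent decoding, kept here purely for reference (example_decode_fwver() is illustrative, not driver code):

	static void example_decode_fwver(u64 v, unsigned int out[4])
	{
		out[0] = v >> 48;
		out[1] = (v >> 32) & 0xffff;
		out[2] = (v >> 16) & 0xffff;
		out[3] = v & 0xffff;
	}
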
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 879b7f6bde3..71f2e3ebe1c 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005 Fen Systems Ltd.
- * Copyright 2006 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f102912eba9..efdceb35aaa 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -196,7 +196,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
 		if (rc < 0)
 			return rc;
 
-		rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
+		rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
 		if (rc < 0)
 			return rc;
 	}
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da657b..13980190821 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
336{ 336{
337 struct efx_nic *efx = netdev_priv(net_dev); 337 struct efx_nic *efx = netdev_priv(net_dev);
338 struct efx_tx_queue *tx_queue; 338 struct efx_tx_queue *tx_queue;
339 unsigned index, type;
339 340
340 if (unlikely(efx->port_inhibited)) 341 if (unlikely(efx->port_inhibited))
341 return NETDEV_TX_BUSY; 342 return NETDEV_TX_BUSY;
342 343
343 tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb), 344 index = skb_get_queue_mapping(skb);
344 skb->ip_summed == CHECKSUM_PARTIAL ? 345 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
345 EFX_TXQ_TYPE_OFFLOAD : 0); 346 if (index >= efx->n_tx_channels) {
347 index -= efx->n_tx_channels;
348 type |= EFX_TXQ_TYPE_HIGHPRI;
349 }
350 tx_queue = efx_get_tx_queue(efx, index, type);
346 351
347 return efx_enqueue_skb(tx_queue, skb); 352 return efx_enqueue_skb(tx_queue, skb);
348} 353}
349 354
355void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
356{
357 struct efx_nic *efx = tx_queue->efx;
358
359 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
360 tx_queue->core_txq =
361 netdev_get_tx_queue(efx->net_dev,
362 tx_queue->queue / EFX_TXQ_TYPES +
363 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
364 efx->n_tx_channels : 0));
365}
366
367int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
368{
369 struct efx_nic *efx = netdev_priv(net_dev);
370 struct efx_channel *channel;
371 struct efx_tx_queue *tx_queue;
372 unsigned tc;
373 int rc;
374
375 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
376 return -EINVAL;
377
378 if (num_tc == net_dev->num_tc)
379 return 0;
380
381 for (tc = 0; tc < num_tc; tc++) {
382 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
383 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
384 }
385
386 if (num_tc > net_dev->num_tc) {
387 /* Initialise high-priority queues as necessary */
388 efx_for_each_channel(channel, efx) {
389 efx_for_each_possible_channel_tx_queue(tx_queue,
390 channel) {
391 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
392 continue;
393 if (!tx_queue->buffer) {
394 rc = efx_probe_tx_queue(tx_queue);
395 if (rc)
396 return rc;
397 }
398 if (!tx_queue->initialised)
399 efx_init_tx_queue(tx_queue);
400 efx_init_tx_queue_core_txq(tx_queue);
401 }
402 }
403 } else {
404 /* Reduce number of classes before number of queues */
405 net_dev->num_tc = num_tc;
406 }
407
408 rc = netif_set_real_num_tx_queues(net_dev,
409 max_t(int, num_tc, 1) *
410 efx->n_tx_channels);
411 if (rc)
412 return rc;
413
414 /* Do not destroy high-priority queues when they become
415 * unused. We would have to flush them first, and it is
416 * fairly difficult to flush a subset of TX queues. Leave
417 * it to efx_fini_channels().
418 */
419
420 net_dev->num_tc = num_tc;
421 return 0;
422}
423
350void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 424void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
351{ 425{
352 unsigned fill_level; 426 unsigned fill_level;
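
The rewritten lookup above splits the core-queue number into a channel index plus type flags: queues at or past n_tx_channels form the high-priority bank, and CHECKSUM_PARTIAL selects the offload variant. efx_init_tx_queue_core_txq() must then be the exact inverse, so each hardware queue points back at the netdev queue it serves, and efx_setup_tc() lays the tc_to_txq table out the same way (each TC occupies n_tx_channels consecutive core queues). Below is a standalone sketch of the round trip; the flag values and the EFX_TXQ_TYPES count are assumptions for illustration, not copied from the sfc headers.

#include <assert.h>
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD	1	/* assumed: checksum-offload bit */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* assumed: high-priority bit */
#define EFX_TXQ_TYPES		4	/* assumed: count of flag combinations */

static unsigned n_tx_channels = 4;

/* Forward map, as in efx_hard_start_xmit(): core queue -> (index, type). */
static void core_to_hw(unsigned core, unsigned *index, unsigned *type)
{
	*type = 0;
	*index = core;
	if (*index >= n_tx_channels) {	/* second bank is high priority */
		*index -= n_tx_channels;
		*type |= EFX_TXQ_TYPE_HIGHPRI;
	}
}

/* Inverse map, mirroring efx_init_tx_queue_core_txq(). */
static unsigned hw_to_core(unsigned queue)
{
	return queue / EFX_TXQ_TYPES +
	       ((queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);
}

int main(void)
{
	unsigned core, index, type;

	for (core = 0; core < 2 * n_tx_channels; core++) {
		core_to_hw(core, &index, &type);
		/* hardware queue = channel * EFX_TXQ_TYPES + type flags;
		 * the OFFLOAD bit does not change the core queue. */
		assert(hw_to_core(index * EFX_TXQ_TYPES + type) == core);
		assert(hw_to_core(index * EFX_TXQ_TYPES +
				  (type | EFX_TXQ_TYPE_OFFLOAD)) == core);
	}
	printf("encode/decode agree for %u core queues\n", 2 * n_tx_channels);
	return 0;
}

If the two mappings ever diverged, completions would be reported against the wrong core queue, which is why the patch keeps the "Must be inverse of queue lookup" comment right next to the decode.
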
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
430 504
431 /* Set up TX descriptor ring */ 505 /* Set up TX descriptor ring */
432 efx_nic_init_tx(tx_queue); 506 efx_nic_init_tx(tx_queue);
507
508 tx_queue->initialised = true;
433} 509}
434 510
435void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 511void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
452 528
453void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) 529void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
454{ 530{
531 if (!tx_queue->initialised)
532 return;
533
455 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 534 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
456 "shutting down TX queue %d\n", tx_queue->queue); 535 "shutting down TX queue %d\n", tx_queue->queue);
457 536
537 tx_queue->initialised = false;
538
458 /* Flush TX queue, remove descriptor ring */ 539 /* Flush TX queue, remove descriptor ring */
459 efx_nic_fini_tx(tx_queue); 540 efx_nic_fini_tx(tx_queue);
460 541
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
466 547
467void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 548void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
468{ 549{
550 if (!tx_queue->buffer)
551 return;
552
469 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 553 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
470 "destroying TX queue %d\n", tx_queue->queue); 554 "destroying TX queue %d\n", tx_queue->queue);
471 efx_nic_remove_tx(tx_queue); 555 efx_nic_remove_tx(tx_queue);
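
The three guards added above (the initialised check in efx_fini_tx_queue(), the buffer check in efx_remove_tx_queue(), and the initialised flag set at the end of efx_init_tx_queue()) make teardown idempotent. efx_setup_tc() depends on that: high-priority queues may be probed and initialised lazily or never, yet the common fini/remove paths walk all of them. A generic sketch of the idiom; the type and function names here are hypothetical, not the sfc ones.

#include <stdbool.h>
#include <stdlib.h>

struct queue {
	void *buffer;		/* NULL until probed */
	bool initialised;	/* false until inited */
};

static int queue_probe(struct queue *q)
{
	q->buffer = calloc(64, sizeof(long));
	return q->buffer ? 0 : -1;
}

static void queue_init(struct queue *q)
{
	q->initialised = true;	/* mirrors tx_queue->initialised = true */
}

static void queue_fini(struct queue *q)
{
	if (!q->initialised)	/* tolerate never-initialised queues */
		return;
	q->initialised = false;
}

static void queue_remove(struct queue *q)
{
	if (!q->buffer)		/* tolerate never-probed queues */
		return;
	free(q->buffer);
	q->buffer = NULL;
}

int main(void)
{
	struct queue q = { 0 };

	queue_fini(&q);		/* both are safe before any setup... */
	queue_remove(&q);
	if (queue_probe(&q) == 0) {
		queue_init(&q);
		queue_fini(&q);	/* ...and after it, in any order */
		queue_remove(&q);
	}
	return 0;
}
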
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
index 351794a7921..d9886addcc9 100644
--- a/drivers/net/sfc/txc43128_phy.c
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -193,7 +193,7 @@ static int txc_reset_phy(struct efx_nic *efx)
193 goto fail; 193 goto fail;
194 194
195 /* Check that all the MMDs we expect are present and responding. */ 195 /* Check that all the MMDs we expect are present and responding. */
196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0); 196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
197 if (rc < 0) 197 if (rc < 0)
198 goto fail; 198 goto fail;
199 199
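
As in tenxpress.c earlier, the call above drops the second argument of efx_mdio_check_mmds(): the fault-ignore mask is gone, leaving a helper that simply verifies every MMD named in the mask is present and responding. A minimal sketch of that kind of check; mdio_read45() is a hypothetical stub, not the sfc MDIO API, and the mask semantics are assumed.

#include <errno.h>
#include <stdio.h>

/* Hypothetical clause-45 read stub standing in for real MDIO access;
 * pretend MMDs 1 (PMA), 3 (PCS) and 4 (PHYXS) respond. */
static int mdio_read45(int mmd, int reg)
{
	(void)reg;
	return (mmd == 1 || mmd == 3 || mmd == 4) ? 0x8000 : -EIO;
}

/* Verify that every MMD named in the mask responds, mirroring the shape
 * of the two-argument efx_mdio_check_mmds() calls above. */
static int check_mmds(unsigned int mmd_mask)
{
	int mmd;

	for (mmd = 0; mmd_mask; mmd++, mmd_mask >>= 1)
		if ((mmd_mask & 1) && mdio_read45(mmd, 8) < 0)
			return -EIO;
	return 0;
}

int main(void)
{
	printf("required MMDs present: %s\n",
	       check_mmds((1 << 1) | (1 << 3) | (1 << 4)) ? "no" : "yes");
	return 0;
}
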
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index e0d63083c3a..99ff11400ce 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -38,6 +38,8 @@
38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
39/* Legacy interrupt storm when interrupt fifo fills */ 39/* Legacy interrupt storm when interrupt fifo fills */
40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
41/* Write combining and sriov=enabled are incompatible */
42#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
41 43
42/* Spurious parity errors in TSORT buffers */ 44/* Spurious parity errors in TSORT buffers */
43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 45#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
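
The new EFX_WORKAROUND_22643 line follows the file's existing pattern: an erratum number expands to a predicate over the NIC type, so callers gate behaviour with an ordinary if rather than an #ifdef. A sketch of the idiom with illustrative names (the real macros take an efx argument and test the hardware revision):

#include <stdbool.h>
#include <stdio.h>

enum nic_rev { REV_FALCON_A, REV_FALCON_B0, REV_SIENA };

struct nic { enum nic_rev rev; bool sriov_enabled; };

/* Illustrative stand-ins for the EFX_WORKAROUND_* predicate macros. */
#define WORKAROUND_ALWAYS(n)	true
#define WORKAROUND_SIENA(n)	((n)->rev == REV_SIENA)
/* Write combining and SR-IOV are incompatible (erratum 22643). */
#define WORKAROUND_22643(n)	WORKAROUND_SIENA(n)

static bool may_use_write_combining(const struct nic *n)
{
	return !(WORKAROUND_22643(n) && n->sriov_enabled);
}

int main(void)
{
	struct nic siena = { REV_SIENA, true };
	struct nic falcon = { REV_FALCON_B0, true };

	printf("write combining: siena=%d falcon=%d\n",
	       may_use_write_combining(&siena),
	       may_use_write_combining(&falcon));
	return 0;
}

Here the predicate would presumably be tested before enabling write-combined mappings whenever SR-IOV is active.
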
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 819c1750e2a..e9e7a530552 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,35 +32,40 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/ethtool.h>
35#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
36 37
37#include "sh_eth.h" 38#include "sh_eth.h"
38 39
40#define SH_ETH_DEF_MSG_ENABLE \
41 (NETIF_MSG_LINK | \
42 NETIF_MSG_TIMER | \
 43 NETIF_MSG_RX_ERR | \
44 NETIF_MSG_TX_ERR)
45
39/* There is CPU dependent code */ 46/* There is CPU dependent code */
40#if defined(CONFIG_CPU_SUBTYPE_SH7724) 47#if defined(CONFIG_CPU_SUBTYPE_SH7724)
41#define SH_ETH_RESET_DEFAULT 1 48#define SH_ETH_RESET_DEFAULT 1
42static void sh_eth_set_duplex(struct net_device *ndev) 49static void sh_eth_set_duplex(struct net_device *ndev)
43{ 50{
44 struct sh_eth_private *mdp = netdev_priv(ndev); 51 struct sh_eth_private *mdp = netdev_priv(ndev);
45 u32 ioaddr = ndev->base_addr;
46 52
47 if (mdp->duplex) /* Full */ 53 if (mdp->duplex) /* Full */
48 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 54 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
49 else /* Half */ 55 else /* Half */
50 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 56 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
51} 57}
52 58
53static void sh_eth_set_rate(struct net_device *ndev) 59static void sh_eth_set_rate(struct net_device *ndev)
54{ 60{
55 struct sh_eth_private *mdp = netdev_priv(ndev); 61 struct sh_eth_private *mdp = netdev_priv(ndev);
56 u32 ioaddr = ndev->base_addr;
57 62
58 switch (mdp->speed) { 63 switch (mdp->speed) {
59 case 10: /* 10BASE */ 64 case 10: /* 10BASE */
60 writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR); 65 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
61 break; 66 break;
62 case 100:/* 100BASE */ 67 case 100:/* 100BASE */
63 writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR); 68 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
64 break; 69 break;
65 default: 70 default:
66 break; 71 break;
@@ -89,29 +94,28 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
89 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 94 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
90}; 95};
91#elif defined(CONFIG_CPU_SUBTYPE_SH7757) 96#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
92#define SH_ETH_RESET_DEFAULT 1 97#define SH_ETH_HAS_BOTH_MODULES 1
98#define SH_ETH_HAS_TSU 1
93static void sh_eth_set_duplex(struct net_device *ndev) 99static void sh_eth_set_duplex(struct net_device *ndev)
94{ 100{
95 struct sh_eth_private *mdp = netdev_priv(ndev); 101 struct sh_eth_private *mdp = netdev_priv(ndev);
96 u32 ioaddr = ndev->base_addr;
97 102
98 if (mdp->duplex) /* Full */ 103 if (mdp->duplex) /* Full */
99 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 104 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
100 else /* Half */ 105 else /* Half */
101 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 106 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
102} 107}
103 108
104static void sh_eth_set_rate(struct net_device *ndev) 109static void sh_eth_set_rate(struct net_device *ndev)
105{ 110{
106 struct sh_eth_private *mdp = netdev_priv(ndev); 111 struct sh_eth_private *mdp = netdev_priv(ndev);
107 u32 ioaddr = ndev->base_addr;
108 112
109 switch (mdp->speed) { 113 switch (mdp->speed) {
110 case 10: /* 10BASE */ 114 case 10: /* 10BASE */
111 writel(0, ioaddr + RTRATE); 115 sh_eth_write(ndev, 0, RTRATE);
112 break; 116 break;
113 case 100:/* 100BASE */ 117 case 100:/* 100BASE */
114 writel(1, ioaddr + RTRATE); 118 sh_eth_write(ndev, 1, RTRATE);
115 break; 119 break;
116 default: 120 default:
117 break; 121 break;
@@ -138,24 +142,154 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
138 .no_ade = 1, 142 .no_ade = 1,
139}; 143};
140 144
145#define SH_GIGA_ETH_BASE 0xfee00000
146#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
147#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
148static void sh_eth_chip_reset_giga(struct net_device *ndev)
149{
150 int i;
151 unsigned long mahr[2], malr[2];
152
153 /* save MAHR and MALR */
154 for (i = 0; i < 2; i++) {
155 malr[i] = readl(GIGA_MALR(i));
156 mahr[i] = readl(GIGA_MAHR(i));
157 }
158
159 /* reset device */
160 writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
161 mdelay(1);
162
163 /* restore MAHR and MALR */
164 for (i = 0; i < 2; i++) {
165 writel(malr[i], GIGA_MALR(i));
166 writel(mahr[i], GIGA_MAHR(i));
167 }
168}
169
170static int sh_eth_is_gether(struct sh_eth_private *mdp);
171static void sh_eth_reset(struct net_device *ndev)
172{
173 struct sh_eth_private *mdp = netdev_priv(ndev);
174 int cnt = 100;
175
176 if (sh_eth_is_gether(mdp)) {
177 sh_eth_write(ndev, 0x03, EDSR);
178 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
179 EDMR);
180 while (cnt > 0) {
181 if (!(sh_eth_read(ndev, EDMR) & 0x3))
182 break;
183 mdelay(1);
184 cnt--;
185 }
186 if (cnt <= 0)
187 printk(KERN_ERR "Device reset fail\n");
188
189 /* Table Init */
190 sh_eth_write(ndev, 0x0, TDLAR);
191 sh_eth_write(ndev, 0x0, TDFAR);
192 sh_eth_write(ndev, 0x0, TDFXR);
193 sh_eth_write(ndev, 0x0, TDFFR);
194 sh_eth_write(ndev, 0x0, RDLAR);
195 sh_eth_write(ndev, 0x0, RDFAR);
196 sh_eth_write(ndev, 0x0, RDFXR);
197 sh_eth_write(ndev, 0x0, RDFFR);
198 } else {
199 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
200 EDMR);
201 mdelay(3);
202 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
203 EDMR);
204 }
205}
206
207static void sh_eth_set_duplex_giga(struct net_device *ndev)
208{
209 struct sh_eth_private *mdp = netdev_priv(ndev);
210
211 if (mdp->duplex) /* Full */
212 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
213 else /* Half */
214 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
215}
216
217static void sh_eth_set_rate_giga(struct net_device *ndev)
218{
219 struct sh_eth_private *mdp = netdev_priv(ndev);
220
221 switch (mdp->speed) {
222 case 10: /* 10BASE */
223 sh_eth_write(ndev, 0x00000000, GECMR);
224 break;
225 case 100:/* 100BASE */
226 sh_eth_write(ndev, 0x00000010, GECMR);
227 break;
228 case 1000: /* 1000BASE */
229 sh_eth_write(ndev, 0x00000020, GECMR);
230 break;
231 default:
232 break;
233 }
234}
235
236/* SH7757(GETHERC) */
237static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
238 .chip_reset = sh_eth_chip_reset_giga,
239 .set_duplex = sh_eth_set_duplex_giga,
240 .set_rate = sh_eth_set_rate_giga,
241
242 .ecsr_value = ECSR_ICD | ECSR_MPD,
243 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
244 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
245
246 .tx_check = EESR_TC1 | EESR_FTC,
247 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
248 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
249 EESR_ECI,
250 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
251 EESR_TFE,
252 .fdr_value = 0x0000072f,
253 .rmcr_value = 0x00000001,
254
255 .apr = 1,
256 .mpr = 1,
257 .tpauser = 1,
258 .bculr = 1,
259 .hw_swap = 1,
260 .rpadir = 1,
261 .rpadir_value = 2 << 16,
262 .no_trimd = 1,
263 .no_ade = 1,
264};
265
266static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
267{
268 if (sh_eth_is_gether(mdp))
269 return &sh_eth_my_cpu_data_giga;
270 else
271 return &sh_eth_my_cpu_data;
272}
273
141#elif defined(CONFIG_CPU_SUBTYPE_SH7763) 274#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
142#define SH_ETH_HAS_TSU 1 275#define SH_ETH_HAS_TSU 1
143static void sh_eth_chip_reset(struct net_device *ndev) 276static void sh_eth_chip_reset(struct net_device *ndev)
144{ 277{
278 struct sh_eth_private *mdp = netdev_priv(ndev);
279
145 /* reset device */ 280 /* reset device */
146 writel(ARSTR_ARSTR, ARSTR); 281 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
147 mdelay(1); 282 mdelay(1);
148} 283}
149 284
150static void sh_eth_reset(struct net_device *ndev) 285static void sh_eth_reset(struct net_device *ndev)
151{ 286{
152 u32 ioaddr = ndev->base_addr;
153 int cnt = 100; 287 int cnt = 100;
154 288
155 writel(EDSR_ENALL, ioaddr + EDSR); 289 sh_eth_write(ndev, EDSR_ENALL, EDSR);
156 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 290 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
157 while (cnt > 0) { 291 while (cnt > 0) {
158 if (!(readl(ioaddr + EDMR) & 0x3)) 292 if (!(sh_eth_read(ndev, EDMR) & 0x3))
159 break; 293 break;
160 mdelay(1); 294 mdelay(1);
161 cnt--; 295 cnt--;
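
sh_eth_chip_reset_giga() above has to save MAHR/MALR for both ports before pulsing ARSTR and write them back afterwards, because the reset clears the MAC-address registers. The save/reset/restore shape, reduced to a runnable sketch (the register layout here is faked):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NPORTS 2

static uint32_t mahr[NPORTS], malr[NPORTS];	/* fake MAC address registers */

static void device_reset(void)
{
	/* the real ARSTR pulse clears the MAC address registers */
	memset(mahr, 0, sizeof(mahr));
	memset(malr, 0, sizeof(malr));
}

static void chip_reset_preserving_mac(void)
{
	uint32_t hi[NPORTS], lo[NPORTS];
	int i;

	for (i = 0; i < NPORTS; i++) {		/* save MAHR and MALR */
		hi[i] = mahr[i];
		lo[i] = malr[i];
	}
	device_reset();
	for (i = 0; i < NPORTS; i++) {		/* restore MAHR and MALR */
		mahr[i] = hi[i];
		malr[i] = lo[i];
	}
}

int main(void)
{
	mahr[0] = 0x00123456;
	malr[0] = 0x789a;
	chip_reset_preserving_mac();
	printf("port 0 MAC regs after reset: %#x %#x\n", mahr[0], malr[0]);
	return 0;
}
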
@@ -164,41 +298,39 @@ static void sh_eth_reset(struct net_device *ndev)
164 printk(KERN_ERR "Device reset fail\n"); 298 printk(KERN_ERR "Device reset fail\n");
165 299
166 /* Table Init */ 300 /* Table Init */
167 writel(0x0, ioaddr + TDLAR); 301 sh_eth_write(ndev, 0x0, TDLAR);
168 writel(0x0, ioaddr + TDFAR); 302 sh_eth_write(ndev, 0x0, TDFAR);
169 writel(0x0, ioaddr + TDFXR); 303 sh_eth_write(ndev, 0x0, TDFXR);
170 writel(0x0, ioaddr + TDFFR); 304 sh_eth_write(ndev, 0x0, TDFFR);
171 writel(0x0, ioaddr + RDLAR); 305 sh_eth_write(ndev, 0x0, RDLAR);
172 writel(0x0, ioaddr + RDFAR); 306 sh_eth_write(ndev, 0x0, RDFAR);
173 writel(0x0, ioaddr + RDFXR); 307 sh_eth_write(ndev, 0x0, RDFXR);
174 writel(0x0, ioaddr + RDFFR); 308 sh_eth_write(ndev, 0x0, RDFFR);
175} 309}
176 310
177static void sh_eth_set_duplex(struct net_device *ndev) 311static void sh_eth_set_duplex(struct net_device *ndev)
178{ 312{
179 struct sh_eth_private *mdp = netdev_priv(ndev); 313 struct sh_eth_private *mdp = netdev_priv(ndev);
180 u32 ioaddr = ndev->base_addr;
181 314
182 if (mdp->duplex) /* Full */ 315 if (mdp->duplex) /* Full */
183 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 316 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
184 else /* Half */ 317 else /* Half */
185 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 318 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
186} 319}
187 320
188static void sh_eth_set_rate(struct net_device *ndev) 321static void sh_eth_set_rate(struct net_device *ndev)
189{ 322{
190 struct sh_eth_private *mdp = netdev_priv(ndev); 323 struct sh_eth_private *mdp = netdev_priv(ndev);
191 u32 ioaddr = ndev->base_addr;
192 324
193 switch (mdp->speed) { 325 switch (mdp->speed) {
194 case 10: /* 10BASE */ 326 case 10: /* 10BASE */
195 writel(GECMR_10, ioaddr + GECMR); 327 sh_eth_write(ndev, GECMR_10, GECMR);
196 break; 328 break;
197 case 100:/* 100BASE */ 329 case 100:/* 100BASE */
198 writel(GECMR_100, ioaddr + GECMR); 330 sh_eth_write(ndev, GECMR_100, GECMR);
199 break; 331 break;
200 case 1000: /* 1000BASE */ 332 case 1000: /* 1000BASE */
201 writel(GECMR_1000, ioaddr + GECMR); 333 sh_eth_write(ndev, GECMR_1000, GECMR);
202 break; 334 break;
203 default: 335 default:
204 break; 336 break;
@@ -229,6 +361,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
229 .hw_swap = 1, 361 .hw_swap = 1,
230 .no_trimd = 1, 362 .no_trimd = 1,
231 .no_ade = 1, 363 .no_ade = 1,
364 .tsu = 1,
232}; 365};
233 366
234#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 367#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -246,6 +379,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
246#define SH_ETH_HAS_TSU 1 379#define SH_ETH_HAS_TSU 1
247static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 380static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
248 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 381 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
382 .tsu = 1,
249}; 383};
250#endif 384#endif
251 385
@@ -281,11 +415,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
281/* Chip Reset */ 415/* Chip Reset */
282static void sh_eth_reset(struct net_device *ndev) 416static void sh_eth_reset(struct net_device *ndev)
283{ 417{
284 u32 ioaddr = ndev->base_addr; 418 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
285
286 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
287 mdelay(3); 419 mdelay(3);
288 writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); 420 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
289} 421}
290#endif 422#endif
291 423
@@ -334,13 +466,11 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
334 */ 466 */
335static void update_mac_address(struct net_device *ndev) 467static void update_mac_address(struct net_device *ndev)
336{ 468{
337 u32 ioaddr = ndev->base_addr; 469 sh_eth_write(ndev,
338 470 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
339 writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 471 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
340 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), 472 sh_eth_write(ndev,
341 ioaddr + MAHR); 473 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
342 writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
343 ioaddr + MALR);
344} 474}
345 475
346/* 476/*
@@ -353,21 +483,36 @@ static void update_mac_address(struct net_device *ndev)
353 */ 483 */
354static void read_mac_address(struct net_device *ndev, unsigned char *mac) 484static void read_mac_address(struct net_device *ndev, unsigned char *mac)
355{ 485{
356 u32 ioaddr = ndev->base_addr;
357
358 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 486 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
359 memcpy(ndev->dev_addr, mac, 6); 487 memcpy(ndev->dev_addr, mac, 6);
360 } else { 488 } else {
361 ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24); 489 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
362 ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF; 490 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
363 ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF; 491 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
364 ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF); 492 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
365 ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF; 493 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
366 ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF); 494 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
367 } 495 }
368} 496}
369 497
498static int sh_eth_is_gether(struct sh_eth_private *mdp)
499{
500 if (mdp->reg_offset == sh_eth_offset_gigabit)
501 return 1;
502 else
503 return 0;
504}
505
506static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
507{
508 if (sh_eth_is_gether(mdp))
509 return EDTRR_TRNS_GETHER;
510 else
511 return EDTRR_TRNS_ETHER;
512}
513
370struct bb_info { 514struct bb_info {
515 void (*set_gate)(unsigned long addr);
371 struct mdiobb_ctrl ctrl; 516 struct mdiobb_ctrl ctrl;
372 u32 addr; 517 u32 addr;
373 u32 mmd_msk;/* MMD */ 518 u32 mmd_msk;/* MMD */
@@ -398,6 +543,10 @@ static int bb_read(u32 addr, u32 msk)
398static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) 543static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
399{ 544{
400 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 545 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
546
547 if (bitbang->set_gate)
548 bitbang->set_gate(bitbang->addr);
549
401 if (bit) 550 if (bit)
402 bb_set(bitbang->addr, bitbang->mmd_msk); 551 bb_set(bitbang->addr, bitbang->mmd_msk);
403 else 552 else
@@ -409,6 +558,9 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
409{ 558{
410 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 559 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
411 560
561 if (bitbang->set_gate)
562 bitbang->set_gate(bitbang->addr);
563
412 if (bit) 564 if (bit)
413 bb_set(bitbang->addr, bitbang->mdo_msk); 565 bb_set(bitbang->addr, bitbang->mdo_msk);
414 else 566 else
@@ -419,6 +571,10 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
419static int sh_get_mdio(struct mdiobb_ctrl *ctrl) 571static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
420{ 572{
421 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 573 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
574
575 if (bitbang->set_gate)
576 bitbang->set_gate(bitbang->addr);
577
422 return bb_read(bitbang->addr, bitbang->mdi_msk); 578 return bb_read(bitbang->addr, bitbang->mdi_msk);
423} 579}
424 580
@@ -427,6 +583,9 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
427{ 583{
428 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 584 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
429 585
586 if (bitbang->set_gate)
587 bitbang->set_gate(bitbang->addr);
588
430 if (bit) 589 if (bit)
431 bb_set(bitbang->addr, bitbang->mdc_msk); 590 bb_set(bitbang->addr, bitbang->mdc_msk);
432 else 591 else
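
Each of the four bitbang callbacks now starts by invoking an optional set_gate() hook, supplied through platform data for boards (the SH7757 case) where the MDIO pins sit behind a gate that must be selected before every access. The pattern in isolation; the names, address and gate semantics below are illustrative.

#include <stdio.h>

struct bitbang {
	void (*set_gate)(unsigned long addr);	/* optional, may be NULL */
	unsigned long addr;
};

static void open_gate(unsigned long addr)
{
	printf("gate selected for %#lx\n", addr);
}

static void mdc_ctrl(struct bitbang *bb, int bit)
{
	if (bb->set_gate)		/* hook runs before every pin access */
		bb->set_gate(bb->addr);
	printf("MDC <- %d\n", bit);
}

int main(void)
{
	struct bitbang gated = { open_gate, 0xfee00520UL };
	struct bitbang plain = { NULL, 0xfee00520UL };

	mdc_ctrl(&gated, 1);
	mdc_ctrl(&plain, 0);		/* a NULL hook is simply skipped */
	return 0;
}

Keeping the hook nullable means boards without a gate pay only a well-predicted branch per access.
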
@@ -470,7 +629,6 @@ static void sh_eth_ring_free(struct net_device *ndev)
470/* format skb and descriptor buffer */ 629/* format skb and descriptor buffer */
471static void sh_eth_ring_format(struct net_device *ndev) 630static void sh_eth_ring_format(struct net_device *ndev)
472{ 631{
473 u32 ioaddr = ndev->base_addr;
474 struct sh_eth_private *mdp = netdev_priv(ndev); 632 struct sh_eth_private *mdp = netdev_priv(ndev);
475 int i; 633 int i;
476 struct sk_buff *skb; 634 struct sk_buff *skb;
@@ -506,10 +664,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
506 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 664 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
507 /* Rx descriptor address set */ 665 /* Rx descriptor address set */
508 if (i == 0) { 666 if (i == 0) {
509 writel(mdp->rx_desc_dma, ioaddr + RDLAR); 667 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
510#if defined(CONFIG_CPU_SUBTYPE_SH7763) 668 if (sh_eth_is_gether(mdp))
511 writel(mdp->rx_desc_dma, ioaddr + RDFAR); 669 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
512#endif
513 } 670 }
514 } 671 }
515 672
@@ -528,10 +685,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
528 txdesc->buffer_length = 0; 685 txdesc->buffer_length = 0;
529 if (i == 0) { 686 if (i == 0) {
530 /* Tx descriptor address set */ 687 /* Tx descriptor address set */
531 writel(mdp->tx_desc_dma, ioaddr + TDLAR); 688 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
532#if defined(CONFIG_CPU_SUBTYPE_SH7763) 689 if (sh_eth_is_gether(mdp))
533 writel(mdp->tx_desc_dma, ioaddr + TDFAR); 690 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
534#endif
535 } 691 }
536 } 692 }
537 693
@@ -613,7 +769,6 @@ static int sh_eth_dev_init(struct net_device *ndev)
613{ 769{
614 int ret = 0; 770 int ret = 0;
615 struct sh_eth_private *mdp = netdev_priv(ndev); 771 struct sh_eth_private *mdp = netdev_priv(ndev);
616 u32 ioaddr = ndev->base_addr;
617 u_int32_t rx_int_var, tx_int_var; 772 u_int32_t rx_int_var, tx_int_var;
618 u32 val; 773 u32 val;
619 774
@@ -623,71 +778,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
623 /* Descriptor format */ 778 /* Descriptor format */
624 sh_eth_ring_format(ndev); 779 sh_eth_ring_format(ndev);
625 if (mdp->cd->rpadir) 780 if (mdp->cd->rpadir)
626 writel(mdp->cd->rpadir_value, ioaddr + RPADIR); 781 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
627 782
628 /* all sh_eth int mask */ 783 /* all sh_eth int mask */
629 writel(0, ioaddr + EESIPR); 784 sh_eth_write(ndev, 0, EESIPR);
630 785
631#if defined(__LITTLE_ENDIAN__) 786#if defined(__LITTLE_ENDIAN__)
632 if (mdp->cd->hw_swap) 787 if (mdp->cd->hw_swap)
633 writel(EDMR_EL, ioaddr + EDMR); 788 sh_eth_write(ndev, EDMR_EL, EDMR);
634 else 789 else
635#endif 790#endif
636 writel(0, ioaddr + EDMR); 791 sh_eth_write(ndev, 0, EDMR);
637 792
638 /* FIFO size set */ 793 /* FIFO size set */
639 writel(mdp->cd->fdr_value, ioaddr + FDR); 794 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
640 writel(0, ioaddr + TFTR); 795 sh_eth_write(ndev, 0, TFTR);
641 796
642 /* Frame recv control */ 797 /* Frame recv control */
643 writel(mdp->cd->rmcr_value, ioaddr + RMCR); 798 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
644 799
645 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 800 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
646 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 801 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
647 writel(rx_int_var | tx_int_var, ioaddr + TRSCER); 802 sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
648 803
649 if (mdp->cd->bculr) 804 if (mdp->cd->bculr)
650 writel(0x800, ioaddr + BCULR); /* Burst cycle set */ 805 sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
651 806
652 writel(mdp->cd->fcftr_value, ioaddr + FCFTR); 807 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
653 808
654 if (!mdp->cd->no_trimd) 809 if (!mdp->cd->no_trimd)
655 writel(0, ioaddr + TRIMD); 810 sh_eth_write(ndev, 0, TRIMD);
656 811
657 /* Recv frame limit set register */ 812 /* Recv frame limit set register */
658 writel(RFLR_VALUE, ioaddr + RFLR); 813 sh_eth_write(ndev, RFLR_VALUE, RFLR);
659 814
660 writel(readl(ioaddr + EESR), ioaddr + EESR); 815 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
661 writel(mdp->cd->eesipr_value, ioaddr + EESIPR); 816 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
662 817
663 /* PAUSE Prohibition */ 818 /* PAUSE Prohibition */
664 val = (readl(ioaddr + ECMR) & ECMR_DM) | 819 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
665 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 820 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
666 821
667 writel(val, ioaddr + ECMR); 822 sh_eth_write(ndev, val, ECMR);
668 823
669 if (mdp->cd->set_rate) 824 if (mdp->cd->set_rate)
670 mdp->cd->set_rate(ndev); 825 mdp->cd->set_rate(ndev);
671 826
672 /* E-MAC Status Register clear */ 827 /* E-MAC Status Register clear */
673 writel(mdp->cd->ecsr_value, ioaddr + ECSR); 828 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
674 829
675 /* E-MAC Interrupt Enable register */ 830 /* E-MAC Interrupt Enable register */
676 writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR); 831 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
677 832
678 /* Set MAC address */ 833 /* Set MAC address */
679 update_mac_address(ndev); 834 update_mac_address(ndev);
680 835
681 /* mask reset */ 836 /* mask reset */
682 if (mdp->cd->apr) 837 if (mdp->cd->apr)
683 writel(APR_AP, ioaddr + APR); 838 sh_eth_write(ndev, APR_AP, APR);
684 if (mdp->cd->mpr) 839 if (mdp->cd->mpr)
685 writel(MPR_MP, ioaddr + MPR); 840 sh_eth_write(ndev, MPR_MP, MPR);
686 if (mdp->cd->tpauser) 841 if (mdp->cd->tpauser)
687 writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER); 842 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
688 843
689 /* Setting the Rx mode will start the Rx process. */ 844 /* Setting the Rx mode will start the Rx process. */
690 writel(EDRRR_R, ioaddr + EDRRR); 845 sh_eth_write(ndev, EDRRR_R, EDRRR);
691 846
692 netif_start_queue(ndev); 847 netif_start_queue(ndev);
693 848
@@ -811,24 +966,37 @@ static int sh_eth_rx(struct net_device *ndev)
811 966
812 /* Restart Rx engine if stopped. */ 967 /* Restart Rx engine if stopped. */
813 /* If we don't need to check status, don't. -KDU */ 968 /* If we don't need to check status, don't. -KDU */
814 if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R)) 969 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
815 writel(EDRRR_R, ndev->base_addr + EDRRR); 970 sh_eth_write(ndev, EDRRR_R, EDRRR);
816 971
817 return 0; 972 return 0;
818} 973}
819 974
975static void sh_eth_rcv_snd_disable(struct net_device *ndev)
976{
977 /* disable tx and rx */
978 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
979 ~(ECMR_RE | ECMR_TE), ECMR);
980}
981
982static void sh_eth_rcv_snd_enable(struct net_device *ndev)
983{
984 /* enable tx and rx */
985 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
986 (ECMR_RE | ECMR_TE), ECMR);
987}
988
820/* error control function */ 989/* error control function */
821static void sh_eth_error(struct net_device *ndev, int intr_status) 990static void sh_eth_error(struct net_device *ndev, int intr_status)
822{ 991{
823 struct sh_eth_private *mdp = netdev_priv(ndev); 992 struct sh_eth_private *mdp = netdev_priv(ndev);
824 u32 ioaddr = ndev->base_addr;
825 u32 felic_stat; 993 u32 felic_stat;
826 u32 link_stat; 994 u32 link_stat;
827 u32 mask; 995 u32 mask;
828 996
829 if (intr_status & EESR_ECI) { 997 if (intr_status & EESR_ECI) {
830 felic_stat = readl(ioaddr + ECSR); 998 felic_stat = sh_eth_read(ndev, ECSR);
831 writel(felic_stat, ioaddr + ECSR); /* clear int */ 999 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
832 if (felic_stat & ECSR_ICD) 1000 if (felic_stat & ECSR_ICD)
833 mdp->stats.tx_carrier_errors++; 1001 mdp->stats.tx_carrier_errors++;
834 if (felic_stat & ECSR_LCHNG) { 1002 if (felic_stat & ECSR_LCHNG) {
@@ -839,26 +1007,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
839 else 1007 else
840 link_stat = PHY_ST_LINK; 1008 link_stat = PHY_ST_LINK;
841 } else { 1009 } else {
842 link_stat = (readl(ioaddr + PSR)); 1010 link_stat = (sh_eth_read(ndev, PSR));
843 if (mdp->ether_link_active_low) 1011 if (mdp->ether_link_active_low)
844 link_stat = ~link_stat; 1012 link_stat = ~link_stat;
845 } 1013 }
846 if (!(link_stat & PHY_ST_LINK)) { 1014 if (!(link_stat & PHY_ST_LINK))
847 /* Link Down : disable tx and rx */ 1015 sh_eth_rcv_snd_disable(ndev);
848 writel(readl(ioaddr + ECMR) & 1016 else {
849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
850 } else {
851 /* Link Up */ 1017 /* Link Up */
852 writel(readl(ioaddr + EESIPR) & 1018 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
853 ~DMAC_M_ECI, ioaddr + EESIPR); 1019 ~DMAC_M_ECI, EESIPR);
854 /*clear int */ 1020 /*clear int */
855 writel(readl(ioaddr + ECSR), 1021 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
856 ioaddr + ECSR); 1022 ECSR);
857 writel(readl(ioaddr + EESIPR) | 1023 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
858 DMAC_M_ECI, ioaddr + EESIPR); 1024 DMAC_M_ECI, EESIPR);
859 /* enable tx and rx */ 1025 /* enable tx and rx */
860 writel(readl(ioaddr + ECMR) | 1026 sh_eth_rcv_snd_enable(ndev);
861 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
862 } 1027 }
863 } 1028 }
864 } 1029 }
@@ -867,6 +1032,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
867 /* Write back end. unused write back interrupt */ 1032 /* Write back end. unused write back interrupt */
868 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1033 if (intr_status & EESR_TABT) /* Transmit Abort int */
869 mdp->stats.tx_aborted_errors++; 1034 mdp->stats.tx_aborted_errors++;
1035 if (netif_msg_tx_err(mdp))
1036 dev_err(&ndev->dev, "Transmit Abort\n");
870 } 1037 }
871 1038
872 if (intr_status & EESR_RABT) { 1039 if (intr_status & EESR_RABT) {
@@ -874,28 +1041,47 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
874 if (intr_status & EESR_RFRMER) { 1041 if (intr_status & EESR_RFRMER) {
875 /* Receive Frame Overflow int */ 1042 /* Receive Frame Overflow int */
876 mdp->stats.rx_frame_errors++; 1043 mdp->stats.rx_frame_errors++;
877 dev_err(&ndev->dev, "Receive Frame Overflow\n"); 1044 if (netif_msg_rx_err(mdp))
1045 dev_err(&ndev->dev, "Receive Abort\n");
878 } 1046 }
879 } 1047 }
880 1048
881 if (!mdp->cd->no_ade) { 1049 if (intr_status & EESR_TDE) {
882 if (intr_status & EESR_ADE && intr_status & EESR_TDE && 1050 /* Transmit Descriptor Empty int */
883 intr_status & EESR_TFE) 1051 mdp->stats.tx_fifo_errors++;
884 mdp->stats.tx_fifo_errors++; 1052 if (netif_msg_tx_err(mdp))
1053 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1054 }
1055
1056 if (intr_status & EESR_TFE) {
1057 /* FIFO under flow */
1058 mdp->stats.tx_fifo_errors++;
1059 if (netif_msg_tx_err(mdp))
1060 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
885 } 1061 }
886 1062
887 if (intr_status & EESR_RDE) { 1063 if (intr_status & EESR_RDE) {
888 /* Receive Descriptor Empty int */ 1064 /* Receive Descriptor Empty int */
889 mdp->stats.rx_over_errors++; 1065 mdp->stats.rx_over_errors++;
890 1066
891 if (readl(ioaddr + EDRRR) ^ EDRRR_R) 1067 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
892 writel(EDRRR_R, ioaddr + EDRRR); 1068 sh_eth_write(ndev, EDRRR_R, EDRRR);
893 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 1069 if (netif_msg_rx_err(mdp))
1070 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
894 } 1071 }
1072
895 if (intr_status & EESR_RFE) { 1073 if (intr_status & EESR_RFE) {
896 /* Receive FIFO Overflow int */ 1074 /* Receive FIFO Overflow int */
897 mdp->stats.rx_fifo_errors++; 1075 mdp->stats.rx_fifo_errors++;
898 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 1076 if (netif_msg_rx_err(mdp))
1077 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1078 }
1079
1080 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1081 /* Address Error */
1082 mdp->stats.tx_fifo_errors++;
1083 if (netif_msg_tx_err(mdp))
1084 dev_err(&ndev->dev, "Address Error\n");
899 } 1085 }
900 1086
901 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1087 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
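
The error handler is also reorganized above: every EESR condition now bumps its counter unconditionally, while the dev_err() chatter is gated on the matching netif_msg_*() test against mdp->msg_enable, which is seeded from SH_ETH_DEF_MSG_ENABLE and adjustable at runtime via the new ethtool get/set_msglevel hooks. The gating idiom, reduced to a sketch with illustrative bit values:

#include <stdio.h>

#define MSG_TX_ERR (1 << 0)		/* illustrative bit values */
#define MSG_RX_ERR (1 << 1)

struct priv { unsigned msg_enable; unsigned long tx_errors; };

static void report_tx_error(struct priv *p, const char *what)
{
	p->tx_errors++;			/* the counter always advances */
	if (p->msg_enable & MSG_TX_ERR)	/* the message is opt-in */
		fprintf(stderr, "tx error: %s\n", what);
}

int main(void)
{
	struct priv p = { MSG_TX_ERR, 0 };

	report_tx_error(&p, "Transmit Abort");
	p.msg_enable = 0;		/* e.g. lowered via ethtool msglvl */
	report_tx_error(&p, "Transmit FIFO Under flow");
	printf("errors counted: %lu\n", p.tx_errors);
	return 0;
}
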
@@ -903,7 +1089,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
903 mask &= ~EESR_ADE; 1089 mask &= ~EESR_ADE;
904 if (intr_status & mask) { 1090 if (intr_status & mask) {
905 /* Tx error */ 1091 /* Tx error */
906 u32 edtrr = readl(ndev->base_addr + EDTRR); 1092 u32 edtrr = sh_eth_read(ndev, EDTRR);
907 /* dmesg */ 1093 /* dmesg */
908 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", 1094 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
909 intr_status, mdp->cur_tx); 1095 intr_status, mdp->cur_tx);
@@ -913,9 +1099,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
913 sh_eth_txfree(ndev); 1099 sh_eth_txfree(ndev);
914 1100
915 /* SH7712 BUG */ 1101 /* SH7712 BUG */
916 if (edtrr ^ EDTRR_TRNS) { 1102 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
917 /* tx dma start */ 1103 /* tx dma start */
918 writel(EDTRR_TRNS, ndev->base_addr + EDTRR); 1104 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
919 } 1105 }
920 /* wakeup */ 1106 /* wakeup */
921 netif_wake_queue(ndev); 1107 netif_wake_queue(ndev);
@@ -928,18 +1114,17 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
928 struct sh_eth_private *mdp = netdev_priv(ndev); 1114 struct sh_eth_private *mdp = netdev_priv(ndev);
929 struct sh_eth_cpu_data *cd = mdp->cd; 1115 struct sh_eth_cpu_data *cd = mdp->cd;
930 irqreturn_t ret = IRQ_NONE; 1116 irqreturn_t ret = IRQ_NONE;
931 u32 ioaddr, intr_status = 0; 1117 u32 intr_status = 0;
932 1118
933 ioaddr = ndev->base_addr;
934 spin_lock(&mdp->lock); 1119 spin_lock(&mdp->lock);
935 1120
936 /* Get interrupt stat */ 1121 /* Get interrupt stat */
937 intr_status = readl(ioaddr + EESR); 1122 intr_status = sh_eth_read(ndev, EESR);
938 /* Clear interrupt */ 1123 /* Clear interrupt */
939 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1124 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
940 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 1125 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
941 cd->tx_check | cd->eesr_err_check)) { 1126 cd->tx_check | cd->eesr_err_check)) {
942 writel(intr_status, ioaddr + EESR); 1127 sh_eth_write(ndev, intr_status, EESR);
943 ret = IRQ_HANDLED; 1128 ret = IRQ_HANDLED;
944 } else 1129 } else
945 goto other_irq; 1130 goto other_irq;
@@ -982,7 +1167,6 @@ static void sh_eth_adjust_link(struct net_device *ndev)
982{ 1167{
983 struct sh_eth_private *mdp = netdev_priv(ndev); 1168 struct sh_eth_private *mdp = netdev_priv(ndev);
984 struct phy_device *phydev = mdp->phydev; 1169 struct phy_device *phydev = mdp->phydev;
985 u32 ioaddr = ndev->base_addr;
986 int new_state = 0; 1170 int new_state = 0;
987 1171
988 if (phydev->link != PHY_DOWN) { 1172 if (phydev->link != PHY_DOWN) {
@@ -1000,8 +1184,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1000 mdp->cd->set_rate(ndev); 1184 mdp->cd->set_rate(ndev);
1001 } 1185 }
1002 if (mdp->link == PHY_DOWN) { 1186 if (mdp->link == PHY_DOWN) {
1003 writel((readl(ioaddr + ECMR) & ~ECMR_TXF) 1187 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
1004 | ECMR_DM, ioaddr + ECMR); 1188 | ECMR_DM, ECMR);
1005 new_state = 1; 1189 new_state = 1;
1006 mdp->link = phydev->link; 1190 mdp->link = phydev->link;
1007 } 1191 }
@@ -1012,7 +1196,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1012 mdp->duplex = -1; 1196 mdp->duplex = -1;
1013 } 1197 }
1014 1198
1015 if (new_state) 1199 if (new_state && netif_msg_link(mdp))
1016 phy_print_status(phydev); 1200 phy_print_status(phydev);
1017} 1201}
1018 1202
@@ -1032,7 +1216,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
1032 1216
1033 /* Try connect to PHY */ 1217 /* Try connect to PHY */
1034 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1218 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1035 0, PHY_INTERFACE_MODE_MII); 1219 0, mdp->phy_interface);
1036 if (IS_ERR(phydev)) { 1220 if (IS_ERR(phydev)) {
1037 dev_err(&ndev->dev, "phy_connect failed\n"); 1221 dev_err(&ndev->dev, "phy_connect failed\n");
1038 return PTR_ERR(phydev); 1222 return PTR_ERR(phydev);
@@ -1063,6 +1247,131 @@ static int sh_eth_phy_start(struct net_device *ndev)
1063 return 0; 1247 return 0;
1064} 1248}
1065 1249
1250static int sh_eth_get_settings(struct net_device *ndev,
1251 struct ethtool_cmd *ecmd)
1252{
1253 struct sh_eth_private *mdp = netdev_priv(ndev);
1254 unsigned long flags;
1255 int ret;
1256
1257 spin_lock_irqsave(&mdp->lock, flags);
1258 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1259 spin_unlock_irqrestore(&mdp->lock, flags);
1260
1261 return ret;
1262}
1263
1264static int sh_eth_set_settings(struct net_device *ndev,
1265 struct ethtool_cmd *ecmd)
1266{
1267 struct sh_eth_private *mdp = netdev_priv(ndev);
1268 unsigned long flags;
1269 int ret;
1270
1271 spin_lock_irqsave(&mdp->lock, flags);
1272
1273 /* disable tx and rx */
1274 sh_eth_rcv_snd_disable(ndev);
1275
1276 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1277 if (ret)
1278 goto error_exit;
1279
1280 if (ecmd->duplex == DUPLEX_FULL)
1281 mdp->duplex = 1;
1282 else
1283 mdp->duplex = 0;
1284
1285 if (mdp->cd->set_duplex)
1286 mdp->cd->set_duplex(ndev);
1287
1288error_exit:
1289 mdelay(1);
1290
1291 /* enable tx and rx */
1292 sh_eth_rcv_snd_enable(ndev);
1293
1294 spin_unlock_irqrestore(&mdp->lock, flags);
1295
1296 return ret;
1297}
1298
1299static int sh_eth_nway_reset(struct net_device *ndev)
1300{
1301 struct sh_eth_private *mdp = netdev_priv(ndev);
1302 unsigned long flags;
1303 int ret;
1304
1305 spin_lock_irqsave(&mdp->lock, flags);
1306 ret = phy_start_aneg(mdp->phydev);
1307 spin_unlock_irqrestore(&mdp->lock, flags);
1308
1309 return ret;
1310}
1311
1312static u32 sh_eth_get_msglevel(struct net_device *ndev)
1313{
1314 struct sh_eth_private *mdp = netdev_priv(ndev);
1315 return mdp->msg_enable;
1316}
1317
1318static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1319{
1320 struct sh_eth_private *mdp = netdev_priv(ndev);
1321 mdp->msg_enable = value;
1322}
1323
1324static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1325 "rx_current", "tx_current",
1326 "rx_dirty", "tx_dirty",
1327};
1328#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1329
1330static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1331{
1332 switch (sset) {
1333 case ETH_SS_STATS:
1334 return SH_ETH_STATS_LEN;
1335 default:
1336 return -EOPNOTSUPP;
1337 }
1338}
1339
1340static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1341 struct ethtool_stats *stats, u64 *data)
1342{
1343 struct sh_eth_private *mdp = netdev_priv(ndev);
1344 int i = 0;
1345
1346 /* device-specific stats */
1347 data[i++] = mdp->cur_rx;
1348 data[i++] = mdp->cur_tx;
1349 data[i++] = mdp->dirty_rx;
1350 data[i++] = mdp->dirty_tx;
1351}
1352
1353static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1354{
1355 switch (stringset) {
1356 case ETH_SS_STATS:
1357 memcpy(data, *sh_eth_gstrings_stats,
1358 sizeof(sh_eth_gstrings_stats));
1359 break;
1360 }
1361}
1362
1363static struct ethtool_ops sh_eth_ethtool_ops = {
1364 .get_settings = sh_eth_get_settings,
1365 .set_settings = sh_eth_set_settings,
1366 .nway_reset = sh_eth_nway_reset,
1367 .get_msglevel = sh_eth_get_msglevel,
1368 .set_msglevel = sh_eth_set_msglevel,
1369 .get_link = ethtool_op_get_link,
1370 .get_strings = sh_eth_get_strings,
1371 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1372 .get_sset_count = sh_eth_get_sset_count,
1373};
1374
1066/* network device open function */ 1375/* network device open function */
1067static int sh_eth_open(struct net_device *ndev) 1376static int sh_eth_open(struct net_device *ndev)
1068{ 1377{
@@ -1073,8 +1382,8 @@ static int sh_eth_open(struct net_device *ndev)
1073 1382
1074 ret = request_irq(ndev->irq, sh_eth_interrupt, 1383 ret = request_irq(ndev->irq, sh_eth_interrupt,
1075#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1384#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1076 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1385 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1077 defined(CONFIG_CPU_SUBTYPE_SH7757) 1386 defined(CONFIG_CPU_SUBTYPE_SH7757)
1078 IRQF_SHARED, 1387 IRQF_SHARED,
1079#else 1388#else
1080 0, 1389 0,
@@ -1117,15 +1426,14 @@ out_free_irq:
1117static void sh_eth_tx_timeout(struct net_device *ndev) 1426static void sh_eth_tx_timeout(struct net_device *ndev)
1118{ 1427{
1119 struct sh_eth_private *mdp = netdev_priv(ndev); 1428 struct sh_eth_private *mdp = netdev_priv(ndev);
1120 u32 ioaddr = ndev->base_addr;
1121 struct sh_eth_rxdesc *rxdesc; 1429 struct sh_eth_rxdesc *rxdesc;
1122 int i; 1430 int i;
1123 1431
1124 netif_stop_queue(ndev); 1432 netif_stop_queue(ndev);
1125 1433
1126 /* worning message out. */ 1434 if (netif_msg_timer(mdp))
1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x," 1435 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1128 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR)); 1436 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
1129 1437
1130 /* tx_errors count up */ 1438 /* tx_errors count up */
1131 mdp->stats.tx_errors++; 1439 mdp->stats.tx_errors++;
@@ -1167,6 +1475,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1167 spin_lock_irqsave(&mdp->lock, flags); 1475 spin_lock_irqsave(&mdp->lock, flags);
1168 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1476 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
1169 if (!sh_eth_txfree(ndev)) { 1477 if (!sh_eth_txfree(ndev)) {
1478 if (netif_msg_tx_queued(mdp))
1479 dev_warn(&ndev->dev, "TxFD exhausted.\n");
1170 netif_stop_queue(ndev); 1480 netif_stop_queue(ndev);
1171 spin_unlock_irqrestore(&mdp->lock, flags); 1481 spin_unlock_irqrestore(&mdp->lock, flags);
1172 return NETDEV_TX_BUSY; 1482 return NETDEV_TX_BUSY;
@@ -1196,8 +1506,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1196 1506
1197 mdp->cur_tx++; 1507 mdp->cur_tx++;
1198 1508
1199 if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS)) 1509 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
1200 writel(EDTRR_TRNS, ndev->base_addr + EDTRR); 1510 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1201 1511
1202 return NETDEV_TX_OK; 1512 return NETDEV_TX_OK;
1203} 1513}
@@ -1206,17 +1516,16 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1206static int sh_eth_close(struct net_device *ndev) 1516static int sh_eth_close(struct net_device *ndev)
1207{ 1517{
1208 struct sh_eth_private *mdp = netdev_priv(ndev); 1518 struct sh_eth_private *mdp = netdev_priv(ndev);
1209 u32 ioaddr = ndev->base_addr;
1210 int ringsize; 1519 int ringsize;
1211 1520
1212 netif_stop_queue(ndev); 1521 netif_stop_queue(ndev);
1213 1522
1214 /* Disable interrupts by clearing the interrupt mask. */ 1523 /* Disable interrupts by clearing the interrupt mask. */
1215 writel(0x0000, ioaddr + EESIPR); 1524 sh_eth_write(ndev, 0x0000, EESIPR);
1216 1525
1217 /* Stop the chip's Tx and Rx processes. */ 1526 /* Stop the chip's Tx and Rx processes. */
1218 writel(0, ioaddr + EDTRR); 1527 sh_eth_write(ndev, 0, EDTRR);
1219 writel(0, ioaddr + EDRRR); 1528 sh_eth_write(ndev, 0, EDRRR);
1220 1529
1221 /* PHY Disconnect */ 1530 /* PHY Disconnect */
1222 if (mdp->phydev) { 1531 if (mdp->phydev) {
@@ -1247,25 +1556,24 @@ static int sh_eth_close(struct net_device *ndev)
1247static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) 1556static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1248{ 1557{
1249 struct sh_eth_private *mdp = netdev_priv(ndev); 1558 struct sh_eth_private *mdp = netdev_priv(ndev);
1250 u32 ioaddr = ndev->base_addr;
1251 1559
1252 pm_runtime_get_sync(&mdp->pdev->dev); 1560 pm_runtime_get_sync(&mdp->pdev->dev);
1253 1561
1254 mdp->stats.tx_dropped += readl(ioaddr + TROCR); 1562 mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
1255 writel(0, ioaddr + TROCR); /* (write clear) */ 1563 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
1256 mdp->stats.collisions += readl(ioaddr + CDCR); 1564 mdp->stats.collisions += sh_eth_read(ndev, CDCR);
1257 writel(0, ioaddr + CDCR); /* (write clear) */ 1565 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
1258 mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR); 1566 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
1259 writel(0, ioaddr + LCCR); /* (write clear) */ 1567 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
1260#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1568 if (sh_eth_is_gether(mdp)) {
1261 mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */ 1569 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
1262 writel(0, ioaddr + CERCR); /* (write clear) */ 1570 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
1263 mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */ 1571 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
1264 writel(0, ioaddr + CEECR); /* (write clear) */ 1572 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
1265#else 1573 } else {
1266 mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR); 1574 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
1267 writel(0, ioaddr + CNDCR); /* (write clear) */ 1575 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
1268#endif 1576 }
1269 pm_runtime_put_sync(&mdp->pdev->dev); 1577 pm_runtime_put_sync(&mdp->pdev->dev);
1270 1578
1271 return &mdp->stats; 1579 return &mdp->stats;
@@ -1291,48 +1599,46 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1291/* Multicast reception directions set */ 1599/* Multicast reception directions set */
1292static void sh_eth_set_multicast_list(struct net_device *ndev) 1600static void sh_eth_set_multicast_list(struct net_device *ndev)
1293{ 1601{
1294 u32 ioaddr = ndev->base_addr;
1295
1296 if (ndev->flags & IFF_PROMISC) { 1602 if (ndev->flags & IFF_PROMISC) {
1297 /* Set promiscuous. */ 1603 /* Set promiscuous. */
1298 writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM, 1604 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
1299 ioaddr + ECMR); 1605 ECMR_PRM, ECMR);
1300 } else { 1606 } else {
1301 /* Normal, unicast/broadcast-only mode. */ 1607 /* Normal, unicast/broadcast-only mode. */
1302 writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT, 1608 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
1303 ioaddr + ECMR); 1609 ECMR_MCT, ECMR);
1304 } 1610 }
1305} 1611}
1612#endif /* SH_ETH_HAS_TSU */
1306 1613
1307/* SuperH's TSU register init function */ 1614/* SuperH's TSU register init function */
1308static void sh_eth_tsu_init(u32 ioaddr) 1615static void sh_eth_tsu_init(struct sh_eth_private *mdp)
1309{ 1616{
1310 writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */ 1617 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
1311 writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */ 1618 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
1312 writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */ 1619 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
1313 writel(0xc, ioaddr + TSU_BSYSL0); 1620 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
1314 writel(0xc, ioaddr + TSU_BSYSL1); 1621 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
1315 writel(0, ioaddr + TSU_PRISL0); 1622 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
1316 writel(0, ioaddr + TSU_PRISL1); 1623 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
1317 writel(0, ioaddr + TSU_FWSL0); 1624 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
1318 writel(0, ioaddr + TSU_FWSL1); 1625 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
1319 writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); 1626 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
1320#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1627 if (sh_eth_is_gether(mdp)) {
1321 writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */ 1628 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
1322 writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */ 1629 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
1323#else 1630 } else {
1324 writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ 1631 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
1325 writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ 1632 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
1326#endif 1633 }
1327 writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ 1634 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
1328 writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ 1635 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
1329 writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ 1636 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
1330 writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */ 1637 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
1331 writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */ 1638 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
1332 writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */ 1639 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
1333 writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */ 1640 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
1334} 1641}
1335#endif /* SH_ETH_HAS_TSU */
1336 1642
1337/* MDIO bus release function */ 1643/* MDIO bus release function */
1338static int sh_mdio_release(struct net_device *ndev) 1644static int sh_mdio_release(struct net_device *ndev)
@@ -1355,7 +1661,8 @@ static int sh_mdio_release(struct net_device *ndev)
1355} 1661}
1356 1662
1357/* MDIO bus init function */ 1663/* MDIO bus init function */
1358static int sh_mdio_init(struct net_device *ndev, int id) 1664static int sh_mdio_init(struct net_device *ndev, int id,
1665 struct sh_eth_plat_data *pd)
1359{ 1666{
1360 int ret, i; 1667 int ret, i;
1361 struct bb_info *bitbang; 1668 struct bb_info *bitbang;
@@ -1369,7 +1676,8 @@ static int sh_mdio_init(struct net_device *ndev, int id)
1369 } 1676 }
1370 1677
1371 /* bitbang init */ 1678 /* bitbang init */
1372 bitbang->addr = ndev->base_addr + PIR; 1679 bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
1680 bitbang->set_gate = pd->set_mdio_gate;
1373 bitbang->mdi_msk = 0x08; 1681 bitbang->mdi_msk = 0x08;
1374 bitbang->mdo_msk = 0x04; 1682 bitbang->mdo_msk = 0x04;
1375 bitbang->mmd_msk = 0x02;/* MMD */ 1683 bitbang->mmd_msk = 0x02;/* MMD */
@@ -1420,6 +1728,28 @@ out:
1420 return ret; 1728 return ret;
1421} 1729}
1422 1730
1731static const u16 *sh_eth_get_register_offset(int register_type)
1732{
1733 const u16 *reg_offset = NULL;
1734
1735 switch (register_type) {
1736 case SH_ETH_REG_GIGABIT:
1737 reg_offset = sh_eth_offset_gigabit;
1738 break;
1739 case SH_ETH_REG_FAST_SH4:
1740 reg_offset = sh_eth_offset_fast_sh4;
1741 break;
1742 case SH_ETH_REG_FAST_SH3_SH2:
1743 reg_offset = sh_eth_offset_fast_sh3_sh2;
1744 break;
1745 default:
1746 printk(KERN_ERR "Unknown register type (%d)\n", register_type);
1747 break;
1748 }
1749
1750 return reg_offset;
1751}
1752
1423static const struct net_device_ops sh_eth_netdev_ops = { 1753static const struct net_device_ops sh_eth_netdev_ops = {
1424 .ndo_open = sh_eth_open, 1754 .ndo_open = sh_eth_open,
1425 .ndo_stop = sh_eth_close, 1755 .ndo_stop = sh_eth_close,
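
sh_eth_get_register_offset() maps the platform data's register_type onto one of three u16 offset tables; combined with the enum rewrite in sh_eth.h below, a single symbolic register resolves to a different hardware offset per SoC family. Note that the probe path shown later stores the result without checking for NULL, so an unknown register_type would only be flagged by the printk here. A sketch of the selection step; the table contents are placeholders.

#include <stdint.h>
#include <stdio.h>

enum { REG_GIGABIT, REG_FAST_SH4, REG_FAST_SH3_SH2 };

/* Placeholder tables; only their selection matters in this sketch. */
static const uint16_t offset_gigabit[]	= { 0x400, 0x500 };
static const uint16_t offset_fast_sh4[]	= { 0x000, 0x100 };
static const uint16_t offset_fast_sh3[]	= { 0x000, 0x160 };

static const uint16_t *get_register_offset(int register_type)
{
	switch (register_type) {
	case REG_GIGABIT:
		return offset_gigabit;
	case REG_FAST_SH4:
		return offset_fast_sh4;
	case REG_FAST_SH3_SH2:
		return offset_fast_sh3;
	default:
		fprintf(stderr, "Unknown register type (%d)\n", register_type);
		return NULL;		/* the caller must cope with this */
	}
}

int main(void)
{
	const uint16_t *t = get_register_offset(REG_GIGABIT);

	if (t)
		printf("first register lives at %#x on gigabit parts\n", t[0]);
	return 0;
}
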
@@ -1486,19 +1816,28 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1486 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); 1816 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1487 /* get PHY ID */ 1817 /* get PHY ID */
1488 mdp->phy_id = pd->phy; 1818 mdp->phy_id = pd->phy;
1819 mdp->phy_interface = pd->phy_interface;
1489 /* EDMAC endian */ 1820 /* EDMAC endian */
1490 mdp->edmac_endian = pd->edmac_endian; 1821 mdp->edmac_endian = pd->edmac_endian;
1491 mdp->no_ether_link = pd->no_ether_link; 1822 mdp->no_ether_link = pd->no_ether_link;
1492 mdp->ether_link_active_low = pd->ether_link_active_low; 1823 mdp->ether_link_active_low = pd->ether_link_active_low;
1824 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
1493 1825
1494 /* set cpu data */ 1826 /* set cpu data */
1827#if defined(SH_ETH_HAS_BOTH_MODULES)
1828 mdp->cd = sh_eth_get_cpu_data(mdp);
1829#else
1495 mdp->cd = &sh_eth_my_cpu_data; 1830 mdp->cd = &sh_eth_my_cpu_data;
1831#endif
1496 sh_eth_set_default_cpu_data(mdp->cd); 1832 sh_eth_set_default_cpu_data(mdp->cd);
1497 1833
1498 /* set function */ 1834 /* set function */
1499 ndev->netdev_ops = &sh_eth_netdev_ops; 1835 ndev->netdev_ops = &sh_eth_netdev_ops;
1836 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
1500 ndev->watchdog_timeo = TX_TIMEOUT; 1837 ndev->watchdog_timeo = TX_TIMEOUT;
1501 1838
1839 /* debug message level */
1840 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
1502 mdp->post_rx = POST_RX >> (devno << 1); 1841 mdp->post_rx = POST_RX >> (devno << 1);
1503 mdp->post_fw = POST_FW >> (devno << 1); 1842 mdp->post_fw = POST_FW >> (devno << 1);
1504 1843
@@ -1507,13 +1846,23 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1507 1846
1508 /* First device only init */ 1847 /* First device only init */
1509 if (!devno) { 1848 if (!devno) {
1849 if (mdp->cd->tsu) {
1850 struct resource *rtsu;
1851 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1852 if (!rtsu) {
1853 dev_err(&pdev->dev, "Not found TSU resource\n");
1854 goto out_release;
1855 }
1856 mdp->tsu_addr = ioremap(rtsu->start,
1857 resource_size(rtsu));
1858 }
1510 if (mdp->cd->chip_reset) 1859 if (mdp->cd->chip_reset)
1511 mdp->cd->chip_reset(ndev); 1860 mdp->cd->chip_reset(ndev);
1512 1861
1513#if defined(SH_ETH_HAS_TSU) 1862 if (mdp->cd->tsu) {
1514 /* TSU init (Init only)*/ 1863 /* TSU init (Init only)*/
1515 sh_eth_tsu_init(SH_TSU_ADDR); 1864 sh_eth_tsu_init(mdp);
1516#endif 1865 }
1517 } 1866 }
1518 1867
1519 /* network device register */ 1868 /* network device register */
@@ -1522,7 +1871,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1522 goto out_release; 1871 goto out_release;
1523 1872
1524 /* mdio bus init */ 1873 /* mdio bus init */
1525 ret = sh_mdio_init(ndev, pdev->id); 1874 ret = sh_mdio_init(ndev, pdev->id, pd);
1526 if (ret) 1875 if (ret)
1527 goto out_unregister; 1876 goto out_unregister;
1528 1877
@@ -1539,6 +1888,8 @@ out_unregister:
1539 1888
1540out_release: 1889out_release:
1541 /* net_dev free */ 1890 /* net_dev free */
1891 if (mdp->tsu_addr)
1892 iounmap(mdp->tsu_addr);
1542 if (ndev) 1893 if (ndev)
1543 free_netdev(ndev); 1894 free_netdev(ndev);
1544 1895
@@ -1549,7 +1900,9 @@ out:
1549static int sh_eth_drv_remove(struct platform_device *pdev) 1900static int sh_eth_drv_remove(struct platform_device *pdev)
1550{ 1901{
1551 struct net_device *ndev = platform_get_drvdata(pdev); 1902 struct net_device *ndev = platform_get_drvdata(pdev);
1903 struct sh_eth_private *mdp = netdev_priv(ndev);
1552 1904
1905 iounmap(mdp->tsu_addr);
1553 sh_mdio_release(ndev); 1906 sh_mdio_release(ndev);
1554 unregister_netdev(ndev); 1907 unregister_netdev(ndev);
1555 pm_runtime_disable(&pdev->dev); 1908 pm_runtime_disable(&pdev->dev);
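One thing the new TSU block in the probe hunk does not do is check the return of ioremap() before the address is stored and later written through. A defensively written variant of that block, keeping the same resource index and error label as the patch, might look like:

	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "TSU resource not found\n");
			goto out_release;
		}
		mdp->tsu_addr = ioremap(rtsu->start, resource_size(rtsu));
		if (!mdp->tsu_addr) {		/* not checked by the patch */
			dev_err(&pdev->dev, "TSU ioremap failed\n");
			goto out_release;
		}
	}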
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index efa64221eed..c3048a6ba67 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -2,7 +2,7 @@
2 * SuperH Ethernet device driver 2 * SuperH Ethernet device driver
3 * 3 *
4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 * Copyright (C) 2008-2009 Renesas Solutions Corp. 5 * Copyright (C) 2008-2011 Renesas Solutions Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License, 8 * under the terms and conditions of the GNU General Public License,
@@ -38,278 +38,340 @@
38#define ETHERSMALL 60 38#define ETHERSMALL 60
39#define PKT_BUF_SZ 1538 39#define PKT_BUF_SZ 1538
40 40
41#if defined(CONFIG_CPU_SUBTYPE_SH7763) 41enum {
42/* This CPU register maps is very difference by other SH4 CPU */ 42 /* E-DMAC registers */
43 43 EDSR = 0,
44/* Chip Base Address */ 44 EDMR,
45# define SH_TSU_ADDR 0xFEE01800 45 EDTRR,
46# define ARSTR SH_TSU_ADDR 46 EDRRR,
47 47 EESR,
48/* Chip Registers */ 48 EESIPR,
49/* E-DMAC */ 49 TDLAR,
50# define EDSR 0x000 50 TDFAR,
51# define EDMR 0x400 51 TDFXR,
52# define EDTRR 0x408 52 TDFFR,
53# define EDRRR 0x410 53 RDLAR,
54# define EESR 0x428 54 RDFAR,
55# define EESIPR 0x430 55 RDFXR,
56# define TDLAR 0x010 56 RDFFR,
57# define TDFAR 0x014 57 TRSCER,
58# define TDFXR 0x018 58 RMFCR,
59# define TDFFR 0x01C 59 TFTR,
60# define RDLAR 0x030 60 FDR,
61# define RDFAR 0x034 61 RMCR,
62# define RDFXR 0x038 62 EDOCR,
63# define RDFFR 0x03C 63 TFUCR,
64# define TRSCER 0x438 64 RFOCR,
65# define RMFCR 0x440 65 FCFTR,
66# define TFTR 0x448 66 RPADIR,
67# define FDR 0x450 67 TRIMD,
68# define RMCR 0x458 68 RBWAR,
69# define RPADIR 0x460 69 TBRAR,
70# define FCFTR 0x468 70
71 71 /* Ether registers */
72/* Ether Register */ 72 ECMR,
73# define ECMR 0x500 73 ECSR,
74# define ECSR 0x510 74 ECSIPR,
75# define ECSIPR 0x518 75 PIR,
76# define PIR 0x520 76 PSR,
77# define PSR 0x528 77 RDMLR,
78# define PIPR 0x52C 78 PIPR,
79# define RFLR 0x508 79 RFLR,
80# define APR 0x554 80 IPGR,
81# define MPR 0x558 81 APR,
82# define PFTCR 0x55C 82 MPR,
83# define PFRCR 0x560 83 PFTCR,
84# define TPAUSER 0x564 84 PFRCR,
85# define GECMR 0x5B0 85 RFCR,
86# define BCULR 0x5B4 86 RFCF,
87# define MAHR 0x5C0 87 TPAUSER,
88# define MALR 0x5C8 88 TPAUSECR,
89# define TROCR 0x700 89 BCFR,
90# define CDCR 0x708 90 BCFRR,
91# define LCCR 0x710 91 GECMR,
92# define CEFCR 0x740 92 BCULR,
93# define FRECR 0x748 93 MAHR,
94# define TSFRCR 0x750 94 MALR,
95# define TLFRCR 0x758 95 TROCR,
96# define RFCR 0x760 96 CDCR,
97# define CERCR 0x768 97 LCCR,
98# define CEECR 0x770 98 CNDCR,
99# define MAFCR 0x778 99 CEFCR,
100 100 FRECR,
101/* TSU Absolute Address */ 101 TSFRCR,
102# define TSU_CTRST 0x004 102 TLFRCR,
103# define TSU_FWEN0 0x010 103 CERCR,
104# define TSU_FWEN1 0x014 104 CEECR,
105# define TSU_FCM 0x18 105 MAFCR,
106# define TSU_BSYSL0 0x20 106 RTRATE,
107# define TSU_BSYSL1 0x24 107
108# define TSU_PRISL0 0x28 108 /* TSU Absolute address */
109# define TSU_PRISL1 0x2C 109 ARSTR,
110# define TSU_FWSL0 0x30 110 TSU_CTRST,
111# define TSU_FWSL1 0x34 111 TSU_FWEN0,
112# define TSU_FWSLC 0x38 112 TSU_FWEN1,
113# define TSU_QTAG0 0x40 113 TSU_FCM,
114# define TSU_QTAG1 0x44 114 TSU_BSYSL0,
115# define TSU_FWSR 0x50 115 TSU_BSYSL1,
116# define TSU_FWINMK 0x54 116 TSU_PRISL0,
117# define TSU_ADQT0 0x48 117 TSU_PRISL1,
118# define TSU_ADQT1 0x4C 118 TSU_FWSL0,
119# define TSU_VTAG0 0x58 119 TSU_FWSL1,
120# define TSU_VTAG1 0x5C 120 TSU_FWSLC,
121# define TSU_ADSBSY 0x60 121 TSU_QTAG0,
122# define TSU_TEN 0x64 122 TSU_QTAG1,
123# define TSU_POST1 0x70 123 TSU_QTAGM0,
124# define TSU_POST2 0x74 124 TSU_QTAGM1,
125# define TSU_POST3 0x78 125 TSU_FWSR,
126# define TSU_POST4 0x7C 126 TSU_FWINMK,
127# define TSU_ADRH0 0x100 127 TSU_ADQT0,
128# define TSU_ADRL0 0x104 128 TSU_ADQT1,
129# define TSU_ADRH31 0x1F8 129 TSU_VTAG0,
130# define TSU_ADRL31 0x1FC 130 TSU_VTAG1,
131 131 TSU_ADSBSY,
132# define TXNLCR0 0x80 132 TSU_TEN,
133# define TXALCR0 0x84 133 TSU_POST1,
134# define RXNLCR0 0x88 134 TSU_POST2,
135# define RXALCR0 0x8C 135 TSU_POST3,
136# define FWNLCR0 0x90 136 TSU_POST4,
137# define FWALCR0 0x94 137 TSU_ADRH0,
138# define TXNLCR1 0xA0 138 TSU_ADRL0,
139# define TXALCR1 0xA4 139 TSU_ADRH31,
140# define RXNLCR1 0xA8 140 TSU_ADRL31,
141# define RXALCR1 0xAC 141
142# define FWNLCR1 0xB0 142 TXNLCR0,
143# define FWALCR1 0x40 143 TXALCR0,
144 144 RXNLCR0,
145#elif defined(CONFIG_CPU_SH4) /* #if defined(CONFIG_CPU_SUBTYPE_SH7763) */ 145 RXALCR0,
146/* EtherC */ 146 FWNLCR0,
147#define ECMR 0x100 147 FWALCR0,
148#define RFLR 0x108 148 TXNLCR1,
149#define ECSR 0x110 149 TXALCR1,
150#define ECSIPR 0x118 150 RXNLCR1,
151#define PIR 0x120 151 RXALCR1,
152#define PSR 0x128 152 FWNLCR1,
153#define RDMLR 0x140 153 FWALCR1,
154#define IPGR 0x150 154
155#define APR 0x154 155 /* This value must be kept last. */
156#define MPR 0x158 156 SH_ETH_MAX_REGISTER_OFFSET,
157#define TPAUSER 0x164 157};
158#define RFCF 0x160 158
159#define TPAUSECR 0x168 159static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
160#define BCFRR 0x16c 160 [EDSR] = 0x0000,
161#define MAHR 0x1c0 161 [EDMR] = 0x0400,
162#define MALR 0x1c8 162 [EDTRR] = 0x0408,
163#define TROCR 0x1d0 163 [EDRRR] = 0x0410,
164#define CDCR 0x1d4 164 [EESR] = 0x0428,
165#define LCCR 0x1d8 165 [EESIPR] = 0x0430,
166#define CNDCR 0x1dc 166 [TDLAR] = 0x0010,
167#define CEFCR 0x1e4 167 [TDFAR] = 0x0014,
168#define FRECR 0x1e8 168 [TDFXR] = 0x0018,
169#define TSFRCR 0x1ec 169 [TDFFR] = 0x001c,
170#define TLFRCR 0x1f0 170 [RDLAR] = 0x0030,
171#define RFCR 0x1f4 171 [RDFAR] = 0x0034,
172#define MAFCR 0x1f8 172 [RDFXR] = 0x0038,
173#define RTRATE 0x1fc 173 [RDFFR] = 0x003c,
174 174 [TRSCER] = 0x0438,
175/* E-DMAC */ 175 [RMFCR] = 0x0440,
176#define EDMR 0x000 176 [TFTR] = 0x0448,
177#define EDTRR 0x008 177 [FDR] = 0x0450,
178#define EDRRR 0x010 178 [RMCR] = 0x0458,
179#define TDLAR 0x018 179 [RPADIR] = 0x0460,
180#define RDLAR 0x020 180 [FCFTR] = 0x0468,
181#define EESR 0x028 181
182#define EESIPR 0x030 182 [ECMR] = 0x0500,
183#define TRSCER 0x038 183 [ECSR] = 0x0510,
184#define RMFCR 0x040 184 [ECSIPR] = 0x0518,
185#define TFTR 0x048 185 [PIR] = 0x0520,
186#define FDR 0x050 186 [PSR] = 0x0528,
187#define RMCR 0x058 187 [PIPR] = 0x052c,
188#define TFUCR 0x064 188 [RFLR] = 0x0508,
189#define RFOCR 0x068 189 [APR] = 0x0554,
190#define FCFTR 0x070 190 [MPR] = 0x0558,
191#define RPADIR 0x078 191 [PFTCR] = 0x055c,
192#define TRIMD 0x07c 192 [PFRCR] = 0x0560,
193#define RBWAR 0x0c8 193 [TPAUSER] = 0x0564,
194#define RDFAR 0x0cc 194 [GECMR] = 0x05b0,
195#define TBRAR 0x0d4 195 [BCULR] = 0x05b4,
196#define TDFAR 0x0d8 196 [MAHR] = 0x05c0,
197#else /* #elif defined(CONFIG_CPU_SH4) */ 197 [MALR] = 0x05c8,
198/* This section is SH3 or SH2 */ 198 [TROCR] = 0x0700,
199#ifndef CONFIG_CPU_SUBTYPE_SH7619 199 [CDCR] = 0x0708,
200/* Chip base address */ 200 [LCCR] = 0x0710,
201# define SH_TSU_ADDR 0xA7000804 201 [CEFCR] = 0x0740,
202# define ARSTR 0xA7000800 202 [FRECR] = 0x0748,
203#endif 203 [TSFRCR] = 0x0750,
204/* Chip Registers */ 204 [TLFRCR] = 0x0758,
205/* E-DMAC */ 205 [RFCR] = 0x0760,
206# define EDMR 0x0000 206 [CERCR] = 0x0768,
207# define EDTRR 0x0004 207 [CEECR] = 0x0770,
208# define EDRRR 0x0008 208 [MAFCR] = 0x0778,
209# define TDLAR 0x000C 209
210# define RDLAR 0x0010 210 [ARSTR] = 0x0000,
211# define EESR 0x0014 211 [TSU_CTRST] = 0x0004,
212# define EESIPR 0x0018 212 [TSU_FWEN0] = 0x0010,
213# define TRSCER 0x001C 213 [TSU_FWEN1] = 0x0014,
214# define RMFCR 0x0020 214 [TSU_FCM] = 0x0018,
215# define TFTR 0x0024 215 [TSU_BSYSL0] = 0x0020,
216# define FDR 0x0028 216 [TSU_BSYSL1] = 0x0024,
217# define RMCR 0x002C 217 [TSU_PRISL0] = 0x0028,
218# define EDOCR 0x0030 218 [TSU_PRISL1] = 0x002c,
219# define FCFTR 0x0034 219 [TSU_FWSL0] = 0x0030,
220# define RPADIR 0x0038 220 [TSU_FWSL1] = 0x0034,
221# define TRIMD 0x003C 221 [TSU_FWSLC] = 0x0038,
222# define RBWAR 0x0040 222 [TSU_QTAG0] = 0x0040,
223# define RDFAR 0x0044 223 [TSU_QTAG1] = 0x0044,
224# define TBRAR 0x004C 224 [TSU_FWSR] = 0x0050,
225# define TDFAR 0x0050 225 [TSU_FWINMK] = 0x0054,
226 226 [TSU_ADQT0] = 0x0048,
227/* Ether Register */ 227 [TSU_ADQT1] = 0x004c,
228# define ECMR 0x0160 228 [TSU_VTAG0] = 0x0058,
229# define ECSR 0x0164 229 [TSU_VTAG1] = 0x005c,
230# define ECSIPR 0x0168 230 [TSU_ADSBSY] = 0x0060,
231# define PIR 0x016C 231 [TSU_TEN] = 0x0064,
232# define MAHR 0x0170 232 [TSU_POST1] = 0x0070,
233# define MALR 0x0174 233 [TSU_POST2] = 0x0074,
234# define RFLR 0x0178 234 [TSU_POST3] = 0x0078,
235# define PSR 0x017C 235 [TSU_POST4] = 0x007c,
236# define TROCR 0x0180 236 [TSU_ADRH0] = 0x0100,
237# define CDCR 0x0184 237 [TSU_ADRL0] = 0x0104,
238# define LCCR 0x0188 238 [TSU_ADRH31] = 0x01f8,
239# define CNDCR 0x018C 239 [TSU_ADRL31] = 0x01fc,
240# define CEFCR 0x0194 240
241# define FRECR 0x0198 241 [TXNLCR0] = 0x0080,
242# define TSFRCR 0x019C 242 [TXALCR0] = 0x0084,
243# define TLFRCR 0x01A0 243 [RXNLCR0] = 0x0088,
244# define RFCR 0x01A4 244 [RXALCR0] = 0x008c,
245# define MAFCR 0x01A8 245 [FWNLCR0] = 0x0090,
246# define IPGR 0x01B4 246 [FWALCR0] = 0x0094,
247# if defined(CONFIG_CPU_SUBTYPE_SH7710) 247 [TXNLCR1] = 0x00a0,
248# define APR 0x01B8 248 [TXALCR1] = 0x00a0,
249# define MPR 0x01BC 249 [RXNLCR1] = 0x00a8,
250# define TPAUSER 0x1C4 250 [RXALCR1] = 0x00ac,
251# define BCFR 0x1CC 251 [FWNLCR1] = 0x00b0,
252# endif /* CONFIG_CPU_SH7710 */ 252 [FWALCR1] = 0x00b4,
253 253};
254/* TSU */ 254
255# define TSU_CTRST 0x004 255static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
256# define TSU_FWEN0 0x010 256 [ECMR] = 0x0100,
257# define TSU_FWEN1 0x014 257 [RFLR] = 0x0108,
258# define TSU_FCM 0x018 258 [ECSR] = 0x0110,
259# define TSU_BSYSL0 0x020 259 [ECSIPR] = 0x0118,
260# define TSU_BSYSL1 0x024 260 [PIR] = 0x0120,
261# define TSU_PRISL0 0x028 261 [PSR] = 0x0128,
262# define TSU_PRISL1 0x02C 262 [RDMLR] = 0x0140,
263# define TSU_FWSL0 0x030 263 [IPGR] = 0x0150,
264# define TSU_FWSL1 0x034 264 [APR] = 0x0154,
265# define TSU_FWSLC 0x038 265 [MPR] = 0x0158,
266# define TSU_QTAGM0 0x040 266 [TPAUSER] = 0x0164,
267# define TSU_QTAGM1 0x044 267 [RFCF] = 0x0160,
268# define TSU_ADQT0 0x048 268 [TPAUSECR] = 0x0168,
269# define TSU_ADQT1 0x04C 269 [BCFRR] = 0x016c,
270# define TSU_FWSR 0x050 270 [MAHR] = 0x01c0,
271# define TSU_FWINMK 0x054 271 [MALR] = 0x01c8,
272# define TSU_ADSBSY 0x060 272 [TROCR] = 0x01d0,
273# define TSU_TEN 0x064 273 [CDCR] = 0x01d4,
274# define TSU_POST1 0x070 274 [LCCR] = 0x01d8,
275# define TSU_POST2 0x074 275 [CNDCR] = 0x01dc,
276# define TSU_POST3 0x078 276 [CEFCR] = 0x01e4,
277# define TSU_POST4 0x07C 277 [FRECR] = 0x01e8,
278# define TXNLCR0 0x080 278 [TSFRCR] = 0x01ec,
279# define TXALCR0 0x084 279 [TLFRCR] = 0x01f0,
280# define RXNLCR0 0x088 280 [RFCR] = 0x01f4,
281# define RXALCR0 0x08C 281 [MAFCR] = 0x01f8,
282# define FWNLCR0 0x090 282 [RTRATE] = 0x01fc,
283# define FWALCR0 0x094 283
284# define TXNLCR1 0x0A0 284 [EDMR] = 0x0000,
285# define TXALCR1 0x0A4 285 [EDTRR] = 0x0008,
286# define RXNLCR1 0x0A8 286 [EDRRR] = 0x0010,
287# define RXALCR1 0x0AC 287 [TDLAR] = 0x0018,
288# define FWNLCR1 0x0B0 288 [RDLAR] = 0x0020,
289# define FWALCR1 0x0B4 289 [EESR] = 0x0028,
290 290 [EESIPR] = 0x0030,
291#define TSU_ADRH0 0x0100 291 [TRSCER] = 0x0038,
292#define TSU_ADRL0 0x0104 292 [RMFCR] = 0x0040,
293#define TSU_ADRL31 0x01FC 293 [TFTR] = 0x0048,
294 294 [FDR] = 0x0050,
295#endif /* CONFIG_CPU_SUBTYPE_SH7763 */ 295 [RMCR] = 0x0058,
296 296 [TFUCR] = 0x0064,
297/* There are avoid compile error... */ 297 [RFOCR] = 0x0068,
298#if !defined(BCULR) 298 [FCFTR] = 0x0070,
299#define BCULR 0x0fc 299 [RPADIR] = 0x0078,
300#endif 300 [TRIMD] = 0x007c,
301#if !defined(TRIMD) 301 [RBWAR] = 0x00c8,
302#define TRIMD 0x0fc 302 [RDFAR] = 0x00cc,
303#endif 303 [TBRAR] = 0x00d4,
304#if !defined(APR) 304 [TDFAR] = 0x00d8,
305#define APR 0x0fc 305};
306#endif 306
307#if !defined(MPR) 307static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
308#define MPR 0x0fc 308 [ECMR] = 0x0160,
309#endif 309 [ECSR] = 0x0164,
310#if !defined(TPAUSER) 310 [ECSIPR] = 0x0168,
311#define TPAUSER 0x0fc 311 [PIR] = 0x016c,
312#endif 312 [MAHR] = 0x0170,
313 [MALR] = 0x0174,
314 [RFLR] = 0x0178,
315 [PSR] = 0x017c,
316 [TROCR] = 0x0180,
317 [CDCR] = 0x0184,
318 [LCCR] = 0x0188,
319 [CNDCR] = 0x018c,
320 [CEFCR] = 0x0194,
321 [FRECR] = 0x0198,
322 [TSFRCR] = 0x019c,
323 [TLFRCR] = 0x01a0,
324 [RFCR] = 0x01a4,
325 [MAFCR] = 0x01a8,
326 [IPGR] = 0x01b4,
327 [APR] = 0x01b8,
328 [MPR] = 0x01bc,
329 [TPAUSER] = 0x01c4,
330 [BCFR] = 0x01cc,
331
332 [ARSTR] = 0x0000,
333 [TSU_CTRST] = 0x0004,
334 [TSU_FWEN0] = 0x0010,
335 [TSU_FWEN1] = 0x0014,
336 [TSU_FCM] = 0x0018,
337 [TSU_BSYSL0] = 0x0020,
338 [TSU_BSYSL1] = 0x0024,
339 [TSU_PRISL0] = 0x0028,
340 [TSU_PRISL1] = 0x002c,
341 [TSU_FWSL0] = 0x0030,
342 [TSU_FWSL1] = 0x0034,
343 [TSU_FWSLC] = 0x0038,
344 [TSU_QTAGM0] = 0x0040,
345 [TSU_QTAGM1] = 0x0044,
346 [TSU_ADQT0] = 0x0048,
347 [TSU_ADQT1] = 0x004c,
348 [TSU_FWSR] = 0x0050,
349 [TSU_FWINMK] = 0x0054,
350 [TSU_ADSBSY] = 0x0060,
351 [TSU_TEN] = 0x0064,
352 [TSU_POST1] = 0x0070,
353 [TSU_POST2] = 0x0074,
354 [TSU_POST3] = 0x0078,
355 [TSU_POST4] = 0x007c,
356
357 [TXNLCR0] = 0x0080,
358 [TXALCR0] = 0x0084,
359 [RXNLCR0] = 0x0088,
360 [RXALCR0] = 0x008c,
361 [FWNLCR0] = 0x0090,
362 [FWALCR0] = 0x0094,
363 [TXNLCR1] = 0x00a0,
364 [TXALCR1] = 0x00a0,
365 [RXNLCR1] = 0x00a8,
366 [RXALCR1] = 0x00ac,
367 [FWNLCR1] = 0x00b0,
368 [FWALCR1] = 0x00b4,
369
370 [TSU_ADRH0] = 0x0100,
371 [TSU_ADRL0] = 0x0104,
372 [TSU_ADRL31] = 0x01fc,
373
374};
313 375
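A caveat worth spelling out about these designated-initializer tables: any enum index a family does not list stays 0, and 0 is itself a valid offset (EDSR and ARSTR in the gigabit table, EDMR in the fast-SH4 table), so an out-of-family lookup silently aliases whatever lives at offset zero. An illustrative demonstration, not from the patch:

	/* sh_eth_offset_fast_sh4 has no EDSR entry, so this compiles and
	 * "works" but is wrong: it yields 0x0000, which is EDMR there. */
	u16 off = sh_eth_offset_fast_sh4[EDSR];

	/* which is presumably why the per-CPU sh_eth_cpu_data capability
	 * bits (tsu, bculr, tpauser, ...) gate which registers a call site
	 * may touch, and why SH_ETH_MAX_REGISTER_OFFSET must stay the last
	 * enum entry: it sizes all three arrays. */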
314/* Driver's parameters */ 376/* Driver's parameters */
315#if defined(CONFIG_CPU_SH4) 377#if defined(CONFIG_CPU_SH4)
@@ -338,20 +400,14 @@ enum GECMR_BIT {
338enum DMAC_M_BIT { 400enum DMAC_M_BIT {
339 EDMR_EL = 0x40, /* Litte endian */ 401 EDMR_EL = 0x40, /* Litte endian */
340 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, 402 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
341#ifdef CONFIG_CPU_SUBTYPE_SH7763 403 EDMR_SRST_GETHER = 0x03,
342 EDMR_SRST = 0x03, 404 EDMR_SRST_ETHER = 0x01,
343#else /* CONFIG_CPU_SUBTYPE_SH7763 */
344 EDMR_SRST = 0x01,
345#endif
346}; 405};
347 406
348/* EDTRR */ 407/* EDTRR */
349enum DMAC_T_BIT { 408enum DMAC_T_BIT {
350#ifdef CONFIG_CPU_SUBTYPE_SH7763 409 EDTRR_TRNS_GETHER = 0x03,
351 EDTRR_TRNS = 0x03, 410 EDTRR_TRNS_ETHER = 0x01,
352#else
353 EDTRR_TRNS = 0x01,
354#endif
355}; 411};
356 412
 357/* EDRRR */ 413/* EDRRR */
@@ -695,6 +751,7 @@ struct sh_eth_cpu_data {
695 unsigned mpr:1; /* EtherC have MPR */ 751 unsigned mpr:1; /* EtherC have MPR */
696 unsigned tpauser:1; /* EtherC have TPAUSER */ 752 unsigned tpauser:1; /* EtherC have TPAUSER */
697 unsigned bculr:1; /* EtherC have BCULR */ 753 unsigned bculr:1; /* EtherC have BCULR */
754 unsigned tsu:1; /* EtherC have TSU */
698 unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */ 755 unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
699 unsigned rpadir:1; /* E-DMAC have RPADIR */ 756 unsigned rpadir:1; /* E-DMAC have RPADIR */
700 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ 757 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
@@ -704,6 +761,8 @@ struct sh_eth_cpu_data {
704struct sh_eth_private { 761struct sh_eth_private {
705 struct platform_device *pdev; 762 struct platform_device *pdev;
706 struct sh_eth_cpu_data *cd; 763 struct sh_eth_cpu_data *cd;
764 const u16 *reg_offset;
765 void __iomem *tsu_addr;
707 dma_addr_t rx_desc_dma; 766 dma_addr_t rx_desc_dma;
708 dma_addr_t tx_desc_dma; 767 dma_addr_t tx_desc_dma;
709 struct sh_eth_rxdesc *rx_ring; 768 struct sh_eth_rxdesc *rx_ring;
@@ -722,6 +781,7 @@ struct sh_eth_private {
722 struct mii_bus *mii_bus; /* MDIO bus control */ 781 struct mii_bus *mii_bus; /* MDIO bus control */
723 struct phy_device *phydev; /* PHY device control */ 782 struct phy_device *phydev; /* PHY device control */
724 enum phy_state link; 783 enum phy_state link;
784 phy_interface_t phy_interface;
725 int msg_enable; 785 int msg_enable;
726 int speed; 786 int speed;
727 int duplex; 787 int duplex;
@@ -746,4 +806,32 @@ static inline void sh_eth_soft_swap(char *src, int len)
746#endif 806#endif
747} 807}
748 808
809static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
810 int enum_index)
811{
812 struct sh_eth_private *mdp = netdev_priv(ndev);
813
814 writel(data, ndev->base_addr + mdp->reg_offset[enum_index]);
815}
816
817static inline unsigned long sh_eth_read(struct net_device *ndev,
818 int enum_index)
819{
820 struct sh_eth_private *mdp = netdev_priv(ndev);
821
822 return readl(ndev->base_addr + mdp->reg_offset[enum_index]);
823}
824
825static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
826 unsigned long data, int enum_index)
827{
828 writel(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
829}
830
831static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
832 int enum_index)
833{
834 return readl(mdp->tsu_addr + mdp->reg_offset[enum_index]);
835}
836
749#endif /* #ifndef __SH_ETH_H__ */ 837#endif /* #ifndef __SH_ETH_H__ */
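With the tables and accessors in place, call sites index registers by enum instead of compiling in a per-CPU offset. A hypothetical call site using the EDMR bits defined earlier in this header (the busy-wait is illustrative, not the driver's exact reset sequence):

static void demo_soft_reset(struct net_device *ndev)
{
	/* old style: writel(EDMR_SRST, ioaddr + EDMR) with EDMR a #define */
	sh_eth_write(ndev, EDMR_SRST_GETHER, EDMR);

	/* reads go through the same per-family table */
	while (sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER)
		cpu_relax();
}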
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5976d1d51df..84d4167eee9 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
495 sis_priv->mii_info.reg_num_mask = 0x1f; 495 sis_priv->mii_info.reg_num_mask = 0x1f;
496 496
497 /* Get Mac address according to the chip revision */ 497 /* Get Mac address according to the chip revision */
498 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev)); 498 sis_priv->chipset_rev = pci_dev->revision;
499 if(netif_msg_probe(sis_priv)) 499 if(netif_msg_probe(sis_priv))
500 printk(KERN_DEBUG "%s: detected revision %2.2x, " 500 printk(KERN_DEBUG "%s: detected revision %2.2x, "
501 "trying to get MAC address...\n", 501 "trying to get MAC address...\n",
@@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
532 /* save our host bridge revision */ 532 /* save our host bridge revision */
533 dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL); 533 dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
534 if (dev) { 534 if (dev) {
535 pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev); 535 sis_priv->host_bridge_rev = dev->revision;
536 pci_dev_put(dev); 536 pci_dev_put(dev);
537 } 537 }
538 538
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n", 1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n",
1778 net_dev->name, sis_priv->cur_rx, 1778 net_dev->name, sis_priv->cur_rx,
1779 sis_priv->dirty_rx); 1779 sis_priv->dirty_rx);
1780 dev_kfree_skb(skb);
1780 break; 1781 break;
1781 } 1782 }
1782 1783
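The one-line sis900 change plugs an skb leak: the receive-refill loop had already allocated a replacement buffer when it hit the bail-out path shown, and broke out without releasing it. Reduced to its shape (the condition and helper are hypothetical, not sis900's exact code):

	skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
	if (!skb)
		break;				/* nothing allocated yet, safe */

	if (ring_slot_unexpectedly_busy()) {	/* hypothetical condition */
		dev_kfree_skb(skb);		/* the added line: drop our copy */
		break;
	}
	attach_skb_to_rx_ring(skb);		/* hypothetical helper */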
diff --git a/drivers/net/skfp/Makefile b/drivers/net/skfp/Makefile
index cb23580fcff..b0be0234abf 100644
--- a/drivers/net/skfp/Makefile
+++ b/drivers/net/skfp/Makefile
@@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
17# projects. To keep the source common for all those drivers (and 17# projects. To keep the source common for all those drivers (and
18# thus simplify fixes to it), please do not clean it up! 18# thus simplify fixes to it), please do not clean it up!
19 19
20EXTRA_CFLAGS += -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes 20ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98ba73..35b28f42d20 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3858 3858
3859 /* device is off until link detection */
3860 netif_carrier_off(dev);
3861
3862 return dev; 3859 return dev;
3863} 3860}
3864 3861
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7d85a38377a..2a91868788f 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4983,7 +4983,7 @@ static int sky2_suspend(struct device *dev)
4983 return 0; 4983 return 0;
4984} 4984}
4985 4985
4986#ifdef CONFIG_PM 4986#ifdef CONFIG_PM_SLEEP
4987static int sky2_resume(struct device *dev) 4987static int sky2_resume(struct device *dev)
4988{ 4988{
4989 struct pci_dev *pdev = to_pci_dev(dev); 4989 struct pci_dev *pdev = to_pci_dev(dev);
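The sky2 guard change (CONFIG_PM to CONFIG_PM_SLEEP) keeps system suspend/resume callbacks from being built when the kernel has power management support but no sleep states. The generic shape of the idiom, with placeholder names:

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the device, save volatile state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* re-init the device, restore state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif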
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df611ee1..43654a3bb0e 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
81#include <linux/ethtool.h> 81#include <linux/ethtool.h>
82#include <linux/mii.h> 82#include <linux/mii.h>
83#include <linux/workqueue.h> 83#include <linux/workqueue.h>
84#include <linux/of.h>
84 85
85#include <linux/netdevice.h> 86#include <linux/netdevice.h>
86#include <linux/etherdevice.h> 87#include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
2394 return 0; 2395 return 0;
2395} 2396}
2396 2397
2398#ifdef CONFIG_OF
2399static const struct of_device_id smc91x_match[] = {
2400 { .compatible = "smsc,lan91c94", },
2401 { .compatible = "smsc,lan91c111", },
2402 {},
 2403};
2404MODULE_DEVICE_TABLE(of, smc91x_match);
2405#endif
2406
2397static struct dev_pm_ops smc_drv_pm_ops = { 2407static struct dev_pm_ops smc_drv_pm_ops = {
2398 .suspend = smc_drv_suspend, 2408 .suspend = smc_drv_suspend,
2399 .resume = smc_drv_resume, 2409 .resume = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
2406 .name = CARDNAME, 2416 .name = CARDNAME,
2407 .owner = THIS_MODULE, 2417 .owner = THIS_MODULE,
2408 .pm = &smc_drv_pm_ops, 2418 .pm = &smc_drv_pm_ops,
2419#ifdef CONFIG_OF
2420 .of_match_table = smc91x_match,
2421#endif
2409 }, 2422 },
2410}; 2423};
2411 2424
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index ee747919a76..68d48ab6eac 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -206,68 +206,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
206#define RPC_LSA_DEFAULT RPC_LED_TX_RX 206#define RPC_LSA_DEFAULT RPC_LED_TX_RX
207#define RPC_LSB_DEFAULT RPC_LED_100_10 207#define RPC_LSB_DEFAULT RPC_LED_100_10
208 208
209#elif defined(CONFIG_MACH_LPD79520) || \
210 defined(CONFIG_MACH_LPD7A400) || \
211 defined(CONFIG_MACH_LPD7A404)
212
213/* The LPD7X_IOBARRIER is necessary to overcome a mismatch between the
214 * way that the CPU handles chip selects and the way that the SMC chip
215 * expects the chip select to operate. Refer to
216 * Documentation/arm/Sharp-LH/IOBarrier for details. The read from
217 * IOBARRIER is a byte, in order that we read the least-common
218 * denominator. It would be wasteful to read 32 bits from an 8-bit
219 * accessible region.
220 *
221 * There is no explicit protection against interrupts intervening
222 * between the writew and the IOBARRIER. In SMC ISR there is a
223 * preamble that performs an IOBARRIER in the extremely unlikely event
224 * that the driver interrupts itself between a writew to the chip an
225 * the IOBARRIER that follows *and* the cache is large enough that the
226 * first off-chip access while handing the interrupt is to the SMC
227 * chip. Other devices in the same address space as the SMC chip must
228 * be aware of the potential for trouble and perform a similar
229 * IOBARRIER on entry to their ISR.
230 */
231
232#include <mach/constants.h> /* IOBARRIER_VIRT */
233
234#define SMC_CAN_USE_8BIT 0
235#define SMC_CAN_USE_16BIT 1
236#define SMC_CAN_USE_32BIT 0
237#define SMC_NOWAIT 0
238#define LPD7X_IOBARRIER readb (IOBARRIER_VIRT)
239
240#define SMC_inw(a,r)\
241 ({ unsigned short v = readw ((void*) ((a) + (r))); LPD7X_IOBARRIER; v; })
242#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7X_IOBARRIER; })
243
244#define SMC_insw LPD7_SMC_insw
245static inline void LPD7_SMC_insw (unsigned char* a, int r,
246 unsigned char* p, int l)
247{
248 unsigned short* ps = (unsigned short*) p;
249 while (l-- > 0) {
250 *ps++ = readw (a + r);
251 LPD7X_IOBARRIER;
252 }
253}
254
255#define SMC_outsw LPD7_SMC_outsw
256static inline void LPD7_SMC_outsw (unsigned char* a, int r,
257 unsigned char* p, int l)
258{
259 unsigned short* ps = (unsigned short*) p;
260 while (l-- > 0) {
261 writew (*ps++, a + r);
262 LPD7X_IOBARRIER;
263 }
264}
265
266#define SMC_INTERRUPT_PREAMBLE LPD7X_IOBARRIER
267
268#define RPC_LSA_DEFAULT RPC_LED_TX_RX
269#define RPC_LSB_DEFAULT RPC_LED_100_10
270
271#elif defined(CONFIG_ARCH_VERSATILE) 209#elif defined(CONFIG_ARCH_VERSATILE)
272 210
273#define SMC_CAN_USE_8BIT 1 211#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 64bfdae5956..1566259c1f2 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -791,8 +791,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
791 return -ENODEV; 791 return -ENODEV;
792 } 792 }
793 793
794 SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X", 794 SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X",
795 phy_addr, phydev->addr, phydev->phy_id); 795 phydev->addr, phydev->phy_id);
796 796
797 ret = phy_connect_direct(dev, phydev, 797 ret = phy_connect_direct(dev, phydev,
798 &smsc911x_phy_adjust_link, 0, 798 &smsc911x_phy_adjust_link, 0,
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev)
1178 smsc911x_reg_write(pdata, HW_CFG, 0x00050000); 1178 smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
1179 smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); 1179 smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
1180 1180
1181 /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
1182 spin_lock_irq(&pdata->mac_lock);
1183 smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
1184 spin_unlock_irq(&pdata->mac_lock);
1185
1181 /* Make sure EEPROM has finished loading before setting GPIO_CFG */ 1186 /* Make sure EEPROM has finished loading before setting GPIO_CFG */
1182 timeout = 50; 1187 timeout = 50;
1183 while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && 1188 while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3837f..0e5f03135b5 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1560 1560
1561 priv->hw = device; 1561 priv->hw = device;
1562 1562
1563 if (device_can_wakeup(priv->device)) 1563 if (device_can_wakeup(priv->device)) {
1564 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ 1564 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1565 enable_irq_wake(dev->irq);
1566 }
1565 1567
1566 return 0; 1568 return 0;
1567} 1569}
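stmmac now arms the device interrupt as a wakeup source as soon as wake-on-LAN capability is detected. enable_irq_wake() is usually paired with disable_irq_wake() across the sleep transition; a generic sketch of that pairing (names and the priv layout are placeholders, not stmmac's):

struct foo_priv {
	int irq;
	u32 wolopts;
};

static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (priv->wolopts)			/* WoL requested by user */
		enable_irq_wake(priv->irq);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (priv->wolopts)
		disable_irq_wake(priv->irq);	/* balance the enable */
	return 0;
}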
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 0a6a5ced3c1..aa4765803a4 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1242,8 +1242,7 @@ fail_and_cleanup:
1242/* QEC can be the parent of either QuadEthernet or a BigMAC. We want 1242/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
1243 * the latter. 1243 * the latter.
1244 */ 1244 */
1245static int __devinit bigmac_sbus_probe(struct platform_device *op, 1245static int __devinit bigmac_sbus_probe(struct platform_device *op)
1246 const struct of_device_id *match)
1247{ 1246{
1248 struct device *parent = op->dev.parent; 1247 struct device *parent = op->dev.parent;
1249 struct platform_device *qec_op; 1248 struct platform_device *qec_op;
@@ -1289,7 +1288,7 @@ static const struct of_device_id bigmac_sbus_match[] = {
1289 1288
1290MODULE_DEVICE_TABLE(of, bigmac_sbus_match); 1289MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
1291 1290
1292static struct of_platform_driver bigmac_sbus_driver = { 1291static struct platform_driver bigmac_sbus_driver = {
1293 .driver = { 1292 .driver = {
1294 .name = "sunbmac", 1293 .name = "sunbmac",
1295 .owner = THIS_MODULE, 1294 .owner = THIS_MODULE,
@@ -1301,12 +1300,12 @@ static struct of_platform_driver bigmac_sbus_driver = {
1301 1300
1302static int __init bigmac_init(void) 1301static int __init bigmac_init(void)
1303{ 1302{
1304 return of_register_platform_driver(&bigmac_sbus_driver); 1303 return platform_driver_register(&bigmac_sbus_driver);
1305} 1304}
1306 1305
1307static void __exit bigmac_exit(void) 1306static void __exit bigmac_exit(void)
1308{ 1307{
1309 of_unregister_platform_driver(&bigmac_sbus_driver); 1308 platform_driver_unregister(&bigmac_sbus_driver);
1310} 1309}
1311 1310
1312module_init(bigmac_init); 1311module_init(bigmac_init);
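sunbmac is the first of four drivers in this series (sunhme, sunlance and sunqe follow below) converted from the deprecated of_platform_driver to a plain platform_driver. The mechanical pattern, reduced with generic names:

	/* before: OF-specific driver type and two-argument probe */
	static int __devinit foo_probe(struct platform_device *op,
				       const struct of_device_id *match);
	static struct of_platform_driver foo_driver = { /* ... */ };
	of_register_platform_driver(&foo_driver);

	/* after: ordinary platform driver; matching still comes from
	 * .driver.of_match_table, and probe loses the match argument */
	static int __devinit foo_probe(struct platform_device *op);
	static struct platform_driver foo_driver = { /* ... */ };
	platform_driver_register(&foo_driver);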
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1c5408f8393..c1a344829b5 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
320 320
321 if (txmac_stat & MAC_TXSTAT_URUN) { 321 if (txmac_stat & MAC_TXSTAT_URUN) {
322 netdev_err(dev, "TX MAC xmit underrun\n"); 322 netdev_err(dev, "TX MAC xmit underrun\n");
323 gp->net_stats.tx_fifo_errors++; 323 dev->stats.tx_fifo_errors++;
324 } 324 }
325 325
326 if (txmac_stat & MAC_TXSTAT_MPE) { 326 if (txmac_stat & MAC_TXSTAT_MPE) {
327 netdev_err(dev, "TX MAC max packet size error\n"); 327 netdev_err(dev, "TX MAC max packet size error\n");
328 gp->net_stats.tx_errors++; 328 dev->stats.tx_errors++;
329 } 329 }
330 330
331 /* The rest are all cases of one of the 16-bit TX 331 /* The rest are all cases of one of the 16-bit TX
332 * counters expiring. 332 * counters expiring.
333 */ 333 */
334 if (txmac_stat & MAC_TXSTAT_NCE) 334 if (txmac_stat & MAC_TXSTAT_NCE)
335 gp->net_stats.collisions += 0x10000; 335 dev->stats.collisions += 0x10000;
336 336
337 if (txmac_stat & MAC_TXSTAT_ECE) { 337 if (txmac_stat & MAC_TXSTAT_ECE) {
338 gp->net_stats.tx_aborted_errors += 0x10000; 338 dev->stats.tx_aborted_errors += 0x10000;
339 gp->net_stats.collisions += 0x10000; 339 dev->stats.collisions += 0x10000;
340 } 340 }
341 341
342 if (txmac_stat & MAC_TXSTAT_LCE) { 342 if (txmac_stat & MAC_TXSTAT_LCE) {
343 gp->net_stats.tx_aborted_errors += 0x10000; 343 dev->stats.tx_aborted_errors += 0x10000;
344 gp->net_stats.collisions += 0x10000; 344 dev->stats.collisions += 0x10000;
345 } 345 }
346 346
347 /* We do not keep track of MAC_TXSTAT_FCE and 347 /* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
469 u32 smac = readl(gp->regs + MAC_SMACHINE); 469 u32 smac = readl(gp->regs + MAC_SMACHINE);
470 470
471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); 471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
472 gp->net_stats.rx_over_errors++; 472 dev->stats.rx_over_errors++;
473 gp->net_stats.rx_fifo_errors++; 473 dev->stats.rx_fifo_errors++;
474 474
475 ret = gem_rxmac_reset(gp); 475 ret = gem_rxmac_reset(gp);
476 } 476 }
477 477
478 if (rxmac_stat & MAC_RXSTAT_ACE) 478 if (rxmac_stat & MAC_RXSTAT_ACE)
479 gp->net_stats.rx_frame_errors += 0x10000; 479 dev->stats.rx_frame_errors += 0x10000;
480 480
481 if (rxmac_stat & MAC_RXSTAT_CCE) 481 if (rxmac_stat & MAC_RXSTAT_CCE)
482 gp->net_stats.rx_crc_errors += 0x10000; 482 dev->stats.rx_crc_errors += 0x10000;
483 483
484 if (rxmac_stat & MAC_RXSTAT_LCE) 484 if (rxmac_stat & MAC_RXSTAT_LCE)
485 gp->net_stats.rx_length_errors += 0x10000; 485 dev->stats.rx_length_errors += 0x10000;
486 486
487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
488 * events. 488 * events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
594 if (netif_msg_rx_err(gp)) 594 if (netif_msg_rx_err(gp))
595 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 595 printk(KERN_DEBUG "%s: no buffer for rx frame\n",
596 gp->dev->name); 596 gp->dev->name);
597 gp->net_stats.rx_dropped++; 597 dev->stats.rx_dropped++;
598 } 598 }
599 599
600 if (gem_status & GREG_STAT_RXTAGERR) { 600 if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
602 if (netif_msg_rx_err(gp)) 602 if (netif_msg_rx_err(gp))
603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
604 gp->dev->name); 604 gp->dev->name);
605 gp->net_stats.rx_errors++; 605 dev->stats.rx_errors++;
606 606
607 goto do_reset; 607 goto do_reset;
608 } 608 }
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
684 break; 684 break;
685 } 685 }
686 gp->tx_skbs[entry] = NULL; 686 gp->tx_skbs[entry] = NULL;
687 gp->net_stats.tx_bytes += skb->len; 687 dev->stats.tx_bytes += skb->len;
688 688
689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
690 txd = &gp->init_block->txd[entry]; 690 txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
696 entry = NEXT_TX(entry); 696 entry = NEXT_TX(entry);
697 } 697 }
698 698
699 gp->net_stats.tx_packets++; 699 dev->stats.tx_packets++;
700 dev_kfree_skb_irq(skb); 700 dev_kfree_skb_irq(skb);
701 } 701 }
702 gp->tx_old = entry; 702 gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
738 738
739static int gem_rx(struct gem *gp, int work_to_do) 739static int gem_rx(struct gem *gp, int work_to_do)
740{ 740{
741 struct net_device *dev = gp->dev;
741 int entry, drops, work_done = 0; 742 int entry, drops, work_done = 0;
742 u32 done; 743 u32 done;
743 __sum16 csum; 744 __sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
782 783
783 len = (status & RXDCTRL_BUFSZ) >> 16; 784 len = (status & RXDCTRL_BUFSZ) >> 16;
784 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 785 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
785 gp->net_stats.rx_errors++; 786 dev->stats.rx_errors++;
786 if (len < ETH_ZLEN) 787 if (len < ETH_ZLEN)
787 gp->net_stats.rx_length_errors++; 788 dev->stats.rx_length_errors++;
788 if (len & RXDCTRL_BAD) 789 if (len & RXDCTRL_BAD)
789 gp->net_stats.rx_crc_errors++; 790 dev->stats.rx_crc_errors++;
790 791
791 /* We'll just return it to GEM. */ 792 /* We'll just return it to GEM. */
792 drop_it: 793 drop_it:
793 gp->net_stats.rx_dropped++; 794 dev->stats.rx_dropped++;
794 goto next; 795 goto next;
795 } 796 }
796 797
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
843 844
844 netif_receive_skb(skb); 845 netif_receive_skb(skb);
845 846
846 gp->net_stats.rx_packets++; 847 dev->stats.rx_packets++;
847 gp->net_stats.rx_bytes += len; 848 dev->stats.rx_bytes += len;
848 849
849 next: 850 next:
850 entry = NEXT_RX(entry); 851 entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
2472static struct net_device_stats *gem_get_stats(struct net_device *dev) 2473static struct net_device_stats *gem_get_stats(struct net_device *dev)
2473{ 2474{
2474 struct gem *gp = netdev_priv(dev); 2475 struct gem *gp = netdev_priv(dev);
2475 struct net_device_stats *stats = &gp->net_stats;
2476 2476
2477 spin_lock_irq(&gp->lock); 2477 spin_lock_irq(&gp->lock);
2478 spin_lock(&gp->tx_lock); 2478 spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2481 * so we shield against this 2481 * so we shield against this
2482 */ 2482 */
2483 if (gp->running) { 2483 if (gp->running) {
2484 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2484 dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2485 writel(0, gp->regs + MAC_FCSERR); 2485 writel(0, gp->regs + MAC_FCSERR);
2486 2486
2487 stats->rx_frame_errors += readl(gp->regs + MAC_AERR); 2487 dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
2488 writel(0, gp->regs + MAC_AERR); 2488 writel(0, gp->regs + MAC_AERR);
2489 2489
2490 stats->rx_length_errors += readl(gp->regs + MAC_LERR); 2490 dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
2491 writel(0, gp->regs + MAC_LERR); 2491 writel(0, gp->regs + MAC_LERR);
2492 2492
2493 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2493 dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2494 stats->collisions += 2494 dev->stats.collisions +=
2495 (readl(gp->regs + MAC_ECOLL) + 2495 (readl(gp->regs + MAC_ECOLL) +
2496 readl(gp->regs + MAC_LCOLL)); 2496 readl(gp->regs + MAC_LCOLL));
2497 writel(0, gp->regs + MAC_ECOLL); 2497 writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2501 spin_unlock(&gp->tx_lock); 2501 spin_unlock(&gp->tx_lock);
2502 spin_unlock_irq(&gp->lock); 2502 spin_unlock_irq(&gp->lock);
2503 2503
2504 return &gp->net_stats; 2504 return &dev->stats;
2505} 2505}
2506 2506
2507static int gem_set_mac_address(struct net_device *dev, void *addr) 2507static int gem_set_mac_address(struct net_device *dev, void *addr)
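The sungem conversion retires the driver-private net_device_stats copy in favour of the counters embedded in struct net_device, so every update site changes the same way and gem_get_stats() can simply return &dev->stats. A reduced before/after of one counter site:

static void demo_count_tx(struct net_device *dev, struct sk_buff *skb)
{
	/* before: gp->net_stats.tx_bytes += skb->len; */
	dev->stats.tx_bytes += skb->len;	/* after: shared counters */
	dev->stats.tx_packets++;
}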
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 19905460def..d225077964e 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -843,7 +843,7 @@ struct gem_txd {
843 843
844/* GEM requires that RX descriptors are provided four at a time, 844/* GEM requires that RX descriptors are provided four at a time,
845 * aligned. Also, the RX ring may not wrap around. This means that 845 * aligned. Also, the RX ring may not wrap around. This means that
846 * there will be at least 4 unused desciptor entries in the middle 846 * there will be at least 4 unused descriptor entries in the middle
847 * of the RX ring at all times. 847 * of the RX ring at all times.
848 * 848 *
849 * Similar to HME, GEM assumes that it can write garbage bytes before 849 * Similar to HME, GEM assumes that it can write garbage bytes before
@@ -994,7 +994,6 @@ struct gem {
994 u32 status; 994 u32 status;
995 995
996 struct napi_struct napi; 996 struct napi_struct napi;
997 struct net_device_stats net_stats;
998 997
999 int tx_fifo_sz; 998 int tx_fifo_sz;
1000 int rx_fifo_sz; 999 int rx_fifo_sz;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 55bbb9c15d9..eb4f59fb01e 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3237,11 +3237,15 @@ static void happy_meal_pci_exit(void)
3237#endif 3237#endif
3238 3238
3239#ifdef CONFIG_SBUS 3239#ifdef CONFIG_SBUS
3240static int __devinit hme_sbus_probe(struct platform_device *op, const struct of_device_id *match) 3240static int __devinit hme_sbus_probe(struct platform_device *op)
3241{ 3241{
3242 struct device_node *dp = op->dev.of_node; 3242 struct device_node *dp = op->dev.of_node;
3243 const char *model = of_get_property(dp, "model", NULL); 3243 const char *model = of_get_property(dp, "model", NULL);
3244 int is_qfe = (match->data != NULL); 3244 int is_qfe;
3245
3246 if (!op->dev.of_match)
3247 return -EINVAL;
3248 is_qfe = (op->dev.of_match->data != NULL);
3245 3249
3246 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) 3250 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3247 is_qfe = 1; 3251 is_qfe = 1;
@@ -3292,7 +3296,7 @@ static const struct of_device_id hme_sbus_match[] = {
3292 3296
3293MODULE_DEVICE_TABLE(of, hme_sbus_match); 3297MODULE_DEVICE_TABLE(of, hme_sbus_match);
3294 3298
3295static struct of_platform_driver hme_sbus_driver = { 3299static struct platform_driver hme_sbus_driver = {
3296 .driver = { 3300 .driver = {
3297 .name = "hme", 3301 .name = "hme",
3298 .owner = THIS_MODULE, 3302 .owner = THIS_MODULE,
@@ -3306,7 +3310,7 @@ static int __init happy_meal_sbus_init(void)
3306{ 3310{
3307 int err; 3311 int err;
3308 3312
3309 err = of_register_platform_driver(&hme_sbus_driver); 3313 err = platform_driver_register(&hme_sbus_driver);
3310 if (!err) 3314 if (!err)
3311 err = quattro_sbus_register_irqs(); 3315 err = quattro_sbus_register_irqs();
3312 3316
@@ -3315,7 +3319,7 @@ static int __init happy_meal_sbus_init(void)
3315 3319
3316static void happy_meal_sbus_exit(void) 3320static void happy_meal_sbus_exit(void)
3317{ 3321{
3318 of_unregister_platform_driver(&hme_sbus_driver); 3322 platform_driver_unregister(&hme_sbus_driver);
3319 quattro_sbus_free_irqs(); 3323 quattro_sbus_free_irqs();
3320 3324
3321 while (qfe_sbus_list) { 3325 while (qfe_sbus_list) {
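Because the converted probe no longer receives the matched of_device_id, sunhme reads it back through the transitional op->dev.of_match field of this kernel generation. An equivalent idiom of the same era, sketched with of_match_device() (the do_probe() helper is hypothetical):

static int __devinit hme_sbus_probe(struct platform_device *op)
{
	const struct of_device_id *match;

	match = of_match_device(hme_sbus_match, &op->dev);
	if (!match)
		return -EINVAL;

	return do_probe(op, match->data != NULL);	/* is_qfe flag */
}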
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 767e1e2b210..32a5c7f63c4 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1495,7 +1495,7 @@ fail:
1495 return -ENODEV; 1495 return -ENODEV;
1496} 1496}
1497 1497
1498static int __devinit sunlance_sbus_probe(struct platform_device *op, const struct of_device_id *match) 1498static int __devinit sunlance_sbus_probe(struct platform_device *op)
1499{ 1499{
1500 struct platform_device *parent = to_platform_device(op->dev.parent); 1500 struct platform_device *parent = to_platform_device(op->dev.parent);
1501 struct device_node *parent_dp = parent->dev.of_node; 1501 struct device_node *parent_dp = parent->dev.of_node;
@@ -1536,7 +1536,7 @@ static const struct of_device_id sunlance_sbus_match[] = {
1536 1536
1537MODULE_DEVICE_TABLE(of, sunlance_sbus_match); 1537MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
1538 1538
1539static struct of_platform_driver sunlance_sbus_driver = { 1539static struct platform_driver sunlance_sbus_driver = {
1540 .driver = { 1540 .driver = {
1541 .name = "sunlance", 1541 .name = "sunlance",
1542 .owner = THIS_MODULE, 1542 .owner = THIS_MODULE,
@@ -1550,12 +1550,12 @@ static struct of_platform_driver sunlance_sbus_driver = {
1550/* Find all the lance cards on the system and initialize them */ 1550/* Find all the lance cards on the system and initialize them */
1551static int __init sparc_lance_init(void) 1551static int __init sparc_lance_init(void)
1552{ 1552{
1553 return of_register_platform_driver(&sunlance_sbus_driver); 1553 return platform_driver_register(&sunlance_sbus_driver);
1554} 1554}
1555 1555
1556static void __exit sparc_lance_exit(void) 1556static void __exit sparc_lance_exit(void)
1557{ 1557{
1558 of_unregister_platform_driver(&sunlance_sbus_driver); 1558 platform_driver_unregister(&sunlance_sbus_driver);
1559} 1559}
1560 1560
1561module_init(sparc_lance_init); 1561module_init(sparc_lance_init);
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 9536b2f010b..18ecdc30375 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -941,7 +941,7 @@ fail:
941 return res; 941 return res;
942} 942}
943 943
944static int __devinit qec_sbus_probe(struct platform_device *op, const struct of_device_id *match) 944static int __devinit qec_sbus_probe(struct platform_device *op)
945{ 945{
946 return qec_ether_init(op); 946 return qec_ether_init(op);
947} 947}
@@ -976,7 +976,7 @@ static const struct of_device_id qec_sbus_match[] = {
976 976
977MODULE_DEVICE_TABLE(of, qec_sbus_match); 977MODULE_DEVICE_TABLE(of, qec_sbus_match);
978 978
979static struct of_platform_driver qec_sbus_driver = { 979static struct platform_driver qec_sbus_driver = {
980 .driver = { 980 .driver = {
981 .name = "qec", 981 .name = "qec",
982 .owner = THIS_MODULE, 982 .owner = THIS_MODULE,
@@ -988,12 +988,12 @@ static struct of_platform_driver qec_sbus_driver = {
988 988
989static int __init qec_init(void) 989static int __init qec_init(void)
990{ 990{
991 return of_register_platform_driver(&qec_sbus_driver); 991 return platform_driver_register(&qec_sbus_driver);
992} 992}
993 993
994static void __exit qec_exit(void) 994static void __exit qec_exit(void)
995{ 995{
996 of_unregister_platform_driver(&qec_sbus_driver); 996 platform_driver_unregister(&qec_sbus_driver);
997 997
998 while (root_qec_dev) { 998 while (root_qec_dev) {
999 struct sunqec *next = root_qec_dev->next_module; 999 struct sunqec *next = root_qec_dev->next_module;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7841a8f6999..73c942d85f0 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2010 Broadcom Corporation. 7 * Copyright (C) 2005-2011 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
@@ -48,9 +48,9 @@
48#include <net/ip.h> 48#include <net/ip.h>
49 49
50#include <asm/system.h> 50#include <asm/system.h>
51#include <asm/io.h> 51#include <linux/io.h>
52#include <asm/byteorder.h> 52#include <asm/byteorder.h>
53#include <asm/uaccess.h> 53#include <linux/uaccess.h>
54 54
55#ifdef CONFIG_SPARC 55#ifdef CONFIG_SPARC
56#include <asm/idprom.h> 56#include <asm/idprom.h>
@@ -60,20 +60,14 @@
60#define BAR_0 0 60#define BAR_0 0
61#define BAR_2 2 61#define BAR_2 2
62 62
63#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
64#define TG3_VLAN_TAG_USED 1
65#else
66#define TG3_VLAN_TAG_USED 0
67#endif
68
69#include "tg3.h" 63#include "tg3.h"
70 64
71#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
72#define TG3_MAJ_NUM 3 66#define TG3_MAJ_NUM 3
73#define TG3_MIN_NUM 116 67#define TG3_MIN_NUM 117
74#define DRV_MODULE_VERSION \ 68#define DRV_MODULE_VERSION \
75 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
76#define DRV_MODULE_RELDATE "December 3, 2010" 70#define DRV_MODULE_RELDATE "January 25, 2011"
77 71
78#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
79#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -134,9 +128,6 @@
134 TG3_TX_RING_SIZE) 128 TG3_TX_RING_SIZE)
135#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 129#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
136 130
137#define TG3_RX_DMA_ALIGN 16
138#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
139
140#define TG3_DMA_BYTE_ENAB 64 131#define TG3_DMA_BYTE_ENAB 64
141 132
142#define TG3_RX_STD_DMA_SZ 1536 133#define TG3_RX_STD_DMA_SZ 1536
@@ -1785,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1785 tg3_phy_cl45_read(tp, MDIO_MMD_AN, 1776 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1786 TG3_CL45_D7_EEERES_STAT, &val); 1777 TG3_CL45_D7_EEERES_STAT, &val);
1787 1778
1788 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 1779 switch (val) {
1789 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) 1780 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1781 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1782 case ASIC_REV_5717:
1783 case ASIC_REV_5719:
1784 case ASIC_REV_57765:
1785 /* Enable SM_DSP clock and tx 6dB coding. */
1786 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1787 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1788 MII_TG3_AUXCTL_ACTL_TX_6DB;
1789 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1790
1791 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1792
1793 /* Turn off SM_DSP clock. */
1794 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1795 MII_TG3_AUXCTL_ACTL_TX_6DB;
1796 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1797 }
1798 /* Fallthrough */
1799 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
1790 tp->setlpicnt = 2; 1800 tp->setlpicnt = 2;
1801 }
1791 } 1802 }
1792 1803
1793 if (!tp->setlpicnt) { 1804 if (!tp->setlpicnt) {
@@ -2109,7 +2120,7 @@ out:
2109 2120
2110static void tg3_frob_aux_power(struct tg3 *tp) 2121static void tg3_frob_aux_power(struct tg3 *tp)
2111{ 2122{
2112 struct tg3 *tp_peer = tp; 2123 bool need_vaux = false;
2113 2124
2114 /* The GPIOs do something completely different on 57765. */ 2125 /* The GPIOs do something completely different on 57765. */
2115 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || 2126 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
@@ -2117,23 +2128,32 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 2128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2118 return; 2129 return;
2119 2130
2120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2131 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2121 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || 2132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 2133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) &&
2134 tp->pdev_peer != tp->pdev) {
2123 struct net_device *dev_peer; 2135 struct net_device *dev_peer;
2124 2136
2125 dev_peer = pci_get_drvdata(tp->pdev_peer); 2137 dev_peer = pci_get_drvdata(tp->pdev_peer);
2138
2126 /* remove_one() may have been run on the peer. */ 2139 /* remove_one() may have been run on the peer. */
2127 if (!dev_peer) 2140 if (dev_peer) {
2128 tp_peer = tp; 2141 struct tg3 *tp_peer = netdev_priv(dev_peer);
2129 else 2142
2130 tp_peer = netdev_priv(dev_peer); 2143 if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
2144 return;
2145
2146 if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2147 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
2148 need_vaux = true;
2149 }
2131 } 2150 }
2132 2151
2133 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || 2152 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2134 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || 2153 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2135 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || 2154 need_vaux = true;
2136 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { 2155
2156 if (need_vaux) {
2137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 2158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2139 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2159 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
@@ -2163,10 +2183,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2163 u32 no_gpio2; 2183 u32 no_gpio2;
2164 u32 grc_local_ctrl = 0; 2184 u32 grc_local_ctrl = 0;
2165 2185
2166 if (tp_peer != tp &&
2167 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2168 return;
2169
2170 /* Workaround to prevent overdrawing Amps. */ 2186 /* Workaround to prevent overdrawing Amps. */
2171 if (GET_ASIC_REV(tp->pci_chip_rev_id) == 2187 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2172 ASIC_REV_5714) { 2188 ASIC_REV_5714) {
@@ -2205,10 +2221,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2205 } else { 2221 } else {
2206 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 2222 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2207 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 2223 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2208 if (tp_peer != tp &&
2209 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2210 return;
2211
2212 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2224 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2213 (GRC_LCLCTRL_GPIO_OE1 | 2225 (GRC_LCLCTRL_GPIO_OE1 |
2214 GRC_LCLCTRL_GPIO_OUTPUT1), 100); 2226 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
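The tg3_frob_aux_power() rework folds what used to be scattered early returns keyed off a possibly stale tp_peer into one accumulated boolean. Its control flow, reduced to a sketch (the predicates and helpers are placeholders):

	bool need_vaux = false;

	if (peer_exists_and_differs(tp)) {	/* hypothetical predicate */
		struct tg3 *tp_peer = peer_of(tp);	/* hypothetical helper */

		if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
			return;			/* peer already holds Vaux */
		if (tp_peer->tg3_flags &
		    (TG3_FLAG_WOL_ENABLE | TG3_FLAG_ENABLE_ASF))
			need_vaux = true;
	}

	if (tp->tg3_flags & (TG3_FLAG_WOL_ENABLE | TG3_FLAG_ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		switch_to_aux_power(tp);	/* hypothetical helper */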
@@ -2977,11 +2989,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2977 MII_TG3_AUXCTL_ACTL_TX_6DB; 2989 MII_TG3_AUXCTL_ACTL_TX_6DB;
2978 tg3_writephy(tp, MII_TG3_AUX_CTRL, val); 2990 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2979 2991
2980 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 2992 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && 2993 case ASIC_REV_5717:
2982 !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 2994 case ASIC_REV_57765:
2983 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, 2995 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2984 val | MII_TG3_DSP_CH34TP2_HIBW01); 2996 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2997 MII_TG3_DSP_CH34TP2_HIBW01);
2998 /* Fall through */
2999 case ASIC_REV_5719:
3000 val = MII_TG3_DSP_TAP26_ALNOKO |
3001 MII_TG3_DSP_TAP26_RMRXSTO |
3002 MII_TG3_DSP_TAP26_OPCSINPT;
3003 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3004 }
2985 3005
2986 val = 0; 3006 val = 0;
2987 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 3007 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -4722,8 +4742,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4722 struct sk_buff *skb; 4742 struct sk_buff *skb;
4723 dma_addr_t dma_addr; 4743 dma_addr_t dma_addr;
4724 u32 opaque_key, desc_idx, *post_ptr; 4744 u32 opaque_key, desc_idx, *post_ptr;
4725 bool hw_vlan __maybe_unused = false;
4726 u16 vtag __maybe_unused = 0;
4727 4745
4728 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4746 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4729 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4747 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4800,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4782 tg3_recycle_rx(tnapi, tpr, opaque_key, 4800 tg3_recycle_rx(tnapi, tpr, opaque_key,
4783 desc_idx, *post_ptr); 4801 desc_idx, *post_ptr);
4784 4802
4785 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + 4803 copy_skb = netdev_alloc_skb(tp->dev, len +
4786 TG3_RAW_IP_ALIGN); 4804 TG3_RAW_IP_ALIGN);
4787 if (copy_skb == NULL) 4805 if (copy_skb == NULL)
4788 goto drop_it_no_recycle; 4806 goto drop_it_no_recycle;
4789 4807
4790 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); 4808 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4791 skb_put(copy_skb, len); 4809 skb_put(copy_skb, len);
4792 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4810 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4793 skb_copy_from_linear_data(skb, copy_skb->data, len); 4811 skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4832,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4814 } 4832 }
4815 4833
4816 if (desc->type_flags & RXD_FLAG_VLAN && 4834 if (desc->type_flags & RXD_FLAG_VLAN &&
4817 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { 4835 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4818 vtag = desc->err_vlan & RXD_VLAN_MASK; 4836 __vlan_hwaccel_put_tag(skb,
4819#if TG3_VLAN_TAG_USED 4837 desc->err_vlan & RXD_VLAN_MASK);
4820 if (tp->vlgrp)
4821 hw_vlan = true;
4822 else
4823#endif
4824 {
4825 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4826 __skb_push(skb, VLAN_HLEN);
4827
4828 memmove(ve, skb->data + VLAN_HLEN,
4829 ETH_ALEN * 2);
4830 ve->h_vlan_proto = htons(ETH_P_8021Q);
4831 ve->h_vlan_TCI = htons(vtag);
4832 }
4833 }
4834 4838
4835#if TG3_VLAN_TAG_USED 4839 napi_gro_receive(&tnapi->napi, skb);
4836 if (hw_vlan)
4837 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4838 else
4839#endif
4840 napi_gro_receive(&tnapi->napi, skb);
4841 4840
4842 received++; 4841 received++;
4843 budget--; 4842 budget--;
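The receive-path VLAN rework drops both the TG3_VLAN_TAG_USED compile-time fork and the hand-built vlan_ethhdr push: the tag is attached to the skb and every frame goes through the one GRO entry point. The resulting idiom, as this kernel generation spells it (two-argument __vlan_hwaccel_put_tag; later kernels add a protocol argument):

	if (desc->type_flags & RXD_FLAG_VLAN &&
	    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
		__vlan_hwaccel_put_tag(skb, desc->err_vlan & RXD_VLAN_MASK);

	napi_gro_receive(&tnapi->napi, skb);	/* tagged and untagged alike */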
@@ -5740,11 +5739,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5740 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5739 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5741 } 5740 }
5742 5741
5743#if TG3_VLAN_TAG_USED
5744 if (vlan_tx_tag_present(skb)) 5742 if (vlan_tx_tag_present(skb))
5745 base_flags |= (TXD_FLAG_VLAN | 5743 base_flags |= (TXD_FLAG_VLAN |
5746 (vlan_tx_tag_get(skb) << 16)); 5744 (vlan_tx_tag_get(skb) << 16));
5747#endif
5748 5745
5749 len = skb_headlen(skb); 5746 len = skb_headlen(skb);
5750 5747
@@ -5986,11 +5983,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5986 } 5983 }
5987 } 5984 }
5988 } 5985 }
5989#if TG3_VLAN_TAG_USED 5986
5990 if (vlan_tx_tag_present(skb)) 5987 if (vlan_tx_tag_present(skb))
5991 base_flags |= (TXD_FLAG_VLAN | 5988 base_flags |= (TXD_FLAG_VLAN |
5992 (vlan_tx_tag_get(skb) << 16)); 5989 (vlan_tx_tag_get(skb) << 16));
5993#endif
5994 5990
5995 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5991 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5996 !mss && skb->len > VLAN_ETH_FRAME_LEN) 5992 !mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -7834,7 +7830,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7834 TG3_CPMU_DBTMR1_LNKIDLE_2047US); 7830 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7835 7831
7836 tw32_f(TG3_CPMU_EEE_DBTMR2, 7832 tw32_f(TG3_CPMU_EEE_DBTMR2,
7837 TG3_CPMU_DBTMR1_APE_TX_2047US | 7833 TG3_CPMU_DBTMR2_APE_TX_2047US |
7838 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 7834 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7839 } 7835 }
7840 7836
@@ -8108,8 +8104,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8108 /* Program the jumbo buffer descriptor ring control 8104 /* Program the jumbo buffer descriptor ring control
8109 * blocks on those devices that have them. 8105 * blocks on those devices that have them.
8110 */ 8106 */
8111 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 8107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8112 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 8108 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8109 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8113 /* Setup replenish threshold. */ 8110 /* Setup replenish threshold. */
8114 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); 8111 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8115 8112
@@ -8196,10 +8193,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8196 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 8193 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8197 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 8194 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8198 8195
8199 /* If statement applies to 5705 and 5750 PCI devices only */ 8196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8200 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 8197 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8201 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8202 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
8203 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && 8198 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 8199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8205 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 8200 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
@@ -8227,8 +8222,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8227 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8222 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8228 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8223 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 8224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8230 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK; 8225 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8231 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B; 8226 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8227 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8228 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8229 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8230 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8232 } 8231 }
8233 tw32(TG3_RDMA_RSRVCTRL_REG, 8232 tw32(TG3_RDMA_RSRVCTRL_REG,
8234 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8233 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8350,7 +8349,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8350 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 8349 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8351 udelay(100); 8350 udelay(100);
8352 8351
8353 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { 8352 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8353 tp->irq_cnt > 1) {
8354 val = tr32(MSGINT_MODE); 8354 val = tr32(MSGINT_MODE);
8355 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; 8355 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8356 tw32(MSGINT_MODE, val); 8356 tw32(MSGINT_MODE, val);
@@ -8367,17 +8367,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8367 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 8367 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8368 WDMAC_MODE_LNGREAD_ENAB); 8368 WDMAC_MODE_LNGREAD_ENAB);
8369 8369
8370 /* If statement applies to 5705 and 5750 PCI devices only */ 8370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8371 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 8371 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8372 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8373 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8374 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 8372 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8375 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || 8373 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8376 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { 8374 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8377 /* nothing */ 8375 /* nothing */
8378 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 8376 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8379 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && 8377 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8380 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8381 val |= WDMAC_MODE_RX_ACCEL; 8378 val |= WDMAC_MODE_RX_ACCEL;
8382 } 8379 }
8383 } 8380 }
@@ -9090,7 +9087,8 @@ static void tg3_ints_init(struct tg3 *tp)
9090 9087
9091 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { 9088 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9092 u32 msi_mode = tr32(MSGINT_MODE); 9089 u32 msi_mode = tr32(MSGINT_MODE);
9093 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) 9090 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
9091 tp->irq_cnt > 1)
9094 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 9092 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9095 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 9093 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9096 } 9094 }
@@ -9532,17 +9530,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
9532 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9530 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9533 RX_MODE_KEEP_VLAN_TAG); 9531 RX_MODE_KEEP_VLAN_TAG);
9534 9532
9533#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9535 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9534 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9536 * flag clear. 9535 * flag clear.
9537 */ 9536 */
9538#if TG3_VLAN_TAG_USED
9539 if (!tp->vlgrp &&
9540 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9541 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9542#else
9543 /* By definition, VLAN is disabled always in this
9544 * case.
9545 */
9546 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9537 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9547 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9538 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9548#endif 9539#endif
@@ -10492,16 +10483,53 @@ static int tg3_test_nvram(struct tg3 *tp)
10492 goto out; 10483 goto out;
10493 } 10484 }
10494 10485
10486 err = -EIO;
10487
10495 /* Bootstrap checksum at offset 0x10 */ 10488 /* Bootstrap checksum at offset 0x10 */
10496 csum = calc_crc((unsigned char *) buf, 0x10); 10489 csum = calc_crc((unsigned char *) buf, 0x10);
10497 if (csum != be32_to_cpu(buf[0x10/4])) 10490 if (csum != le32_to_cpu(buf[0x10/4]))
10498 goto out; 10491 goto out;
10499 10492
10500 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 10493 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10501 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 10494 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10502 if (csum != be32_to_cpu(buf[0xfc/4])) 10495 if (csum != le32_to_cpu(buf[0xfc/4]))
10503 goto out; 10496 goto out;
10504 10497
10498 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
10499 /* The data is in little-endian format in NVRAM.
10500 * Use the big-endian read routines to preserve
10501 * the byte order as it exists in NVRAM.
10502 */
10503 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
10504 goto out;
10505 }
10506
10507 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10508 PCI_VPD_LRDT_RO_DATA);
10509 if (i > 0) {
10510 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10511 if (j < 0)
10512 goto out;
10513
10514 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10515 goto out;
10516
10517 i += PCI_VPD_LRDT_TAG_SIZE;
10518 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10519 PCI_VPD_RO_KEYWORD_CHKSUM);
10520 if (j > 0) {
10521 u8 csum8 = 0;
10522
10523 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10524
10525 for (i = 0; i <= j; i++)
10526 csum8 += ((u8 *)buf)[i];
10527
10528 if (csum8)
10529 goto out;
10530 }
10531 }
10532
10505 err = 0; 10533 err = 0;
10506 10534
10507out: 10535out:
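The VPD walk added to tg3_test_nvram() relies on the PCI rule that the bytes from the start of the VPD image through the RV keyword's checksum byte sum to zero mod 256, which is why the loop runs "i <= j" and then tests csum8 against zero. A self-contained sketch of that rule (the helper is hypothetical, not driver code):

	#include <stdint.h>
	#include <stddef.h>

	/* Hypothetical check: chksum_off is the offset of the RV data byte. */
	static int vpd_ro_csum_ok(const uint8_t *vpd, size_t chksum_off)
	{
		uint8_t sum = 0;
		size_t i;

		for (i = 0; i <= chksum_off; i++)	/* include the RV byte */
			sum += vpd[i];

		return sum == 0;	/* RV is chosen to cancel the running sum */
	}
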
@@ -10873,13 +10901,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10873 if (loopback_mode == TG3_MAC_LOOPBACK) { 10901 if (loopback_mode == TG3_MAC_LOOPBACK) {
10874 /* HW errata - mac loopback fails in some cases on 5780. 10902 /* HW errata - mac loopback fails in some cases on 5780.
10875 * Normal traffic and PHY loopback are not affected by 10903 * Normal traffic and PHY loopback are not affected by
10876 * errata. 10904 * errata. Also, the MAC loopback test is deprecated for
10905 * all newer ASIC revisions.
10877 */ 10906 */
10878 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 10907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10908 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
10879 return 0; 10909 return 0;
10880 10910
10881 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 10911 mac_mode = tp->mac_mode &
10882 MAC_MODE_PORT_INT_LPBACK; 10912 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10913 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
10883 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 10914 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10884 mac_mode |= MAC_MODE_LINK_POLARITY; 10915 mac_mode |= MAC_MODE_LINK_POLARITY;
10885 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 10916 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10901,7 +10932,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10901 tg3_writephy(tp, MII_BMCR, val); 10932 tg3_writephy(tp, MII_BMCR, val);
10902 udelay(40); 10933 udelay(40);
10903 10934
10904 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 10935 mac_mode = tp->mac_mode &
10936 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10905 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 10937 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10906 tg3_writephy(tp, MII_TG3_FET_PTEST, 10938 tg3_writephy(tp, MII_TG3_FET_PTEST,
10907 MII_TG3_FET_PTEST_FRC_TX_LINK | 10939 MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10929,6 +10961,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10929 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 10961 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10930 } 10962 }
10931 tw32(MAC_MODE, mac_mode); 10963 tw32(MAC_MODE, mac_mode);
10964
10965 /* Wait for link */
10966 for (i = 0; i < 100; i++) {
10967 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
10968 break;
10969 mdelay(1);
10970 }
10932 } else { 10971 } else {
10933 return -EINVAL; 10972 return -EINVAL;
10934 } 10973 }
@@ -11035,14 +11074,19 @@ out:
11035static int tg3_test_loopback(struct tg3 *tp) 11074static int tg3_test_loopback(struct tg3 *tp)
11036{ 11075{
11037 int err = 0; 11076 int err = 0;
11038 u32 cpmuctrl = 0; 11077 u32 eee_cap, cpmuctrl = 0;
11039 11078
11040 if (!netif_running(tp->dev)) 11079 if (!netif_running(tp->dev))
11041 return TG3_LOOPBACK_FAILED; 11080 return TG3_LOOPBACK_FAILED;
11042 11081
11082 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11083 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11084
11043 err = tg3_reset_hw(tp, 1); 11085 err = tg3_reset_hw(tp, 1);
11044 if (err) 11086 if (err) {
11045 return TG3_LOOPBACK_FAILED; 11087 err = TG3_LOOPBACK_FAILED;
11088 goto done;
11089 }
11046 11090
11047 /* Turn off gphy autopowerdown. */ 11091 /* Turn off gphy autopowerdown. */
11048 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11092 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11062,8 +11106,10 @@ static int tg3_test_loopback(struct tg3 *tp)
11062 udelay(10); 11106 udelay(10);
11063 } 11107 }
11064 11108
11065 if (status != CPMU_MUTEX_GNT_DRIVER) 11109 if (status != CPMU_MUTEX_GNT_DRIVER) {
11066 return TG3_LOOPBACK_FAILED; 11110 err = TG3_LOOPBACK_FAILED;
11111 goto done;
11112 }
11067 11113
11068 /* Turn off link-based power management. */ 11114 /* Turn off link-based power management. */
11069 cpmuctrl = tr32(TG3_CPMU_CTRL); 11115 cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11092,6 +11138,9 @@ static int tg3_test_loopback(struct tg3 *tp)
11092 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11138 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11093 tg3_phy_toggle_apd(tp, true); 11139 tg3_phy_toggle_apd(tp, true);
11094 11140
11141done:
11142 tp->phy_flags |= eee_cap;
11143
11095 return err; 11144 return err;
11096} 11145}
11097 11146
@@ -11198,7 +11247,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11198 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11247 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11199 break; /* We have no PHY */ 11248 break; /* We have no PHY */
11200 11249
11201 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11250 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11251 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11252 !netif_running(dev)))
11202 return -EAGAIN; 11253 return -EAGAIN;
11203 11254
11204 spin_lock_bh(&tp->lock); 11255 spin_lock_bh(&tp->lock);
@@ -11214,7 +11265,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11214 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11265 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11215 break; /* We have no PHY */ 11266 break; /* We have no PHY */
11216 11267
11217 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11268 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11269 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11270 !netif_running(dev)))
11218 return -EAGAIN; 11271 return -EAGAIN;
11219 11272
11220 spin_lock_bh(&tp->lock); 11273 spin_lock_bh(&tp->lock);
@@ -11230,31 +11283,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11230 return -EOPNOTSUPP; 11283 return -EOPNOTSUPP;
11231} 11284}
11232 11285
11233#if TG3_VLAN_TAG_USED
11234static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11235{
11236 struct tg3 *tp = netdev_priv(dev);
11237
11238 if (!netif_running(dev)) {
11239 tp->vlgrp = grp;
11240 return;
11241 }
11242
11243 tg3_netif_stop(tp);
11244
11245 tg3_full_lock(tp, 0);
11246
11247 tp->vlgrp = grp;
11248
11249 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11250 __tg3_set_rx_mode(dev);
11251
11252 tg3_netif_start(tp);
11253
11254 tg3_full_unlock(tp);
11255}
11256#endif
11257
11258static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 11286static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11259{ 11287{
11260 struct tg3 *tp = netdev_priv(dev); 11288 struct tg3 *tp = netdev_priv(dev);
@@ -12468,9 +12496,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12468 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; 12496 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12469 } 12497 }
12470done: 12498done:
12471 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); 12499 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
12472 device_set_wakeup_enable(&tp->pdev->dev, 12500 device_set_wakeup_enable(&tp->pdev->dev,
12473 tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 12501 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12502 else
12503 device_set_wakeup_capable(&tp->pdev->dev, false);
12474} 12504}
12475 12505
12476static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 12506static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -12522,12 +12552,45 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12522 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 12552 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12523} 12553}
12524 12554
12555static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12556{
12557 u32 adv = ADVERTISED_Autoneg |
12558 ADVERTISED_Pause;
12559
12560 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12561 adv |= ADVERTISED_1000baseT_Half |
12562 ADVERTISED_1000baseT_Full;
12563
12564 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12565 adv |= ADVERTISED_100baseT_Half |
12566 ADVERTISED_100baseT_Full |
12567 ADVERTISED_10baseT_Half |
12568 ADVERTISED_10baseT_Full |
12569 ADVERTISED_TP;
12570 else
12571 adv |= ADVERTISED_FIBRE;
12572
12573 tp->link_config.advertising = adv;
12574 tp->link_config.speed = SPEED_INVALID;
12575 tp->link_config.duplex = DUPLEX_INVALID;
12576 tp->link_config.autoneg = AUTONEG_ENABLE;
12577 tp->link_config.active_speed = SPEED_INVALID;
12578 tp->link_config.active_duplex = DUPLEX_INVALID;
12579 tp->link_config.orig_speed = SPEED_INVALID;
12580 tp->link_config.orig_duplex = DUPLEX_INVALID;
12581 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12582}
12583
12525static int __devinit tg3_phy_probe(struct tg3 *tp) 12584static int __devinit tg3_phy_probe(struct tg3 *tp)
12526{ 12585{
12527 u32 hw_phy_id_1, hw_phy_id_2; 12586 u32 hw_phy_id_1, hw_phy_id_2;
12528 u32 hw_phy_id, hw_phy_id_masked; 12587 u32 hw_phy_id, hw_phy_id_masked;
12529 int err; 12588 int err;
12530 12589
12590 /* flow control autonegotiation is default behavior */
12591 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12592 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12593
12531 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) 12594 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12532 return tg3_phy_init(tp); 12595 return tg3_phy_init(tp);
12533 12596
@@ -12589,6 +12652,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12589 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) 12652 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12590 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 12653 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12591 12654
12655 tg3_phy_init_link_config(tp);
12656
12592 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 12657 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12593 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && 12658 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12594 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 12659 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
@@ -12644,17 +12709,6 @@ skip_phy_reset:
12644 err = tg3_init_5401phy_dsp(tp); 12709 err = tg3_init_5401phy_dsp(tp);
12645 } 12710 }
12646 12711
12647 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12648 tp->link_config.advertising =
12649 (ADVERTISED_1000baseT_Half |
12650 ADVERTISED_1000baseT_Full |
12651 ADVERTISED_Autoneg |
12652 ADVERTISED_FIBRE);
12653 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
12654 tp->link_config.advertising &=
12655 ~(ADVERTISED_1000baseT_Half |
12656 ADVERTISED_1000baseT_Full);
12657
12658 return err; 12712 return err;
12659} 12713}
12660 12714
@@ -13064,11 +13118,9 @@ done:
13064 13118
13065static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 13119static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13066 13120
13067static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 13121static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
13068{ 13122{
13069#if TG3_VLAN_TAG_USED
13070 dev->vlan_features |= flags; 13123 dev->vlan_features |= flags;
13071#endif
13072} 13124}
13073 13125
13074static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 13126static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13083,7 +13135,7 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13083 return 512; 13135 return 512;
13084} 13136}
13085 13137
13086DEFINE_PCI_DEVICE_TABLE(write_reorder_chipsets) = { 13138static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13087 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 13139 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13088 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 13140 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13089 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, 13141 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
@@ -13325,7 +13377,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13325 } 13377 }
13326 13378
13327 /* Determine TSO capabilities */ 13379 /* Determine TSO capabilities */
13328 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13381 ; /* Do nothing. HW bug. */
13382 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13329 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13383 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13330 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13384 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13376,7 +13430,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13376 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 13430 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13377 } 13431 }
13378 13432
13379 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13433 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
13434 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13380 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13435 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13381 13436
13382 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13437 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13394,42 +13449,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13394 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13449 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13395 13450
13396 tp->pcie_readrq = 4096; 13451 tp->pcie_readrq = 4096;
13397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 13452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13398 u16 word; 13453 tp->pcie_readrq = 2048;
13399
13400 pci_read_config_word(tp->pdev,
13401 tp->pcie_cap + PCI_EXP_LNKSTA,
13402 &word);
13403 switch (word & PCI_EXP_LNKSTA_CLS) {
13404 case PCI_EXP_LNKSTA_CLS_2_5GB:
13405 word &= PCI_EXP_LNKSTA_NLW;
13406 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13407 switch (word) {
13408 case 2:
13409 tp->pcie_readrq = 2048;
13410 break;
13411 case 4:
13412 tp->pcie_readrq = 1024;
13413 break;
13414 }
13415 break;
13416
13417 case PCI_EXP_LNKSTA_CLS_5_0GB:
13418 word &= PCI_EXP_LNKSTA_NLW;
13419 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13420 switch (word) {
13421 case 1:
13422 tp->pcie_readrq = 2048;
13423 break;
13424 case 2:
13425 tp->pcie_readrq = 1024;
13426 break;
13427 case 4:
13428 tp->pcie_readrq = 512;
13429 break;
13430 }
13431 }
13432 }
13433 13454
13434 pcie_set_readrq(tp->pdev, tp->pcie_readrq); 13455 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13435 13456
@@ -13468,7 +13489,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13468 * every mailbox register write to force the writes to be 13489 * every mailbox register write to force the writes to be
13469 * posted to the chip in order. 13490 * posted to the chip in order.
13470 */ 13491 */
13471 if (pci_dev_present(write_reorder_chipsets) && 13492 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13472 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 13493 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13473 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; 13494 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13474 13495
@@ -13861,11 +13882,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13861 else 13882 else
13862 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13883 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13863 13884
13864 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; 13885 tp->rx_offset = NET_IP_ALIGN;
13865 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 13886 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13887 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13867 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 13888 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13868 tp->rx_offset -= NET_IP_ALIGN; 13889 tp->rx_offset = 0;
13869#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 13890#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13870 tp->rx_copy_thresh = ~(u16)0; 13891 tp->rx_copy_thresh = ~(u16)0;
13871#endif 13892#endif
@@ -14224,7 +14245,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
14224 14245
14225#define TEST_BUFFER_SIZE 0x2000 14246#define TEST_BUFFER_SIZE 0x2000
14226 14247
14227DEFINE_PCI_DEVICE_TABLE(dma_wait_state_chipsets) = { 14248static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14228 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 14249 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14229 { }, 14250 { },
14230}; 14251};
@@ -14403,7 +14424,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14403 * now look for chipsets that are known to expose the 14424 * now look for chipsets that are known to expose the
14404 * DMA bug without failing the test. 14425 * DMA bug without failing the test.
14405 */ 14426 */
14406 if (pci_dev_present(dma_wait_state_chipsets)) { 14427 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14407 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 14428 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14408 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 14429 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14409 } else { 14430 } else {
@@ -14420,23 +14441,6 @@ out_nofree:
14420 return ret; 14441 return ret;
14421} 14442}
14422 14443
14423static void __devinit tg3_init_link_config(struct tg3 *tp)
14424{
14425 tp->link_config.advertising =
14426 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14427 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14428 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14429 ADVERTISED_Autoneg | ADVERTISED_MII);
14430 tp->link_config.speed = SPEED_INVALID;
14431 tp->link_config.duplex = DUPLEX_INVALID;
14432 tp->link_config.autoneg = AUTONEG_ENABLE;
14433 tp->link_config.active_speed = SPEED_INVALID;
14434 tp->link_config.active_duplex = DUPLEX_INVALID;
14435 tp->link_config.orig_speed = SPEED_INVALID;
14436 tp->link_config.orig_duplex = DUPLEX_INVALID;
14437 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14438}
14439
14440static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14444static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14441{ 14445{
14442 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 14446 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
@@ -14629,9 +14633,6 @@ static const struct net_device_ops tg3_netdev_ops = {
14629 .ndo_do_ioctl = tg3_ioctl, 14633 .ndo_do_ioctl = tg3_ioctl,
14630 .ndo_tx_timeout = tg3_tx_timeout, 14634 .ndo_tx_timeout = tg3_tx_timeout,
14631 .ndo_change_mtu = tg3_change_mtu, 14635 .ndo_change_mtu = tg3_change_mtu,
14632#if TG3_VLAN_TAG_USED
14633 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14634#endif
14635#ifdef CONFIG_NET_POLL_CONTROLLER 14636#ifdef CONFIG_NET_POLL_CONTROLLER
14636 .ndo_poll_controller = tg3_poll_controller, 14637 .ndo_poll_controller = tg3_poll_controller,
14637#endif 14638#endif
@@ -14648,9 +14649,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14648 .ndo_do_ioctl = tg3_ioctl, 14649 .ndo_do_ioctl = tg3_ioctl,
14649 .ndo_tx_timeout = tg3_tx_timeout, 14650 .ndo_tx_timeout = tg3_tx_timeout,
14650 .ndo_change_mtu = tg3_change_mtu, 14651 .ndo_change_mtu = tg3_change_mtu,
14651#if TG3_VLAN_TAG_USED
14652 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14653#endif
14654#ifdef CONFIG_NET_POLL_CONTROLLER 14652#ifdef CONFIG_NET_POLL_CONTROLLER
14655 .ndo_poll_controller = tg3_poll_controller, 14653 .ndo_poll_controller = tg3_poll_controller,
14656#endif 14654#endif
@@ -14700,9 +14698,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14700 14698
14701 SET_NETDEV_DEV(dev, &pdev->dev); 14699 SET_NETDEV_DEV(dev, &pdev->dev);
14702 14700
14703#if TG3_VLAN_TAG_USED
14704 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 14701 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14705#endif
14706 14702
14707 tp = netdev_priv(dev); 14703 tp = netdev_priv(dev);
14708 tp->pdev = pdev; 14704 tp->pdev = pdev;
@@ -14748,8 +14744,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14748 goto err_out_free_dev; 14744 goto err_out_free_dev;
14749 } 14745 }
14750 14746
14751 tg3_init_link_config(tp);
14752
14753 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 14747 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14754 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 14748 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14755 14749
@@ -14897,10 +14891,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14897 goto err_out_apeunmap; 14891 goto err_out_apeunmap;
14898 } 14892 }
14899 14893
14900 /* flow control autonegotiation is default behavior */
14901 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14902 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14903
14904 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 14894 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14905 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 14895 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14906 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 14896 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d62c8d937c8..73884b69b74 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2007-2010 Broadcom Corporation. 7 * Copyright (C) 2007-2011 Broadcom Corporation.
8 */ 8 */
9 9
10#ifndef _T3_H 10#ifndef _T3_H
@@ -141,6 +141,7 @@
141#define CHIPREV_ID_57780_A1 0x57780001 141#define CHIPREV_ID_57780_A1 0x57780001
142#define CHIPREV_ID_5717_A0 0x05717000 142#define CHIPREV_ID_5717_A0 0x05717000
143#define CHIPREV_ID_57765_A0 0x57785000 143#define CHIPREV_ID_57765_A0 0x57785000
144#define CHIPREV_ID_5719_A0 0x05719000
144#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 145#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
145#define ASIC_REV_5700 0x07 146#define ASIC_REV_5700 0x07
146#define ASIC_REV_5701 0x00 147#define ASIC_REV_5701 0x00
@@ -1105,7 +1106,7 @@
1105#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 1106#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1106#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff 1107#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
1107#define TG3_CPMU_EEE_DBTMR2 0x000036b8 1108#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1108#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000 1109#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
1109#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff 1110#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
1110#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc 1111#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
1111#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 1112#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
@@ -1333,6 +1334,10 @@
1333 1334
1334#define TG3_RDMA_RSRVCTRL_REG 0x00004900 1335#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1335#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 1336#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1337#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
1338#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
1339#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
1340#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
1336#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 1341#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
1337#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 1342#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
1338/* 0x4904 --> 0x4910 unused */ 1343/* 0x4904 --> 0x4910 unused */
@@ -2108,6 +2113,10 @@
2108 2113
2109#define MII_TG3_DSP_TAP1 0x0001 2114#define MII_TG3_DSP_TAP1 0x0001
2110#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 2115#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
2116#define MII_TG3_DSP_TAP26 0x001a
2117#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
2118#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
2119#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
2111#define MII_TG3_DSP_AADJ1CH0 0x001f 2120#define MII_TG3_DSP_AADJ1CH0 0x001f
2112#define MII_TG3_DSP_CH34TP2 0x4022 2121#define MII_TG3_DSP_CH34TP2 0x4022
2113#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010 2122#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
@@ -2808,9 +2817,6 @@ struct tg3 {
2808 u32 rx_std_max_post; 2817 u32 rx_std_max_post;
2809 u32 rx_offset; 2818 u32 rx_offset;
2810 u32 rx_pkt_map_sz; 2819 u32 rx_pkt_map_sz;
2811#if TG3_VLAN_TAG_USED
2812 struct vlan_group *vlgrp;
2813#endif
2814 2820
2815 2821
2816 /* begin "everything else" cacheline(s) section */ 2822 /* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 7cb301da747..0825db6d883 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved. 2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -44,10 +44,6 @@
44#include <linux/tcp.h> 44#include <linux/tcp.h>
45 45
46 46
47/* There is no singlethread_cpu, so schedule work on the current cpu. */
48#define singlethread_cpu -1
49
50
51/* 47/*
52 * First, "tile_net_init_module()" initializes all four "devices" which 48 * First, "tile_net_init_module()" initializes all four "devices" which
53 * can be used by linux. 49 * can be used by linux.
@@ -73,15 +69,16 @@
73 * return, knowing we will be called again later. Otherwise, we 69 * return, knowing we will be called again later. Otherwise, we
74 * reenable the ingress interrupt, and call "napi_complete()". 70 * reenable the ingress interrupt, and call "napi_complete()".
75 * 71 *
72 * HACK: Since disabling the ingress interrupt is not reliable, we
73 * ignore the interrupt if the global "active" flag is false.
74 *
76 * 75 *
77 * NOTE: The use of "native_driver" ensures that EPP exists, and that 76 * NOTE: The use of "native_driver" ensures that EPP exists, and that
78 * "epp_sendv" is legal, and that "LIPP" is being used. 77 * we are using "LIPP" and "LEPP".
79 * 78 *
80 * NOTE: Failing to free completions for an arbitrarily long time 79 * NOTE: Failing to free completions for an arbitrarily long time
81 * (which is defined to be illegal) does in fact cause bizarre 80 * (which is defined to be illegal) does in fact cause bizarre
82 * problems. The "egress_timer" helps prevent this from happening. 81 * problems. The "egress_timer" helps prevent this from happening.
83 *
84 * NOTE: The egress code can be interrupted by the interrupt handler.
85 */ 82 */
86 83
87 84
@@ -142,6 +139,7 @@
142MODULE_AUTHOR("Tilera"); 139MODULE_AUTHOR("Tilera");
143MODULE_LICENSE("GPL"); 140MODULE_LICENSE("GPL");
144 141
142
145/* 143/*
146 * Queue of incoming packets for a specific cpu and device. 144 * Queue of incoming packets for a specific cpu and device.
147 * 145 *
@@ -177,7 +175,7 @@ struct tile_net_cpu {
177 struct tile_netio_queue queue; 175 struct tile_netio_queue queue;
178 /* Statistics. */ 176 /* Statistics. */
179 struct tile_net_stats_t stats; 177 struct tile_net_stats_t stats;
180 /* ISSUE: Is this needed? */ 178 /* True iff NAPI is enabled. */
181 bool napi_enabled; 179 bool napi_enabled;
 182 /* True if this tile has successfully registered with the IPP. */ 180 /* True if this tile has successfully registered with the IPP. */
 183 bool registered; 181 bool registered;
183 bool registered; 181 bool registered;
@@ -200,20 +198,20 @@ struct tile_net_cpu {
200struct tile_net_priv { 198struct tile_net_priv {
201 /* Our network device. */ 199 /* Our network device. */
202 struct net_device *dev; 200 struct net_device *dev;
203 /* The actual egress queue. */ 201 /* Pages making up the egress queue. */
204 lepp_queue_t *epp_queue; 202 struct page *eq_pages;
205 /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */ 203 /* Address of the actual egress queue. */
206 spinlock_t cmd_lock; 204 lepp_queue_t *eq;
207 /* Protects "epp_queue->comp_head". */ 205 /* Protects "eq". */
208 spinlock_t comp_lock; 206 spinlock_t eq_lock;
209 /* The hypervisor handle for this interface. */ 207 /* The hypervisor handle for this interface. */
210 int hv_devhdl; 208 int hv_devhdl;
211 /* The intr bit mask that IDs this device. */ 209 /* The intr bit mask that IDs this device. */
212 u32 intr_id; 210 u32 intr_id;
213 /* True iff "tile_net_open_aux()" has succeeded. */ 211 /* True iff "tile_net_open_aux()" has succeeded. */
214 int partly_opened; 212 bool partly_opened;
215 /* True iff "tile_net_open_inner()" has succeeded. */ 213 /* True iff the device is "active". */
216 int fully_opened; 214 bool active;
217 /* Effective network cpus. */ 215 /* Effective network cpus. */
218 struct cpumask network_cpus_map; 216 struct cpumask network_cpus_map;
219 /* Number of network cpus. */ 217 /* Number of network cpus. */
@@ -228,6 +226,10 @@ struct tile_net_priv {
228 struct tile_net_cpu *cpu[NR_CPUS]; 226 struct tile_net_cpu *cpu[NR_CPUS];
229}; 227};
230 228
229/* Log2 of the number of small pages needed for the egress queue. */
230#define EQ_ORDER get_order(sizeof(lepp_queue_t))
231/* Size of the egress queue's pages. */
232#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER))
231 233
232/* 234/*
233 * The actual devices (xgbe0, xgbe1, gbe0, gbe1). 235 * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
@@ -284,7 +286,11 @@ static void net_printk(char *fmt, ...)
284 */ 286 */
285static void dump_packet(unsigned char *data, unsigned long length, char *s) 287static void dump_packet(unsigned char *data, unsigned long length, char *s)
286{ 288{
289 int my_cpu = smp_processor_id();
290
287 unsigned long i; 291 unsigned long i;
292 char buf[128];
293
288 static unsigned int count; 294 static unsigned int count;
289 295
290 pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", 296 pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
@@ -294,10 +300,12 @@ static void dump_packet(unsigned char *data, unsigned long length, char *s)
294 300
295 for (i = 0; i < length; i++) { 301 for (i = 0; i < length; i++) {
296 if ((i & 0xf) == 0) 302 if ((i & 0xf) == 0)
297 sprintf(buf, "%8.8lx:", i); 303 sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
298 sprintf(buf + strlen(buf), " %2.2x", data[i]); 304 sprintf(buf + strlen(buf), " %2.2x", data[i]);
299 if ((i & 0xf) == 0xf || i == length - 1) 305 if ((i & 0xf) == 0xf || i == length - 1) {
300 pr_info("%s\n", buf); 306 strcat(buf, "\n");
307 pr_info("%s", buf);
308 }
301 } 309 }
302} 310}
303#endif 311#endif
@@ -351,60 +359,109 @@ static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
351 359
352/* 360/*
353 * Provide a linux buffer for LIPP. 361 * Provide a linux buffer for LIPP.
362 *
363 * Note that the ACTUAL allocation for each buffer is a "struct sk_buff",
364 * plus a chunk of memory that includes not only the requested bytes, but
365 * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info".
366 *
367 * Note that "struct skb_shared_info" is 88 bytes with 64K pages and
368 * 268 bytes with 4K pages (since the frags[] array needs 18 entries).
369 *
370 * Without jumbo packets, the maximum packet size will be 1536 bytes,
371 * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told
372 * the hardware to clip at 1518 bytes instead of 1536 bytes, then we
373 * could save an entire cache line, but in practice, we don't need it.
374 *
375 * Since CPAs are 38 bits, and we can only encode the high 31 bits in
376 * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must
377 * align the actual "va" mod 128.
378 *
379 * We assume that the underlying "head" will be aligned mod 64. Note
380 * that in practice, we have seen "head" NOT aligned mod 128 even when
381 * using 2048 byte allocations, which is surprising.
382 *
383 * If "head" WAS always aligned mod 128, we could change LIPP to
384 * assume that the low SIX bits are zero, and the 7th bit is one, that
385 * is, align the actual "va" mod 128 plus 64, which would be "free".
386 *
387 * For now, the actual "head" pointer points at NET_SKB_PAD bytes of
388 * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff
389 * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for
390 * the actual packet, plus 62 bytes of empty padding, plus some
391 * padding and the "struct skb_shared_info".
392 *
393 * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88
394 * bytes, or 1816 bytes, which fits comfortably into 2048 bytes.
395 *
396 * With 64K pages, a small buffer thus needs 32+92+4+2+126+88
397 * bytes, or 344 bytes, which means we are wasting 64+ bytes, and
398 * could presumably increase the size of small buffers.
399 *
400 * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268
401 * bytes, or 1996 bytes, which fits comfortably into 2048 bytes.
402 *
403 * With 4K pages, a small buffer thus needs 32+92+4+2+126+268
404 * bytes, or 524 bytes, which is annoyingly wasteful.
405 *
406 * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192?
407 *
408 * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64?
354 */ 409 */
355static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, 410static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
356 bool small) 411 bool small)
357{ 412{
358 /* ISSUE: What should we use here? */ 413#if TILE_NET_MTU <= 1536
414 /* Without "jumbo", 2 + 1536 should be sufficient. */
415 unsigned int large_size = NET_IP_ALIGN + 1536;
416#else
417 /* ISSUE: This has not been tested. */
359 unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; 418 unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
419#endif
360 420
361 /* Round up to ensure to avoid "false sharing" with last cache line. */ 421 /* Avoid "false sharing" with last cache line. */
362 unsigned int buffer_size = 422 /* ISSUE: This is already done by "dev_alloc_skb()". */
423 unsigned int len =
363 (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + 424 (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
364 CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); 425 CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
365 426
366 /* 427 unsigned int padding = 128 - NET_SKB_PAD;
367 * ISSUE: Since CPAs are 38 bits, and we can only encode the 428 unsigned int align;
368 * high 31 bits in a "linux_buffer_t", the low 7 bits must be
369 * zero, and thus, we must align the actual "va" mod 128.
370 */
371 const unsigned long align = 128;
372 429
373 struct sk_buff *skb; 430 struct sk_buff *skb;
374 void *va; 431 void *va;
375 432
376 struct sk_buff **skb_ptr; 433 struct sk_buff **skb_ptr;
377 434
378 /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */ 435 /* Request 96 extra bytes for alignment purposes. */
379 /* and also "reserves" that many bytes. */ 436 skb = dev_alloc_skb(len + padding);
380 /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */ 437 if (skb == NULL)
381 int len = sizeof(*skb_ptr) + align + buffer_size; 438 return false;
382
383 while (1) {
384
385 /* Allocate (or fail). */
386 skb = dev_alloc_skb(len);
387 if (skb == NULL)
388 return false;
389
390 /* Make room for a back-pointer to 'skb'. */
391 skb_reserve(skb, sizeof(*skb_ptr));
392 439
393 /* Make sure we are aligned. */ 440 /* Skip 32 or 96 bytes to align "data" mod 128. */
394 skb_reserve(skb, -(long)skb->data & (align - 1)); 441 align = -(long)skb->data & (128 - 1);
442 BUG_ON(align > padding);
443 skb_reserve(skb, align);
395 444
396 /* This address is given to IPP. */ 445 /* This address is given to IPP. */
397 va = skb->data; 446 va = skb->data;
398 447
399 if (small) 448 /* Buffers must not span a huge page. */
400 break; 449 BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);
401 450
402 /* ISSUE: This has never been observed! */ 451#ifdef TILE_NET_PARANOIA
403 /* Large buffers must not span a huge page. */ 452#if CHIP_HAS_CBOX_HOME_MAP()
404 if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0) 453 if (hash_default) {
405 break; 454 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
406 pr_err("Leaking unaligned linux buffer at %p.\n", va); 455 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
456 panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
457 va, hv_pte_get_mode(pte), hv_pte_val(pte));
407 } 458 }
459#endif
460#endif
461
462 /* Invalidate the packet buffer. */
463 if (!hash_default)
464 __inv_buffer(va, len);
408 465
409 /* Skip two bytes to satisfy LIPP assumptions. */ 466 /* Skip two bytes to satisfy LIPP assumptions. */
410 /* Note that this aligns IP on a 16 byte boundary. */ 467 /* Note that this aligns IP on a 16 byte boundary. */
@@ -415,23 +472,9 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
415 skb_ptr = va - sizeof(*skb_ptr); 472 skb_ptr = va - sizeof(*skb_ptr);
416 *skb_ptr = skb; 473 *skb_ptr = skb;
417 474
418 /* Invalidate the packet buffer. */
419 if (!hash_default)
420 __inv_buffer(skb->data, buffer_size);
421
422 /* Make sure "skb_ptr" has been flushed. */ 475 /* Make sure "skb_ptr" has been flushed. */
423 __insn_mf(); 476 __insn_mf();
424 477
425#ifdef TILE_NET_PARANOIA
426#if CHIP_HAS_CBOX_HOME_MAP()
427 if (hash_default) {
428 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
429 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
430 panic("Non-coherent ingress buffer!");
431 }
432#endif
433#endif
434
435 /* Provide the new buffer. */ 478 /* Provide the new buffer. */
436 tile_net_provide_linux_buffer(info, va, small); 479 tile_net_provide_linux_buffer(info, va, small);
437 480
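The alignment rules in the long comment above come down to simple bit arithmetic. A sketch of the pack and unpack math, mirroring the __va((phys_addr_t)(buffer >> 1) << 7) decode used elsewhere in this driver; treating bit 0 as the small/large flag is an assumption read off that decode:

	#include <stdint.h>

	/* Pack a 128-byte-aligned 38-bit CPA into a 32-bit linux_buffer_t.
	 * Assumption: bit 0 carries the small/large buffer flag. */
	static inline uint32_t pack_linux_buffer(uint64_t pa, int small)
	{
		/* The low 7 bits of pa must already be zero. */
		return ((uint32_t)(pa >> 7) << 1) | (small ? 1 : 0);
	}

	static inline uint64_t unpack_linux_buffer(uint32_t buffer)
	{
		return ((uint64_t)(buffer >> 1)) << 7; /* drop flag, restore bits */
	}

The skb_reserve() distance above works the same way: with skb->data at 0x1040, -(0x1040) & 127 is 64, which steps the buffer up to the 128-byte boundary at 0x1080.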
@@ -469,48 +512,64 @@ oops:
469 * Grab some LEPP completions, and store them in "comps", of size 512 * Grab some LEPP completions, and store them in "comps", of size
470 * "comps_size", and return the number of completions which were 513 * "comps_size", and return the number of completions which were
471 * stored, so the caller can free them. 514 * stored, so the caller can free them.
472 *
473 * If "pending" is not NULL, it will be set to true if there might
474 * still be some pending completions caused by this tile, else false.
475 */ 515 */
476static unsigned int tile_net_lepp_grab_comps(struct net_device *dev, 516static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
477 struct sk_buff *comps[], 517 struct sk_buff *comps[],
478 unsigned int comps_size, 518 unsigned int comps_size,
479 bool *pending) 519 unsigned int min_size)
480{ 520{
481 struct tile_net_priv *priv = netdev_priv(dev);
482
483 lepp_queue_t *eq = priv->epp_queue;
484
485 unsigned int n = 0; 521 unsigned int n = 0;
486 522
487 unsigned int comp_head; 523 unsigned int comp_head = eq->comp_head;
488 unsigned int comp_busy; 524 unsigned int comp_busy = eq->comp_busy;
489 unsigned int comp_tail;
490
491 spin_lock(&priv->comp_lock);
492
493 comp_head = eq->comp_head;
494 comp_busy = eq->comp_busy;
495 comp_tail = eq->comp_tail;
496 525
497 while (comp_head != comp_busy && n < comps_size) { 526 while (comp_head != comp_busy && n < comps_size) {
498 comps[n++] = eq->comps[comp_head]; 527 comps[n++] = eq->comps[comp_head];
499 LEPP_QINC(comp_head); 528 LEPP_QINC(comp_head);
500 } 529 }
501 530
502 if (pending != NULL) 531 if (n < min_size)
503 *pending = (comp_head != comp_tail); 532 return 0;
504 533
505 eq->comp_head = comp_head; 534 eq->comp_head = comp_head;
506 535
507 spin_unlock(&priv->comp_lock);
508
509 return n; 536 return n;
510} 537}
511 538
512 539
513/* 540/*
541 * Free some comps, and return true iff there are still some pending.
542 */
543static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
544{
545 struct tile_net_priv *priv = netdev_priv(dev);
546
547 lepp_queue_t *eq = priv->eq;
548
549 struct sk_buff *olds[64];
550 unsigned int wanted = 64;
551 unsigned int i, n;
552 bool pending;
553
554 spin_lock(&priv->eq_lock);
555
556 if (all)
557 eq->comp_busy = eq->comp_tail;
558
559 n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
560
561 pending = (eq->comp_head != eq->comp_tail);
562
563 spin_unlock(&priv->eq_lock);
564
565 for (i = 0; i < n; i++)
566 kfree_skb(olds[i]);
567
568 return pending;
569}
570
571
572/*
514 * Make sure the egress timer is scheduled. 573 * Make sure the egress timer is scheduled.
515 * 574 *
516 * Note that we use "schedule if not scheduled" logic instead of the more 575 * Note that we use "schedule if not scheduled" logic instead of the more
@@ -544,21 +603,11 @@ static void tile_net_handle_egress_timer(unsigned long arg)
544 struct tile_net_cpu *info = (struct tile_net_cpu *)arg; 603 struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
545 struct net_device *dev = info->napi.dev; 604 struct net_device *dev = info->napi.dev;
546 605
547 struct sk_buff *olds[32];
548 unsigned int wanted = 32;
549 unsigned int i, nolds = 0;
550 bool pending;
551
552 /* The timer is no longer scheduled. */ 606 /* The timer is no longer scheduled. */
553 info->egress_timer_scheduled = false; 607 info->egress_timer_scheduled = false;
554 608
555 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending); 609 /* Free comps, and reschedule timer if more are pending. */
556 610 if (tile_net_lepp_free_comps(dev, false))
557 for (i = 0; i < nolds; i++)
558 kfree_skb(olds[i]);
559
560 /* Reschedule timer if needed. */
561 if (pending)
562 tile_net_schedule_egress_timer(info); 611 tile_net_schedule_egress_timer(info);
563} 612}
564 613
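Note the division of labor introduced above: tile_net_lepp_grab_comps() now assumes the caller holds eq_lock, and tile_net_lepp_free_comps() drops that lock before freeing, so kfree_skb() never runs inside the critical section. The idiom, with illustrative names for the pieces not shown here:

	struct sk_buff *done[64];
	unsigned int i, n;

	spin_lock(&priv->eq_lock);
	/* harvest_completions() stands in for the grab step above. */
	n = harvest_completions(priv->eq, done, 64);	/* cheap pointer copies */
	spin_unlock(&priv->eq_lock);

	for (i = 0; i < n; i++)
		kfree_skb(done[i]);	/* possibly slow; lock no longer held */
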
@@ -636,8 +685,39 @@ static bool is_dup_ack(char *s1, char *s2, unsigned int len)
636 685
637 686
638 687
688static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
689{
690 struct tile_netio_queue *queue = &info->queue;
691 netio_queue_impl_t *qsp = queue->__system_part;
692 netio_queue_user_impl_t *qup = &queue->__user_part;
693
694 int index2_aux = index + sizeof(netio_pkt_t);
695 int index2 =
696 ((index2_aux ==
697 qsp->__packet_receive_queue.__last_packet_plus_one) ?
698 0 : index2_aux);
699
700 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
701
702 /* Extract the "linux_buffer_t". */
703 unsigned int buffer = pkt->__packet.word;
704
705 /* Convert "linux_buffer_t" to "va". */
706 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
707
708 /* Acquire the associated "skb". */
709 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
710 struct sk_buff *skb = *skb_ptr;
711
712 kfree_skb(skb);
713
714 /* Consume this packet. */
715 qup->__packet_receive_read = index2;
716}
717
718
639/* 719/*
640 * Like "tile_net_handle_packets()", but just discard packets. 720 * Like "tile_net_poll()", but just discard packets.
641 */ 721 */
642static void tile_net_discard_packets(struct net_device *dev) 722static void tile_net_discard_packets(struct net_device *dev)
643{ 723{
@@ -650,32 +730,8 @@ static void tile_net_discard_packets(struct net_device *dev)
650 730
651 while (qup->__packet_receive_read != 731 while (qup->__packet_receive_read !=
652 qsp->__packet_receive_queue.__packet_write) { 732 qsp->__packet_receive_queue.__packet_write) {
653
654 int index = qup->__packet_receive_read; 733 int index = qup->__packet_receive_read;
655 734 tile_net_discard_aux(info, index);
656 int index2_aux = index + sizeof(netio_pkt_t);
657 int index2 =
658 ((index2_aux ==
659 qsp->__packet_receive_queue.__last_packet_plus_one) ?
660 0 : index2_aux);
661
662 netio_pkt_t *pkt = (netio_pkt_t *)
663 ((unsigned long) &qsp[1] + index);
664
665 /* Extract the "linux_buffer_t". */
666 unsigned int buffer = pkt->__packet.word;
667
668 /* Convert "linux_buffer_t" to "va". */
669 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
670
671 /* Acquire the associated "skb". */
672 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
673 struct sk_buff *skb = *skb_ptr;
674
675 kfree_skb(skb);
676
677 /* Consume this packet. */
678 qup->__packet_receive_read = index2;
679 } 735 }
680} 736}
681 737
@@ -704,7 +760,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
704 760
705 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); 761 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
706 762
707 /* Extract the packet size. */ 763 /* Extract the packet size. FIXME: Shouldn't the second line */
764 /* get subtracted? Mostly moot, since it should be "zero". */
708 unsigned long len = 765 unsigned long len =
709 (NETIO_PKT_CUSTOM_LENGTH(pkt) + 766 (NETIO_PKT_CUSTOM_LENGTH(pkt) +
710 NET_IP_ALIGN - NETIO_PACKET_PADDING); 767 NET_IP_ALIGN - NETIO_PACKET_PADDING);
@@ -722,15 +779,6 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
722 /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ 779 /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
723 unsigned char *buf = va + NET_IP_ALIGN; 780 unsigned char *buf = va + NET_IP_ALIGN;
724 781
725#ifdef IGNORE_DUP_ACKS
726
727 static int other;
728 static int final;
729 static int keep;
730 static int skip;
731
732#endif
733
734 /* Invalidate the packet buffer. */ 782 /* Invalidate the packet buffer. */
735 if (!hash_default) 783 if (!hash_default)
736 __inv_buffer(buf, len); 784 __inv_buffer(buf, len);
@@ -745,16 +793,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
745#ifdef TILE_NET_VERIFY_INGRESS 793#ifdef TILE_NET_VERIFY_INGRESS
746 if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && 794 if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
747 NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { 795 NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
748 /* 796 /* Bug 6624: Includes UDP packets with a "zero" checksum. */
749 * FIXME: This complains about UDP packets
750 * with a "zero" checksum (bug 6624).
751 */
752#ifdef TILE_NET_PANIC_ON_BAD
753 dump_packet(buf, len, "rx");
754 panic("Bad L4 checksum.");
755#else
756 pr_warning("Bad L4 checksum on %d byte packet.\n", len); 797 pr_warning("Bad L4 checksum on %d byte packet.\n", len);
757#endif
758 } 798 }
759 if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && 799 if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
760 NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { 800 NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
@@ -769,90 +809,29 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
769 } 809 }
770 break; 810 break;
771 case NETIO_PKT_STATUS_BAD: 811 case NETIO_PKT_STATUS_BAD:
772#ifdef TILE_NET_PANIC_ON_BAD 812 pr_warning("Unexpected BAD %ld byte packet.\n", len);
773 dump_packet(buf, len, "rx");
774 panic("Unexpected BAD packet.");
775#else
776 pr_warning("Unexpected BAD %d byte packet.\n", len);
777#endif
778 } 813 }
779#endif 814#endif
780 815
781 filter = 0; 816 filter = 0;
782 817
818 /* ISSUE: Filter TCP packets with "bad" checksums? */
819
783 if (!(dev->flags & IFF_UP)) { 820 if (!(dev->flags & IFF_UP)) {
784 /* Filter packets received before we're up. */ 821 /* Filter packets received before we're up. */
785 filter = 1; 822 filter = 1;
823 } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
824 /* Filter "truncated" packets. */
825 filter = 1;
786 } else if (!(dev->flags & IFF_PROMISC)) { 826 } else if (!(dev->flags & IFF_PROMISC)) {
787 /* 827 /* FIXME: Implement HW multicast filter. */
788 * FIXME: Implement HW multicast filter. 828 if (!is_multicast_ether_addr(buf)) {
789 */
790 if (is_unicast_ether_addr(buf)) {
791 /* Filter packets not for our address. */ 829 /* Filter packets not for our address. */
792 const u8 *mine = dev->dev_addr; 830 const u8 *mine = dev->dev_addr;
793 filter = compare_ether_addr(mine, buf); 831 filter = compare_ether_addr(mine, buf);
794 } 832 }
795 } 833 }
796 834
797#ifdef IGNORE_DUP_ACKS
798
799 if (len != 66) {
800 /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */
801
802 other++;
803
804 } else if (index2 ==
805 qsp->__packet_receive_queue.__packet_write) {
806
807 final++;
808
809 } else {
810
811 netio_pkt_t *pkt2 = (netio_pkt_t *)
812 ((unsigned long) &qsp[1] + index2);
813
814 netio_pkt_metadata_t *metadata2 =
815 NETIO_PKT_METADATA(pkt2);
816
817 /* Extract the packet size. */
818 unsigned long len2 =
819 (NETIO_PKT_CUSTOM_LENGTH(pkt2) +
820 NET_IP_ALIGN - NETIO_PACKET_PADDING);
821
822 if (len2 == 66 &&
823 NETIO_PKT_FLOW_HASH_M(metadata, pkt) ==
824 NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) {
825
826 /* Extract the "linux_buffer_t". */
827 unsigned int buffer2 = pkt2->__packet.word;
828
829 /* Convert "linux_buffer_t" to "va". */
830 void *va2 =
831 __va((phys_addr_t)(buffer2 >> 1) << 7);
832
833 /* Extract the packet data pointer. */
834 /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
835 unsigned char *buf2 = va2 + NET_IP_ALIGN;
836
837 /* Invalidate the packet buffer. */
838 if (!hash_default)
839 __inv_buffer(buf2, len2);
840
841 if (is_dup_ack(buf, buf2, len)) {
842 skip++;
843 filter = 1;
844 } else {
845 keep++;
846 }
847 }
848 }
849
850 if (net_ratelimit())
851 pr_info("Other %d Final %d Keep %d Skip %d.\n",
852 other, final, keep, skip);
853
854#endif
855
856 if (filter) { 835 if (filter) {
857 836
858 /* ISSUE: Update "drop" statistics? */ 837 /* ISSUE: Update "drop" statistics? */
@@ -877,10 +856,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
877 /* NOTE: This call also sets "skb->dev = dev". */ 856 /* NOTE: This call also sets "skb->dev = dev". */
878 skb->protocol = eth_type_trans(skb, dev); 857 skb->protocol = eth_type_trans(skb, dev);
879 858
880 /* ISSUE: Discard corrupt packets? */ 859 /* Avoid recomputing "good" TCP/UDP checksums. */
881 /* ISSUE: Discard packets with bad checksums? */
882
883 /* Avoid recomputing TCP/UDP checksums. */
884 if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) 860 if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
885 skb->ip_summed = CHECKSUM_UNNECESSARY; 861 skb->ip_summed = CHECKSUM_UNNECESSARY;
886 862
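The rewritten comment above leaves one receive-side decision: when the shim has already verified the L4 checksum, the skb is marked so the stack skips software verification. A minimal sketch of that pattern, independent of the NetIO metadata macros (the l4_csum_ok flag is a hypothetical stand-in for NETIO_PKT_L4_CSUM_CORRECT_M):

#include <linux/skbuff.h>

static void example_rx_mark_csum(struct sk_buff *skb, bool l4_csum_ok)
{
        /* CHECKSUM_UNNECESSARY: hardware already validated the sum;
         * CHECKSUM_NONE: the stack verifies it in software. */
        skb->ip_summed = l4_csum_ok ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
}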
@@ -912,9 +888,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
912/* 888/*
913 * Handle some packets for the given device on the current CPU. 889 * Handle some packets for the given device on the current CPU.
914 * 890 *
915 * ISSUE: The "rotting packet" race condition occurs if a packet 891 * If "tile_net_stop()" is called on some other tile while this
916 * arrives after the queue appears to be empty, and before the 892 * function is running, we will return, hopefully before that
917 * hypervisor interrupt is re-enabled. 893 * other tile asks us to call "napi_disable()".
894 *
895 * The "rotting packet" race condition occurs if a packet arrives
896 * during the extremely narrow window between the queue appearing to
897 * be empty, and the ingress interrupt being re-enabled. This happens
898 * a LOT under heavy network load.
918 */ 899 */
919static int tile_net_poll(struct napi_struct *napi, int budget) 900static int tile_net_poll(struct napi_struct *napi, int budget)
920{ 901{
@@ -928,7 +909,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
928 909
929 unsigned int work = 0; 910 unsigned int work = 0;
930 911
931 while (1) { 912 while (priv->active) {
932 int index = qup->__packet_receive_read; 913 int index = qup->__packet_receive_read;
933 if (index == qsp->__packet_receive_queue.__packet_write) 914 if (index == qsp->__packet_receive_queue.__packet_write)
934 break; 915 break;
@@ -941,19 +922,24 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
941 922
942 napi_complete(&info->napi); 923 napi_complete(&info->napi);
943 924
944 /* Re-enable hypervisor interrupts. */ 925 if (!priv->active)
926 goto done;
927
928 /* Re-enable the ingress interrupt. */
945 enable_percpu_irq(priv->intr_id); 929 enable_percpu_irq(priv->intr_id);
946 930
947 /* HACK: Avoid the "rotting packet" problem. */ 931 /* HACK: Avoid the "rotting packet" problem (see above). */
948 if (qup->__packet_receive_read != 932 if (qup->__packet_receive_read !=
949 qsp->__packet_receive_queue.__packet_write) 933 qsp->__packet_receive_queue.__packet_write) {
950 napi_schedule(&info->napi); 934 /* ISSUE: Sometimes this returns zero, presumably */
951 935 /* because an interrupt was handled for this tile. */
952 /* ISSUE: Handle completions? */ 936 (void)napi_reschedule(&info->napi);
937 }
953 938
954done: 939done:
955 940
956 tile_net_provide_needed_buffers(info); 941 if (priv->active)
942 tile_net_provide_needed_buffers(info);
957 943
958 return work; 944 return work;
959} 945}
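The updated comment pins down the race: a packet can land in the narrow window between seeing the queue empty and re-arming the ingress interrupt. A sketch of the resulting poll shape, assuming a hypothetical ring with example_process_ring(), example_ring_empty(), and an interrupt re-enable helper:

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
        int work = example_process_ring(napi, budget);  /* hypothetical */

        if (work < budget) {
                napi_complete(napi);
                example_irq_reenable(napi);             /* hypothetical */
                /* Catch a packet that arrived while unarmed. */
                if (!example_ring_empty(napi))
                        napi_reschedule(napi);
        }
        return work;
}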
@@ -961,6 +947,12 @@ done:
961 947
962/* 948/*
963 * Handle an ingress interrupt for the given device on the current cpu. 949 * Handle an ingress interrupt for the given device on the current cpu.
950 *
951 * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
952 * been called! This is probably due to "pending hypervisor downcalls".
953 *
954 * ISSUE: Is there any race condition between the "napi_schedule()" here
955 * and the "napi_complete()" call above?
964 */ 956 */
965static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) 957static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
966{ 958{
@@ -969,9 +961,15 @@ static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
969 int my_cpu = smp_processor_id(); 961 int my_cpu = smp_processor_id();
970 struct tile_net_cpu *info = priv->cpu[my_cpu]; 962 struct tile_net_cpu *info = priv->cpu[my_cpu];
971 963
972 /* Disable hypervisor interrupt. */ 964 /* Disable the ingress interrupt. */
973 disable_percpu_irq(priv->intr_id); 965 disable_percpu_irq(priv->intr_id);
974 966
967 /* Ignore unwanted interrupts. */
968 if (!priv->active)
969 return IRQ_HANDLED;
970
971 /* ISSUE: Sometimes "info->napi_enabled" is false here. */
972
975 napi_schedule(&info->napi); 973 napi_schedule(&info->napi);
976 974
977 return IRQ_HANDLED; 975 return IRQ_HANDLED;
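A sketch of the handler pattern above: mask the per-cpu interrupt, bail out quietly during teardown, otherwise hand the work to NAPI. The example_priv/example_cpu_state types are hypothetical stand-ins for the driver's tile_net_priv/tile_net_cpu (the later sketches reuse them), and the one-argument disable_percpu_irq() call mirrors the tile arch variant used in this driver, not the generic kernel API:

struct example_cpu_state {
        struct napi_struct napi;
        bool napi_enabled;
        bool registered;
};

struct example_priv {
        bool active;                    /* cleared in the stop path */
        int intr_id;                    /* ingress interrupt id */
        struct example_cpu_state *cpu[NR_CPUS];
};

static irqreturn_t example_ingress_irq(int irq, void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct example_priv *priv = netdev_priv(dev);
        struct example_cpu_state *info = priv->cpu[smp_processor_id()];

        disable_percpu_irq(priv->intr_id);      /* this cpu only */
        if (!priv->active)
                return IRQ_HANDLED;             /* stale interrupt */
        napi_schedule(&info->napi);
        return IRQ_HANDLED;
}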
@@ -1005,8 +1003,7 @@ static int tile_net_open_aux(struct net_device *dev)
1005 */ 1003 */
1006 { 1004 {
1007 int epp_home = hv_lotar_to_cpu(epp_lotar); 1005 int epp_home = hv_lotar_to_cpu(epp_lotar);
1008 struct page *page = virt_to_page(priv->epp_queue); 1006 homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
1009 homecache_change_page_home(page, 0, epp_home);
1010 } 1007 }
1011 1008
1012 /* 1009 /*
@@ -1015,9 +1012,9 @@ static int tile_net_open_aux(struct net_device *dev)
1015 { 1012 {
1016 netio_ipp_address_t ea = { 1013 netio_ipp_address_t ea = {
1017 .va = 0, 1014 .va = 0,
1018 .pa = __pa(priv->epp_queue), 1015 .pa = __pa(priv->eq),
1019 .pte = hv_pte(0), 1016 .pte = hv_pte(0),
1020 .size = PAGE_SIZE, 1017 .size = EQ_SIZE,
1021 }; 1018 };
1022 ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar); 1019 ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
1023 ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3); 1020 ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1043,7 +1040,7 @@ static int tile_net_open_aux(struct net_device *dev)
1043 1040
1044 1041
1045/* 1042/*
1046 * Register with hypervisor on each CPU. 1043 * Register with hypervisor on the current CPU.
1047 * 1044 *
1048 * Strangely, this function does important things even if it "fails", 1045 * Strangely, this function does important things even if it "fails",
1049 * which is especially common if the link is not up yet. Hopefully 1046 * which is especially common if the link is not up yet. Hopefully
@@ -1092,7 +1089,8 @@ static void tile_net_register(void *dev_ptr)
1092 priv->cpu[my_cpu] = info; 1089 priv->cpu[my_cpu] = info;
1093 1090
1094 /* 1091 /*
1095 * Register ourselves with the IPP. 1092 * Register ourselves with LIPP. This does a lot of stuff,
1093 * including invoking the LIPP registration code.
1096 */ 1094 */
1097 ret = hv_dev_pwrite(priv->hv_devhdl, 0, 1095 ret = hv_dev_pwrite(priv->hv_devhdl, 0,
1098 (HV_VirtAddr)&config, 1096 (HV_VirtAddr)&config,
@@ -1101,8 +1099,11 @@ static void tile_net_register(void *dev_ptr)
1101 PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", 1099 PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
1102 ret); 1100 ret);
1103 if (ret < 0) { 1101 if (ret < 0) {
1104 printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF" 1102 if (ret != NETIO_LINK_DOWN) {
1105 " failure %d\n", ret); 1103 printk(KERN_DEBUG "hv_dev_pwrite "
1104 "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
1105 ret);
1106 }
1106 info->link_down = (ret == NETIO_LINK_DOWN); 1107 info->link_down = (ret == NETIO_LINK_DOWN);
1107 return; 1108 return;
1108 } 1109 }
@@ -1145,15 +1146,47 @@ static void tile_net_register(void *dev_ptr)
1145 NETIO_IPP_GET_FASTIO_OFF); 1146 NETIO_IPP_GET_FASTIO_OFF);
1146 PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret); 1147 PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
1147 1148
1148 netif_napi_add(dev, &info->napi, tile_net_poll, 64);
1149
1150 /* Now we are registered. */ 1149 /* Now we are registered. */
1151 info->registered = true; 1150 info->registered = true;
1152} 1151}
1153 1152
1154 1153
1155/* 1154/*
1156 * Unregister with hypervisor on each CPU. 1155 * Deregister with hypervisor on the current CPU.
1156 *
1157 * This simply discards all our credits, so no more packets will be
1158 * delivered to this tile. There may still be packets in our queue.
1159 *
1160 * Also, disable the ingress interrupt.
1161 */
1162static void tile_net_deregister(void *dev_ptr)
1163{
1164 struct net_device *dev = (struct net_device *)dev_ptr;
1165 struct tile_net_priv *priv = netdev_priv(dev);
1166 int my_cpu = smp_processor_id();
1167 struct tile_net_cpu *info = priv->cpu[my_cpu];
1168
1169 /* Disable the ingress interrupt. */
1170 disable_percpu_irq(priv->intr_id);
1171
1172 /* Do nothing else if not registered. */
1173 if (info == NULL || !info->registered)
1174 return;
1175
1176 {
1177 struct tile_netio_queue *queue = &info->queue;
1178 netio_queue_user_impl_t *qup = &queue->__user_part;
1179
1180 /* Discard all our credits. */
1181 __netio_fastio_return_credits(qup->__fastio_index, -1);
1182 }
1183}
1184
1185
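The new deregister step must tolerate cpus that never finished registering, for example when open failed part-way. A sketch of the guard pattern, reusing the example_priv layout from the interrupt-handler sketch above (example_return_credits() is a hypothetical stand-in for the fastio credit return):

static void example_deregister(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct example_priv *priv = netdev_priv(dev);
        struct example_cpu_state *info = priv->cpu[smp_processor_id()];

        disable_percpu_irq(priv->intr_id);      /* always safe */
        if (info == NULL || !info->registered)
                return;                         /* cpu never came up */
        example_return_credits(info);           /* hypothetical */
}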
1186/*
1187 * Unregister with hypervisor on the current CPU.
1188 *
1189 * Also, disable the ingress interrupt.
1157 */ 1190 */
1158static void tile_net_unregister(void *dev_ptr) 1191static void tile_net_unregister(void *dev_ptr)
1159{ 1192{
@@ -1162,35 +1195,23 @@ static void tile_net_unregister(void *dev_ptr)
1162 int my_cpu = smp_processor_id(); 1195 int my_cpu = smp_processor_id();
1163 struct tile_net_cpu *info = priv->cpu[my_cpu]; 1196 struct tile_net_cpu *info = priv->cpu[my_cpu];
1164 1197
1165 int ret = 0; 1198 int ret;
1166 int dummy = 0; 1199 int dummy = 0;
1167 1200
1168 /* Do nothing if never registered. */ 1201 /* Disable the ingress interrupt. */
1169 if (info == NULL) 1202 disable_percpu_irq(priv->intr_id);
1170 return;
1171 1203
1172 /* Do nothing if already unregistered. */ 1204 /* Do nothing else if not registered. */
1173 if (!info->registered) 1205 if (info == NULL || !info->registered)
1174 return; 1206 return;
1175 1207
1176 /* 1208 /* Unregister ourselves with LIPP/LEPP. */
1177 * Unregister ourselves with LIPP.
1178 */
1179 ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, 1209 ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1180 sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF); 1210 sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
1181 PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n", 1211 if (ret < 0)
1182 ret); 1212 panic("Failed to unregister with LIPP/LEPP!\n");
1183 if (ret < 0) {
1184 /* FIXME: Just panic? */
1185 pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF"
1186 " failure %d\n", ret);
1187 }
1188 1213
1189 /* 1214 /* Discard all packets still in our NetIO queue. */
1190 * Discard all packets still in our NetIO queue. Hopefully,
1191 * once the unregister call is complete, there will be no
1192 * packets still in flight on the IDN.
1193 */
1194 tile_net_discard_packets(dev); 1215 tile_net_discard_packets(dev);
1195 1216
1196 /* Reset state. */ 1217 /* Reset state. */
@@ -1200,11 +1221,6 @@ static void tile_net_unregister(void *dev_ptr)
1200 /* Cancel egress timer. */ 1221 /* Cancel egress timer. */
1201 del_timer(&info->egress_timer); 1222 del_timer(&info->egress_timer);
1202 info->egress_timer_scheduled = false; 1223 info->egress_timer_scheduled = false;
1203
1204 netif_napi_del(&info->napi);
1205
1206 /* Now we are unregistered. */
1207 info->registered = false;
1208} 1224}
1209 1225
1210 1226
@@ -1212,18 +1228,28 @@ static void tile_net_unregister(void *dev_ptr)
1212 * Helper function for "tile_net_stop()". 1228 * Helper function for "tile_net_stop()".
1213 * 1229 *
1214 * Also used to handle registration failure in "tile_net_open_inner()", 1230 * Also used to handle registration failure in "tile_net_open_inner()",
1215 * when "fully_opened" is known to be false, and the various extra 1231 * when the various extra steps in "tile_net_stop()" are not necessary.
1216 * steps in "tile_net_stop()" are not necessary. ISSUE: It might be
1217 * simpler if we could just call "tile_net_stop()" anyway.
1218 */ 1232 */
1219static void tile_net_stop_aux(struct net_device *dev) 1233static void tile_net_stop_aux(struct net_device *dev)
1220{ 1234{
1221 struct tile_net_priv *priv = netdev_priv(dev); 1235 struct tile_net_priv *priv = netdev_priv(dev);
1236 int i;
1222 1237
1223 int dummy = 0; 1238 int dummy = 0;
1224 1239
1225 /* Unregister all tiles, so LIPP will stop delivering packets. */ 1240 /*
1241 * Unregister all tiles, so LIPP will stop delivering packets.
1242 * Also, delete all the "napi" objects (sequentially, to protect
1243 * "dev->napi_list").
1244 */
1226 on_each_cpu(tile_net_unregister, (void *)dev, 1); 1245 on_each_cpu(tile_net_unregister, (void *)dev, 1);
1246 for_each_online_cpu(i) {
1247 struct tile_net_cpu *info = priv->cpu[i];
1248 if (info != NULL && info->registered) {
1249 netif_napi_del(&info->napi);
1250 info->registered = false;
1251 }
1252 }
1227 1253
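The loop above deletes the per-cpu NAPI instances from a single cpu because netif_napi_del() edits dev->napi_list, which has no internal locking. The full NAPI lifecycle this patch settles on, in sketch form (example_poll() as in the earlier sketch; the weight of 64 matches the driver):

static void example_napi_setup(struct net_device *dev,
                               struct napi_struct *napi)
{
        netif_napi_add(dev, napi, example_poll, 64);
        napi_enable(napi);
}

static void example_napi_teardown(struct napi_struct *napi)
{
        napi_disable(napi);     /* waits out any in-progress poll */
        netif_napi_del(napi);
}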
1228 /* Stop LIPP/LEPP. */ 1254 /* Stop LIPP/LEPP. */
1229 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, 1255 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
@@ -1235,18 +1261,15 @@ static void tile_net_stop_aux(struct net_device *dev)
1235 1261
1236 1262
1237/* 1263/*
1238 * Disable ingress interrupts for the given device on the current cpu. 1264 * Disable NAPI for the given device on the current cpu.
1239 */ 1265 */
1240static void tile_net_disable_intr(void *dev_ptr) 1266static void tile_net_stop_disable(void *dev_ptr)
1241{ 1267{
1242 struct net_device *dev = (struct net_device *)dev_ptr; 1268 struct net_device *dev = (struct net_device *)dev_ptr;
1243 struct tile_net_priv *priv = netdev_priv(dev); 1269 struct tile_net_priv *priv = netdev_priv(dev);
1244 int my_cpu = smp_processor_id(); 1270 int my_cpu = smp_processor_id();
1245 struct tile_net_cpu *info = priv->cpu[my_cpu]; 1271 struct tile_net_cpu *info = priv->cpu[my_cpu];
1246 1272
1247 /* Disable hypervisor interrupt. */
1248 disable_percpu_irq(priv->intr_id);
1249
1250 /* Disable NAPI if needed. */ 1273 /* Disable NAPI if needed. */
1251 if (info != NULL && info->napi_enabled) { 1274 if (info != NULL && info->napi_enabled) {
1252 napi_disable(&info->napi); 1275 napi_disable(&info->napi);
@@ -1256,21 +1279,24 @@ static void tile_net_disable_intr(void *dev_ptr)
1256 1279
1257 1280
1258/* 1281/*
1259 * Enable ingress interrupts for the given device on the current cpu. 1282 * Enable NAPI and the ingress interrupt for the given device
1283 * on the current cpu.
1284 *
1285 * ISSUE: Only do this for "network cpus"?
1260 */ 1286 */
1261static void tile_net_enable_intr(void *dev_ptr) 1287static void tile_net_open_enable(void *dev_ptr)
1262{ 1288{
1263 struct net_device *dev = (struct net_device *)dev_ptr; 1289 struct net_device *dev = (struct net_device *)dev_ptr;
1264 struct tile_net_priv *priv = netdev_priv(dev); 1290 struct tile_net_priv *priv = netdev_priv(dev);
1265 int my_cpu = smp_processor_id(); 1291 int my_cpu = smp_processor_id();
1266 struct tile_net_cpu *info = priv->cpu[my_cpu]; 1292 struct tile_net_cpu *info = priv->cpu[my_cpu];
1267 1293
1268 /* Enable hypervisor interrupt. */
1269 enable_percpu_irq(priv->intr_id);
1270
1271 /* Enable NAPI. */ 1294 /* Enable NAPI. */
1272 napi_enable(&info->napi); 1295 napi_enable(&info->napi);
1273 info->napi_enabled = true; 1296 info->napi_enabled = true;
1297
1298 /* Enable the ingress interrupt. */
1299 enable_percpu_irq(priv->intr_id);
1274} 1300}
1275 1301
1276 1302
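The rename from tile_net_enable_intr() to tile_net_open_enable() comes with an ordering fix: NAPI is enabled before the interrupt is unmasked, so a handler that fires immediately finds a usable napi instance. Sketch, again with the tile-style one-argument enable and the hypothetical example_priv layout from above:

static void example_open_enable(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct example_priv *priv = netdev_priv(dev);
        struct example_cpu_state *info = priv->cpu[smp_processor_id()];

        napi_enable(&info->napi);       /* must precede the unmask */
        info->napi_enabled = true;
        enable_percpu_irq(priv->intr_id);
}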
@@ -1288,8 +1314,9 @@ static int tile_net_open_inner(struct net_device *dev)
1288 int my_cpu = smp_processor_id(); 1314 int my_cpu = smp_processor_id();
1289 struct tile_net_cpu *info; 1315 struct tile_net_cpu *info;
1290 struct tile_netio_queue *queue; 1316 struct tile_netio_queue *queue;
1291 unsigned int irq; 1317 int result = 0;
1292 int i; 1318 int i;
1319 int dummy = 0;
1293 1320
1294 /* 1321 /*
1295 * First try to register just on the local CPU, and handle any 1322 * First try to register just on the local CPU, and handle any
@@ -1307,42 +1334,52 @@ static int tile_net_open_inner(struct net_device *dev)
1307 /* 1334 /*
1308 * Now register everywhere else. If any registration fails, 1335 * Now register everywhere else. If any registration fails,
1309 * even for "link down" (which might not be possible), we 1336 * even for "link down" (which might not be possible), we
1310 * clean up using "tile_net_stop_aux()". 1337 * clean up using "tile_net_stop_aux()". Also, add all the
1338 * "napi" objects (sequentially, to protect "dev->napi_list").
1339 * ISSUE: Only use "netif_napi_add()" for "network cpus"?
1311 */ 1340 */
1312 smp_call_function(tile_net_register, (void *)dev, 1); 1341 smp_call_function(tile_net_register, (void *)dev, 1);
1313 for_each_online_cpu(i) { 1342 for_each_online_cpu(i) {
1314 if (!priv->cpu[i]->registered) { 1343 struct tile_net_cpu *info = priv->cpu[i];
1315 tile_net_stop_aux(dev); 1344 if (info->registered)
1316 return -EAGAIN; 1345 netif_napi_add(dev, &info->napi, tile_net_poll, 64);
1317 } 1346 else
1347 result = -EAGAIN;
1348 }
1349 if (result != 0) {
1350 tile_net_stop_aux(dev);
1351 return result;
1318 } 1352 }
1319 1353
1320 queue = &info->queue; 1354 queue = &info->queue;
1321 1355
1322 /* 1356 if (priv->intr_id == 0) {
1323 * Set the device intr bit mask. 1357 unsigned int irq;
1324 * The tile_net_register above sets per tile __intr_id.
1325 */
1326 priv->intr_id = queue->__system_part->__intr_id;
1327 BUG_ON(!priv->intr_id);
1328
1329 /*
1330 * Register the device interrupt handler.
1331 * The __ffs() function returns the index into the interrupt handler
1332 * table from the interrupt bit mask which should have one bit
1333 * and one bit only set.
1334 */
1335 irq = __ffs(priv->intr_id);
1336 tile_irq_activate(irq, TILE_IRQ_PERCPU);
1337 BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
1338 0, dev->name, (void *)dev) != 0);
1339 1358
1340 /* ISSUE: How could "priv->fully_opened" ever be "true" here? */ 1359 /*
1341 1360 * Acquire the irq allocated by the hypervisor. Every
1342 if (!priv->fully_opened) { 1361 * queue gets the same irq. The "__intr_id" field is
1362 * "1 << irq", so we use "__ffs()" to extract "irq".
1363 */
1364 priv->intr_id = queue->__system_part->__intr_id;
1365 BUG_ON(priv->intr_id == 0);
1366 irq = __ffs(priv->intr_id);
1343 1367
1344 int dummy = 0; 1368 /*
1369 * Register the ingress interrupt handler for this
1370 * device, permanently.
1371 *
1372 * We used to call "free_irq()" in "tile_net_stop()",
1373 * and then re-register the handler here every time,
1374 * but that caused DNP errors in "handle_IRQ_event()"
1375 * because "desc->action" was NULL. See bug 9143.
1376 */
1377 tile_irq_activate(irq, TILE_IRQ_PERCPU);
1378 BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
1379 0, dev->name, (void *)dev) != 0);
1380 }
1345 1381
1382 {
1346 /* Allocate initial buffers. */ 1383 /* Allocate initial buffers. */
1347 1384
1348 int max_buffers = 1385 int max_buffers =
@@ -1359,18 +1396,21 @@ static int tile_net_open_inner(struct net_device *dev)
1359 if (info->num_needed_small_buffers != 0 || 1396 if (info->num_needed_small_buffers != 0 ||
1360 info->num_needed_large_buffers != 0) 1397 info->num_needed_large_buffers != 0)
1361 panic("Insufficient memory for buffer stack!"); 1398 panic("Insufficient memory for buffer stack!");
1399 }
1362 1400
1363 /* Start LIPP/LEPP and activate "ingress" at the shim. */ 1401 /* We are about to be active. */
1364 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, 1402 priv->active = true;
1365 sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
1366 panic("Failed to activate the LIPP Shim!\n");
1367 1403
1368 priv->fully_opened = 1; 1404 /* Make sure "active" is visible to all tiles. */
1369 } 1405 mb();
1370 1406
1371 /* On each tile, enable the hypervisor to trigger interrupts. */ 1407 /* On each tile, enable NAPI and the ingress interrupt. */
1372 /* ISSUE: Do this before starting LIPP/LEPP? */ 1408 on_each_cpu(tile_net_open_enable, (void *)dev, 1);
1373 on_each_cpu(tile_net_enable_intr, (void *)dev, 1); 1409
1410 /* Start LIPP/LEPP and activate "ingress" at the shim. */
1411 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1412 sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
1413 panic("Failed to activate the LIPP Shim!\n");
1374 1414
1375 /* Start our transmit queue. */ 1415 /* Start our transmit queue. */
1376 netif_start_queue(dev); 1416 netif_start_queue(dev);
@@ -1396,9 +1436,9 @@ static void tile_net_open_retry(struct work_struct *w)
1396 * ourselves to try again later; otherwise, tell Linux we now have 1436 * ourselves to try again later; otherwise, tell Linux we now have
1397 * a working link. ISSUE: What if the return value is negative? 1437 * a working link. ISSUE: What if the return value is negative?
1398 */ 1438 */
1399 if (tile_net_open_inner(priv->dev)) 1439 if (tile_net_open_inner(priv->dev) != 0)
1400 schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, 1440 schedule_delayed_work(&priv->retry_work,
1401 TILE_NET_RETRY_INTERVAL); 1441 TILE_NET_RETRY_INTERVAL);
1402 else 1442 else
1403 netif_carrier_on(priv->dev); 1443 netif_carrier_on(priv->dev);
1404} 1444}
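The retry path drops the pinned singlethread_cpu queue in favor of plain schedule_delayed_work(). A sketch of the retry pattern, assuming example_priv additionally carries a dev pointer and the retry_work delayed work (example_open_inner() and EXAMPLE_RETRY_INTERVAL are stand-ins for the driver's helpers):

#include <linux/workqueue.h>

static void example_open_retry(struct work_struct *w)
{
        struct delayed_work *dw = to_delayed_work(w);
        struct example_priv *priv =
                container_of(dw, struct example_priv, retry_work);

        if (example_open_inner(priv->dev) != 0)
                schedule_delayed_work(&priv->retry_work,
                                      EXAMPLE_RETRY_INTERVAL);
        else
                netif_carrier_on(priv->dev);    /* link is up */
}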
@@ -1412,8 +1452,8 @@ static void tile_net_open_retry(struct work_struct *w)
1412 * The open entry point is called when a network interface is made 1452 * The open entry point is called when a network interface is made
1413 * active by the system (IFF_UP). At this point all resources needed 1453 * active by the system (IFF_UP). At this point all resources needed
1414 * for transmit and receive operations are allocated, the interrupt 1454 * for transmit and receive operations are allocated, the interrupt
1415 * handler is registered with the OS, the watchdog timer is started, 1455 * handler is registered with the OS (if needed), the watchdog timer
1416 * and the stack is notified that the interface is ready. 1456 * is started, and the stack is notified that the interface is ready.
1417 * 1457 *
1418 * If the actual link is not available yet, then we tell Linux that 1458 * If the actual link is not available yet, then we tell Linux that
1419 * we have no carrier, and we keep checking until the link comes up. 1459 * we have no carrier, and we keep checking until the link comes up.
@@ -1468,6 +1508,10 @@ static int tile_net_open(struct net_device *dev)
1468#endif 1508#endif
1469 1509
1470 priv->partly_opened = 1; 1510 priv->partly_opened = 1;
1511
1512 } else {
1513 /* FIXME: Is this possible? */
1514 /* printk("Already partly opened.\n"); */
1471 } 1515 }
1472 1516
1473 /* 1517 /*
@@ -1487,57 +1531,17 @@ static int tile_net_open(struct net_device *dev)
1487 * and then remember to try again later. 1531 * and then remember to try again later.
1488 */ 1532 */
1489 netif_carrier_off(dev); 1533 netif_carrier_off(dev);
1490 schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, 1534 schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);
1491 TILE_NET_RETRY_INTERVAL);
1492 1535
1493 return 0; 1536 return 0;
1494} 1537}
1495 1538
1496 1539
1497/* 1540static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
1498 * Disables a network interface.
1499 *
1500 * Returns 0, this is not allowed to fail.
1501 *
1502 * The close entry point is called when an interface is de-activated
1503 * by the OS. The hardware is still under the drivers control, but
1504 * needs to be disabled. A global MAC reset is issued to stop the
1505 * hardware, and all transmit and receive resources are freed.
1506 *
1507 * ISSUE: Can this can be called while "tile_net_poll()" is running?
1508 */
1509static int tile_net_stop(struct net_device *dev)
1510{ 1541{
1511 struct tile_net_priv *priv = netdev_priv(dev); 1542 int n = 0;
1512
1513 bool pending = true;
1514
1515 PDEBUG("tile_net_stop()\n");
1516
1517 /* ISSUE: Only needed if not yet fully open. */
1518 cancel_delayed_work_sync(&priv->retry_work);
1519
1520 /* Can't transmit any more. */
1521 netif_stop_queue(dev);
1522
1523 /*
1524 * Disable hypervisor interrupts on each tile.
1525 */
1526 on_each_cpu(tile_net_disable_intr, (void *)dev, 1);
1527
1528 /*
1529 * Unregister the interrupt handler.
1530 * The __ffs() function returns the index into the interrupt handler
1531 * table from the interrupt bit mask which should have one bit
1532 * and one bit only set.
1533 */
1534 if (priv->intr_id)
1535 free_irq(__ffs(priv->intr_id), dev);
1536
1537 /*
1538 * Drain all the LIPP buffers.
1539 */
1540 1543
1544 /* Drain all the LIPP buffers. */
1541 while (true) { 1545 while (true) {
1542 int buffer; 1546 int buffer;
1543 1547
@@ -1560,43 +1564,105 @@ static int tile_net_stop(struct net_device *dev)
1560 1564
1561 kfree_skb(skb); 1565 kfree_skb(skb);
1562 } 1566 }
1567
1568 n++;
1563 } 1569 }
1564 1570
1565 /* Stop LIPP/LEPP. */ 1571 return n;
1566 tile_net_stop_aux(dev); 1572}
1567 1573
1568 1574
1569 priv->fully_opened = 0; 1575/*
1576 * Disables a network interface.
1577 *
1578 * Returns 0, this is not allowed to fail.
1579 *
1580 * The close entry point is called when an interface is de-activated
1581 * by the OS. The hardware is still under the driver's control, but
1582 * needs to be disabled. A global MAC reset is issued to stop the
1583 * hardware, and all transmit and receive resources are freed.
1584 *
1585 * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"?
1586 *
1587 * Before we are called by "__dev_close()", "netif_running()" will
1588 * have been cleared, so no NEW calls to "tile_net_poll()" will be
1589 * made by "netpoll_poll_dev()".
1590 *
1591 * Often, this can cause some tiles to still have packets in their
1592 * queues, so we must call "tile_net_discard_packets()" later.
1593 *
1594 * Note that some other tile may still be INSIDE "tile_net_poll()",
1595 * and in fact, many will be, if there is heavy network load.
1596 *
1597 * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when
1598 * any tile is still "napi_schedule()"'d will induce a horrible crash
1599 * when "msleep()" is called. This includes tiles which are inside
1600 * "tile_net_poll()" which have not yet called "napi_complete()".
1601 *
1602 * So, we must first try to wait long enough for other tiles to finish
1603 * with any current "tile_net_poll()" call, and, hopefully, to clear
1604 * the "scheduled" flag. ISSUE: It is unclear what happens to tiles
1605 * which have called "napi_schedule()" but which had not yet tried to
1606 * call "tile_net_poll()", or which exhausted their budget inside
1607 * "tile_net_poll()" just before this function was called.
1608 */
1609static int tile_net_stop(struct net_device *dev)
1610{
1611 struct tile_net_priv *priv = netdev_priv(dev);
1612
1613 PDEBUG("tile_net_stop()\n");
1570 1614
1615 /* Start discarding packets. */
1616 priv->active = false;
1617
1618 /* Make sure "active" is visible to all tiles. */
1619 mb();
1571 1620
1572 /* 1621 /*
1573 * XXX: ISSUE: It appears that, in practice anyway, by the 1622 * On each tile, make sure no NEW packets get delivered, and
1574 * time we get here, there are no pending completions. 1623 * disable the ingress interrupt.
1624 *
1625 * Note that the ingress interrupt can fire AFTER this,
1626 * presumably due to packets which were recently delivered,
1627 * but it will have no effect.
1575 */ 1628 */
1576 while (pending) { 1629 on_each_cpu(tile_net_deregister, (void *)dev, 1);
1577 1630
1578 struct sk_buff *olds[32]; 1631 /* Optimistically drain LIPP buffers. */
1579 unsigned int wanted = 32; 1632 (void)tile_net_drain_lipp_buffers(priv);
1580 unsigned int i, nolds = 0;
1581 1633
1582 nolds = tile_net_lepp_grab_comps(dev, olds, 1634 /* ISSUE: Only needed if not yet fully open. */
1583 wanted, &pending); 1635 cancel_delayed_work_sync(&priv->retry_work);
1584 1636
1585 /* ISSUE: We have never actually seen this debug spew. */ 1637 /* Can't transmit any more. */
1586 if (nolds != 0) 1638 netif_stop_queue(dev);
1587 pr_info("During tile_net_stop(), grabbed %d comps.\n",
1588 nolds);
1589 1639
1590 for (i = 0; i < nolds; i++) 1640 /* Disable NAPI on each tile. */
1591 kfree_skb(olds[i]); 1641 on_each_cpu(tile_net_stop_disable, (void *)dev, 1);
1592 } 1642
1643 /*
1644 * Drain any remaining LIPP buffers. NOTE: This "printk()"
1645 * has never been observed, but in theory it could happen.
1646 */
1647 if (tile_net_drain_lipp_buffers(priv) != 0)
1648 pr_warning("Had to drain some extra LIPP buffers!\n");
1593 1649
1650 /* Stop LIPP/LEPP. */
1651 tile_net_stop_aux(dev);
1652
1653 /*
1654 * ISSUE: It appears that, in practice anyway, by the time we
1655 * get here, there are no pending completions, but just in case,
1656 * we free (all of) them anyway.
1657 */
1658 while (tile_net_lepp_free_comps(dev, true))
1659 /* loop */;
1594 1660
1595 /* Wipe the EPP queue. */ 1661 /* Wipe the EPP queue. */
1596 memset(priv->epp_queue, 0, sizeof(lepp_queue_t)); 1662 memset(priv->eq, 0, sizeof(lepp_queue_t));
1597 1663
1598 /* Evict the EPP queue. */ 1664 /* Evict the EPP queue. */
1599 finv_buffer(priv->epp_queue, PAGE_SIZE); 1665 finv_buffer(priv->eq, EQ_SIZE);
1600 1666
1601 return 0; 1667 return 0;
1602} 1668}
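The long comment block above boils down to a flag handshake rather than a lock: the stopping cpu clears priv->active and issues a full barrier, and every poller re-checks the flag at the top of its loop, so ingress quiesces without napi_disable() racing a live poll. In sketch form, with the hypothetical example_priv from the earlier sketches:

/* Stopping cpu: publish the stop request to all tiles. */
static void example_request_stop(struct example_priv *priv)
{
        priv->active = false;
        mb();   /* order the store before the teardown that follows */
}

/* Polling cpus: observed at the top of each poll iteration. */
static bool example_keep_polling(struct example_priv *priv)
{
        return priv->active;
}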
@@ -1620,7 +1686,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1620 if (b_len != 0) { 1686 if (b_len != 0) {
1621 1687
1622 if (!hash_default) 1688 if (!hash_default)
1623 finv_buffer_remote(b_data, b_len); 1689 finv_buffer_remote(b_data, b_len, 0);
1624 1690
1625 cpa = __pa(b_data); 1691 cpa = __pa(b_data);
1626 frags[n].cpa_lo = cpa; 1692 frags[n].cpa_lo = cpa;
@@ -1643,7 +1709,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1643 if (!hash_default) { 1709 if (!hash_default) {
1644 void *va = pfn_to_kaddr(pfn) + f->page_offset; 1710 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1645 BUG_ON(PageHighMem(f->page)); 1711 BUG_ON(PageHighMem(f->page));
1646 finv_buffer_remote(va, f->size); 1712 finv_buffer_remote(va, f->size, 0);
1647 } 1713 }
1648 1714
1649 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; 1715 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1742,17 +1808,15 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1742 1808
1743 unsigned long irqflags; 1809 unsigned long irqflags;
1744 1810
1745 lepp_queue_t *eq = priv->epp_queue; 1811 lepp_queue_t *eq = priv->eq;
1746 1812
1747 struct sk_buff *olds[4]; 1813 struct sk_buff *olds[8];
1748 unsigned int wanted = 4; 1814 unsigned int wanted = 8;
1749 unsigned int i, nolds = 0; 1815 unsigned int i, nolds = 0;
1750 1816
1751 unsigned int cmd_head, cmd_tail, cmd_next; 1817 unsigned int cmd_head, cmd_tail, cmd_next;
1752 unsigned int comp_tail; 1818 unsigned int comp_tail;
1753 1819
1754 unsigned int free_slots;
1755
1756 1820
1757 /* Paranoia. */ 1821 /* Paranoia. */
1758 BUG_ON(skb->protocol != htons(ETH_P_IP)); 1822 BUG_ON(skb->protocol != htons(ETH_P_IP));
@@ -1780,34 +1844,32 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1780 1844
1781 /* Enqueue the command. */ 1845 /* Enqueue the command. */
1782 1846
1783 spin_lock_irqsave(&priv->cmd_lock, irqflags); 1847 spin_lock_irqsave(&priv->eq_lock, irqflags);
1784 1848
1785 /* 1849 /*
1786 * Handle completions if needed to make room. 1850 * Handle completions if needed to make room.
1787 * HACK: Spin until there is sufficient room. 1851 * HACK: Spin until there is sufficient room.
1788 */ 1852 */
1789 free_slots = lepp_num_free_comp_slots(eq); 1853 if (lepp_num_free_comp_slots(eq) == 0) {
1790 if (free_slots < 1) { 1854 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
1791spin: 1855 if (nolds == 0) {
1792 nolds += tile_net_lepp_grab_comps(dev, olds + nolds, 1856busy:
1793 wanted - nolds, NULL); 1857 spin_unlock_irqrestore(&priv->eq_lock, irqflags);
1794 if (lepp_num_free_comp_slots(eq) < 1) 1858 return NETDEV_TX_BUSY;
1795 goto spin; 1859 }
1796 } 1860 }
1797 1861
1798 cmd_head = eq->cmd_head; 1862 cmd_head = eq->cmd_head;
1799 cmd_tail = eq->cmd_tail; 1863 cmd_tail = eq->cmd_tail;
1800 1864
1801 /* NOTE: The "gotos" below are untested. */
1802
1803 /* Prepare to advance, detecting full queue. */ 1865 /* Prepare to advance, detecting full queue. */
1804 cmd_next = cmd_tail + cmd_size; 1866 cmd_next = cmd_tail + cmd_size;
1805 if (cmd_tail < cmd_head && cmd_next >= cmd_head) 1867 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1806 goto spin; 1868 goto busy;
1807 if (cmd_next > LEPP_CMD_LIMIT) { 1869 if (cmd_next > LEPP_CMD_LIMIT) {
1808 cmd_next = 0; 1870 cmd_next = 0;
1809 if (cmd_next == cmd_head) 1871 if (cmd_next == cmd_head)
1810 goto spin; 1872 goto busy;
1811 } 1873 }
1812 1874
1813 /* Copy the command. */ 1875 /* Copy the command. */
@@ -1823,14 +1885,18 @@ spin:
1823 eq->comp_tail = comp_tail; 1885 eq->comp_tail = comp_tail;
1824 1886
1825 /* Flush before allowing LEPP to handle the command. */ 1887 /* Flush before allowing LEPP to handle the command. */
1888 /* ISSUE: Is this the optimal location for the flush? */
1826 __insn_mf(); 1889 __insn_mf();
1827 1890
1828 eq->cmd_tail = cmd_tail; 1891 eq->cmd_tail = cmd_tail;
1829 1892
1830 spin_unlock_irqrestore(&priv->cmd_lock, irqflags); 1893 /* NOTE: Using "4" here is more efficient than "0" or "2", */
1831 1894 /* and, strangely, more efficient than pre-checking the number */
1895 /* of available completions, and comparing it to 4. */
1832 if (nolds == 0) 1896 if (nolds == 0)
1833 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); 1897 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
1898
1899 spin_unlock_irqrestore(&priv->eq_lock, irqflags);
1834 1900
1835 /* Handle completions. */ 1901 /* Handle completions. */
1836 for (i = 0; i < nolds; i++) 1902 for (i = 0; i < nolds; i++)
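Both transmit paths now share one locking discipline: take eq_lock; if the completion ring is full, try once to harvest completions and otherwise return NETDEV_TX_BUSY instead of spinning; enqueue; opportunistically grab completions, leaving up to 4 pending per the note above; then free the skbs outside the lock. A condensed sketch, assuming example_priv also carries the eq_lock spinlock and an eq pointer (example_eq_full(), example_enqueue(), and example_grab_comps() stand in for the lepp helpers):

static netdev_tx_t example_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);
        struct sk_buff *olds[8];
        unsigned int i, nolds = 0;
        unsigned long flags;

        spin_lock_irqsave(&priv->eq_lock, flags);
        if (example_eq_full(priv->eq)) {
                nolds = example_grab_comps(priv->eq, olds, 8, 0);
                if (nolds == 0) {
                        spin_unlock_irqrestore(&priv->eq_lock, flags);
                        return NETDEV_TX_BUSY;  /* no room; retry later */
                }
        }
        example_enqueue(priv->eq, skb);
        if (nolds == 0)
                nolds = example_grab_comps(priv->eq, olds, 8, 4);
        spin_unlock_irqrestore(&priv->eq_lock, flags);

        for (i = 0; i < nolds; i++)             /* free outside the lock */
                kfree_skb(olds[i]);
        return NETDEV_TX_OK;
}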
@@ -1870,10 +1936,10 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1870 1936
1871 unsigned int num_frags; 1937 unsigned int num_frags;
1872 1938
1873 lepp_queue_t *eq = priv->epp_queue; 1939 lepp_queue_t *eq = priv->eq;
1874 1940
1875 struct sk_buff *olds[4]; 1941 struct sk_buff *olds[8];
1876 unsigned int wanted = 4; 1942 unsigned int wanted = 8;
1877 unsigned int i, nolds = 0; 1943 unsigned int i, nolds = 0;
1878 1944
1879 unsigned int cmd_size = sizeof(lepp_cmd_t); 1945 unsigned int cmd_size = sizeof(lepp_cmd_t);
@@ -1883,8 +1949,6 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1883 1949
1884 lepp_cmd_t cmds[LEPP_MAX_FRAGS]; 1950 lepp_cmd_t cmds[LEPP_MAX_FRAGS];
1885 1951
1886 unsigned int free_slots;
1887
1888 1952
1889 /* 1953 /*
1890 * This is paranoia, since we think that if the link doesn't come 1954 * This is paranoia, since we think that if the link doesn't come
@@ -1905,7 +1969,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1905 if (hash_default) { 1969 if (hash_default) {
1906 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); 1970 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
1907 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) 1971 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
1908 panic("Non-coherent egress buffer!"); 1972 panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
1973 data, hv_pte_get_mode(pte), hv_pte_val(pte));
1909 } 1974 }
1910#endif 1975#endif
1911#endif 1976#endif
@@ -1958,37 +2023,35 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1958 2023
1959 /* Enqueue the commands. */ 2024 /* Enqueue the commands. */
1960 2025
1961 spin_lock_irqsave(&priv->cmd_lock, irqflags); 2026 spin_lock_irqsave(&priv->eq_lock, irqflags);
1962 2027
1963 /* 2028 /*
1964 * Handle completions if needed to make room. 2029 * Handle completions if needed to make room.
1965 * HACK: Spin until there is sufficient room. 2030 * HACK: Spin until there is sufficient room.
1966 */ 2031 */
1967 free_slots = lepp_num_free_comp_slots(eq); 2032 if (lepp_num_free_comp_slots(eq) == 0) {
1968 if (free_slots < 1) { 2033 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
1969spin: 2034 if (nolds == 0) {
1970 nolds += tile_net_lepp_grab_comps(dev, olds + nolds, 2035busy:
1971 wanted - nolds, NULL); 2036 spin_unlock_irqrestore(&priv->eq_lock, irqflags);
1972 if (lepp_num_free_comp_slots(eq) < 1) 2037 return NETDEV_TX_BUSY;
1973 goto spin; 2038 }
1974 } 2039 }
1975 2040
1976 cmd_head = eq->cmd_head; 2041 cmd_head = eq->cmd_head;
1977 cmd_tail = eq->cmd_tail; 2042 cmd_tail = eq->cmd_tail;
1978 2043
1979 /* NOTE: The "gotos" below are untested. */
1980
1981 /* Copy the commands, or fail. */ 2044 /* Copy the commands, or fail. */
1982 for (i = 0; i < num_frags; i++) { 2045 for (i = 0; i < num_frags; i++) {
1983 2046
1984 /* Prepare to advance, detecting full queue. */ 2047 /* Prepare to advance, detecting full queue. */
1985 cmd_next = cmd_tail + cmd_size; 2048 cmd_next = cmd_tail + cmd_size;
1986 if (cmd_tail < cmd_head && cmd_next >= cmd_head) 2049 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1987 goto spin; 2050 goto busy;
1988 if (cmd_next > LEPP_CMD_LIMIT) { 2051 if (cmd_next > LEPP_CMD_LIMIT) {
1989 cmd_next = 0; 2052 cmd_next = 0;
1990 if (cmd_next == cmd_head) 2053 if (cmd_next == cmd_head)
1991 goto spin; 2054 goto busy;
1992 } 2055 }
1993 2056
1994 /* Copy the command. */ 2057 /* Copy the command. */
@@ -2005,14 +2068,18 @@ spin:
2005 eq->comp_tail = comp_tail; 2068 eq->comp_tail = comp_tail;
2006 2069
2007 /* Flush before allowing LEPP to handle the command. */ 2070 /* Flush before allowing LEPP to handle the command. */
2071 /* ISSUE: Is this the optimal location for the flush? */
2008 __insn_mf(); 2072 __insn_mf();
2009 2073
2010 eq->cmd_tail = cmd_tail; 2074 eq->cmd_tail = cmd_tail;
2011 2075
2012 spin_unlock_irqrestore(&priv->cmd_lock, irqflags); 2076 /* NOTE: Using "4" here is more efficient than "0" or "2", */
2013 2077 /* and, strangely, more efficient than pre-checking the number */
2078 /* of available completions, and comparing it to 4. */
2014 if (nolds == 0) 2079 if (nolds == 0)
2015 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); 2080 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
2081
2082 spin_unlock_irqrestore(&priv->eq_lock, irqflags);
2016 2083
2017 /* Handle completions. */ 2084 /* Handle completions. */
2018 for (i = 0; i < nolds; i++) 2085 for (i = 0; i < nolds; i++)
@@ -2261,7 +2328,6 @@ static struct net_device *tile_net_dev_init(const char *name)
2261 int ret; 2328 int ret;
2262 struct net_device *dev; 2329 struct net_device *dev;
2263 struct tile_net_priv *priv; 2330 struct tile_net_priv *priv;
2264 struct page *page;
2265 2331
2266 /* 2332 /*
2267 * Allocate the device structure. This allocates "priv", calls 2333 * Allocate the device structure. This allocates "priv", calls
@@ -2285,23 +2351,21 @@ static struct net_device *tile_net_dev_init(const char *name)
2285 2351
2286 INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); 2352 INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
2287 2353
2288 spin_lock_init(&priv->cmd_lock); 2354 spin_lock_init(&priv->eq_lock);
2289 spin_lock_init(&priv->comp_lock);
2290 2355
2291 /* Allocate "epp_queue". */ 2356 /* Allocate "eq". */
2292 BUG_ON(get_order(sizeof(lepp_queue_t)) != 0); 2357 priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);
2293 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); 2358 if (!priv->eq_pages) {
2294 if (!page) {
2295 free_netdev(dev); 2359 free_netdev(dev);
2296 return NULL; 2360 return NULL;
2297 } 2361 }
2298 priv->epp_queue = page_address(page); 2362 priv->eq = page_address(priv->eq_pages);
2299 2363
2300 /* Register the network device. */ 2364 /* Register the network device. */
2301 ret = register_netdev(dev); 2365 ret = register_netdev(dev);
2302 if (ret) { 2366 if (ret) {
2303 pr_err("register_netdev %s failed %d\n", dev->name, ret); 2367 pr_err("register_netdev %s failed %d\n", dev->name, ret);
2304 free_page((unsigned long)priv->epp_queue); 2368 __free_pages(priv->eq_pages, EQ_ORDER);
2305 free_netdev(dev); 2369 free_netdev(dev);
2306 return NULL; 2370 return NULL;
2307 } 2371 }
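The egress queue now spans multiple pages, so the allocation moves from a single get-page/free-page pair to an order-EQ_ORDER allocation, and every error path must free with the same order. Minimal sketch of the pairing (EXAMPLE_ORDER stands in for EQ_ORDER):

#include <linux/gfp.h>
#include <linux/mm.h>

#define EXAMPLE_ORDER 2         /* 2^2 = 4 contiguous pages, zeroed */

static void *example_alloc_queue(struct page **pages)
{
        *pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EXAMPLE_ORDER);
        return *pages ? page_address(*pages) : NULL;
}

static void example_free_queue(struct page *pages)
{
        __free_pages(pages, EXAMPLE_ORDER);     /* same order as alloc */
}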
@@ -2310,7 +2374,7 @@ static struct net_device *tile_net_dev_init(const char *name)
2310 ret = tile_net_get_mac(dev); 2374 ret = tile_net_get_mac(dev);
2311 if (ret < 0) { 2375 if (ret < 0) {
2312 unregister_netdev(dev); 2376 unregister_netdev(dev);
2313 free_page((unsigned long)priv->epp_queue); 2377 __free_pages(priv->eq_pages, EQ_ORDER);
2314 free_netdev(dev); 2378 free_netdev(dev);
2315 return NULL; 2379 return NULL;
2316 } 2380 }
@@ -2321,6 +2385,9 @@ static struct net_device *tile_net_dev_init(const char *name)
2321 2385
2322/* 2386/*
2323 * Module cleanup. 2387 * Module cleanup.
2388 *
2389 * FIXME: If compiled as a module, this module cannot be "unloaded",
2390 * because the "ingress interrupt handler" is registered permanently.
2324 */ 2391 */
2325static void tile_net_cleanup(void) 2392static void tile_net_cleanup(void)
2326{ 2393{
@@ -2331,8 +2398,8 @@ static void tile_net_cleanup(void)
2331 struct net_device *dev = tile_net_devs[i]; 2398 struct net_device *dev = tile_net_devs[i];
2332 struct tile_net_priv *priv = netdev_priv(dev); 2399 struct tile_net_priv *priv = netdev_priv(dev);
2333 unregister_netdev(dev); 2400 unregister_netdev(dev);
2334 finv_buffer(priv->epp_queue, PAGE_SIZE); 2401 finv_buffer(priv->eq, EQ_SIZE);
2335 free_page((unsigned long)priv->epp_queue); 2402 __free_pages(priv->eq_pages, EQ_ORDER);
2336 free_netdev(dev); 2403 free_netdev(dev);
2337 } 2404 }
2338 } 2405 }
@@ -2355,7 +2422,12 @@ static int tile_net_init_module(void)
2355} 2422}
2356 2423
2357 2424
2425module_init(tile_net_init_module);
2426module_exit(tile_net_cleanup);
2427
2428
2358#ifndef MODULE 2429#ifndef MODULE
2430
2359/* 2431/*
2360 * The "network_cpus" boot argument specifies the cpus that are dedicated 2432 * The "network_cpus" boot argument specifies the cpus that are dedicated
2361 * to handle ingress packets. 2433 * to handle ingress packets.
@@ -2391,8 +2463,5 @@ static int __init network_cpus_setup(char *str)
2391 return 0; 2463 return 0;
2392} 2464}
2393__setup("network_cpus=", network_cpus_setup); 2465__setup("network_cpus=", network_cpus_setup);
2394#endif
2395
2396 2466
2397module_init(tile_net_init_module); 2467#endif
2398module_exit(tile_net_cleanup);
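The hunk above only moves module_init()/module_exit() ahead of the built-in-only boot-argument block. For reference, a sketch of the __setup() shape used by the "network_cpus=" handler, with hypothetical names (cpulist_parse() accepts a list such as "1-4,7"):

static struct cpumask example_network_cpus;

static int __init example_cpus_setup(char *str)
{
        if (cpulist_parse(str, &example_network_cpus) != 0)
                pr_warning("Bad 'example_cpus' argument '%s'.\n", str);
        return 0;
}
__setup("example_cpus=", example_cpus_setup);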
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463cd8ec..ace6404e2fa 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -25,150 +25,9 @@
25 * Microchip Technology, 24C01A/02A/04A Data Sheet 25 * Microchip Technology, 24C01A/02A/04A Data Sheet
26 * available in PDF format from www.microchip.com 26 * available in PDF format from www.microchip.com
27 * 27 *
28 * Change History 28 ******************************************************************************/
29 * 29
30 * Tigran Aivazian <tigran@sco.com>: TLan_PciProbe() now uses 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 * new PCI BIOS interface.
32 * Alan Cox <alan@lxorguk.ukuu.org.uk>:
33 * Fixed the out of memory
34 * handling.
35 *
36 * Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
37 *
38 * v1.1 Dec 20, 1999 - Removed linux version checking
39 * Patch from Tigran Aivazian.
40 * - v1.1 includes Alan's SMP updates.
41 * - We still have problems on SMP though,
42 * but I'm looking into that.
43 *
44 * v1.2 Jan 02, 2000 - Hopefully fixed the SMP deadlock.
45 * - Removed dependency of HZ being 100.
46 * - We now allow higher priority timers to
47 * overwrite timers like TLAN_TIMER_ACTIVITY
48 * Patch from John Cagle <john.cagle@compaq.com>.
49 * - Fixed a few compiler warnings.
50 *
51 * v1.3 Feb 04, 2000 - Fixed the remaining HZ issues.
52 * - Removed call to pci_present().
53 * - Removed SA_INTERRUPT flag from irq handler.
54 * - Added __init and __initdata to reduce resisdent
55 * code size.
56 * - Driver now uses module_init/module_exit.
57 * - Rewrote init_module and tlan_probe to
58 * share a lot more code. We now use tlan_probe
59 * with builtin and module driver.
60 * - Driver ported to new net API.
61 * - tlan.txt has been reworked to reflect current
62 * driver (almost)
63 * - Other minor stuff
64 *
65 * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
66 * network cleanup in 2.3.43pre7 (Tigran & myself)
67 * - Minor stuff.
68 *
69 * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver
70 * if no cable/link were present.
71 * - Cosmetic changes.
72 * - TODO: Port completely to new PCI/DMA API
73 * Auto-Neg fallback.
74 *
75 * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't
76 * tested it though, as the kernel support is currently
77 * broken (2.3.99p4p3).
78 * - Updated tlan.txt accordingly.
79 * - Adjusted minimum/maximum frame length.
80 * - There is now a TLAN website up at
81 * http://hp.sourceforge.net/
82 *
83 * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
84 * reports PHY information when used with Donald
85 * Beckers userspace MII diagnostics utility.
86 *
87 * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
88 * - Added link information to Auto-Neg and forced
89 * modes. When NIC operates with auto-neg the driver
90 * will report Link speed & duplex modes as well as
91 * link partner abilities. When forced link is used,
92 * the driver will report status of the established
93 * link.
94 * Please read tlan.txt for additional information.
95 * - Removed call to check_region(), and used
96 * return value of request_region() instead.
97 *
98 * v1.8a May 28, 2000 - Minor updates.
99 *
100 * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
101 * - Updated with timer fixes from Andrew Morton.
102 * - Fixed module race in TLan_Open.
103 * - Added routine to monitor PHY status.
104 * - Added activity led support for Proliant devices.
105 *
106 * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
107 * like the Compaq NetFlex3/E.
108 * - Rewrote tlan_probe to better handle multiple
109 * bus probes. Probing and device setup is now
110 * done through TLan_Probe and TLan_init_one. Actual
111 * hardware probe is done with kernel API and
112 * TLan_EisaProbe.
113 * - Adjusted debug information for probing.
114 * - Fixed bug that would cause general debug information
115 * to be printed after driver removal.
116 * - Added transmit timeout handling.
117 * - Fixed OOM return values in tlan_probe.
118 * - Fixed possible mem leak in tlan_exit
119 * (now tlan_remove_one).
120 * - Fixed timer bug in TLan_phyMonitor.
121 * - This driver version is alpha quality, please
122 * send me any bug issues you may encounter.
123 *
124 * v1.11 Aug 31, 2000 - Do not try to register irq 0 if no irq line was
125 * set for EISA cards.
126 * - Added support for NetFlex3/E with nibble-rate
127 * 10Base-T PHY. This is untestet as I haven't got
128 * one of these cards.
129 * - Fixed timer being added twice.
130 * - Disabled PhyMonitoring by default as this is
131 * work in progress. Define MONITOR to enable it.
132 * - Now we don't display link info with PHYs that
133 * doesn't support it (level1).
134 * - Incresed tx_timeout beacuse of auto-neg.
135 * - Adjusted timers for forced speeds.
136 *
137 * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
138 *
139 * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
140 * when link can't be established.
141 * - Added the bbuf option as a kernel parameter.
142 * - Fixed ioaddr probe bug.
143 * - Fixed stupid deadlock with MII interrupts.
144 * - Added support for speed/duplex selection with
145 * multiple nics.
146 * - Added partly fix for TX Channel lockup with
147 * TLAN v1.0 silicon. This needs to be investigated
148 * further.
149 *
150 * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
151 * interrupt. Thanks goes to
152 * Adam Keys <adam@ti.com>
153 * Denis Beaudoin <dbeaudoin@ti.com>
154 * for providing the patch.
155 * - Fixed auto-neg output when using multiple
156 * adapters.
157 * - Converted to use new taskq interface.
158 *
159 * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
160 *
161 * Samuel Chessman <chessman@tux.org> New Maintainer!
162 *
163 * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
164 * 10T half duplex no loopback
165 * Thanks to Gunnar Eikman
166 *
167 * Sakari Ailus <sakari.ailus@iki.fi>:
168 *
169 * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
170 *
171 *******************************************************************************/
172 31
173#include <linux/module.h> 32#include <linux/module.h>
174#include <linux/init.h> 33#include <linux/init.h>
@@ -185,13 +44,11 @@
185 44
186#include "tlan.h" 45#include "tlan.h"
187 46
188typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
189
190 47
191/* For removing EISA devices */ 48/* For removing EISA devices */
192static struct net_device *TLan_Eisa_Devices; 49static struct net_device *tlan_eisa_devices;
193 50
194static int TLanDevicesInstalled; 51static int tlan_devices_installed;
195 52
196/* Set speed, duplex and aui settings */ 53/* Set speed, duplex and aui settings */
197static int aui[MAX_TLAN_BOARDS]; 54static int aui[MAX_TLAN_BOARDS];
@@ -202,8 +59,9 @@ module_param_array(aui, int, NULL, 0);
202module_param_array(duplex, int, NULL, 0); 59module_param_array(duplex, int, NULL, 0);
203module_param_array(speed, int, NULL, 0); 60module_param_array(speed, int, NULL, 0);
204MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); 61MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
205MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); 62MODULE_PARM_DESC(duplex,
206MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)"); 63 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
64MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
207 65
208MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); 66MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
209MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); 67MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
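The reflowed MODULE_PARM_DESC strings above (including the "speen" to "speed" typo fix) document array parameters, which let a single module take per-adapter settings. Sketch of the pattern with hypothetical names (EXAMPLE_MAX_BOARDS stands in for MAX_TLAN_BOARDS):

#include <linux/module.h>

#define EXAMPLE_MAX_BOARDS 8

static int example_speed[EXAMPLE_MAX_BOARDS];   /* e.g. example_speed=100,10 */
module_param_array(example_speed, int, NULL, 0);
MODULE_PARM_DESC(example_speed, "port speed per adapter (0-auto, 10, 100)");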
@@ -218,139 +76,144 @@ static int debug;
218module_param(debug, int, 0); 76module_param(debug, int, 0);
219MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); 77MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
220 78
221static const char TLanSignature[] = "TLAN"; 79static const char tlan_signature[] = "TLAN";
222static const char tlan_banner[] = "ThunderLAN driver v1.15a\n"; 80static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
223static int tlan_have_pci; 81static int tlan_have_pci;
224static int tlan_have_eisa; 82static int tlan_have_eisa;
225 83
226static const char *media[] = { 84static const char * const media[] = {
227 "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ", 85 "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
228 "100baseTx-FD", "100baseT4", NULL 86 "100BaseTx-FD", "100BaseT4", NULL
229}; 87};
230 88
231static struct board { 89static struct board {
232 const char *deviceLabel; 90 const char *device_label;
233 u32 flags; 91 u32 flags;
234 u16 addrOfs; 92 u16 addr_ofs;
235} board_info[] = { 93} board_info[] = {
236 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 94 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
237 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 95 { "Compaq Netelligent 10/100 TX PCI UTP",
96 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
238 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 97 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
239 { "Compaq NetFlex-3/P", 98 { "Compaq NetFlex-3/P",
240 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 99 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
241 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 100 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
242 { "Compaq Netelligent Integrated 10/100 TX UTP", 101 { "Compaq Netelligent Integrated 10/100 TX UTP",
243 TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 102 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
244 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, 103 { "Compaq Netelligent Dual 10/100 TX PCI UTP",
245 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, 104 TLAN_ADAPTER_NONE, 0x83 },
105 { "Compaq Netelligent 10/100 TX Embedded UTP",
106 TLAN_ADAPTER_NONE, 0x83 },
246 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, 107 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
247 { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 }, 108 { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
248 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, 109 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
249 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 110 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
250 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, 111 { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
251 { "Compaq NetFlex-3/E", 112 { "Compaq NetFlex-3/E",
252 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 113 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
253 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 114 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 115 { "Compaq NetFlex-3/E",
116 TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
255}; 117};
256 118
257static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = { 119static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, 120 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, 122 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
262 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I, 124 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
263 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
264 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER, 126 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
265 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, 127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
266 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B, 128 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
267 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
268 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI, 130 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
269 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, 131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
270 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D, 132 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
271 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, 133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
272 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I, 134 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
273 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, 135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
274 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183, 136 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
275 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
276 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325, 138 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
277 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, 139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
278 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326, 140 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
279 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, 141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
280 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100, 142 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
281 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, 143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
282 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2, 144 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
283 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, 145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
284 { 0,} 146 { 0,}
285}; 147};
286MODULE_DEVICE_TABLE(pci, tlan_pci_tbl); 148MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
287 149
288static void TLan_EisaProbe( void ); 150static void tlan_eisa_probe(void);
289static void TLan_Eisa_Cleanup( void ); 151static void tlan_eisa_cleanup(void);
290static int TLan_Init( struct net_device * ); 152static int tlan_init(struct net_device *);
291static int TLan_Open( struct net_device *dev ); 153static int tlan_open(struct net_device *dev);
292static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *); 154static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
293static irqreturn_t TLan_HandleInterrupt( int, void *); 155static irqreturn_t tlan_handle_interrupt(int, void *);
294static int TLan_Close( struct net_device *); 156static int tlan_close(struct net_device *);
295static struct net_device_stats *TLan_GetStats( struct net_device *); 157static struct net_device_stats *tlan_get_stats(struct net_device *);
296static void TLan_SetMulticastList( struct net_device *); 158static void tlan_set_multicast_list(struct net_device *);
297static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 159static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
298static int TLan_probe1( struct pci_dev *pdev, long ioaddr, 160static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
299 int irq, int rev, const struct pci_device_id *ent); 161 int irq, int rev, const struct pci_device_id *ent);
300static void TLan_tx_timeout( struct net_device *dev); 162static void tlan_tx_timeout(struct net_device *dev);
301static void TLan_tx_timeout_work(struct work_struct *work); 163static void tlan_tx_timeout_work(struct work_struct *work);
302static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 164static int tlan_init_one(struct pci_dev *pdev,
303 165 const struct pci_device_id *ent);
304static u32 TLan_HandleTxEOF( struct net_device *, u16 ); 166
305static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); 167static u32 tlan_handle_tx_eof(struct net_device *, u16);
306static u32 TLan_HandleRxEOF( struct net_device *, u16 ); 168static u32 tlan_handle_stat_overflow(struct net_device *, u16);
307static u32 TLan_HandleDummy( struct net_device *, u16 ); 169static u32 tlan_handle_rx_eof(struct net_device *, u16);
308static u32 TLan_HandleTxEOC( struct net_device *, u16 ); 170static u32 tlan_handle_dummy(struct net_device *, u16);
309static u32 TLan_HandleStatusCheck( struct net_device *, u16 ); 171static u32 tlan_handle_tx_eoc(struct net_device *, u16);
310static u32 TLan_HandleRxEOC( struct net_device *, u16 ); 172static u32 tlan_handle_status_check(struct net_device *, u16);
311 173static u32 tlan_handle_rx_eoc(struct net_device *, u16);
312static void TLan_Timer( unsigned long ); 174
313 175static void tlan_timer(unsigned long);
314static void TLan_ResetLists( struct net_device * ); 176
315static void TLan_FreeLists( struct net_device * ); 177static void tlan_reset_lists(struct net_device *);
316static void TLan_PrintDio( u16 ); 178static void tlan_free_lists(struct net_device *);
317static void TLan_PrintList( TLanList *, char *, int ); 179static void tlan_print_dio(u16);
318static void TLan_ReadAndClearStats( struct net_device *, int ); 180static void tlan_print_list(struct tlan_list *, char *, int);
319static void TLan_ResetAdapter( struct net_device * ); 181static void tlan_read_and_clear_stats(struct net_device *, int);
320static void TLan_FinishReset( struct net_device * ); 182static void tlan_reset_adapter(struct net_device *);
321static void TLan_SetMac( struct net_device *, int areg, char *mac ); 183static void tlan_finish_reset(struct net_device *);
322 184static void tlan_set_mac(struct net_device *, int areg, char *mac);
323static void TLan_PhyPrint( struct net_device * ); 185
324static void TLan_PhyDetect( struct net_device * ); 186static void tlan_phy_print(struct net_device *);
325static void TLan_PhyPowerDown( struct net_device * ); 187static void tlan_phy_detect(struct net_device *);
326static void TLan_PhyPowerUp( struct net_device * ); 188static void tlan_phy_power_down(struct net_device *);
327static void TLan_PhyReset( struct net_device * ); 189static void tlan_phy_power_up(struct net_device *);
328static void TLan_PhyStartLink( struct net_device * ); 190static void tlan_phy_reset(struct net_device *);
329static void TLan_PhyFinishAutoNeg( struct net_device * ); 191static void tlan_phy_start_link(struct net_device *);
192static void tlan_phy_finish_auto_neg(struct net_device *);
330#ifdef MONITOR 193#ifdef MONITOR
331static void TLan_PhyMonitor( struct net_device * ); 194static void tlan_phy_monitor(struct net_device *);
332#endif 195#endif
333 196
334/* 197/*
335static int TLan_PhyNop( struct net_device * ); 198 static int tlan_phy_nop(struct net_device *);
336static int TLan_PhyInternalCheck( struct net_device * ); 199 static int tlan_phy_internal_check(struct net_device *);
337static int TLan_PhyInternalService( struct net_device * ); 200 static int tlan_phy_internal_service(struct net_device *);
338static int TLan_PhyDp83840aCheck( struct net_device * ); 201 static int tlan_phy_dp83840a_check(struct net_device *);
339*/ 202*/
340 203
341static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); 204static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
342static void TLan_MiiSendData( u16, u32, unsigned ); 205static void tlan_mii_send_data(u16, u32, unsigned);
343static void TLan_MiiSync( u16 ); 206static void tlan_mii_sync(u16);
344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); 207static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
345 208
346static void TLan_EeSendStart( u16 ); 209static void tlan_ee_send_start(u16);
347static int TLan_EeSendByte( u16, u8, int ); 210static int tlan_ee_send_byte(u16, u8, int);
348static void TLan_EeReceiveByte( u16, u8 *, int ); 211static void tlan_ee_receive_byte(u16, u8 *, int);
349static int TLan_EeReadByte( struct net_device *, u8, u8 * ); 212static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
350 213
351 214
352static inline void 215static inline void
353TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) 216tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
354{ 217{
355 unsigned long addr = (unsigned long)skb; 218 unsigned long addr = (unsigned long)skb;
356 tag->buffer[9].address = addr; 219 tag->buffer[9].address = addr;
@@ -358,7 +221,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
358} 221}
359 222
360static inline struct sk_buff * 223static inline struct sk_buff *
361TLan_GetSKB( const struct tlan_list_tag *tag) 224tlan_get_skb(const struct tlan_list *tag)
362{ 225{
363 unsigned long addr; 226 unsigned long addr;
364 227
@@ -367,50 +230,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
367 return (struct sk_buff *) addr; 230 return (struct sk_buff *) addr;
368} 231}
369 232
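[Editor's note: the helper pair above parks the sk_buff pointer in an otherwise-unused buffer descriptor slot (buffer[9].address), so the completion path can recover the skb without a separate lookup table. A minimal stand-alone sketch of that round trip follows; fake_list and fake_skb are cut-down stand-ins for illustration, not the driver's real structures, and the trick only works because the address field is at least pointer-sized.]

	#include <assert.h>
	#include <stdint.h>

	struct buf_desc { uint32_t count; unsigned long address; };
	struct fake_list { struct buf_desc buffer[10]; };  /* stand-in for struct tlan_list */
	struct fake_skb { int len; };                      /* stand-in for struct sk_buff */

	static void stash_skb(struct fake_list *tag, struct fake_skb *skb)
	{
		/* the pointer round-trips through an unsigned long, as above */
		tag->buffer[9].address = (unsigned long)skb;
	}

	static struct fake_skb *fetch_skb(const struct fake_list *tag)
	{
		return (struct fake_skb *)tag->buffer[9].address;
	}

	int main(void)
	{
		struct fake_list list = { { { 0, 0 } } };
		struct fake_skb skb = { 64 };

		stash_skb(&list, &skb);
		assert(fetch_skb(&list) == &skb);  /* pointer survives the round trip */
		return 0;
	}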
370 233static u32
371static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { 234(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
372 NULL, 235 NULL,
373 TLan_HandleTxEOF, 236 tlan_handle_tx_eof,
374 TLan_HandleStatOverflow, 237 tlan_handle_stat_overflow,
375 TLan_HandleRxEOF, 238 tlan_handle_rx_eof,
376 TLan_HandleDummy, 239 tlan_handle_dummy,
377 TLan_HandleTxEOC, 240 tlan_handle_tx_eoc,
378 TLan_HandleStatusCheck, 241 tlan_handle_status_check,
379 TLan_HandleRxEOC 242 tlan_handle_rx_eoc
380}; 243};
381 244
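[Editor's note: tlan_int_vector is a classic jump table — the chip's 3-bit interrupt-type field indexes straight into the array, and slot 0 stays NULL because type 0 means no interrupt is pending. A compilable sketch of the same dispatch pattern; the 0x001c mask value and the handler body are assumptions for illustration, not taken from the header.]

	#include <stdio.h>
	#include <stdint.h>

	#define NUM_INTS 8

	typedef uint32_t (*int_handler)(void *dev, uint16_t host_int);

	static uint32_t handle_tx_eof(void *dev, uint16_t host_int)
	{
		(void)dev; (void)host_int;
		return 1;	/* non-zero return asks the caller to ack */
	}

	/* slot 0 is NULL on purpose: type 0 encodes "no interrupt" */
	static int_handler int_vector[NUM_INTS] = { NULL, handle_tx_eof };

	int main(void)
	{
		uint16_t host_int = 1 << 2;	/* pretend the chip latched type 1 */
		unsigned int type = (host_int & 0x001c) >> 2;

		if (type && int_vector[type])
			printf("ack = %u\n",
			       (unsigned)int_vector[type](NULL, host_int));
		return 0;
	}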
382static inline void 245static inline void
383TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type ) 246tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
384{ 247{
385 TLanPrivateInfo *priv = netdev_priv(dev); 248 struct tlan_priv *priv = netdev_priv(dev);
386 unsigned long flags = 0; 249 unsigned long flags = 0;
387 250
388 if (!in_irq()) 251 if (!in_irq())
389 spin_lock_irqsave(&priv->lock, flags); 252 spin_lock_irqsave(&priv->lock, flags);
390 if ( priv->timer.function != NULL && 253 if (priv->timer.function != NULL &&
391 priv->timerType != TLAN_TIMER_ACTIVITY ) { 254 priv->timer_type != TLAN_TIMER_ACTIVITY) {
392 if (!in_irq()) 255 if (!in_irq())
393 spin_unlock_irqrestore(&priv->lock, flags); 256 spin_unlock_irqrestore(&priv->lock, flags);
394 return; 257 return;
395 } 258 }
396 priv->timer.function = TLan_Timer; 259 priv->timer.function = tlan_timer;
397 if (!in_irq()) 260 if (!in_irq())
398 spin_unlock_irqrestore(&priv->lock, flags); 261 spin_unlock_irqrestore(&priv->lock, flags);
399 262
400 priv->timer.data = (unsigned long) dev; 263 priv->timer.data = (unsigned long) dev;
401 priv->timerSetAt = jiffies; 264 priv->timer_set_at = jiffies;
402 priv->timerType = type; 265 priv->timer_type = type;
403 mod_timer(&priv->timer, jiffies + ticks); 266 mod_timer(&priv->timer, jiffies + ticks);
404 267
405} /* TLan_SetTimer */ 268}
406 269
407 270
408/***************************************************************************** 271/*****************************************************************************
409****************************************************************************** 272******************************************************************************
410 273
411 ThunderLAN Driver Primary Functions 274ThunderLAN driver primary functions
412 275
413 These functions are more or less common to all Linux network drivers. 276these functions are more or less common to all linux network drivers.
414 277
415****************************************************************************** 278******************************************************************************
416*****************************************************************************/ 279*****************************************************************************/
@@ -419,56 +282,124 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
419 282
420 283
421 284
422 /*************************************************************** 285/***************************************************************
423 * tlan_remove_one 286 * tlan_remove_one
424 * 287 *
425 * Returns: 288 * Returns:
426 * Nothing 289 * Nothing
427 * Parms: 290 * Parms:
428 * None 291 * None
429 * 292 *
430 * Goes through the TLanDevices list and frees the device 293 * Goes through the TLanDevices list and frees the device
431 * structs and memory associated with each device (lists 294 * structs and memory associated with each device (lists
 432  *	and buffers).  It also ureserves the IO port regions      295  *	and buffers). it also unreserves the IO port regions
433 * associated with this device. 296 * associated with this device.
434 * 297 *
435 **************************************************************/ 298 **************************************************************/
436 299
437 300
438static void __devexit tlan_remove_one( struct pci_dev *pdev) 301static void __devexit tlan_remove_one(struct pci_dev *pdev)
439{ 302{
440 struct net_device *dev = pci_get_drvdata( pdev ); 303 struct net_device *dev = pci_get_drvdata(pdev);
441 TLanPrivateInfo *priv = netdev_priv(dev); 304 struct tlan_priv *priv = netdev_priv(dev);
442 305
443 unregister_netdev( dev ); 306 unregister_netdev(dev);
444 307
445 if ( priv->dmaStorage ) { 308 if (priv->dma_storage) {
446 pci_free_consistent(priv->pciDev, 309 pci_free_consistent(priv->pci_dev,
447 priv->dmaSize, priv->dmaStorage, 310 priv->dma_size, priv->dma_storage,
448 priv->dmaStorageDMA ); 311 priv->dma_storage_dma);
449 } 312 }
450 313
451#ifdef CONFIG_PCI 314#ifdef CONFIG_PCI
452 pci_release_regions(pdev); 315 pci_release_regions(pdev);
453#endif 316#endif
454 317
455 free_netdev( dev ); 318 free_netdev(dev);
456 319
457 pci_set_drvdata( pdev, NULL ); 320 pci_set_drvdata(pdev, NULL);
458} 321}
459 322
323static void tlan_start(struct net_device *dev)
324{
325 tlan_reset_lists(dev);
326 /* NOTE: It might not be necessary to read the stats before a
327 reset if you don't care what the values are.
328 */
329 tlan_read_and_clear_stats(dev, TLAN_IGNORE);
330 tlan_reset_adapter(dev);
331 netif_wake_queue(dev);
332}
333
334static void tlan_stop(struct net_device *dev)
335{
336 struct tlan_priv *priv = netdev_priv(dev);
337
338 tlan_read_and_clear_stats(dev, TLAN_RECORD);
339 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
340 /* Reset and power down phy */
341 tlan_reset_adapter(dev);
342 if (priv->timer.function != NULL) {
343 del_timer_sync(&priv->timer);
344 priv->timer.function = NULL;
345 }
346}
347
348#ifdef CONFIG_PM
349
350static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
351{
352 struct net_device *dev = pci_get_drvdata(pdev);
353
354 if (netif_running(dev))
355 tlan_stop(dev);
356
357 netif_device_detach(dev);
358 pci_save_state(pdev);
359 pci_disable_device(pdev);
360 pci_wake_from_d3(pdev, false);
361 pci_set_power_state(pdev, PCI_D3hot);
362
363 return 0;
364}
365
366static int tlan_resume(struct pci_dev *pdev)
367{
368 struct net_device *dev = pci_get_drvdata(pdev);
369
370 pci_set_power_state(pdev, PCI_D0);
371 pci_restore_state(pdev);
372 pci_enable_wake(pdev, 0, 0);
373 netif_device_attach(dev);
374
375 if (netif_running(dev))
376 tlan_start(dev);
377
378 return 0;
379}
380
381#else /* CONFIG_PM */
382
383#define tlan_suspend NULL
384#define tlan_resume NULL
385
386#endif /* CONFIG_PM */
387
388
460static struct pci_driver tlan_driver = { 389static struct pci_driver tlan_driver = {
461 .name = "tlan", 390 .name = "tlan",
462 .id_table = tlan_pci_tbl, 391 .id_table = tlan_pci_tbl,
463 .probe = tlan_init_one, 392 .probe = tlan_init_one,
464 .remove = __devexit_p(tlan_remove_one), 393 .remove = __devexit_p(tlan_remove_one),
394 .suspend = tlan_suspend,
395 .resume = tlan_resume,
465}; 396};
466 397
467static int __init tlan_probe(void) 398static int __init tlan_probe(void)
468{ 399{
469 int rc = -ENODEV; 400 int rc = -ENODEV;
470 401
471 printk(KERN_INFO "%s", tlan_banner); 402 pr_info("%s", tlan_banner);
472 403
473 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); 404 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
474 405
@@ -477,18 +408,18 @@ static int __init tlan_probe(void)
477 rc = pci_register_driver(&tlan_driver); 408 rc = pci_register_driver(&tlan_driver);
478 409
479 if (rc != 0) { 410 if (rc != 0) {
480 printk(KERN_ERR "TLAN: Could not register pci driver.\n"); 411 pr_err("Could not register pci driver\n");
481 goto err_out_pci_free; 412 goto err_out_pci_free;
482 } 413 }
483 414
484 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); 415 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
485 TLan_EisaProbe(); 416 tlan_eisa_probe();
486 417
487 printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n", 418 pr_info("%d device%s installed, PCI: %d EISA: %d\n",
488 TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s", 419 tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
489 tlan_have_pci, tlan_have_eisa); 420 tlan_have_pci, tlan_have_eisa);
490 421
491 if (TLanDevicesInstalled == 0) { 422 if (tlan_devices_installed == 0) {
492 rc = -ENODEV; 423 rc = -ENODEV;
493 goto err_out_pci_unreg; 424 goto err_out_pci_unreg;
494 } 425 }
@@ -501,39 +432,39 @@ err_out_pci_free:
501} 432}
502 433
503 434
504static int __devinit tlan_init_one( struct pci_dev *pdev, 435static int __devinit tlan_init_one(struct pci_dev *pdev,
505 const struct pci_device_id *ent) 436 const struct pci_device_id *ent)
506{ 437{
507 return TLan_probe1( pdev, -1, -1, 0, ent); 438 return tlan_probe1(pdev, -1, -1, 0, ent);
508} 439}
509 440
510 441
511/* 442/*
512 *************************************************************** 443***************************************************************
513 * tlan_probe1 444* tlan_probe1
514 * 445*
515 * Returns: 446* Returns:
516 * 0 on success, error code on error 447* 0 on success, error code on error
517 * Parms: 448* Parms:
518 * none 449* none
519 * 450*
520 * The name is lower case to fit in with all the rest of 451* The name is lower case to fit in with all the rest of
521 * the netcard_probe names. This function looks for 452* the netcard_probe names. This function looks for
522 * another TLan based adapter, setting it up with the 453* another TLan based adapter, setting it up with the
523 * allocated device struct if one is found. 454* allocated device struct if one is found.
524 * tlan_probe has been ported to the new net API and 455* tlan_probe has been ported to the new net API and
525 * now allocates its own device structure. This function 456* now allocates its own device structure. This function
526 * is also used by modules. 457* is also used by modules.
527 * 458*
528 **************************************************************/ 459**************************************************************/
529 460
530static int __devinit TLan_probe1(struct pci_dev *pdev, 461static int __devinit tlan_probe1(struct pci_dev *pdev,
531 long ioaddr, int irq, int rev, 462 long ioaddr, int irq, int rev,
532 const struct pci_device_id *ent ) 463 const struct pci_device_id *ent)
533{ 464{
534 465
535 struct net_device *dev; 466 struct net_device *dev;
536 TLanPrivateInfo *priv; 467 struct tlan_priv *priv;
537 u16 device_id; 468 u16 device_id;
538 int reg, rc = -ENODEV; 469 int reg, rc = -ENODEV;
539 470
@@ -543,17 +474,17 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
543 if (rc) 474 if (rc)
544 return rc; 475 return rc;
545 476
546 rc = pci_request_regions(pdev, TLanSignature); 477 rc = pci_request_regions(pdev, tlan_signature);
547 if (rc) { 478 if (rc) {
548 printk(KERN_ERR "TLAN: Could not reserve IO regions\n"); 479 pr_err("Could not reserve IO regions\n");
549 goto err_out; 480 goto err_out;
550 } 481 }
551 } 482 }
552#endif /* CONFIG_PCI */ 483#endif /* CONFIG_PCI */
553 484
554 dev = alloc_etherdev(sizeof(TLanPrivateInfo)); 485 dev = alloc_etherdev(sizeof(struct tlan_priv));
555 if (dev == NULL) { 486 if (dev == NULL) {
556 printk(KERN_ERR "TLAN: Could not allocate memory for device.\n"); 487 pr_err("Could not allocate memory for device\n");
557 rc = -ENOMEM; 488 rc = -ENOMEM;
558 goto err_out_regions; 489 goto err_out_regions;
559 } 490 }
@@ -561,38 +492,39 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
561 492
562 priv = netdev_priv(dev); 493 priv = netdev_priv(dev);
563 494
564 priv->pciDev = pdev; 495 priv->pci_dev = pdev;
565 priv->dev = dev; 496 priv->dev = dev;
566 497
567 /* Is this a PCI device? */ 498 /* Is this a PCI device? */
568 if (pdev) { 499 if (pdev) {
569 u32 pci_io_base = 0; 500 u32 pci_io_base = 0;
570 501
571 priv->adapter = &board_info[ent->driver_data]; 502 priv->adapter = &board_info[ent->driver_data];
572 503
573 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 504 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
574 if (rc) { 505 if (rc) {
575 printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n"); 506 pr_err("No suitable PCI mapping available\n");
576 goto err_out_free_dev; 507 goto err_out_free_dev;
577 } 508 }
578 509
579 for ( reg= 0; reg <= 5; reg ++ ) { 510 for (reg = 0; reg <= 5; reg++) {
580 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { 511 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
581 pci_io_base = pci_resource_start(pdev, reg); 512 pci_io_base = pci_resource_start(pdev, reg);
582 TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n", 513 TLAN_DBG(TLAN_DEBUG_GNRL,
583 pci_io_base); 514 "IO mapping is available at %x.\n",
515 pci_io_base);
584 break; 516 break;
585 } 517 }
586 } 518 }
587 if (!pci_io_base) { 519 if (!pci_io_base) {
588 printk(KERN_ERR "TLAN: No IO mappings available\n"); 520 pr_err("No IO mappings available\n");
589 rc = -EIO; 521 rc = -EIO;
590 goto err_out_free_dev; 522 goto err_out_free_dev;
591 } 523 }
592 524
593 dev->base_addr = pci_io_base; 525 dev->base_addr = pci_io_base;
594 dev->irq = pdev->irq; 526 dev->irq = pdev->irq;
595 priv->adapterRev = pdev->revision; 527 priv->adapter_rev = pdev->revision;
596 pci_set_master(pdev); 528 pci_set_master(pdev);
597 pci_set_drvdata(pdev, dev); 529 pci_set_drvdata(pdev, dev);
598 530
@@ -602,11 +534,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
602 device_id = inw(ioaddr + EISA_ID2); 534 device_id = inw(ioaddr + EISA_ID2);
603 priv->is_eisa = 1; 535 priv->is_eisa = 1;
604 if (device_id == 0x20F1) { 536 if (device_id == 0x20F1) {
605 priv->adapter = &board_info[13]; /* NetFlex-3/E */ 537 priv->adapter = &board_info[13]; /* NetFlex-3/E */
606 priv->adapterRev = 23; /* TLAN 2.3 */ 538 priv->adapter_rev = 23; /* TLAN 2.3 */
607 } else { 539 } else {
608 priv->adapter = &board_info[14]; 540 priv->adapter = &board_info[14];
609 priv->adapterRev = 10; /* TLAN 1.0 */ 541 priv->adapter_rev = 10; /* TLAN 1.0 */
610 } 542 }
611 dev->base_addr = ioaddr; 543 dev->base_addr = ioaddr;
612 dev->irq = irq; 544 dev->irq = irq;
@@ -620,11 +552,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
620 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 552 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
621 : (dev->mem_start & 0x18) >> 3; 553 : (dev->mem_start & 0x18) >> 3;
622 554
623 if (priv->speed == 0x1) { 555 if (priv->speed == 0x1)
624 priv->speed = TLAN_SPEED_10; 556 priv->speed = TLAN_SPEED_10;
625 } else if (priv->speed == 0x2) { 557 else if (priv->speed == 0x2)
626 priv->speed = TLAN_SPEED_100; 558 priv->speed = TLAN_SPEED_100;
627 } 559
628 debug = priv->debug = dev->mem_end; 560 debug = priv->debug = dev->mem_end;
629 } else { 561 } else {
630 priv->aui = aui[boards_found]; 562 priv->aui = aui[boards_found];
@@ -635,46 +567,45 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
635 567
636 /* This will be used when we get an adapter error from 568 /* This will be used when we get an adapter error from
637 * within our irq handler */ 569 * within our irq handler */
638 INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); 570 INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
639 571
640 spin_lock_init(&priv->lock); 572 spin_lock_init(&priv->lock);
641 573
642 rc = TLan_Init(dev); 574 rc = tlan_init(dev);
643 if (rc) { 575 if (rc) {
644 printk(KERN_ERR "TLAN: Could not set up device.\n"); 576 pr_err("Could not set up device\n");
645 goto err_out_free_dev; 577 goto err_out_free_dev;
646 } 578 }
647 579
648 rc = register_netdev(dev); 580 rc = register_netdev(dev);
649 if (rc) { 581 if (rc) {
650 printk(KERN_ERR "TLAN: Could not register device.\n"); 582 pr_err("Could not register device\n");
651 goto err_out_uninit; 583 goto err_out_uninit;
652 } 584 }
653 585
654 586
655 TLanDevicesInstalled++; 587 tlan_devices_installed++;
656 boards_found++; 588 boards_found++;
657 589
658 /* pdev is NULL if this is an EISA device */ 590 /* pdev is NULL if this is an EISA device */
659 if (pdev) 591 if (pdev)
660 tlan_have_pci++; 592 tlan_have_pci++;
661 else { 593 else {
662 priv->nextDevice = TLan_Eisa_Devices; 594 priv->next_device = tlan_eisa_devices;
663 TLan_Eisa_Devices = dev; 595 tlan_eisa_devices = dev;
664 tlan_have_eisa++; 596 tlan_have_eisa++;
665 } 597 }
666 598
667 printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n", 599 netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
668 dev->name, 600 (int)dev->irq,
669 (int) dev->irq, 601 (int)dev->base_addr,
670 (int) dev->base_addr, 602 priv->adapter->device_label,
671 priv->adapter->deviceLabel, 603 priv->adapter_rev);
672 priv->adapterRev);
673 return 0; 604 return 0;
674 605
675err_out_uninit: 606err_out_uninit:
676 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, 607 pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
677 priv->dmaStorageDMA ); 608 priv->dma_storage_dma);
678err_out_free_dev: 609err_out_free_dev:
679 free_netdev(dev); 610 free_netdev(dev);
680err_out_regions: 611err_out_regions:
@@ -689,22 +620,23 @@ err_out:
689} 620}
690 621
691 622
692static void TLan_Eisa_Cleanup(void) 623static void tlan_eisa_cleanup(void)
693{ 624{
694 struct net_device *dev; 625 struct net_device *dev;
695 TLanPrivateInfo *priv; 626 struct tlan_priv *priv;
696 627
697 while( tlan_have_eisa ) { 628 while (tlan_have_eisa) {
698 dev = TLan_Eisa_Devices; 629 dev = tlan_eisa_devices;
699 priv = netdev_priv(dev); 630 priv = netdev_priv(dev);
700 if (priv->dmaStorage) { 631 if (priv->dma_storage) {
701 pci_free_consistent(priv->pciDev, priv->dmaSize, 632 pci_free_consistent(priv->pci_dev, priv->dma_size,
702 priv->dmaStorage, priv->dmaStorageDMA ); 633 priv->dma_storage,
634 priv->dma_storage_dma);
703 } 635 }
704 release_region( dev->base_addr, 0x10); 636 release_region(dev->base_addr, 0x10);
705 unregister_netdev( dev ); 637 unregister_netdev(dev);
706 TLan_Eisa_Devices = priv->nextDevice; 638 tlan_eisa_devices = priv->next_device;
707 free_netdev( dev ); 639 free_netdev(dev);
708 tlan_have_eisa--; 640 tlan_have_eisa--;
709 } 641 }
710} 642}
@@ -715,7 +647,7 @@ static void __exit tlan_exit(void)
715 pci_unregister_driver(&tlan_driver); 647 pci_unregister_driver(&tlan_driver);
716 648
717 if (tlan_have_eisa) 649 if (tlan_have_eisa)
718 TLan_Eisa_Cleanup(); 650 tlan_eisa_cleanup();
719 651
720} 652}
721 653
@@ -726,24 +658,24 @@ module_exit(tlan_exit);
726 658
727 659
728 660
729 /************************************************************** 661/**************************************************************
730 * TLan_EisaProbe 662 * tlan_eisa_probe
731 * 663 *
732 * Returns: 0 on success, 1 otherwise 664 * Returns: 0 on success, 1 otherwise
733 * 665 *
734 * Parms: None 666 * Parms: None
735 * 667 *
736 * 668 *
 737  *	This functions probes for EISA devices and calls          669  *	This function probes for EISA devices and calls
738 * TLan_probe1 when one is found. 670 * TLan_probe1 when one is found.
739 * 671 *
740 *************************************************************/ 672 *************************************************************/
741 673
742static void __init TLan_EisaProbe (void) 674static void __init tlan_eisa_probe(void)
743{ 675{
744 long ioaddr; 676 long ioaddr;
745 int rc = -ENODEV; 677 int rc = -ENODEV;
746 int irq; 678 int irq;
747 u16 device_id; 679 u16 device_id;
748 680
749 if (!EISA_bus) { 681 if (!EISA_bus) {
@@ -754,15 +686,16 @@ static void __init TLan_EisaProbe (void)
754 /* Loop through all slots of the EISA bus */ 686 /* Loop through all slots of the EISA bus */
755 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { 687 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
756 688
757 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 689 TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
758 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 690 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
759 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 691 TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
760 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 692 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
761 693
762 694
763 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", 695 TLAN_DBG(TLAN_DEBUG_PROBE,
764 (int) ioaddr); 696 "Probing for EISA adapter at IO: 0x%4x : ",
765 if (request_region(ioaddr, 0x10, TLanSignature) == NULL) 697 (int) ioaddr);
698 if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
766 goto out; 699 goto out;
767 700
768 if (inw(ioaddr + EISA_ID) != 0x110E) { 701 if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +705,324 @@ static void __init TLan_EisaProbe (void)
772 705
773 device_id = inw(ioaddr + EISA_ID2); 706 device_id = inw(ioaddr + EISA_ID2);
774 if (device_id != 0x20F1 && device_id != 0x40F1) { 707 if (device_id != 0x20F1 && device_id != 0x40F1) {
775 release_region (ioaddr, 0x10); 708 release_region(ioaddr, 0x10);
776 goto out; 709 goto out;
777 } 710 }
778 711
779 if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */ 712 /* check if adapter is enabled */
780 release_region (ioaddr, 0x10); 713 if (inb(ioaddr + EISA_CR) != 0x1) {
714 release_region(ioaddr, 0x10);
781 goto out2; 715 goto out2;
782 } 716 }
783 717
784 if (debug == 0x10) 718 if (debug == 0x10)
785 printk("Found one\n"); 719 pr_info("Found one\n");
786 720
787 721
788 /* Get irq from board */ 722 /* Get irq from board */
789 switch (inb(ioaddr + 0xCC0)) { 723 switch (inb(ioaddr + 0xcc0)) {
790 case(0x10): 724 case(0x10):
791 irq=5; 725 irq = 5;
792 break; 726 break;
793 case(0x20): 727 case(0x20):
794 irq=9; 728 irq = 9;
795 break; 729 break;
796 case(0x40): 730 case(0x40):
797 irq=10; 731 irq = 10;
798 break; 732 break;
799 case(0x80): 733 case(0x80):
800 irq=11; 734 irq = 11;
801 break; 735 break;
802 default: 736 default:
803 goto out; 737 goto out;
804 } 738 }
805 739
806 740
807 /* Setup the newly found eisa adapter */ 741 /* Setup the newly found eisa adapter */
808 rc = TLan_probe1( NULL, ioaddr, irq, 742 rc = tlan_probe1(NULL, ioaddr, irq,
809 12, NULL); 743 12, NULL);
810 continue; 744 continue;
811 745
812 out: 746out:
813 if (debug == 0x10) 747 if (debug == 0x10)
814 printk("None found\n"); 748 pr_info("None found\n");
815 continue; 749 continue;
816 750
817 out2: if (debug == 0x10) 751out2:
818 printk("Card found but it is not enabled, skipping\n"); 752 if (debug == 0x10)
819 continue; 753 pr_info("Card found but it is not enabled, skipping\n");
754 continue;
820 755
821 } 756 }
822 757
823} /* TLan_EisaProbe */ 758}
824 759
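[Editor's note: the 0x110E test above is the byte-swapped EISA manufacturer ID — inw() returns the two ID bytes little-endian, and the big-endian value 0x0E11 packs the three-letter vendor code "CPQ" (Compaq) in 5-bit fields. A small sketch of the decoding, assuming the standard EISA/PnP compressed-ID layout:]

	#include <stdio.h>
	#include <stdint.h>

	/* decode a compressed EISA ID (big-endian 16-bit value) into 3 letters */
	static void decode_eisa_id(uint16_t id, char out[4])
	{
		out[0] = ((id >> 10) & 0x1f) + 'A' - 1;
		out[1] = ((id >> 5) & 0x1f) + 'A' - 1;
		out[2] = (id & 0x1f) + 'A' - 1;
		out[3] = '\0';
	}

	int main(void)
	{
		uint16_t raw = 0x110E;			/* as read with inw() */
		uint16_t id = (raw << 8) | (raw >> 8);	/* swap to 0x0E11 */
		char name[4];

		decode_eisa_id(id, name);
		printf("%s\n", name);			/* prints "CPQ" */
		return 0;
	}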
825#ifdef CONFIG_NET_POLL_CONTROLLER 760#ifdef CONFIG_NET_POLL_CONTROLLER
826static void TLan_Poll(struct net_device *dev) 761static void tlan_poll(struct net_device *dev)
827{ 762{
828 disable_irq(dev->irq); 763 disable_irq(dev->irq);
829 TLan_HandleInterrupt(dev->irq, dev); 764 tlan_handle_interrupt(dev->irq, dev);
830 enable_irq(dev->irq); 765 enable_irq(dev->irq);
831} 766}
832#endif 767#endif
833 768
834static const struct net_device_ops TLan_netdev_ops = { 769static const struct net_device_ops tlan_netdev_ops = {
835 .ndo_open = TLan_Open, 770 .ndo_open = tlan_open,
836 .ndo_stop = TLan_Close, 771 .ndo_stop = tlan_close,
837 .ndo_start_xmit = TLan_StartTx, 772 .ndo_start_xmit = tlan_start_tx,
838 .ndo_tx_timeout = TLan_tx_timeout, 773 .ndo_tx_timeout = tlan_tx_timeout,
839 .ndo_get_stats = TLan_GetStats, 774 .ndo_get_stats = tlan_get_stats,
840 .ndo_set_multicast_list = TLan_SetMulticastList, 775 .ndo_set_multicast_list = tlan_set_multicast_list,
841 .ndo_do_ioctl = TLan_ioctl, 776 .ndo_do_ioctl = tlan_ioctl,
842 .ndo_change_mtu = eth_change_mtu, 777 .ndo_change_mtu = eth_change_mtu,
843 .ndo_set_mac_address = eth_mac_addr, 778 .ndo_set_mac_address = eth_mac_addr,
844 .ndo_validate_addr = eth_validate_addr, 779 .ndo_validate_addr = eth_validate_addr,
845#ifdef CONFIG_NET_POLL_CONTROLLER 780#ifdef CONFIG_NET_POLL_CONTROLLER
846 .ndo_poll_controller = TLan_Poll, 781 .ndo_poll_controller = tlan_poll,
847#endif 782#endif
848}; 783};
849 784
850 785
851 786
852 /*************************************************************** 787/***************************************************************
853 * TLan_Init 788 * tlan_init
854 * 789 *
855 * Returns: 790 * Returns:
856 * 0 on success, error code otherwise. 791 * 0 on success, error code otherwise.
857 * Parms: 792 * Parms:
858 * dev The structure of the device to be 793 * dev The structure of the device to be
859 * init'ed. 794 * init'ed.
860 * 795 *
861 * This function completes the initialization of the 796 * This function completes the initialization of the
862 * device structure and driver. It reserves the IO 797 * device structure and driver. It reserves the IO
863 * addresses, allocates memory for the lists and bounce 798 * addresses, allocates memory for the lists and bounce
864 * buffers, retrieves the MAC address from the eeprom 799 * buffers, retrieves the MAC address from the eeprom
 865  *	and assignes the device's methods.                        800  *	and assigns the device's methods.
866 * 801 *
867 **************************************************************/ 802 **************************************************************/
868 803
869static int TLan_Init( struct net_device *dev ) 804static int tlan_init(struct net_device *dev)
870{ 805{
871 int dma_size; 806 int dma_size;
872 int err; 807 int err;
873 int i; 808 int i;
874 TLanPrivateInfo *priv; 809 struct tlan_priv *priv;
875 810
876 priv = netdev_priv(dev); 811 priv = netdev_priv(dev);
877 812
878 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) 813 dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
879 * ( sizeof(TLanList) ); 814 * (sizeof(struct tlan_list));
880 priv->dmaStorage = pci_alloc_consistent(priv->pciDev, 815 priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
881 dma_size, &priv->dmaStorageDMA); 816 dma_size,
882 priv->dmaSize = dma_size; 817 &priv->dma_storage_dma);
818 priv->dma_size = dma_size;
883 819
884 if ( priv->dmaStorage == NULL ) { 820 if (priv->dma_storage == NULL) {
885 printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n", 821 pr_err("Could not allocate lists and buffers for %s\n",
886 dev->name ); 822 dev->name);
887 return -ENOMEM; 823 return -ENOMEM;
888 } 824 }
889 memset( priv->dmaStorage, 0, dma_size ); 825 memset(priv->dma_storage, 0, dma_size);
890 priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8); 826 priv->rx_list = (struct tlan_list *)
891 priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8); 827 ALIGN((unsigned long)priv->dma_storage, 8);
892 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; 828 priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
893 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; 829 priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
830 priv->tx_list_dma =
831 priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
894 832
895 err = 0; 833 err = 0;
896 for ( i = 0; i < 6 ; i++ ) 834 for (i = 0; i < 6 ; i++)
897 err |= TLan_EeReadByte( dev, 835 err |= tlan_ee_read_byte(dev,
898 (u8) priv->adapter->addrOfs + i, 836 (u8) priv->adapter->addr_ofs + i,
899 (u8 *) &dev->dev_addr[i] ); 837 (u8 *) &dev->dev_addr[i]);
900 if ( err ) { 838 if (err) {
901 printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n", 839 pr_err("%s: Error reading MAC from eeprom: %d\n",
902 dev->name, 840 dev->name, err);
903 err );
904 } 841 }
905 dev->addr_len = 6; 842 dev->addr_len = 6;
906 843
907 netif_carrier_off(dev); 844 netif_carrier_off(dev);
908 845
909 /* Device methods */ 846 /* Device methods */
910 dev->netdev_ops = &TLan_netdev_ops; 847 dev->netdev_ops = &tlan_netdev_ops;
911 dev->watchdog_timeo = TX_TIMEOUT; 848 dev->watchdog_timeo = TX_TIMEOUT;
912 849
913 return 0; 850 return 0;
914 851
915} /* TLan_Init */ 852}
916 853
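[Editor's note: tlan_init carves one coherent DMA allocation into the RX ring followed immediately by the TX ring, aligning the start to 8 bytes for the hardware's list pointers. A worked sketch of the same offset arithmetic; the ring counts and the 48-byte list size are assumptions for the printout, not values taken from the header.]

	#include <stdio.h>

	#define NUM_RX_LISTS	32	/* assumed TLAN_NUM_RX_LISTS */
	#define NUM_TX_LISTS	64	/* assumed TLAN_NUM_TX_LISTS */
	#define LIST_SIZE	48	/* assumed sizeof(struct tlan_list) */
	#define ALIGN8(x)	(((x) + 7UL) & ~7UL)

	int main(void)
	{
		unsigned long base = 0x1004;	/* pretend address from the allocator */
		unsigned long rx = ALIGN8(base);
		unsigned long tx = rx + (unsigned long)LIST_SIZE * NUM_RX_LISTS;

		printf("alloc size: %d bytes\n",
		       (NUM_RX_LISTS + NUM_TX_LISTS) * LIST_SIZE);
		printf("rx ring at 0x%lx, tx ring at 0x%lx\n", rx, tx);
		return 0;
	}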
917 854
918 855
919 856
920 /*************************************************************** 857/***************************************************************
921 * TLan_Open 858 * tlan_open
922 * 859 *
923 * Returns: 860 * Returns:
924 * 0 on success, error code otherwise. 861 * 0 on success, error code otherwise.
925 * Parms: 862 * Parms:
926 * dev Structure of device to be opened. 863 * dev Structure of device to be opened.
927 * 864 *
928 * This routine puts the driver and TLAN adapter in a 865 * This routine puts the driver and TLAN adapter in a
929 * state where it is ready to send and receive packets. 866 * state where it is ready to send and receive packets.
930 * It allocates the IRQ, resets and brings the adapter 867 * It allocates the IRQ, resets and brings the adapter
931 * out of reset, and allows interrupts. It also delays 868 * out of reset, and allows interrupts. It also delays
932 * the startup for autonegotiation or sends a Rx GO 869 * the startup for autonegotiation or sends a Rx GO
933 * command to the adapter, as appropriate. 870 * command to the adapter, as appropriate.
934 * 871 *
935 **************************************************************/ 872 **************************************************************/
936 873
937static int TLan_Open( struct net_device *dev ) 874static int tlan_open(struct net_device *dev)
938{ 875{
939 TLanPrivateInfo *priv = netdev_priv(dev); 876 struct tlan_priv *priv = netdev_priv(dev);
940 int err; 877 int err;
941 878
942 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION ); 879 priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
943 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED, 880 err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
944 dev->name, dev ); 881 dev->name, dev);
945 882
946 if ( err ) { 883 if (err) {
947 pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n", 884 netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
948 dev->name, dev->irq ); 885 dev->irq);
949 return err; 886 return err;
950 } 887 }
951 888
952 init_timer(&priv->timer); 889 init_timer(&priv->timer);
953 netif_start_queue(dev);
954 890
955 /* NOTE: It might not be necessary to read the stats before a 891 tlan_start(dev);
956 reset if you don't care what the values are.
957 */
958 TLan_ResetLists( dev );
959 TLan_ReadAndClearStats( dev, TLAN_IGNORE );
960 TLan_ResetAdapter( dev );
961 892
962 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", 893 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
963 dev->name, priv->tlanRev ); 894 dev->name, priv->tlan_rev);
964 895
965 return 0; 896 return 0;
966 897
967} /* TLan_Open */ 898}
968 899
969 900
970 901
971 /************************************************************** 902/**************************************************************
972 * TLan_ioctl 903 * tlan_ioctl
973 * 904 *
974 * Returns: 905 * Returns:
975 * 0 on success, error code otherwise 906 * 0 on success, error code otherwise
976 * Params: 907 * Params:
977 * dev structure of device to receive ioctl. 908 * dev structure of device to receive ioctl.
978 * 909 *
979 * rq ifreq structure to hold userspace data. 910 * rq ifreq structure to hold userspace data.
980 * 911 *
981 * cmd ioctl command. 912 * cmd ioctl command.
982 * 913 *
983 * 914 *
984 *************************************************************/ 915 *************************************************************/
985 916
986static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 917static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
987{ 918{
988 TLanPrivateInfo *priv = netdev_priv(dev); 919 struct tlan_priv *priv = netdev_priv(dev);
989 struct mii_ioctl_data *data = if_mii(rq); 920 struct mii_ioctl_data *data = if_mii(rq);
990 u32 phy = priv->phy[priv->phyNum]; 921 u32 phy = priv->phy[priv->phy_num];
991 922
992 if (!priv->phyOnline) 923 if (!priv->phy_online)
993 return -EAGAIN; 924 return -EAGAIN;
994 925
995 switch(cmd) { 926 switch (cmd) {
996 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 927 case SIOCGMIIPHY: /* get address of MII PHY in use. */
997 data->phy_id = phy; 928 data->phy_id = phy;
998 929
999 930
1000 case SIOCGMIIREG: /* Read MII PHY register. */ 931 case SIOCGMIIREG: /* read MII PHY register. */
1001 TLan_MiiReadReg(dev, data->phy_id & 0x1f, 932 tlan_mii_read_reg(dev, data->phy_id & 0x1f,
1002 data->reg_num & 0x1f, &data->val_out); 933 data->reg_num & 0x1f, &data->val_out);
1003 return 0; 934 return 0;
1004 935
1005 936
1006 case SIOCSMIIREG: /* Write MII PHY register. */ 937 case SIOCSMIIREG: /* write MII PHY register. */
1007 TLan_MiiWriteReg(dev, data->phy_id & 0x1f, 938 tlan_mii_write_reg(dev, data->phy_id & 0x1f,
1008 data->reg_num & 0x1f, data->val_in); 939 data->reg_num & 0x1f, data->val_in);
1009 return 0; 940 return 0;
1010 default: 941 default:
1011 return -EOPNOTSUPP; 942 return -EOPNOTSUPP;
1012 } 943 }
1013} /* tlan_ioctl */ 944}
1014 945
1015 946
1016 /*************************************************************** 947/***************************************************************
1017 * TLan_tx_timeout 948 * tlan_tx_timeout
1018 * 949 *
1019 * Returns: nothing 950 * Returns: nothing
1020 * 951 *
1021 * Params: 952 * Params:
1022 * dev structure of device which timed out 953 * dev structure of device which timed out
1023 * during transmit. 954 * during transmit.
1024 * 955 *
1025 **************************************************************/ 956 **************************************************************/
1026 957
1027static void TLan_tx_timeout(struct net_device *dev) 958static void tlan_tx_timeout(struct net_device *dev)
1028{ 959{
1029 960
1030 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); 961 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
1031 962
1032 	/* Ok so we timed out, let's see what we can do about it... */    963 	/* Ok so we timed out, let's see what we can do about it... */
1033 TLan_FreeLists( dev ); 964 tlan_free_lists(dev);
1034 TLan_ResetLists( dev ); 965 tlan_reset_lists(dev);
1035 TLan_ReadAndClearStats( dev, TLAN_IGNORE ); 966 tlan_read_and_clear_stats(dev, TLAN_IGNORE);
1036 TLan_ResetAdapter( dev ); 967 tlan_reset_adapter(dev);
1037 dev->trans_start = jiffies; /* prevent tx timeout */ 968 dev->trans_start = jiffies; /* prevent tx timeout */
1038 netif_wake_queue( dev ); 969 netif_wake_queue(dev);
1039 970
1040} 971}
1041 972
1042 973
1043 /*************************************************************** 974/***************************************************************
1044 * TLan_tx_timeout_work 975 * tlan_tx_timeout_work
1045 * 976 *
1046 * Returns: nothing 977 * Returns: nothing
1047 * 978 *
1048 * Params: 979 * Params:
1049 * work work item of device which timed out 980 * work work item of device which timed out
1050 * 981 *
1051 **************************************************************/ 982 **************************************************************/
1052 983
1053static void TLan_tx_timeout_work(struct work_struct *work) 984static void tlan_tx_timeout_work(struct work_struct *work)
1054{ 985{
1055 TLanPrivateInfo *priv = 986 struct tlan_priv *priv =
1056 container_of(work, TLanPrivateInfo, tlan_tqueue); 987 container_of(work, struct tlan_priv, tlan_tqueue);
1057 988
1058 TLan_tx_timeout(priv->dev); 989 tlan_tx_timeout(priv->dev);
1059} 990}
1060 991
1061 992
1062 993
1063 /*************************************************************** 994/***************************************************************
1064 * TLan_StartTx 995 * tlan_start_tx
1065 * 996 *
1066 * Returns: 997 * Returns:
1067 * 0 on success, non-zero on failure. 998 * 0 on success, non-zero on failure.
1068 * Parms: 999 * Parms:
1069 * skb A pointer to the sk_buff containing the 1000 * skb A pointer to the sk_buff containing the
1070 * frame to be sent. 1001 * frame to be sent.
1071 * dev The device to send the data on. 1002 * dev The device to send the data on.
1072 * 1003 *
1073 * This function adds a frame to the Tx list to be sent 1004 * This function adds a frame to the Tx list to be sent
1074 * ASAP. First it verifies that the adapter is ready and 1005 * ASAP. First it verifies that the adapter is ready and
1075 * there is room in the queue. Then it sets up the next 1006 * there is room in the queue. Then it sets up the next
1076 * available list, copies the frame to the corresponding 1007 * available list, copies the frame to the corresponding
1077 * buffer. If the adapter Tx channel is idle, it gives 1008 * buffer. If the adapter Tx channel is idle, it gives
1078 * the adapter a Tx Go command on the list, otherwise it 1009 * the adapter a Tx Go command on the list, otherwise it
1079 * sets the forward address of the previous list to point 1010 * sets the forward address of the previous list to point
1080 * to this one. Then it frees the sk_buff. 1011 * to this one. Then it frees the sk_buff.
1081 * 1012 *
1082 **************************************************************/ 1013 **************************************************************/
1083 1014
1084static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) 1015static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
1085{ 1016{
1086 TLanPrivateInfo *priv = netdev_priv(dev); 1017 struct tlan_priv *priv = netdev_priv(dev);
1087 dma_addr_t tail_list_phys; 1018 dma_addr_t tail_list_phys;
1088 TLanList *tail_list; 1019 struct tlan_list *tail_list;
1089 unsigned long flags; 1020 unsigned long flags;
1090 unsigned int txlen; 1021 unsigned int txlen;
1091 1022
1092 if ( ! priv->phyOnline ) { 1023 if (!priv->phy_online) {
1093 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", 1024 TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
1094 dev->name ); 1025 dev->name);
1095 dev_kfree_skb_any(skb); 1026 dev_kfree_skb_any(skb);
1096 return NETDEV_TX_OK; 1027 return NETDEV_TX_OK;
1097 } 1028 }
@@ -1100,218 +1031,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1100 return NETDEV_TX_OK; 1031 return NETDEV_TX_OK;
1101 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE); 1032 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
1102 1033
1103 tail_list = priv->txList + priv->txTail; 1034 tail_list = priv->tx_list + priv->tx_tail;
1104 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1035 tail_list_phys =
1036 priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
1105 1037
1106 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { 1038 if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
1107 TLAN_DBG( TLAN_DEBUG_TX, 1039 TLAN_DBG(TLAN_DEBUG_TX,
1108 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", 1040 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
1109 dev->name, priv->txHead, priv->txTail ); 1041 dev->name, priv->tx_head, priv->tx_tail);
1110 netif_stop_queue(dev); 1042 netif_stop_queue(dev);
1111 priv->txBusyCount++; 1043 priv->tx_busy_count++;
1112 return NETDEV_TX_BUSY; 1044 return NETDEV_TX_BUSY;
1113 } 1045 }
1114 1046
1115 tail_list->forward = 0; 1047 tail_list->forward = 0;
1116 1048
1117 tail_list->buffer[0].address = pci_map_single(priv->pciDev, 1049 tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
1118 skb->data, txlen, 1050 skb->data, txlen,
1119 PCI_DMA_TODEVICE); 1051 PCI_DMA_TODEVICE);
1120 TLan_StoreSKB(tail_list, skb); 1052 tlan_store_skb(tail_list, skb);
1121 1053
1122 tail_list->frameSize = (u16) txlen; 1054 tail_list->frame_size = (u16) txlen;
1123 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; 1055 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
1124 tail_list->buffer[1].count = 0; 1056 tail_list->buffer[1].count = 0;
1125 tail_list->buffer[1].address = 0; 1057 tail_list->buffer[1].address = 0;
1126 1058
1127 spin_lock_irqsave(&priv->lock, flags); 1059 spin_lock_irqsave(&priv->lock, flags);
1128 tail_list->cStat = TLAN_CSTAT_READY; 1060 tail_list->c_stat = TLAN_CSTAT_READY;
1129 if ( ! priv->txInProgress ) { 1061 if (!priv->tx_in_progress) {
1130 priv->txInProgress = 1; 1062 priv->tx_in_progress = 1;
1131 TLAN_DBG( TLAN_DEBUG_TX, 1063 TLAN_DBG(TLAN_DEBUG_TX,
1132 "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1064 "TRANSMIT: Starting TX on buffer %d\n",
1133 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); 1065 priv->tx_tail);
1134 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); 1066 outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
1067 outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
1135 } else { 1068 } else {
1136 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", 1069 TLAN_DBG(TLAN_DEBUG_TX,
1137 priv->txTail ); 1070 "TRANSMIT: Adding buffer %d to TX channel\n",
1138 if ( priv->txTail == 0 ) { 1071 priv->tx_tail);
1139 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward 1072 if (priv->tx_tail == 0) {
1073 (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
1140 = tail_list_phys; 1074 = tail_list_phys;
1141 } else { 1075 } else {
1142 ( priv->txList + ( priv->txTail - 1 ) )->forward 1076 (priv->tx_list + (priv->tx_tail - 1))->forward
1143 = tail_list_phys; 1077 = tail_list_phys;
1144 } 1078 }
1145 } 1079 }
1146 spin_unlock_irqrestore(&priv->lock, flags); 1080 spin_unlock_irqrestore(&priv->lock, flags);
1147 1081
1148 CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); 1082 CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
1149 1083
1150 return NETDEV_TX_OK; 1084 return NETDEV_TX_OK;
1151 1085
1152} /* TLan_StartTx */ 1086}
1153 1087
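[Editor's note: the transmit path is a producer on a fixed-size descriptor ring — tx_tail is bumped with CIRC_INC after each queued frame, and a tail descriptor whose c_stat is still non-UNUSED means the ring is full and the queue must be stopped. A sketch of that wrap-around bookkeeping, assuming CIRC_INC advances an index modulo the ring size:]

	#include <stdio.h>

	#define NUM_TX_LISTS	8	/* toy ring size for the example */
	#define CSTAT_UNUSED	0
	#define CSTAT_READY	1

	/* assumed semantics of the driver's CIRC_INC macro */
	#define CIRC_INC(i, n)	((i) = ((i) + 1) % (n))

	int main(void)
	{
		int c_stat[NUM_TX_LISTS] = { 0 };
		int tail = 0, queued = 0;

		for (int i = 0; i < 10; i++) {
			if (c_stat[tail] != CSTAT_UNUSED) {
				printf("ring full at tail %d, stopping queue\n",
				       tail);
				break;
			}
			c_stat[tail] = CSTAT_READY;	/* hand slot to the NIC */
			CIRC_INC(tail, NUM_TX_LISTS);
			queued++;
		}
		printf("queued %d frames\n", queued);	/* queues 8, then stops */
		return 0;
	}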
1154 1088
1155 1089
1156 1090
1157 /*************************************************************** 1091/***************************************************************
1158 * TLan_HandleInterrupt 1092 * tlan_handle_interrupt
1159 * 1093 *
1160 * Returns: 1094 * Returns:
1161 * Nothing 1095 * Nothing
1162 * Parms: 1096 * Parms:
1163 * irq The line on which the interrupt 1097 * irq The line on which the interrupt
1164 * occurred. 1098 * occurred.
1165 * dev_id A pointer to the device assigned to 1099 * dev_id A pointer to the device assigned to
1166 * this irq line. 1100 * this irq line.
1167 * 1101 *
1168 * This function handles an interrupt generated by its 1102 * This function handles an interrupt generated by its
1169 * assigned TLAN adapter. The function deactivates 1103 * assigned TLAN adapter. The function deactivates
1170 * interrupts on its adapter, records the type of 1104 * interrupts on its adapter, records the type of
1171 * interrupt, executes the appropriate subhandler, and 1105 * interrupt, executes the appropriate subhandler, and
1172  *	interrupt, executes the appropriate subhandler, and      1106  *	interrupt, executes the appropriate subhandler, and
1173  *	acknowledges the interrupt to the adapter (thus           1107  *	acknowledges the interrupt to the adapter (thus
1174  *	re-enabling adapter interrupts).                          1108  *	re-enabling adapter interrupts).
1174 * 1108 *
1175 **************************************************************/ 1109 **************************************************************/
1176 1110
1177static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id) 1111static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
1178{ 1112{
1179 struct net_device *dev = dev_id; 1113 struct net_device *dev = dev_id;
1180 TLanPrivateInfo *priv = netdev_priv(dev); 1114 struct tlan_priv *priv = netdev_priv(dev);
1181 u16 host_int; 1115 u16 host_int;
1182 u16 type; 1116 u16 type;
1183 1117
1184 spin_lock(&priv->lock); 1118 spin_lock(&priv->lock);
1185 1119
1186 host_int = inw( dev->base_addr + TLAN_HOST_INT ); 1120 host_int = inw(dev->base_addr + TLAN_HOST_INT);
1187 type = ( host_int & TLAN_HI_IT_MASK ) >> 2; 1121 type = (host_int & TLAN_HI_IT_MASK) >> 2;
1188 if ( type ) { 1122 if (type) {
1189 u32 ack; 1123 u32 ack;
1190 u32 host_cmd; 1124 u32 host_cmd;
1191 1125
1192 outw( host_int, dev->base_addr + TLAN_HOST_INT ); 1126 outw(host_int, dev->base_addr + TLAN_HOST_INT);
1193 ack = TLanIntVector[type]( dev, host_int ); 1127 ack = tlan_int_vector[type](dev, host_int);
1194 1128
1195 if ( ack ) { 1129 if (ack) {
1196 host_cmd = TLAN_HC_ACK | ack | ( type << 18 ); 1130 host_cmd = TLAN_HC_ACK | ack | (type << 18);
1197 outl( host_cmd, dev->base_addr + TLAN_HOST_CMD ); 1131 outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
1198 } 1132 }
1199 } 1133 }
1200 1134
1201 spin_unlock(&priv->lock); 1135 spin_unlock(&priv->lock);
1202 1136
1203 return IRQ_RETVAL(type); 1137 return IRQ_RETVAL(type);
1204} /* TLan_HandleInterrupts */ 1138}
1205 1139
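[Editor's note: the acknowledge step packs three fields into one TLAN_HOST_CMD write — the ACK command bit, the count of interrupts being acked (the handler's return value), and the interrupt type shifted into bits 18 and up. A worked example of the composition; the 0x20000000 ACK bit position is an assumption here, only the (type << 18) placement comes from the code above.]

	#include <stdio.h>
	#include <stdint.h>

	#define HC_ACK	0x20000000u	/* assumed TLAN_HC_ACK bit position */

	int main(void)
	{
		uint32_t ack = 1;	/* handler acked one interrupt */
		unsigned int type = 3;	/* e.g. Rx EOF */
		uint32_t host_cmd = HC_ACK | ack | (type << 18);

		printf("host_cmd = 0x%08x\n", (unsigned)host_cmd);  /* 0x200c0001 */
		return 0;
	}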
1206 1140
1207 1141
1208 1142
1209 /*************************************************************** 1143/***************************************************************
1210 * TLan_Close 1144 * tlan_close
1211 * 1145 *
1212 * Returns: 1146 * Returns:
1213 * An error code. 1147 * An error code.
1214 * Parms: 1148 * Parms:
1215 * dev The device structure of the device to 1149 * dev The device structure of the device to
1216 * close. 1150 * close.
1217 * 1151 *
1218 * This function shuts down the adapter. It records any 1152 * This function shuts down the adapter. It records any
1219 * stats, puts the adapter into reset state, deactivates 1153 * stats, puts the adapter into reset state, deactivates
1220  *	its timer as needed, and frees the irq it is using.       1154  *	its timer as needed, and frees the irq it is using.
1221 * 1155 *
1222 **************************************************************/ 1156 **************************************************************/
1223 1157
1224static int TLan_Close(struct net_device *dev) 1158static int tlan_close(struct net_device *dev)
1225{ 1159{
1226 TLanPrivateInfo *priv = netdev_priv(dev); 1160 struct tlan_priv *priv = netdev_priv(dev);
1227 1161
1228 netif_stop_queue(dev);
1229 priv->neg_be_verbose = 0; 1162 priv->neg_be_verbose = 0;
1163 tlan_stop(dev);
1230 1164
1231 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1165 free_irq(dev->irq, dev);
1232 outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); 1166 tlan_free_lists(dev);
1233 if ( priv->timer.function != NULL ) { 1167 TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
1234 del_timer_sync( &priv->timer );
1235 priv->timer.function = NULL;
1236 }
1237
1238 free_irq( dev->irq, dev );
1239 TLan_FreeLists( dev );
1240 TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
1241 1168
1242 return 0; 1169 return 0;
1243 1170
1244} /* TLan_Close */ 1171}
1245 1172
1246 1173
1247 1174
1248 1175
1249 /*************************************************************** 1176/***************************************************************
1250 * TLan_GetStats 1177 * tlan_get_stats
1251 * 1178 *
1252 * Returns: 1179 * Returns:
1253 * A pointer to the device's statistics structure. 1180 * A pointer to the device's statistics structure.
1254 * Parms: 1181 * Parms:
1255 * dev The device structure to return the 1182 * dev The device structure to return the
1256 * stats for. 1183 * stats for.
1257 * 1184 *
1258  *	This function updates the device's statistics by reading  1185  *	This function updates the device's statistics by reading
1259 * the TLAN chip's onboard registers. Then it returns the 1186 * the TLAN chip's onboard registers. Then it returns the
1260 * address of the statistics structure. 1187 * address of the statistics structure.
1261 * 1188 *
1262 **************************************************************/ 1189 **************************************************************/
1263 1190
1264static struct net_device_stats *TLan_GetStats( struct net_device *dev ) 1191static struct net_device_stats *tlan_get_stats(struct net_device *dev)
1265{ 1192{
1266 TLanPrivateInfo *priv = netdev_priv(dev); 1193 struct tlan_priv *priv = netdev_priv(dev);
1267 int i; 1194 int i;
1268 1195
1269 /* Should only read stats if open ? */ 1196 /* Should only read stats if open ? */
1270 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1197 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1271 1198
1272 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, 1199 TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
1273 priv->rxEocCount ); 1200 priv->rx_eoc_count);
1274 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, 1201 TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
1275 priv->txBusyCount ); 1202 priv->tx_busy_count);
1276 if ( debug & TLAN_DEBUG_GNRL ) { 1203 if (debug & TLAN_DEBUG_GNRL) {
1277 TLan_PrintDio( dev->base_addr ); 1204 tlan_print_dio(dev->base_addr);
1278 TLan_PhyPrint( dev ); 1205 tlan_phy_print(dev);
1279 } 1206 }
1280 if ( debug & TLAN_DEBUG_LIST ) { 1207 if (debug & TLAN_DEBUG_LIST) {
1281 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) 1208 for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
1282 TLan_PrintList( priv->rxList + i, "RX", i ); 1209 tlan_print_list(priv->rx_list + i, "RX", i);
1283 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) 1210 for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
1284 TLan_PrintList( priv->txList + i, "TX", i ); 1211 tlan_print_list(priv->tx_list + i, "TX", i);
1285 } 1212 }
1286 1213
1287 return &dev->stats; 1214 return &dev->stats;
1288 1215
1289} /* TLan_GetStats */ 1216}
1290 1217
1291 1218
1292 1219
1293 1220
1294 /*************************************************************** 1221/***************************************************************
1295 * TLan_SetMulticastList 1222 * tlan_set_multicast_list
1296 * 1223 *
1297 * Returns: 1224 * Returns:
1298 * Nothing 1225 * Nothing
1299 * Parms: 1226 * Parms:
1300 * dev The device structure to set the 1227 * dev The device structure to set the
1301 * multicast list for. 1228 * multicast list for.
1302 * 1229 *
1303 * This function sets the TLAN adaptor to various receive 1230 * This function sets the TLAN adaptor to various receive
1304 * modes. If the IFF_PROMISC flag is set, promiscuous 1231 * modes. If the IFF_PROMISC flag is set, promiscuous
1305  *	mode is activated.  Otherwise, promiscuous mode is        1232  *	mode is activated. Otherwise, promiscuous mode is
1306 * turned off. If the IFF_ALLMULTI flag is set, then 1233 * turned off. If the IFF_ALLMULTI flag is set, then
1307 * the hash table is set to receive all group addresses. 1234 * the hash table is set to receive all group addresses.
1308 * Otherwise, the first three multicast addresses are 1235 * Otherwise, the first three multicast addresses are
1309 * stored in AREG_1-3, and the rest are selected via the 1236 * stored in AREG_1-3, and the rest are selected via the
1310 * hash table, as necessary. 1237 * hash table, as necessary.
1311 * 1238 *
1312 **************************************************************/ 1239 **************************************************************/
1313 1240
1314static void TLan_SetMulticastList( struct net_device *dev ) 1241static void tlan_set_multicast_list(struct net_device *dev)
1315{ 1242{
1316 struct netdev_hw_addr *ha; 1243 struct netdev_hw_addr *ha;
1317 u32 hash1 = 0; 1244 u32 hash1 = 0;
@@ -1320,53 +1247,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
 	u32			offset;
 	u8			tmp;
 
-	if ( dev->flags & IFF_PROMISC ) {
-		tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
-		TLan_DioWrite8( dev->base_addr,
-				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+	if (dev->flags & IFF_PROMISC) {
+		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+		tlan_dio_write8(dev->base_addr,
+				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
 	} else {
-		tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
-		TLan_DioWrite8( dev->base_addr,
-				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
-		if ( dev->flags & IFF_ALLMULTI ) {
-			for ( i = 0; i < 3; i++ )
-				TLan_SetMac( dev, i + 1, NULL );
-			TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
-			TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+		tlan_dio_write8(dev->base_addr,
+				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+		if (dev->flags & IFF_ALLMULTI) {
+			for (i = 0; i < 3; i++)
+				tlan_set_mac(dev, i + 1, NULL);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+					 0xffffffff);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+					 0xffffffff);
 		} else {
 			i = 0;
 			netdev_for_each_mc_addr(ha, dev) {
-				if ( i < 3 ) {
-					TLan_SetMac( dev, i + 1,
+				if (i < 3) {
+					tlan_set_mac(dev, i + 1,
 						     (char *) &ha->addr);
 				} else {
-					offset = TLan_HashFunc((u8 *)&ha->addr);
-					if ( offset < 32 )
-						hash1 |= ( 1 << offset );
+					offset =
+						tlan_hash_func((u8 *)&ha->addr);
+					if (offset < 32)
+						hash1 |= (1 << offset);
 					else
-						hash2 |= ( 1 << ( offset - 32 ) );
+						hash2 |= (1 << (offset - 32));
 				}
 				i++;
 			}
-			for ( ; i < 3; i++ )
-				TLan_SetMac( dev, i + 1, NULL );
-			TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
-			TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+			for ( ; i < 3; i++)
+				tlan_set_mac(dev, i + 1, NULL);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
 		}
 	}
 
-} /* TLan_SetMulticastList */
+}
 
 
 
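The hunk above implements the AREG_1-3 plus 64-bit hash filtering described in its header comment. The driver's own tlan_hash_func() is not shown in this hunk, so the hash6() below is a labeled stand-in, not the real polynomial; only the hash1/hash2 bucket split mirrors the code above. A minimal userspace sketch:

#include <stdint.h>
#include <stdio.h>

static unsigned hash6(const uint8_t *addr)	/* stand-in, NOT the real hash */
{
	unsigned h = 0;
	for (int i = 0; i < 6; i++)
		h ^= addr[i];
	return h & 0x3f;			/* 64 buckets, bit 0..63 */
}

int main(void)
{
	uint32_t hash1 = 0, hash2 = 0;		/* model TLAN_HASH_1/TLAN_HASH_2 */
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	unsigned offset = hash6(mc);

	if (offset < 32)			/* low word of the filter */
		hash1 |= 1u << offset;
	else					/* high word of the filter */
		hash2 |= 1u << (offset - 32);
	printf("hash1=%08x hash2=%08x\n", (unsigned)hash1, (unsigned)hash2);
	return 0;
}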
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver Interrupt Vectors and Table
+ThunderLAN driver interrupt vectors and table
 
-	Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
-	Programmer's Guide" for more informations on handling interrupts
-	generated by TLAN based adapters.
+please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more informations on handling interrupts
+generated by TLAN based adapters.
 
 ******************************************************************************
 *****************************************************************************/
@@ -1374,46 +1304,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
 
 
 
-	/***************************************************************
-	 *	TLan_HandleTxEOF
+/***************************************************************
+ *	tlan_handle_tx_eof
  *
  *	Returns:
  *		1
  *	Parms:
  *		dev		Device assigned the IRQ that was
  *				raised.
  *		host_int	The contents of the HOST_INT
  *				port.
  *
  *	This function handles Tx EOF interrupts which are raised
  *	by the adapter when it has completed sending the
  *	contents of a buffer.  If detemines which list/buffer
  *	was completed and resets it.  If the buffer was the last
  *	in the channel (EOC), then the function checks to see if
  *	another buffer is ready to send, and if so, sends a Tx
  *	Go command.  Finally, the driver activates/continues the
  *	activity LED.
  *
  **************************************************************/
 
-static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	int		eoc = 0;
-	TLanList	*head_list;
+	struct tlan_list	*head_list;
 	dma_addr_t	head_list_phys;
 	u32		ack = 0;
-	u16		tmpCStat;
+	u16		tmp_c_stat;
 
-	TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
-		  priv->txHead, priv->txTail );
-	head_list = priv->txList + priv->txHead;
+	TLAN_DBG(TLAN_DEBUG_TX,
+		 "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
+		 priv->tx_head, priv->tx_tail);
+	head_list = priv->tx_list + priv->tx_head;
 
-	while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-		struct sk_buff *skb = TLan_GetSKB(head_list);
+	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+	       && (ack < 255)) {
+		struct sk_buff *skb = tlan_get_skb(head_list);
 
 		ack++;
-		pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+		pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
 				 max(skb->len,
 				     (unsigned int)TLAN_MIN_FRAME_SIZE),
 				 PCI_DMA_TODEVICE);
@@ -1421,304 +1353,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
 		head_list->buffer[8].address = 0;
 		head_list->buffer[9].address = 0;
 
-		if ( tmpCStat & TLAN_CSTAT_EOC )
+		if (tmp_c_stat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
-		dev->stats.tx_bytes += head_list->frameSize;
+		dev->stats.tx_bytes += head_list->frame_size;
 
-		head_list->cStat = TLAN_CSTAT_UNUSED;
+		head_list->c_stat = TLAN_CSTAT_UNUSED;
 		netif_start_queue(dev);
-		CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
-		head_list = priv->txList + priv->txHead;
+		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+		head_list = priv->tx_list + priv->tx_head;
 	}
 
 	if (!ack)
-		printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
-
-	if ( eoc ) {
-		TLAN_DBG( TLAN_DEBUG_TX,
-			  "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d)\n",
-			  priv->txHead, priv->txTail );
-		head_list = priv->txList + priv->txHead;
-		head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
-		if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
-			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+		netdev_info(dev,
+			    "Received interrupt for uncompleted TX frame\n");
+
+	if (eoc) {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  handling TX EOC (Head=%d Tail=%d)\n",
+			 priv->tx_head, priv->tx_tail);
+		head_list = priv->tx_list + priv->tx_head;
+		head_list_phys = priv->tx_list_dma
+			+ sizeof(struct tlan_list)*priv->tx_head;
+		if ((head_list->c_stat & TLAN_CSTAT_READY)
+		    == TLAN_CSTAT_READY) {
+			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
 			ack |= TLAN_HC_GO;
 		} else {
-			priv->txInProgress = 0;
+			priv->tx_in_progress = 0;
 		}
 	}
 
-	if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
-		TLan_DioWrite8( dev->base_addr,
-				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
-		if ( priv->timer.function == NULL ) {
-			priv->timer.function = TLan_Timer;
+	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+		tlan_dio_write8(dev->base_addr,
+				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+		if (priv->timer.function == NULL) {
+			priv->timer.function = tlan_timer;
 			priv->timer.data = (unsigned long) dev;
 			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
-			priv->timerSetAt = jiffies;
-			priv->timerType = TLAN_TIMER_ACTIVITY;
+			priv->timer_set_at = jiffies;
+			priv->timer_type = TLAN_TIMER_ACTIVITY;
 			add_timer(&priv->timer);
-		} else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
-			priv->timerSetAt = jiffies;
+		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+			priv->timer_set_at = jiffies;
 		}
 	}
 
 	return ack;
 
-} /* TLan_HandleTxEOF */
+}
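CIRC_INC() above advances the tx ring head index with wrap-around. The macro body lives in tlan.h rather than in this hunk, so the definition below is an assumption about its shape, given how it is invoked here (modifies the index in place, bounded by the list count):

/* sketch of a circular-increment macro as used by the ring code above;
 * assumed shape, not quoted from tlan.h */
#define CIRC_INC(index, size)	do {		\
	(index)++;				\
	if ((index) >= (size))			\
		(index) = 0;			\
} while (0)

With TLAN_NUM_TX_LISTS entries, an index at the last slot wraps back to 0, so head and tail chase each other around the same fixed descriptor array.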
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleStatOverflow
+/***************************************************************
+ *	TLan_HandleStatOverflow
  *
  *	Returns:
  *		1
  *	Parms:
  *		dev		Device assigned the IRQ that was
  *				raised.
  *		host_int	The contents of the HOST_INT
  *				port.
  *
  *	This function handles the Statistics Overflow interrupt
  *	which means that one or more of the TLAN statistics
  *	registers has reached 1/2 capacity and needs to be read.
  *
  **************************************************************/
 
-static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
 {
-	TLan_ReadAndClearStats( dev, TLAN_RECORD );
+	tlan_read_and_clear_stats(dev, TLAN_RECORD);
 
 	return 1;
 
-} /* TLan_HandleStatOverflow */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleRxEOF
+/***************************************************************
+ *	TLan_HandleRxEOF
  *
  *	Returns:
  *		1
  *	Parms:
  *		dev		Device assigned the IRQ that was
  *				raised.
  *		host_int	The contents of the HOST_INT
  *				port.
  *
  *	This function handles the Rx EOF interrupt which
  *	indicates a frame has been received by the adapter from
  *	the net and the frame has been transferred to memory.
  *	The function determines the bounce buffer the frame has
  *	been loaded into, creates a new sk_buff big enough to
  *	hold the frame, and sends it to protocol stack.  It
  *	then resets the used buffer and appends it to the end
  *	of the list.  If the frame was the last in the Rx
  *	channel (EOC), the function restarts the receive channel
  *	by sending an Rx Go command to the adapter.  Then it
  *	activates/continues the activity LED.
  *
  **************************************************************/
 
-static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u32		ack = 0;
 	int		eoc = 0;
-	TLanList	*head_list;
+	struct tlan_list	*head_list;
 	struct sk_buff	*skb;
-	TLanList	*tail_list;
-	u16		tmpCStat;
+	struct tlan_list	*tail_list;
+	u16		tmp_c_stat;
 	dma_addr_t	head_list_phys;
 
-	TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  Handling RX EOF (Head=%d Tail=%d)\n",
-		  priv->rxHead, priv->rxTail );
-	head_list = priv->rxList + priv->rxHead;
-	head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  handling RX EOF (Head=%d Tail=%d)\n",
+		 priv->rx_head, priv->rx_tail);
+	head_list = priv->rx_list + priv->rx_head;
+	head_list_phys =
+		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
 
-	while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-		dma_addr_t frameDma = head_list->buffer[0].address;
-		u32 frameSize = head_list->frameSize;
+	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+	       && (ack < 255)) {
+		dma_addr_t frame_dma = head_list->buffer[0].address;
+		u32 frame_size = head_list->frame_size;
 		struct sk_buff *new_skb;
 
 		ack++;
-		if (tmpCStat & TLAN_CSTAT_EOC)
+		if (tmp_c_stat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
 		new_skb = netdev_alloc_skb_ip_align(dev,
 						    TLAN_MAX_FRAME_SIZE + 5);
-		if ( !new_skb )
+		if (!new_skb)
 			goto drop_and_reuse;
 
-		skb = TLan_GetSKB(head_list);
-		pci_unmap_single(priv->pciDev, frameDma,
+		skb = tlan_get_skb(head_list);
+		pci_unmap_single(priv->pci_dev, frame_dma,
 				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
-		skb_put( skb, frameSize );
+		skb_put(skb, frame_size);
 
-		dev->stats.rx_bytes += frameSize;
+		dev->stats.rx_bytes += frame_size;
 
-		skb->protocol = eth_type_trans( skb, dev );
-		netif_rx( skb );
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_rx(skb);
 
-		head_list->buffer[0].address = pci_map_single(priv->pciDev,
-							      new_skb->data,
-							      TLAN_MAX_FRAME_SIZE,
-							      PCI_DMA_FROMDEVICE);
+		head_list->buffer[0].address =
+			pci_map_single(priv->pci_dev, new_skb->data,
+				       TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 
-		TLan_StoreSKB(head_list, new_skb);
+		tlan_store_skb(head_list, new_skb);
 drop_and_reuse:
 		head_list->forward = 0;
-		head_list->cStat = 0;
-		tail_list = priv->rxList + priv->rxTail;
+		head_list->c_stat = 0;
+		tail_list = priv->rx_list + priv->rx_tail;
 		tail_list->forward = head_list_phys;
 
-		CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
-		CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
-		head_list = priv->rxList + priv->rxHead;
-		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+		head_list = priv->rx_list + priv->rx_head;
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
 	}
 
 	if (!ack)
-		printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
-
-
-	if ( eoc ) {
-		TLAN_DBG( TLAN_DEBUG_RX,
-			  "RECEIVE:  Handling RX EOC (Head=%d Tail=%d)\n",
-			  priv->rxHead, priv->rxTail );
-		head_list = priv->rxList + priv->rxHead;
-		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
-		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+		netdev_info(dev,
+			    "Received interrupt for uncompleted RX frame\n");
+
+
+	if (eoc) {
+		TLAN_DBG(TLAN_DEBUG_RX,
+			 "RECEIVE:  handling RX EOC (Head=%d Tail=%d)\n",
+			 priv->rx_head, priv->rx_tail);
+		head_list = priv->rx_list + priv->rx_head;
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
+		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
 		ack |= TLAN_HC_GO | TLAN_HC_RT;
-		priv->rxEocCount++;
+		priv->rx_eoc_count++;
 	}
 
-	if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
-		TLan_DioWrite8( dev->base_addr,
-				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
-		if ( priv->timer.function == NULL )  {
-			priv->timer.function = TLan_Timer;
+	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+		tlan_dio_write8(dev->base_addr,
+				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+		if (priv->timer.function == NULL)  {
+			priv->timer.function = tlan_timer;
 			priv->timer.data = (unsigned long) dev;
 			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
-			priv->timerSetAt = jiffies;
-			priv->timerType = TLAN_TIMER_ACTIVITY;
+			priv->timer_set_at = jiffies;
+			priv->timer_type = TLAN_TIMER_ACTIVITY;
 			add_timer(&priv->timer);
-		} else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
-			priv->timerSetAt = jiffies;
+		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+			priv->timer_set_at = jiffies;
 		}
 	}
 
 	return ack;
 
-} /* TLan_HandleRxEOF */
+}
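One detail of the rx path above worth calling out: the replacement buffer is allocated before the filled one is unmapped, so an allocation failure can fall through to drop_and_reuse and put the old buffer straight back on the ring; a descriptor is never lost, only the frame. A self-contained userspace model of that ordering (allocator and netif_rx() stubbed out):

#include <stdlib.h>
#include <stdio.h>

struct buf { char data[64]; };

static struct buf *alloc_buf(void) { return malloc(sizeof(struct buf)); }

/* returns 1 if the frame was delivered, 0 if dropped-and-reused */
static int rx_refill(struct buf **slot)
{
	struct buf *new_buf = alloc_buf();	/* replacement first */
	if (!new_buf)
		return 0;	/* keep old buffer in the ring, drop frame */
	struct buf *filled = *slot;	/* hand the filled one up the stack */
	free(filled);			/* stands in for netif_rx() */
	*slot = new_buf;		/* empty replacement back on the ring */
	return 1;
}

int main(void)
{
	struct buf *slot = alloc_buf();
	printf("delivered=%d\n", rx_refill(&slot));
	free(slot);
	return 0;
}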
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleDummy
+/***************************************************************
+ *	tlan_handle_dummy
  *
  *	Returns:
  *		1
  *	Parms:
  *		dev		Device assigned the IRQ that was
  *				raised.
  *		host_int	The contents of the HOST_INT
  *				port.
  *
  *	This function handles the Dummy interrupt, which is
  *	raised whenever a test interrupt is generated by setting
  *	the Req_Int bit of HOST_CMD to 1.
  *
  **************************************************************/
 
-static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
 {
-	printk( "TLAN:  Test interrupt on %s.\n", dev->name );
+	netdev_info(dev, "Test interrupt\n");
 	return 1;
 
-} /* TLan_HandleDummy */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleTxEOC
+/***************************************************************
+ *	tlan_handle_tx_eoc
  *
  *	Returns:
  *		1
  *	Parms:
  *		dev		Device assigned the IRQ that was
  *				raised.
  *		host_int	The contents of the HOST_INT
  *				port.
  *
  *	This driver is structured to determine EOC occurrences by
  *	reading the CSTAT member of the list structure.  Tx EOC
  *	interrupts are disabled via the DIO INTDIS register.
  *	However, TLAN chips before revision 3.0 didn't have this
  *	functionality, so process EOC events if this is the
  *	case.
  *
  **************************************************************/
 
-static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
-	TLanList	*head_list;
+	struct tlan_priv	*priv = netdev_priv(dev);
+	struct tlan_list	*head_list;
 	dma_addr_t	head_list_phys;
 	u32		ack = 1;
 
 	host_int = 0;
-	if ( priv->tlanRev < 0x30 ) {
-		TLAN_DBG( TLAN_DEBUG_TX,
-			  "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
-			  priv->txHead, priv->txTail );
-		head_list = priv->txList + priv->txHead;
-		head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
-		if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+	if (priv->tlan_rev < 0x30) {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+			 priv->tx_head, priv->tx_tail);
+		head_list = priv->tx_list + priv->tx_head;
+		head_list_phys = priv->tx_list_dma
+			+ sizeof(struct tlan_list)*priv->tx_head;
+		if ((head_list->c_stat & TLAN_CSTAT_READY)
+		    == TLAN_CSTAT_READY) {
 			netif_stop_queue(dev);
-			outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
 			ack |= TLAN_HC_GO;
 		} else {
-			priv->txInProgress = 0;
+			priv->tx_in_progress = 0;
 		}
 	}
 
 	return ack;
 
-} /* TLan_HandleTxEOC */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleStatusCheck
+/***************************************************************
+ *	tlan_handle_status_check
  *
  *	Returns:
  *		0 if Adapter check, 1 if Network Status check.
  *	Parms:
  *		dev		Device assigned the IRQ that was
  *				raised.
  *		host_int	The contents of the HOST_INT
  *				port.
  *
  *	This function handles Adapter Check/Network Status
  *	interrupts generated by the adapter.  It checks the
  *	vector in the HOST_INT register to determine if it is
  *	an Adapter Check interrupt.  If so, it resets the
  *	adapter.  Otherwise it clears the status registers
  *	and services the PHY.
  *
  **************************************************************/
 
-static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u32		ack;
 	u32		error;
 	u8		net_sts;
@@ -1727,92 +1668,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
 	u16		tlphy_sts;
 
 	ack = 1;
-	if ( host_int & TLAN_HI_IV_MASK ) {
-		netif_stop_queue( dev );
-		error = inl( dev->base_addr + TLAN_CH_PARM );
-		printk( "TLAN:  %s: Adaptor Error = 0x%x\n", dev->name, error );
-		TLan_ReadAndClearStats( dev, TLAN_RECORD );
-		outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+	if (host_int & TLAN_HI_IV_MASK) {
+		netif_stop_queue(dev);
+		error = inl(dev->base_addr + TLAN_CH_PARM);
+		netdev_info(dev, "Adaptor Error = 0x%x\n", error);
+		tlan_read_and_clear_stats(dev, TLAN_RECORD);
+		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
 
 		schedule_work(&priv->tlan_tqueue);
 
 		netif_wake_queue(dev);
 		ack = 0;
 	} else {
-		TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
-		phy = priv->phy[priv->phyNum];
+		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+		phy = priv->phy[priv->phy_num];
 
-		net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
-		if ( net_sts ) {
-			TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
-			TLAN_DBG( TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
-				  dev->name, (unsigned) net_sts );
+		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+		if (net_sts) {
+			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+			TLAN_DBG(TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
+				 dev->name, (unsigned) net_sts);
 		}
-		if ( ( net_sts & TLAN_NET_STS_MIRQ ) &&  ( priv->phyNum == 0 ) ) {
-			TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
-			TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
-			if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
-			     ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
+		if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
+			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+			if (!(tlphy_sts & TLAN_TS_POLOK) &&
+			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
 				tlphy_ctl |= TLAN_TC_SWAPOL;
-				TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-			} else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
-				    ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
-				tlphy_ctl &= ~TLAN_TC_SWAPOL;
-				TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-			}
-
-			if (debug) {
-				TLan_PhyPrint( dev );
-			}
+				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+						   tlphy_ctl);
+			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
+				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
+				tlphy_ctl &= ~TLAN_TC_SWAPOL;
+				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+						   tlphy_ctl);
+			}
+
+			if (debug)
+				tlan_phy_print(dev);
 		}
 	}
 
 	return ack;
 
-} /* TLan_HandleStatusCheck */
+}
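The polarity fix-up above is a read-modify-write with hysteresis on an MII control register: set TLAN_TC_SWAPOL only when polarity is reported bad and not already swapped, clear it only when polarity is good again while swapped. The bit values below are illustrative placeholders (the real TLAN_TS_POLOK/TLAN_TC_SWAPOL values are defined in tlan.h, not in this hunk); a minimal model:

#include <stdint.h>
#include <stdio.h>

#define TS_POLOK   0x0001	/* status: polarity OK (illustrative value) */
#define TC_SWAPOL  0x0002	/* control: swap polarity (illustrative value) */

static uint16_t fix_polarity(uint16_t sts, uint16_t ctl)
{
	if (!(sts & TS_POLOK) && !(ctl & TC_SWAPOL))
		ctl |= TC_SWAPOL;	/* bad polarity, not yet swapped */
	else if ((sts & TS_POLOK) && (ctl & TC_SWAPOL))
		ctl &= ~TC_SWAPOL;	/* polarity OK again, stop swapping */
	return ctl;			/* written back via the MII write */
}

int main(void)
{
	printf("%#x\n", fix_polarity(0, 0));			/* sets swap */
	printf("%#x\n", fix_polarity(TS_POLOK, TC_SWAPOL));	/* clears it */
	return 0;
}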
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleRxEOC
+/***************************************************************
+ *	tlan_handle_rx_eoc
  *
  *	Returns:
  *		1
  *	Parms:
  *		dev		Device assigned the IRQ that was
  *				raised.
  *		host_int	The contents of the HOST_INT
  *				port.
  *
  *	This driver is structured to determine EOC occurrences by
  *	reading the CSTAT member of the list structure.  Rx EOC
  *	interrupts are disabled via the DIO INTDIS register.
  *	However, TLAN chips before revision 3.0 didn't have this
  *	CSTAT member or a INTDIS register, so if this chip is
  *	pre-3.0, process EOC interrupts normally.
  *
  **************************************************************/
 
-static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	dma_addr_t	head_list_phys;
 	u32		ack = 1;
 
-	if ( priv->tlanRev < 0x30 ) {
-		TLAN_DBG( TLAN_DEBUG_RX,
-			  "RECEIVE:  Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
-			  priv->rxHead, priv->rxTail );
-		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
-		outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+	if (priv->tlan_rev < 0x30) {
+		TLAN_DBG(TLAN_DEBUG_RX,
+			 "RECEIVE:  Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+			 priv->rx_head, priv->rx_tail);
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
+		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
 		ack |= TLAN_HC_GO | TLAN_HC_RT;
-		priv->rxEocCount++;
+		priv->rx_eoc_count++;
 	}
 
 	return ack;
 
-} /* TLan_HandleRxEOC */
+}
 
 
 
@@ -1820,98 +1763,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver Timer Function
+ThunderLAN driver timer function
 
 ******************************************************************************
 *****************************************************************************/
 
 
-	/***************************************************************
-	 *	TLan_Timer
+/***************************************************************
+ *	tlan_timer
  *
  *	Returns:
  *		Nothing
  *	Parms:
  *		data	A value given to add timer when
  *			add_timer was called.
  *
  *	This function handles timed functionality for the
  *	TLAN driver.  The two current timer uses are for
  *	delaying for autonegotionation and driving the ACT LED.
  *	-  Autonegotiation requires being allowed about
  *	   2 1/2 seconds before attempting to transmit a
  *	   packet.  It would be a very bad thing to hang
  *	   the kernel this long, so the driver doesn't
  *	   allow transmission 'til after this time, for
  *	   certain PHYs.  It would be much nicer if all
  *	   PHYs were interrupt-capable like the internal
  *	   PHY.
  *	-  The ACT LED, which shows adapter activity, is
  *	   driven by the driver, and so must be left on
  *	   for a short period to power up the LED so it
  *	   can be seen.  This delay can be changed by
  *	   changing the TLAN_TIMER_ACT_DELAY in tlan.h,
  *	   if desired.  100 ms  produces a slightly
  *	   sluggish response.
  *
  **************************************************************/
 
-static void TLan_Timer( unsigned long data )
+static void tlan_timer(unsigned long data)
 {
 	struct net_device	*dev = (struct net_device *) data;
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u32		elapsed;
 	unsigned long	flags = 0;
 
 	priv->timer.function = NULL;
 
-	switch ( priv->timerType ) {
+	switch (priv->timer_type) {
 #ifdef MONITOR
 	case TLAN_TIMER_LINK_BEAT:
-		TLan_PhyMonitor( dev );
+		tlan_phy_monitor(dev);
 		break;
 #endif
 	case TLAN_TIMER_PHY_PDOWN:
-		TLan_PhyPowerDown( dev );
+		tlan_phy_power_down(dev);
 		break;
 	case TLAN_TIMER_PHY_PUP:
-		TLan_PhyPowerUp( dev );
+		tlan_phy_power_up(dev);
 		break;
 	case TLAN_TIMER_PHY_RESET:
-		TLan_PhyReset( dev );
+		tlan_phy_reset(dev);
 		break;
 	case TLAN_TIMER_PHY_START_LINK:
-		TLan_PhyStartLink( dev );
+		tlan_phy_start_link(dev);
 		break;
 	case TLAN_TIMER_PHY_FINISH_AN:
-		TLan_PhyFinishAutoNeg( dev );
+		tlan_phy_finish_auto_neg(dev);
 		break;
 	case TLAN_TIMER_FINISH_RESET:
-		TLan_FinishReset( dev );
+		tlan_finish_reset(dev);
 		break;
 	case TLAN_TIMER_ACTIVITY:
 		spin_lock_irqsave(&priv->lock, flags);
-		if ( priv->timer.function == NULL ) {
-			elapsed = jiffies - priv->timerSetAt;
-			if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
-				TLan_DioWrite8( dev->base_addr,
-						TLAN_LED_REG, TLAN_LED_LINK );
+		if (priv->timer.function == NULL) {
+			elapsed = jiffies - priv->timer_set_at;
+			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+				tlan_dio_write8(dev->base_addr,
+						TLAN_LED_REG, TLAN_LED_LINK);
 			} else  {
-				priv->timer.function = TLan_Timer;
-				priv->timer.expires = priv->timerSetAt
+				priv->timer.function = tlan_timer;
+				priv->timer.expires = priv->timer_set_at
 							+ TLAN_TIMER_ACT_DELAY;
 				spin_unlock_irqrestore(&priv->lock, flags);
-				add_timer( &priv->timer );
+				add_timer(&priv->timer);
 				break;
 			}
 		}
 		spin_unlock_irqrestore(&priv->lock, flags);
 		break;
 	default:
 		break;
 	}
 
-} /* TLan_Timer */
+}
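A note on the ACTIVITY case above: the handler clears priv->timer.function on entry, so a NULL function pointer doubles as the "no timer pending" flag checked both here and in the interrupt paths, and the lock is dropped before add_timer() when the LED must stay on longer. A userspace model of just the re-arm decision (the delay value is illustrative, not the driver's):

#include <stdio.h>

#define ACT_DELAY 10		/* stands in for TLAN_TIMER_ACT_DELAY */

static unsigned long jiffies;	/* stands in for the kernel tick counter */

/* returns 1 if the timer must re-arm, 0 if the LED can drop to link-only */
static int activity_tick(unsigned long set_at)
{
	unsigned long elapsed = jiffies - set_at;
	return elapsed < ACT_DELAY;
}

int main(void)
{
	unsigned long set_at = 0;
	jiffies = 4;
	printf("re-arm=%d\n", activity_tick(set_at));	/* 1: keep LED on */
	jiffies = 12;
	printf("re-arm=%d\n", activity_tick(set_at));	/* 0: dim the LED */
	return 0;
}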
 
 
 
@@ -1919,39 +1862,39 @@ static void TLan_Timer( unsigned long data )
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver Adapter Related Routines
+ThunderLAN driver adapter related routines
 
 ******************************************************************************
 *****************************************************************************/
 
 
-	/***************************************************************
-	 *	TLan_ResetLists
+/***************************************************************
+ *	tlan_reset_lists
  *
  *	Returns:
  *		Nothing
  *	Parms:
  *		dev	The device structure with the list
  *			stuctures to be reset.
  *
  *	This routine sets the variables associated with managing
  *	the TLAN lists to their initial values.
  *
  **************************************************************/
 
-static void TLan_ResetLists( struct net_device *dev )
+static void tlan_reset_lists(struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	int		i;
-	TLanList	*list;
+	struct tlan_list	*list;
 	dma_addr_t	list_phys;
 	struct sk_buff	*skb;
 
-	priv->txHead = 0;
-	priv->txTail = 0;
-	for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-		list = priv->txList + i;
-		list->cStat = TLAN_CSTAT_UNUSED;
+	priv->tx_head = 0;
+	priv->tx_tail = 0;
+	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+		list = priv->tx_list + i;
+		list->c_stat = TLAN_CSTAT_UNUSED;
 		list->buffer[0].address = 0;
 		list->buffer[2].count = 0;
 		list->buffer[2].address = 0;
@@ -1959,169 +1902,169 @@ static void TLan_ResetLists( struct net_device *dev )
 		list->buffer[9].address = 0;
 	}
 
-	priv->rxHead = 0;
-	priv->rxTail = TLAN_NUM_RX_LISTS - 1;
-	for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
-		list = priv->rxList + i;
-		list_phys = priv->rxListDMA + sizeof(TLanList) * i;
-		list->cStat = TLAN_CSTAT_READY;
-		list->frameSize = TLAN_MAX_FRAME_SIZE;
+	priv->rx_head = 0;
+	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+		list = priv->rx_list + i;
+		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+		list->c_stat = TLAN_CSTAT_READY;
+		list->frame_size = TLAN_MAX_FRAME_SIZE;
 		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
 		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
-		if ( !skb ) {
-			pr_err("TLAN: out of memory for received data.\n" );
+		if (!skb) {
+			netdev_err(dev, "Out of memory for received data\n");
 			break;
 		}
 
-		list->buffer[0].address = pci_map_single(priv->pciDev,
+		list->buffer[0].address = pci_map_single(priv->pci_dev,
 							 skb->data,
 							 TLAN_MAX_FRAME_SIZE,
 							 PCI_DMA_FROMDEVICE);
-		TLan_StoreSKB(list, skb);
+		tlan_store_skb(list, skb);
 		list->buffer[1].count = 0;
 		list->buffer[1].address = 0;
-		list->forward = list_phys + sizeof(TLanList);
+		list->forward = list_phys + sizeof(struct tlan_list);
 	}
 
 	/* in case ran out of memory early, clear bits */
 	while (i < TLAN_NUM_RX_LISTS) {
-		TLan_StoreSKB(priv->rxList + i, NULL);
+		tlan_store_skb(priv->rx_list + i, NULL);
 		++i;
 	}
 	list->forward = 0;
 
-} /* TLan_ResetLists */
+}
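The loop above chains the rx descriptors through their forward members, each holding the bus address of the next descriptor so the adapter can walk the ring without CPU involvement, and the final descriptor gets forward = 0 to terminate the chain. A self-contained userspace model of that linking, with ordinary pointers standing in for the DMA addresses derived from rx_list_dma:

#include <stdint.h>
#include <stdio.h>

#define NUM_LISTS 4

struct list_desc {
	uintptr_t forward;	/* bus address of next descriptor, 0 = end */
};

int main(void)
{
	static struct list_desc ring[NUM_LISTS];
	uintptr_t base = (uintptr_t)ring;	/* stands in for rx_list_dma */

	for (int i = 0; i < NUM_LISTS; i++)
		ring[i].forward = base + sizeof(struct list_desc) * (i + 1);
	ring[NUM_LISTS - 1].forward = 0;	/* terminate the chain */

	for (int i = 0; i < NUM_LISTS; i++)
		printf("desc %d -> %#lx\n", i, (unsigned long)ring[i].forward);
	return 0;
}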
 
 
-static void TLan_FreeLists( struct net_device *dev )
+static void tlan_free_lists(struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	int		i;
-	TLanList	*list;
+	struct tlan_list	*list;
 	struct sk_buff	*skb;
 
-	for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-		list = priv->txList + i;
-		skb = TLan_GetSKB(list);
-		if ( skb ) {
+	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+		list = priv->tx_list + i;
+		skb = tlan_get_skb(list);
+		if (skb) {
 			pci_unmap_single(
-				priv->pciDev,
+				priv->pci_dev,
 				list->buffer[0].address,
 				max(skb->len,
 				    (unsigned int)TLAN_MIN_FRAME_SIZE),
 				PCI_DMA_TODEVICE);
-			dev_kfree_skb_any( skb );
+			dev_kfree_skb_any(skb);
 			list->buffer[8].address = 0;
 			list->buffer[9].address = 0;
 		}
 	}
 
-	for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
-		list = priv->rxList + i;
-		skb = TLan_GetSKB(list);
-		if ( skb ) {
-			pci_unmap_single(priv->pciDev,
+	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+		list = priv->rx_list + i;
+		skb = tlan_get_skb(list);
+		if (skb) {
+			pci_unmap_single(priv->pci_dev,
 					 list->buffer[0].address,
 					 TLAN_MAX_FRAME_SIZE,
 					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb_any( skb );
+			dev_kfree_skb_any(skb);
 			list->buffer[8].address = 0;
 			list->buffer[9].address = 0;
 		}
 	}
-} /* TLan_FreeLists */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_PrintDio
+/***************************************************************
+ *	tlan_print_dio
  *
  *	Returns:
  *		Nothing
  *	Parms:
  *		io_base		Base IO port of the device of
  *				which to print DIO registers.
  *
  *	This function prints out all the internal (DIO)
  *	registers of a TLAN chip.
  *
  **************************************************************/
 
-static void TLan_PrintDio( u16 io_base )
+static void tlan_print_dio(u16 io_base)
 {
 	u32 data0, data1;
 	int	i;
 
-	printk( "TLAN:   Contents of internal registers for io base 0x%04hx.\n",
-		io_base );
-	printk( "TLAN:      Off.  +0         +4\n" );
-	for ( i = 0; i < 0x4C; i+= 8 ) {
-		data0 = TLan_DioRead32( io_base, i );
-		data1 = TLan_DioRead32( io_base, i + 0x4 );
-		printk( "TLAN:      0x%02x  0x%08x 0x%08x\n", i, data0, data1 );
+	pr_info("Contents of internal registers for io base 0x%04hx\n",
+		io_base);
+	pr_info("Off.  +0        +4\n");
+	for (i = 0; i < 0x4C; i += 8) {
+		data0 = tlan_dio_read32(io_base, i);
+		data1 = tlan_dio_read32(io_base, i + 0x4);
+		pr_info("0x%02x  0x%08x 0x%08x\n", i, data0, data1);
 	}
 
-} /* TLan_PrintDio */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_PrintList
+/***************************************************************
+ *	TLan_PrintList
  *
  *	Returns:
  *		Nothing
  *	Parms:
- *		list	A pointer to the TLanList structure to
+ *		list	A pointer to the struct tlan_list structure to
  *			be printed.
  *		type	A string to designate type of list,
  *			"Rx" or "Tx".
  *		num	The index of the list.
  *
  *	This function prints out the contents of the list
  *	pointed to by the list parameter.
  *
  **************************************************************/
 
-static void TLan_PrintList( TLanList *list, char *type, int num)
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
 {
 	int i;
 
-	printk( "TLAN:   %s List %d at %p\n", type, num, list );
-	printk( "TLAN:      Forward    = 0x%08x\n", list->forward );
-	printk( "TLAN:      CSTAT      = 0x%04hx\n", list->cStat );
-	printk( "TLAN:      Frame Size = 0x%04hx\n", list->frameSize );
-	/* for ( i = 0; i < 10; i++ ) { */
-	for ( i = 0; i < 2; i++ ) {
-		printk( "TLAN:      Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
-			i, list->buffer[i].count, list->buffer[i].address );
+	pr_info("%s List %d at %p\n", type, num, list);
+	pr_info("   Forward    = 0x%08x\n", list->forward);
+	pr_info("   CSTAT      = 0x%04hx\n", list->c_stat);
+	pr_info("   Frame Size = 0x%04hx\n", list->frame_size);
+	/* for (i = 0; i < 10; i++) { */
+	for (i = 0; i < 2; i++) {
+		pr_info("   Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+			i, list->buffer[i].count, list->buffer[i].address);
 	}
 
-} /* TLan_PrintList */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_ReadAndClearStats
+/***************************************************************
+ *	tlan_read_and_clear_stats
  *
  *	Returns:
  *		Nothing
  *	Parms:
  *		dev	Pointer to device structure of adapter
  *			to which to read stats.
  *		record	Flag indicating whether to add
  *
  *	This functions reads all the internal status registers
  *	of the TLAN chip, which clears them as a side effect.
  *	It then either adds the values to the device's status
  *	struct, or discards them, depending on whether record
  *	is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
  *
  **************************************************************/
 
-static void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
 {
 	u32		tx_good, tx_under;
 	u32		rx_good, rx_over;
@@ -2129,41 +2072,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
 	u32		multi_col, single_col;
 	u32		excess_col, late_col, loss;
 
-	outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
-	tx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
-	tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-	tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
-	tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	tx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
 
-	outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
-	rx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
-	rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-	rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
-	rx_over  = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	rx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+	rx_over  = inb(dev->base_addr + TLAN_DIO_DATA + 3);
 
-	outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
-	def_tx  = inb( dev->base_addr + TLAN_DIO_DATA );
-	def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-	crc     = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-	code    = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+	def_tx  = inb(dev->base_addr + TLAN_DIO_DATA);
+	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	crc     = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+	code    = inb(dev->base_addr + TLAN_DIO_DATA + 3);
 
-	outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
-	multi_col   = inb( dev->base_addr + TLAN_DIO_DATA );
-	multi_col  += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-	single_col  = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-	single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
+	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	multi_col   = inb(dev->base_addr + TLAN_DIO_DATA);
+	multi_col  += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	single_col  = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
 
-	outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
-	excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
-	late_col   = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
-	loss       = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+	late_col   = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+	loss       = inb(dev->base_addr + TLAN_DIO_DATA + 2);
 
-	if ( record ) {
+	if (record) {
 		dev->stats.rx_packets += rx_good;
 		dev->stats.rx_errors  += rx_over + crc + code;
 		dev->stats.tx_packets += tx_good;
 		dev->stats.tx_errors  += tx_under + loss;
-		dev->stats.collisions += multi_col + single_col + excess_col + late_col;
+		dev->stats.collisions += multi_col
+			+ single_col + excess_col + late_col;
 
 		dev->stats.rx_over_errors    += rx_over;
 		dev->stats.rx_crc_errors     += crc;
@@ -2173,39 +2117,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
 		dev->stats.tx_carrier_errors += loss;
 	}
 
-} /* TLan_ReadAndClearStats */
+}
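The function above shows the DIO access pattern used throughout the driver: the register offset is written to TLAN_DIO_ADR with outw(), then successive inb()s of TLAN_DIO_DATA return the register bytes, least significant first. tx_good, for instance, is a 24-bit counter assembled from the first three bytes. A self-contained model of just that little-endian assembly:

#include <stdint.h>
#include <stdio.h>

static uint32_t counter24(const uint8_t b[3])	/* b[0] = LSB, in inb() order */
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) | ((uint32_t)b[2] << 16);
}

int main(void)
{
	/* bytes as read from TLAN_DIO_DATA + 0, + 1, + 2 */
	const uint8_t bytes[3] = { 0x34, 0x12, 0x01 };
	printf("tx_good = 0x%06x\n", (unsigned)counter24(bytes));  /* 0x011234 */
	return 0;
}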
 
 
 
 
-	/***************************************************************
-	 *	TLan_Reset
+/***************************************************************
+ *	TLan_Reset
  *
  *	Returns:
  *		0
  *	Parms:
  *		dev	Pointer to device structure of adapter
  *			to be reset.
  *
  *	This function resets the adapter and it's physical
  *	device.  See Chap. 3, pp. 9-10 of the "ThunderLAN
  *	Programmer's Guide" for details.  The routine tries to
  *	implement what is detailed there, though adjustments
  *	have been made.
  *
  **************************************************************/
 
 static void
-TLan_ResetAdapter( struct net_device *dev )
+tlan_reset_adapter(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	int		i;
 	u32		addr;
 	u32		data;
 	u8		data8;
 
-	priv->tlanFullDuplex = false;
-	priv->phyOnline=0;
+	priv->tlan_full_duplex = false;
+	priv->phy_online = 0;
 	netif_carrier_off(dev);
 
 /*  1.	Assert reset bit. */
@@ -2216,7 +2160,7 @@ TLan_ResetAdapter( struct net_device *dev )
 
 	udelay(1000);
 
-/*  2.	Turn off interrupts. ( Probably isn't necessary ) */
+/*  2.	Turn off interrupts. (Probably isn't necessary) */
 
 	data = inl(dev->base_addr + TLAN_HOST_CMD);
 	data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2168,204 @@ TLan_ResetAdapter( struct net_device *dev )

/* 3. Clear AREGs and HASHs. */

	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
		tlan_dio_write32(dev->base_addr, (u16) i, 0);

/* 4. Setup NetConfig register. */

	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */

	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);

/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);

/* 7. Setup the remaining registers. */

	if (priv->tlan_rev >= 0x30) {
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
	}
	tlan_phy_detect(dev);
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
		data |= TLAN_NET_CFG_BIT;
		if (priv->aui == 1) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
			priv->tlan_full_duplex = true;
		} else {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
		}
	}

	if (priv->phy_num == 0)
		data |= TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
		tlan_finish_reset(dev);
	else
		tlan_phy_power_down(dev);

}

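/* Illustrative sketch: most of the "registers" touched above live
 * behind the DIO window rather than directly in host I/O space -- the
 * internal offset is first written to TLAN_DIO_ADR, and the value then
 * moves through TLAN_DIO_DATA, as in step 6.  A hypothetical helper
 * (not part of this driver) showing the pattern:
 */
static inline u32 example_dio_read32(u16 base_addr, u16 internal_addr)
{
	outw(internal_addr, base_addr + TLAN_DIO_ADR);	/* select register */
	return inl(base_addr + TLAN_DIO_DATA);		/* read the window */
}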
static void
tlan_finish_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u8	data;
	u32	phy;
	u8	sio;
	u16	status;
	u16	partner;
	u16	tlphy_ctl;
	u16	tlphy_par;
	u16	tlphy_id1, tlphy_id2;
	int	i;

	phy = priv->phy[priv->phy_num];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if (priv->tlan_full_duplex)
		data |= TLAN_NET_CMD_DUPLEX;
	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if (priv->phy_num == 0)
		data |= TLAN_NET_MASK_MASK7;
	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);

	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
	    (priv->aui)) {
		status = MII_GS_LINK;
		netdev_info(dev, "Link forced\n");
	} else {
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		udelay(1000);
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		if ((status & MII_GS_LINK) &&
		    /* We only support link info on Nat.Sem. PHY's */
		    (tlphy_id1 == NAT_SEM_ID1) &&
		    (tlphy_id2 == NAT_SEM_ID2)) {
			tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);

			netdev_info(dev,
				    "Link active with %s %uMbps %s-Duplex\n",
				    !(tlphy_par & TLAN_PHY_AN_EN_STAT)
				    ? "forced" : "Autonegotiation enabled,",
				    tlphy_par & TLAN_PHY_SPEED_100
				    ? 100 : 10,
				    tlphy_par & TLAN_PHY_DUPLEX_FULL
				    ? "Full" : "Half");

			if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
				netdev_info(dev, "Partner capability:");
				for (i = 5; i < 10; i++)
					if (partner & (1 << i))
						pr_cont(" %s", media[i-5]);
				pr_cont("\n");
			}

			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
					TLAN_LED_LINK);
#ifdef MONITOR
			/* We have link beat..for now anyway */
			priv->link = 1;
			/*Enabling link beat monitoring */
			tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
#endif
		} else if (status & MII_GS_LINK) {
			netdev_info(dev, "Link active\n");
			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
					TLAN_LED_LINK);
		}
	}

	if (priv->phy_num == 0) {
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
		tlphy_ctl |= TLAN_TC_INTEN;
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
		sio |= TLAN_NET_SIO_MINTEN;
		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
	}

	if (status & MII_GS_LINK) {
		tlan_set_mac(dev, 0, dev->dev_addr);
		priv->phy_online = 1;
		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
			outb((TLAN_HC_REQ_INT >> 8),
			     dev->base_addr + TLAN_HOST_CMD + 1);
		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
		return;
	}
	tlan_set_multicast_list(dev);

}

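/* The partner-capability loop above walks bits 5-9 of the MII_AN_LPA
 * value, which in 802.3 autonegotiation carry the link partner's
 * technology abilities and line up with the media[] string table used
 * here.  A worked example, assuming the standard bit assignment:
 * partner = 0x01e1 has bits 5-8 set, so the loop prints the 10BASE-T,
 * 10BASE-T full-duplex, 100BASE-TX and 100BASE-TX full-duplex entries
 * of media[].
 */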
/***************************************************************
 *	tlan_set_mac
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	Pointer to device structure of adapter
 *			on which to change the AREG.
 *		areg	The AREG to set the address in (0 - 3).
 *		mac	A pointer to an array of chars.  Each
 *			element stores one byte of the address.
 *			I.e., it isn't in ASCII.
 *
 *	This function transfers a MAC address to one of the
 *	TLAN AREGs (address registers).  The TLAN chip locks
 *	the register on writing to offset 0 and unlocks the
 *	register after writing to offset 5.  If NULL is passed
 *	in mac, then the AREG is filled with 0's.
 *
 **************************************************************/

static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
	int i;

	areg *= 6;

	if (mac != NULL) {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, mac[i]);
	} else {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, 0);
	}

}

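/* Usage sketch (a hypothetical wrapper, not a call sequence required
 * by the driver): because the chip locks the AREG on the offset-0
 * write and unlocks it after offset 5, all six bytes must be written
 * in a single tlan_set_mac() call.
 */
static inline void example_set_mac_usage(struct net_device *dev)
{
	tlan_set_mac(dev, 0, dev->dev_addr);	/* lock, write, unlock AREG 0 */
	tlan_set_mac(dev, 3, NULL);		/* fill an unused AREG with 0's */
}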
@@ -2432,205 +2373,199 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
/*****************************************************************************
******************************************************************************

ThunderLAN driver PHY layer routines

******************************************************************************
*****************************************************************************/


/*********************************************************************
 *	tlan_phy_print
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	A pointer to the device structure of the
 *			TLAN device having the PHYs to be detailed.
 *
 *	This function prints the registers of a PHY (aka transceiver).
 *
 ********************************************************************/

static void tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;

	phy = priv->phy[priv->phy_num];

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		netdev_info(dev, "Unmanaged PHY\n");
	} else if (phy <= TLAN_PHY_MAX_ADDR) {
		netdev_info(dev, "PHY 0x%02x\n", phy);
		pr_info("   Off.  +0     +1     +2     +3\n");
		for (i = 0; i < 0x20; i += 4) {
			tlan_mii_read_reg(dev, phy, i, &data0);
			tlan_mii_read_reg(dev, phy, i + 1, &data1);
			tlan_mii_read_reg(dev, phy, i + 2, &data2);
			tlan_mii_read_reg(dev, phy, i + 3, &data3);
			pr_info("   0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
				i, data0, data1, data2, data3);
		}
	} else {
		netdev_info(dev, "Invalid PHY\n");
	}

}

/*********************************************************************
 *	tlan_phy_detect
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	A pointer to the device structure of the adapter
 *			for which the PHY needs to be determined.
 *
 *	So far I've found that adapters which have external PHYs
 *	may also use the internal PHY for part of the functionality
 *	(e.g., AUI/Thinnet).  This function finds out if this TLAN
 *	chip has an internal PHY, and then finds the first external
 *	PHY (starting from address 0), if it exists.
 *
 ********************************************************************/

static void tlan_phy_detect(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16	control;
	u16	hi;
	u16	lo;
	u32	phy;

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		priv->phy_num = 0xffff;
		return;
	}

	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);

	if (hi != 0xffff)
		priv->phy[0] = TLAN_PHY_MAX_ADDR;
	else
		priv->phy[0] = TLAN_PHY_NONE;

	priv->phy[1] = TLAN_PHY_NONE;
	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
		if ((control != 0xffff) ||
		    (hi != 0xffff) || (lo != 0xffff)) {
			TLAN_DBG(TLAN_DEBUG_GNRL,
				 "PHY found at %02x %04x %04x %04x\n",
				 phy, control, hi, lo);
			if ((priv->phy[1] == TLAN_PHY_NONE) &&
			    (phy != TLAN_PHY_MAX_ADDR)) {
				priv->phy[1] = phy;
			}
		}
	}

	if (priv->phy[1] != TLAN_PHY_NONE)
		priv->phy_num = 1;
	else if (priv->phy[0] != TLAN_PHY_NONE)
		priv->phy_num = 0;
	else
		netdev_info(dev, "Cannot initialize device, no PHY was found!\n");

}

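/* Summary of the state left behind by the detection logic above
 * (a restatement of that logic, not new driver state):
 *
 *	phy[0]		internal PHY at TLAN_PHY_MAX_ADDR, or
 *			TLAN_PHY_NONE if nothing answered there
 *	phy[1]		first external PHY found at an address below
 *			TLAN_PHY_MAX_ADDR, or TLAN_PHY_NONE
 *	phy_num		index of the PHY in use; the external PHY (1)
 *			is preferred over the internal one (0) when
 *			both exist
 */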
static void tlan_phy_power_down(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16	value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
	tlan_mii_sync(dev->base_addr);
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	if ((priv->phy_num == 0) &&
	    (priv->phy[1] != TLAN_PHY_NONE) &&
	    (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
		tlan_mii_sync(dev->base_addr);
		tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
	}

	/* Wait for 50 ms and powerup
	 * This is arbitrary.  It is intended to make sure the
	 * transceiver settles.
	 */
	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);

}

static void tlan_phy_power_up(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16	value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK;
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	tlan_mii_sync(dev->base_addr);
	/* Wait for 500 ms and reset the
	 * transceiver.  The TLAN docs say both 50 ms and
	 * 500 ms, so do the longer, just in case.
	 */
	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);

}

static void tlan_phy_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16	phy;
	u16	value;

	phy = priv->phy[priv->phy_num];

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK | MII_GC_RESET;
	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
	tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
	while (value & MII_GC_RESET)
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);

	/* Wait for 500 ms and initialize.
	 * I don't remember why I wait this long.
	 * I've changed this to 50ms, as it seems long enough.
	 */
	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);

}

static void tlan_phy_start_link(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16	ability;
	u16	control;
	u16	data;
@@ -2638,86 +2573,87 @@ static void TLan_PhyStartLink( struct net_device *dev )
	u16	status;
	u16	tctl;

	phy = priv->phy[priv->phy_num];
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);

	if ((status & MII_GS_AUTONEG) &&
	    (!priv->aui)) {
		ability = status >> 11;
		if (priv->speed == TLAN_SPEED_10 &&
		    priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
		} else if (priv->speed == TLAN_SPEED_10 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
		} else {

			/* Set Auto-Neg advertisement */
			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
					   (ability << 5) | 1);
			/* Enable Auto-Neg */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
			/* Restart Auto-Neg */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
			/* Wait for 4 sec for autonegotiation
			 * to complete.  The max spec time is less than this
			 * but the card needs additional time to start AN.
			 * .5 sec should be plenty extra.
			 */
			netdev_info(dev, "Starting autonegotiation\n");
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
			return;
		}

	}

	if ((priv->aui) && (priv->phy_num != 0)) {
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
		return;
	} else if (priv->phy_num == 0) {
		control = 0;
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
		if (priv->aui) {
			tctl |= TLAN_TC_AUISEL;
		} else {
			tctl &= ~TLAN_TC_AUISEL;
			if (priv->duplex == TLAN_DUPLEX_FULL) {
				control |= MII_GC_DUPLEX;
				priv->tlan_full_duplex = true;
			}
			if (priv->speed == TLAN_SPEED_100)
				control |= MII_GC_SPEEDSEL;
		}
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
	}

	/* Wait for 2 sec to give the transceiver time
	 * to establish link.
	 */
	tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);

}

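/* The magic MII_GEN_CTL values written above follow the standard MII
 * control-register bit layout (read against the 802.3 register
 * definition; an interpretation, not driver documentation):
 *
 *	0x0000	10 Mbps, half duplex, autonegotiation disabled
 *	0x0100	duplex bit		-> 10 Mbps, full duplex
 *	0x2000	speed bit		-> 100 Mbps, half duplex
 *	0x2100	speed | duplex		-> 100 Mbps, full duplex
 *	0x1000	autonegotiation enable
 *	0x1200	autonegotiation enable | restart
 */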
static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16	an_adv;
	u16	an_lpa;
	u16	data;
@@ -2725,115 +2661,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
	u16	phy;
	u16	status;

	phy = priv->phy[priv->phy_num];

	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	udelay(1000);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);

	if (!(status & MII_GS_AUTOCMPLT)) {
		/* Wait for 8 sec to give the process
		 * more time.  Perhaps we should fail after a while.
		 */
		if (!priv->neg_be_verbose++) {
			pr_info("Giving autonegotiation more time.\n");
			pr_info("Please check that your adapter has\n");
			pr_info("been properly connected to a HUB or Switch.\n");
			pr_info("Trying to establish link in the background...\n");
		}
		tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
		return;
	}

	netdev_info(dev, "Autonegotiation complete\n");
	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
	mode = an_adv & an_lpa & 0x03E0;
	if (mode & 0x0100)
		priv->tlan_full_duplex = true;
	else if (!(mode & 0x0080) && (mode & 0x0040))
		priv->tlan_full_duplex = true;

	if ((!(mode & 0x0180)) &&
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
	    (priv->phy_num != 0)) {
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
		return;
	}

	if (priv->phy_num == 0) {
		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
		    (an_adv & an_lpa & 0x0040)) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB | MII_GC_DUPLEX);
			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
		} else {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB);
			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
		}
	}

	/* Wait for 100 ms.  No reason in particular.
	 */
	tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);

}

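/* Worked example of the duplex resolution above (assuming the
 * standard 802.3 ability bits: 0x0040 = 10BASE-T FD, 0x0080 =
 * 100BASE-TX HD, 0x0100 = 100BASE-TX FD):
 *
 *	an_adv = 0x01e1, an_lpa = 0x0141:
 *		mode = 0x01e1 & 0x0141 & 0x03e0 = 0x0140,
 *		mode & 0x0100 is set	-> full duplex at 100 Mbps
 *
 *	an_adv = 0x01e1, an_lpa = 0x0061:
 *		mode = 0x0060, !(mode & 0x0080) && (mode & 0x0040)
 *					-> full duplex at 10 Mbps
 */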
#ifdef MONITOR

/*********************************************************************
 *
 *	tlan_phy_monitor
 *
 *	Returns:
 *		None
 *
 *	Params:
 *		dev	The device structure of this device.
 *
 *
 *	This function monitors PHY condition by reading the status
 *	register via the MII bus.  This can be used to give info
 *	about link changes (up/down), and a possible switch to an
 *	alternate media.
 *
 *******************************************************************/

void tlan_phy_monitor(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16	phy;
	u16	phy_status;

	phy = priv->phy[priv->phy_num];

	/* Get PHY status register */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);

	/* Check if link has been lost */
	if (!(phy_status & MII_GS_LINK)) {
		if (priv->link) {
			priv->link = 0;
			printk(KERN_DEBUG "TLAN: %s has lost link\n",
			       dev->name);
			netif_carrier_off(dev);
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
			return;
		}
	}

	/* Link reestablished? */
	if ((phy_status & MII_GS_LINK) && !priv->link) {
		priv->link = 1;
		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
		       dev->name);
		netif_carrier_on(dev);
	}

	/* Set up a new monitor */
	tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
}

#endif /* MONITOR */
@@ -2842,47 +2781,48 @@ void TLan_PhyMonitor( struct net_device *dev )
/*****************************************************************************
******************************************************************************

ThunderLAN driver MII routines

These routines are based on the information in Chap. 2 of the
"ThunderLAN Programmer's Guide", pp. 15-24.

******************************************************************************
*****************************************************************************/

/***************************************************************
 *	tlan_mii_read_reg
 *
 *	Returns:
 *		false	if ack received ok
 *		true	if no ack received or other error
 *
 *	Parms:
 *		dev	The device structure containing
 *			the IO address and interrupt count
 *			for this device.
 *		phy	The address of the PHY to be queried.
 *		reg	The register whose contents are to be
 *			retrieved.
 *		val	A pointer to a variable to store the
 *			retrieved value.
 *
 *	This function uses the TLAN's MII bus to retrieve the contents
 *	of a given register on a PHY.  It sends the appropriate info
 *	and then reads the 16-bit register value from the MII bus via
 *	the TLAN SIO register.
 *
 **************************************************************/

static bool
tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8	nack;
	u16	sio, tmp;
	u32	i;
	bool	err;
	int	minten;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	err = false;
@@ -2892,48 +2832,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* read  (10b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device # */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register # */


	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);	/* change direction */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* clock idle bit */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* wait 300ns */

	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);	/* check for ACK */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);		/* finish ACK */
	if (nack) {					/* no ACK, so fake it */
		for (i = 0; i < 16; i++) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = true;
	} else {					/* ACK, so read data */
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
	}


	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;
@@ -2942,116 +2882,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val

	return err;

}

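/* The read transaction above bit-bangs a standard IEEE 802.3
 * clause-22 management frame over the NetSio register (a summary of
 * the calls above, using the conventional frame layout):
 *
 *	<32 ones>  01     10     <phy:5>  <reg:5>  <Z>  <data:16>
 *	preamble   start  read   address  register TA   value, MSB first
 *
 * tlan_mii_sync() supplies the 32-bit preamble; the turnaround (TA)
 * is the released-bus clock cycle where the PHY drives its ACK (a low
 * data bit), and a high data bit at that point is the NACK handled by
 * the nack branch above.  The write frame differs only in the opcode
 * (01) and in the host driving the turnaround itself before the data.
 */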
/***************************************************************
 *	tlan_mii_send_data
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter in
 *				question.
 *		data		The value to be placed on the MII bus.
 *		num_bits	The number of bits in data that are to
 *				be placed on the MII bus.
 *
 *	This function sends a sequence of bits on the MII
 *	configuration bus.
 *
 **************************************************************/

static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
	u16 sio;
	u32 i;

	if (num_bits == 0)
		return;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);

	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
		if (data & i)
			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
	}

}

/***************************************************************
 *	tlan_mii_sync
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter in
 *				question.
 *
 *	This function syncs all PHYs in terms of the MII configuration
 *	bus.
 *
 **************************************************************/

static void tlan_mii_sync(u16 base_port)
{
	int i;
	u16 sio;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
	for (i = 0; i < 32; i++) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	}

}

/***************************************************************
 *	tlan_mii_write_reg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure for the device
 *			to write to.
 *		phy	The address of the PHY to be written to.
 *		reg	The register whose contents are to be
 *			written.
 *		val	The value to be written to the register.
 *
 *	This function uses the TLAN's MII bus to write the contents of a
 *	given register on a PHY.  It sends the appropriate info and then
 *	writes the 16-bit register value onto the MII configuration bus
 *	via the TLAN SIO register.
 *
 **************************************************************/

static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	u16	sio;
	int	minten;
	unsigned long flags = 0;
	struct tlan_priv *priv = netdev_priv(dev);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3000,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* write (01b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device # */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register # */

	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* send ACK */
	tlan_mii_send_data(dev->base_addr, val, 16);	/* send data */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

}

@@ -3090,229 +3031,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
/*****************************************************************************
******************************************************************************

ThunderLAN driver EEPROM routines

The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
EEPROM.  These functions are based on information in Microchip's
data sheet.  I don't know how well these functions will work with
other EEPROMs.

******************************************************************************
*****************************************************************************/

/***************************************************************
 *	tlan_ee_send_start
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		The IO port base address for the
 *				TLAN device with the EEPROM to
 *				use.
 *
 *	This function sends a start cycle to an EEPROM attached
 *	to a TLAN chip.
 *
 **************************************************************/

static void tlan_ee_send_start(u16 io_base)
{
	u16	sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

}

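/* The 24C02A sits on a two-wire, I2C-style serial bus bit-banged
 * through NetSio.  The sequence above produces a start condition:
 * with the clock (ECLOK) high, data (EDATA) falls, then the clock is
 * dropped -- the idle-to-start transition of that protocol.  A stop
 * is the mirror image (data rising while the clock is high), as done
 * at the end of tlan_ee_send_byte() below.
 */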
3137 /*************************************************************** 3078/***************************************************************
3138 * TLan_EeSendByte 3079 * tlan_ee_send_byte
3139 * 3080 *
3140 * Returns: 3081 * Returns:
3141 * If the correct ack was received, 0, otherwise 1 3082 * If the correct ack was received, 0, otherwise 1
3142 * Parms: io_base The IO port base address for the 3083 * Parms: io_base The IO port base address for the
3143 * TLAN device with the EEPROM to 3084 * TLAN device with the EEPROM to
3144 * use. 3085 * use.
3145 * data The 8 bits of information to 3086 * data The 8 bits of information to
3146 * send to the EEPROM. 3087 * send to the EEPROM.
3147 * stop If TLAN_EEPROM_STOP is passed, a 3088 * stop If TLAN_EEPROM_STOP is passed, a
3148 * stop cycle is sent after the 3089 * stop cycle is sent after the
3149 * byte is sent after the ack is 3090 * byte is sent after the ack is
3150 * read. 3091 * read.
3151 * 3092 *
3152 * This function sends a byte on the serial EEPROM line, 3093 * This function sends a byte on the serial EEPROM line,
3153 * driving the clock to send each bit. The function then 3094 * driving the clock to send each bit. The function then
3154 * reverses transmission direction and reads an acknowledge 3095 * reverses transmission direction and reads an acknowledge
3155 * bit. 3096 * bit.
3156 * 3097 *
3157 **************************************************************/ 3098 **************************************************************/
3158 3099
3159static int TLan_EeSendByte( u16 io_base, u8 data, int stop ) 3100static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
3160{ 3101{
3161 int err; 3102 int err;
3162 u8 place; 3103 u8 place;
3163 u16 sio; 3104 u16 sio;
3164 3105
3165 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3106 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3166 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3107 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3167 3108
3168 /* Assume clock is low, tx is enabled; */ 3109 /* Assume clock is low, tx is enabled; */
3169 for ( place = 0x80; place != 0; place >>= 1 ) { 3110 for (place = 0x80; place != 0; place >>= 1) {
3170 if ( place & data ) 3111 if (place & data)
3171 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3112 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3172 else 3113 else
3173 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3114 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3174 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3115 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3175 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3116 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3176 } 3117 }
3177 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); 3118 tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
3178 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3119 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3179 err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio ); 3120 err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
3180 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3121 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3181 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3122 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3182 3123
3183 if ( ( ! err ) && stop ) { 3124 if ((!err) && stop) {
3184 /* STOP, raise data while clock is high */ 3125 /* STOP, raise data while clock is high */
3185 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3126 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3186 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3127 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3187 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3128 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3188 } 3129 }
3189 3130
3190 return err; 3131 return err;
3191 3132
3192} /* TLan_EeSendByte */ 3133}
3193 3134
3194 3135
3195 3136
3196 3137
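tlan_ee_send_byte() shifts the byte out MSB first, then tristates the transmitter (ETXEN low) and samples EDATA during a ninth clock pulse: 0 means the EEPROM pulled the line low (ack), nonzero means no ack. Callers treat the return value as a failure indicator; an illustrative use (the -EIO mapping is hypothetical, the driver itself returns stage numbers from tlan_ee_read_byte() below):

        /* Illustrative caller: bail out when the EEPROM does not ack. */
        if (tlan_ee_send_byte(io_base, 0xa0, TLAN_EEPROM_ACK))
                return -EIO;    /* no device answered the control byte */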
3197 /*************************************************************** 3138/***************************************************************
3198 * TLan_EeReceiveByte 3139 * tlan_ee_receive_byte
3199 * 3140 *
3200 * Returns: 3141 * Returns:
3201 * Nothing 3142 * Nothing
3202 * Parms: 3143 * Parms:
3203 * io_base The IO port base address for the 3144 * io_base The IO port base address for the
3204 * TLAN device with the EEPROM to 3145 * TLAN device with the EEPROM to
3205 * use. 3146 * use.
3206 * data An address to a char to hold the 3147 * data An address to a char to hold the
3207 * data sent from the EEPROM. 3148 * data sent from the EEPROM.
3208 * stop If TLAN_EEPROM_STOP is passed, a 3149 * stop If TLAN_EEPROM_STOP is passed, a
3209 * stop cycle is sent after the 3150 * stop cycle is sent after the
3210 * byte is received, and no ack is 3151 * byte is received, and no ack is
3211 * sent. 3152 * sent.
3212 * 3153 *
3213 * This function receives 8 bits of data from the EEPROM 3154 * This function receives 8 bits of data from the EEPROM
3214 * over the serial link. It then sends an ack bit, or no 3155 * over the serial link. It then sends an ack bit, or no
3215 * ack and a stop bit. This function is used to retrieve 3156 * ack and a stop bit. This function is used to retrieve
3216 * data after the address of a byte in the EEPROM has been 3157 * data after the address of a byte in the EEPROM has been
3217 * sent. 3158 * sent.
3218 * 3159 *
3219 **************************************************************/ 3160 **************************************************************/
3220 3161
3221static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) 3162static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
3222{ 3163{
3223 u8 place; 3164 u8 place;
3224 u16 sio; 3165 u16 sio;
3225 3166
3226 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3167 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3227 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3168 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3228 *data = 0; 3169 *data = 0;
3229 3170
3230 /* Assume clock is low, tx is enabled; */ 3171 /* Assume clock is low, tx is enabled; */
3231 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); 3172 tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
3232 for ( place = 0x80; place; place >>= 1 ) { 3173 for (place = 0x80; place; place >>= 1) {
3233 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3174 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3234 if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) ) 3175 if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
3235 *data |= place; 3176 *data |= place;
3236 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3177 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3237 } 3178 }
3238 3179
3239 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3180 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3240 if ( ! stop ) { 3181 if (!stop) {
3241 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */ 3182 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
3242 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3183 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3243 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3184 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3244 } else { 3185 } else {
3245 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ 3186 tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
3246 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3187 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3247 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3188 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3248 /* STOP, raise data while clock is high */ 3189 /* STOP, raise data while clock is high */
3249 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3190 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3250 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3191 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3251 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3192 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3252 } 3193 }
3253 3194
3254} /* TLan_EeReceiveByte */ 3195}
3255 3196
3256 3197
3257 3198
3258 3199
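The ack/no-ack choice at the end of tlan_ee_receive_byte() is what an I2C master uses to either continue or terminate a read: acking asks the EEPROM for another byte, while no-ack plus STOP ends the transfer. The driver only ever reads one byte at a time, but a hypothetical sequential read built on the same primitive would ack every byte except the last; a sketch, assuming the existing TLAN_EEPROM_ACK/TLAN_EEPROM_STOP flags:

/* Hypothetical multi-byte read, not in the driver: ack all but the last. */
static void tlan_ee_receive_block(u16 io_base, u8 *buf, int len)
{
        int i;

        for (i = 0; i < len; i++)
                tlan_ee_receive_byte(io_base, &buf[i],
                                     (i == len - 1) ? TLAN_EEPROM_STOP
                                                    : TLAN_EEPROM_ACK);
}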
3259 /*************************************************************** 3200/***************************************************************
3260 * TLan_EeReadByte 3201 * tlan_ee_read_byte
3261 * 3202 *
3262 * Returns: 3203 * Returns:
3263 * No error = 0, else, the stage at which the error 3204 * No error = 0, else, the stage at which the error
3264 * occurred. 3205 * occurred.
3265 * Parms: 3206 * Parms:
3266 * io_base The IO port base address for the 3207 * io_base The IO port base address for the
3267 * TLAN device with the EEPROM to 3208 * TLAN device with the EEPROM to
3268 * use. 3209 * use.
3269 * ee_addr The address of the byte in the 3210 * ee_addr The address of the byte in the
3270 * EEPROM whose contents are to be 3211 * EEPROM whose contents are to be
3271 * retrieved. 3212 * retrieved.
3272 * data An address to a char to hold the 3213 * data An address to a char to hold the
3273 * data obtained from the EEPROM. 3214 * data obtained from the EEPROM.
3274 * 3215 *
3275 * This function reads a byte of information from a byte 3216 * This function reads a byte of information from a byte
3276 * cell in the EEPROM. 3217 * cell in the EEPROM.
3277 * 3218 *
3278 **************************************************************/ 3219 **************************************************************/
3279 3220
3280static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) 3221static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
3281{ 3222{
3282 int err; 3223 int err;
3283 TLanPrivateInfo *priv = netdev_priv(dev); 3224 struct tlan_priv *priv = netdev_priv(dev);
3284 unsigned long flags = 0; 3225 unsigned long flags = 0;
3285 int ret=0; 3226 int ret = 0;
3286 3227
3287 spin_lock_irqsave(&priv->lock, flags); 3228 spin_lock_irqsave(&priv->lock, flags);
3288 3229
3289 TLan_EeSendStart( dev->base_addr ); 3230 tlan_ee_send_start(dev->base_addr);
3290 err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK ); 3231 err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
3291 if (err) 3232 if (err) {
3292 { 3233 ret = 1;
3293 ret=1;
3294 goto fail; 3234 goto fail;
3295 } 3235 }
3296 err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK ); 3236 err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
3297 if (err) 3237 if (err) {
3298 { 3238 ret = 2;
3299 ret=2;
3300 goto fail; 3239 goto fail;
3301 } 3240 }
3302 TLan_EeSendStart( dev->base_addr ); 3241 tlan_ee_send_start(dev->base_addr);
3303 err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK ); 3242 err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
3304 if (err) 3243 if (err) {
3305 { 3244 ret = 3;
3306 ret=3;
3307 goto fail; 3245 goto fail;
3308 } 3246 }
3309 TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP ); 3247 tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
3310fail: 3248fail:
3311 spin_unlock_irqrestore(&priv->lock, flags); 3249 spin_unlock_irqrestore(&priv->lock, flags);
3312 3250
3313 return ret; 3251 return ret;
3314 3252
3315} /* TLan_EeReadByte */ 3253}
3316 3254
3317 3255
3318 3256
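Taken together, tlan_ee_read_byte() is the standard serial-EEPROM random-read transaction, with the three nonzero return values identifying which acked phase failed. Annotated as a sketch (0xa0/0xa1 are the usual 24C0x control bytes: device code 1010, address 000, R/W bit clear and set respectively):

        /*
         * START, 0xa0, ack          dummy write selects the EEPROM   (fail -> 1)
         * ee_addr, ack              loads its internal address pointer (fail -> 2)
         * START, 0xa1, ack          repeated start switches to read  (fail -> 3)
         * data byte, no-ack, STOP   one byte in, transaction ends
         */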
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced774e..5fc98a8e488 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
20 ********************************************************************/ 20 ********************************************************************/
21 21
22 22
23#include <asm/io.h> 23#include <linux/io.h>
24#include <asm/types.h> 24#include <linux/types.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26 26
27 27
@@ -40,8 +40,11 @@
40#define TLAN_IGNORE 0 40#define TLAN_IGNORE 0
41#define TLAN_RECORD 1 41#define TLAN_RECORD 1
42 42
43#define TLAN_DBG(lvl, format, args...) \ 43#define TLAN_DBG(lvl, format, args...) \
44 do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0) 44 do { \
45 if (debug&lvl) \
46 printk(KERN_DEBUG "TLAN: " format, ##args); \
47 } while (0)
45 48
46#define TLAN_DEBUG_GNRL 0x0001 49#define TLAN_DEBUG_GNRL 0x0001
47#define TLAN_DEBUG_TX 0x0002 50#define TLAN_DEBUG_TX 0x0002
@@ -50,7 +53,8 @@
50#define TLAN_DEBUG_PROBE 0x0010 53#define TLAN_DEBUG_PROBE 0x0010
51 54
52#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */ 55#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
53#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */ 56#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
57 at a time */
54 58
55 59
56 /***************************************************************** 60 /*****************************************************************
@@ -70,13 +74,13 @@
70#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 74#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
71#endif 75#endif
72 76
73typedef struct tlan_adapter_entry { 77struct tlan_adapter_entry {
74 u16 vendorId; 78 u16 vendor_id;
75 u16 deviceId; 79 u16 device_id;
76 char *deviceLabel; 80 char *device_label;
77 u32 flags; 81 u32 flags;
78 u16 addrOfs; 82 u16 addr_ofs;
79} TLanAdapterEntry; 83};
80 84
81#define TLAN_ADAPTER_NONE 0x00000000 85#define TLAN_ADAPTER_NONE 0x00000000
82#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001 86#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
129#define TLAN_CSTAT_DP_PR 0x0100 133#define TLAN_CSTAT_DP_PR 0x0100
130 134
131 135
132typedef struct tlan_buffer_ref_tag { 136struct tlan_buffer {
133 u32 count; 137 u32 count;
134 u32 address; 138 u32 address;
135} TLanBufferRef; 139};
136 140
137 141
138typedef struct tlan_list_tag { 142struct tlan_list {
139 u32 forward; 143 u32 forward;
140 u16 cStat; 144 u16 c_stat;
141 u16 frameSize; 145 u16 frame_size;
142 TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST]; 146 struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
143} TLanList; 147};
144 148
145 149
146typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; 150typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
164 * 168 *
165 ****************************************************************/ 169 ****************************************************************/
166 170
167typedef struct tlan_private_tag { 171struct tlan_priv {
168 struct net_device *nextDevice; 172 struct net_device *next_device;
169 struct pci_dev *pciDev; 173 struct pci_dev *pci_dev;
170 struct net_device *dev; 174 struct net_device *dev;
171 void *dmaStorage; 175 void *dma_storage;
172 dma_addr_t dmaStorageDMA; 176 dma_addr_t dma_storage_dma;
173 unsigned int dmaSize; 177 unsigned int dma_size;
174 u8 *padBuffer; 178 u8 *pad_buffer;
175 TLanList *rxList; 179 struct tlan_list *rx_list;
176 dma_addr_t rxListDMA; 180 dma_addr_t rx_list_dma;
177 u8 *rxBuffer; 181 u8 *rx_buffer;
178 dma_addr_t rxBufferDMA; 182 dma_addr_t rx_buffer_dma;
179 u32 rxHead; 183 u32 rx_head;
180 u32 rxTail; 184 u32 rx_tail;
181 u32 rxEocCount; 185 u32 rx_eoc_count;
182 TLanList *txList; 186 struct tlan_list *tx_list;
183 dma_addr_t txListDMA; 187 dma_addr_t tx_list_dma;
184 u8 *txBuffer; 188 u8 *tx_buffer;
185 dma_addr_t txBufferDMA; 189 dma_addr_t tx_buffer_dma;
186 u32 txHead; 190 u32 tx_head;
187 u32 txInProgress; 191 u32 tx_in_progress;
188 u32 txTail; 192 u32 tx_tail;
189 u32 txBusyCount; 193 u32 tx_busy_count;
190 u32 phyOnline; 194 u32 phy_online;
191 u32 timerSetAt; 195 u32 timer_set_at;
192 u32 timerType; 196 u32 timer_type;
193 struct timer_list timer; 197 struct timer_list timer;
194 struct board *adapter; 198 struct board *adapter;
195 u32 adapterRev; 199 u32 adapter_rev;
196 u32 aui; 200 u32 aui;
197 u32 debug; 201 u32 debug;
198 u32 duplex; 202 u32 duplex;
199 u32 phy[2]; 203 u32 phy[2];
200 u32 phyNum; 204 u32 phy_num;
201 u32 speed; 205 u32 speed;
202 u8 tlanRev; 206 u8 tlan_rev;
203 u8 tlanFullDuplex; 207 u8 tlan_full_duplex;
204 spinlock_t lock; 208 spinlock_t lock;
205 u8 link; 209 u8 link;
206 u8 is_eisa; 210 u8 is_eisa;
207 struct work_struct tlan_tqueue; 211 struct work_struct tlan_tqueue;
208 u8 neg_be_verbose; 212 u8 neg_be_verbose;
209} TLanPrivateInfo; 213};
210 214
211 215
212 216
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
247 ****************************************************************/ 251 ****************************************************************/
248 252
249#define TLAN_HOST_CMD 0x00 253#define TLAN_HOST_CMD 0x00
250#define TLAN_HC_GO 0x80000000 254#define TLAN_HC_GO 0x80000000
251#define TLAN_HC_STOP 0x40000000 255#define TLAN_HC_STOP 0x40000000
252#define TLAN_HC_ACK 0x20000000 256#define TLAN_HC_ACK 0x20000000
253#define TLAN_HC_CS_MASK 0x1FE00000 257#define TLAN_HC_CS_MASK 0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
283#define TLAN_NET_CMD_TRFRAM 0x02 287#define TLAN_NET_CMD_TRFRAM 0x02
284#define TLAN_NET_CMD_TXPACE 0x01 288#define TLAN_NET_CMD_TXPACE 0x01
285#define TLAN_NET_SIO 0x01 289#define TLAN_NET_SIO 0x01
286#define TLAN_NET_SIO_MINTEN 0x80 290#define TLAN_NET_SIO_MINTEN 0x80
287#define TLAN_NET_SIO_ECLOK 0x40 291#define TLAN_NET_SIO_ECLOK 0x40
288#define TLAN_NET_SIO_ETXEN 0x20 292#define TLAN_NET_SIO_ETXEN 0x20
289#define TLAN_NET_SIO_EDATA 0x10 293#define TLAN_NET_SIO_EDATA 0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
304#define TLAN_NET_MASK_MASK4 0x10 308#define TLAN_NET_MASK_MASK4 0x10
305#define TLAN_NET_MASK_RSRVD 0x0F 309#define TLAN_NET_MASK_RSRVD 0x0F
306#define TLAN_NET_CONFIG 0x04 310#define TLAN_NET_CONFIG 0x04
307#define TLAN_NET_CFG_RCLK 0x8000 311#define TLAN_NET_CFG_RCLK 0x8000
308#define TLAN_NET_CFG_TCLK 0x4000 312#define TLAN_NET_CFG_TCLK 0x4000
309#define TLAN_NET_CFG_BIT 0x2000 313#define TLAN_NET_CFG_BIT 0x2000
310#define TLAN_NET_CFG_RXCRC 0x1000 314#define TLAN_NET_CFG_RXCRC 0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
372/* Generic MII/PHY Registers */ 376/* Generic MII/PHY Registers */
373 377
374#define MII_GEN_CTL 0x00 378#define MII_GEN_CTL 0x00
375#define MII_GC_RESET 0x8000 379#define MII_GC_RESET 0x8000
376#define MII_GC_LOOPBK 0x4000 380#define MII_GC_LOOPBK 0x4000
377#define MII_GC_SPEEDSEL 0x2000 381#define MII_GC_SPEEDSEL 0x2000
378#define MII_GC_AUTOENB 0x1000 382#define MII_GC_AUTOENB 0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
397#define MII_GS_EXTCAP 0x0001 401#define MII_GS_EXTCAP 0x0001
398#define MII_GEN_ID_HI 0x02 402#define MII_GEN_ID_HI 0x02
399#define MII_GEN_ID_LO 0x03 403#define MII_GEN_ID_LO 0x03
400#define MII_GIL_OUI 0xFC00 404#define MII_GIL_OUI 0xFC00
401#define MII_GIL_MODEL 0x03F0 405#define MII_GIL_MODEL 0x03F0
402#define MII_GIL_REVISION 0x000F 406#define MII_GIL_REVISION 0x000F
403#define MII_AN_ADV 0x04 407#define MII_AN_ADV 0x04
404#define MII_AN_LPA 0x05 408#define MII_AN_LPA 0x05
405#define MII_AN_EXP 0x06 409#define MII_AN_EXP 0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
408 412
409#define TLAN_TLPHY_ID 0x10 413#define TLAN_TLPHY_ID 0x10
410#define TLAN_TLPHY_CTL 0x11 414#define TLAN_TLPHY_CTL 0x11
411#define TLAN_TC_IGLINK 0x8000 415#define TLAN_TC_IGLINK 0x8000
412#define TLAN_TC_SWAPOL 0x4000 416#define TLAN_TC_SWAPOL 0x4000
413#define TLAN_TC_AUISEL 0x2000 417#define TLAN_TC_AUISEL 0x2000
414#define TLAN_TC_SQEEN 0x1000 418#define TLAN_TC_SQEEN 0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
435#define LEVEL1_ID1 0x7810 439#define LEVEL1_ID1 0x7810
436#define LEVEL1_ID2 0x0000 440#define LEVEL1_ID2 0x0000
437 441
438#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0 442#define CIRC_INC(a, b) if (++a >= b) a = 0
439 443
440/* Routines to access internal registers. */ 444/* Routines to access internal registers. */
441 445
442static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr) 446static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
443{ 447{
444 outw(internal_addr, base_addr + TLAN_DIO_ADR); 448 outw(internal_addr, base_addr + TLAN_DIO_ADR);
445 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)); 449 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
446 450
447} /* TLan_DioRead8 */ 451}
448 452
449 453
450 454
451 455
452static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr) 456static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
453{ 457{
454 outw(internal_addr, base_addr + TLAN_DIO_ADR); 458 outw(internal_addr, base_addr + TLAN_DIO_ADR);
455 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)); 459 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
456 460
457} /* TLan_DioRead16 */ 461}
458 462
459 463
460 464
461 465
462static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr) 466static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
463{ 467{
464 outw(internal_addr, base_addr + TLAN_DIO_ADR); 468 outw(internal_addr, base_addr + TLAN_DIO_ADR);
465 return inl(base_addr + TLAN_DIO_DATA); 469 return inl(base_addr + TLAN_DIO_DATA);
466 470
467} /* TLan_DioRead32 */ 471}
468 472
469 473
470 474
471 475
472static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data) 476static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
473{ 477{
474 outw(internal_addr, base_addr + TLAN_DIO_ADR); 478 outw(internal_addr, base_addr + TLAN_DIO_ADR);
475 outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3)); 479 outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
479 483
480 484
481 485
482static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data) 486static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
483{ 487{
484 outw(internal_addr, base_addr + TLAN_DIO_ADR); 488 outw(internal_addr, base_addr + TLAN_DIO_ADR);
485 outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); 489 outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
489 493
490 494
491 495
492static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data) 496static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
493{ 497{
494 outw(internal_addr, base_addr + TLAN_DIO_ADR); 498 outw(internal_addr, base_addr + TLAN_DIO_ADR);
495 outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); 499 outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
496 500
497} 501}
498 502
499#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port) 503#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
500#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit)) 504#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
501#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port) 505#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
502 506
503/* 507/*
504 * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those 508 * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
506 * 510 *
507 * The original code was: 511 * The original code was:
508 * 512 *
509 * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); } 513 * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); }
510 * 514 *
511 * #define XOR8( a, b, c, d, e, f, g, h ) \ 515 * #define XOR8(a, b, c, d, e, f, g, h) \
512 * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) ) 516 * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
513 * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) ) 517 * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
514 * 518 *
515 * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), 519 * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
516 * DA(a,30), DA(a,36), DA(a,42) ); 520 * DA(a,30), DA(a,36), DA(a,42));
517 * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), 521 * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
518 * DA(a,31), DA(a,37), DA(a,43) ) << 1; 522 * DA(a,31), DA(a,37), DA(a,43)) << 1;
519 * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), 523 * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
520 * DA(a,32), DA(a,38), DA(a,44) ) << 2; 524 * DA(a,32), DA(a,38), DA(a,44)) << 2;
521 * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), 525 * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
522 * DA(a,33), DA(a,39), DA(a,45) ) << 3; 526 * DA(a,33), DA(a,39), DA(a,45)) << 3;
523 * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), 527 * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
524 * DA(a,34), DA(a,40), DA(a,46) ) << 4; 528 * DA(a,34), DA(a,40), DA(a,46)) << 4;
525 * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), 529 * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
526 * DA(a,35), DA(a,41), DA(a,47) ) << 5; 530 * DA(a,35), DA(a,41), DA(a,47)) << 5;
527 * 531 *
528 */ 532 */
529static inline u32 TLan_HashFunc( const u8 *a ) 533static inline u32 tlan_hash_func(const u8 *a)
530{ 534{
531 u8 hash; 535 u8 hash;
532 536
533 hash = (a[0]^a[3]); /* & 077 */ 537 hash = (a[0]^a[3]); /* & 077 */
534 hash ^= ((a[0]^a[3])>>6); /* & 003 */ 538 hash ^= ((a[0]^a[3])>>6); /* & 003 */
535 hash ^= ((a[1]^a[4])<<2); /* & 074 */ 539 hash ^= ((a[1]^a[4])<<2); /* & 074 */
536 hash ^= ((a[1]^a[4])>>4); /* & 017 */ 540 hash ^= ((a[1]^a[4])>>4); /* & 017 */
537 hash ^= ((a[2]^a[5])<<4); /* & 060 */ 541 hash ^= ((a[2]^a[5])<<4); /* & 060 */
538 hash ^= ((a[2]^a[5])>>2); /* & 077 */ 542 hash ^= ((a[2]^a[5])>>2); /* & 077 */
539 543
540 return hash & 077; 544 return hash & 077;
541} 545}
542#endif 546#endif
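The folded-XOR body of tlan_hash_func() is equivalent to the XOR8/DA formulation quoted in the comment above it: output bit k is the XOR of address bits k, k+6, ..., k+42, i.e. the eight 6-bit groups of the 48-bit address XORed together. A quick host-side check of that equivalence, as a hypothetical standalone test (not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* Reference form: XOR the eight 6-bit groups of the address, bit by bit. */
static uint32_t hash_ref(const uint8_t *a)
{
        uint32_t hash = 0;
        int bit;

        for (bit = 0; bit < 48; bit++)
                hash ^= ((a[bit / 8] >> (bit % 8)) & 1) << (bit % 6);
        return hash & 077;
}

int main(void)
{
        /* An IPv4 multicast MAC; both formulations yield octal 046 here. */
        const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("%#o\n", hash_ref(mcast));
        return 0;
}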
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b100bd50a0d..f5e9ac00a07 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -34,6 +34,8 @@
34 * Modifications for 2.3.99-pre5 kernel. 34 * Modifications for 2.3.99-pre5 kernel.
35 */ 35 */
36 36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
37#define DRV_NAME "tun" 39#define DRV_NAME "tun"
38#define DRV_VERSION "1.6" 40#define DRV_VERSION "1.6"
39#define DRV_DESCRIPTION "Universal TUN/TAP device driver" 41#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
@@ -76,11 +78,27 @@
76#ifdef TUN_DEBUG 78#ifdef TUN_DEBUG
77static int debug; 79static int debug;
78 80
79#define DBG if(tun->debug)printk 81#define tun_debug(level, tun, fmt, args...) \
80#define DBG1 if(debug==2)printk 82do { \
83 if (tun->debug) \
84 netdev_printk(level, tun->dev, fmt, ##args); \
85} while (0)
86#define DBG1(level, fmt, args...) \
87do { \
88 if (debug == 2) \
89 printk(level fmt, ##args); \
90} while (0)
81#else 91#else
82#define DBG( a... ) 92#define tun_debug(level, tun, fmt, args...) \
83#define DBG1( a... ) 93do { \
94 if (0) \
95 netdev_printk(level, tun->dev, fmt, ##args); \
96} while (0)
97#define DBG1(level, fmt, args...) \
98do { \
99 if (0) \
100 printk(level fmt, ##args); \
101} while (0)
84#endif 102#endif
85 103
86#define FLT_EXACT_COUNT 8 104#define FLT_EXACT_COUNT 8
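The replacement macros keep their arguments alive in the !TUN_DEBUG build: unlike the old empty-body DBG()/DBG1(), the if (0) form lets the compiler type-check the format string and arguments while discarding the dead call, so debug call sites can no longer bit-rot silently. The pattern in miniature (an illustration, not tun.c code):

#define my_debug(fmt, args...)                          \
do {                                                    \
        if (0)                                          \
                printk(KERN_DEBUG fmt, ##args);         \
} while (0)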
@@ -205,7 +223,7 @@ static void tun_put(struct tun_struct *tun)
205 tun_detach(tfile->tun); 223 tun_detach(tfile->tun);
206} 224}
207 225
208/* TAP filterting */ 226/* TAP filtering */
209static void addr_hash_set(u32 *mask, const u8 *addr) 227static void addr_hash_set(u32 *mask, const u8 *addr)
210{ 228{
211 int n = ether_crc(ETH_ALEN, addr) >> 26; 229 int n = ether_crc(ETH_ALEN, addr) >> 26;
@@ -360,7 +378,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
360{ 378{
361 struct tun_struct *tun = netdev_priv(dev); 379 struct tun_struct *tun = netdev_priv(dev);
362 380
363 DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); 381 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
364 382
365 /* Drop packet if interface is not attached */ 383 /* Drop packet if interface is not attached */
366 if (!tun->tfile) 384 if (!tun->tfile)
@@ -499,7 +517,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
499 517
500 sk = tun->socket.sk; 518 sk = tun->socket.sk;
501 519
502 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 520 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
503 521
504 poll_wait(file, &tun->wq.wait, wait); 522 poll_wait(file, &tun->wq.wait, wait);
505 523
@@ -690,7 +708,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
690 if (!tun) 708 if (!tun)
691 return -EBADFD; 709 return -EBADFD;
692 710
693 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); 711 tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
694 712
695 result = tun_get_user(tun, iv, iov_length(iv, count), 713 result = tun_get_user(tun, iv, iov_length(iv, count),
696 file->f_flags & O_NONBLOCK); 714 file->f_flags & O_NONBLOCK);
@@ -739,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
739 else if (sinfo->gso_type & SKB_GSO_UDP) 757 else if (sinfo->gso_type & SKB_GSO_UDP)
740 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; 758 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
741 else { 759 else {
742 printk(KERN_ERR "tun: unexpected GSO type: " 760 pr_err("unexpected GSO type: "
743 "0x%x, gso_size %d, hdr_len %d\n", 761 "0x%x, gso_size %d, hdr_len %d\n",
744 sinfo->gso_type, gso.gso_size, 762 sinfo->gso_type, gso.gso_size,
745 gso.hdr_len); 763 gso.hdr_len);
@@ -786,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
786 struct sk_buff *skb; 804 struct sk_buff *skb;
787 ssize_t ret = 0; 805 ssize_t ret = 0;
788 806
789 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 807 tun_debug(KERN_INFO, tun, "tun_chr_read\n");
790 808
791 add_wait_queue(&tun->wq.wait, &wait); 809 add_wait_queue(&tun->wq.wait, &wait);
792 while (len) { 810 while (len) {
@@ -1083,7 +1101,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1083 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || 1101 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1084 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1102 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1085 device_create_file(&tun->dev->dev, &dev_attr_group)) 1103 device_create_file(&tun->dev->dev, &dev_attr_group))
1086 printk(KERN_ERR "Failed to create tun sysfs files\n"); 1104 pr_err("Failed to create tun sysfs files\n");
1087 1105
1088 sk->sk_destruct = tun_sock_destruct; 1106 sk->sk_destruct = tun_sock_destruct;
1089 1107
@@ -1092,7 +1110,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1092 goto failed; 1110 goto failed;
1093 } 1111 }
1094 1112
1095 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); 1113 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1096 1114
1097 if (ifr->ifr_flags & IFF_NO_PI) 1115 if (ifr->ifr_flags & IFF_NO_PI)
1098 tun->flags |= TUN_NO_PI; 1116 tun->flags |= TUN_NO_PI;
@@ -1129,7 +1147,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1129static int tun_get_iff(struct net *net, struct tun_struct *tun, 1147static int tun_get_iff(struct net *net, struct tun_struct *tun,
1130 struct ifreq *ifr) 1148 struct ifreq *ifr)
1131{ 1149{
1132 DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); 1150 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1133 1151
1134 strcpy(ifr->ifr_name, tun->dev->name); 1152 strcpy(ifr->ifr_name, tun->dev->name);
1135 1153
@@ -1142,7 +1160,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
1142 * privs required. */ 1160 * privs required. */
1143static int set_offload(struct net_device *dev, unsigned long arg) 1161static int set_offload(struct net_device *dev, unsigned long arg)
1144{ 1162{
1145 unsigned int old_features, features; 1163 u32 old_features, features;
1146 1164
1147 old_features = dev->features; 1165 old_features = dev->features;
1148 /* Unset features, set them as we chew on the arg. */ 1166 /* Unset features, set them as we chew on the arg. */
@@ -1229,7 +1247,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1229 if (!tun) 1247 if (!tun)
1230 goto unlock; 1248 goto unlock;
1231 1249
1232 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 1250 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
1233 1251
1234 ret = 0; 1252 ret = 0;
1235 switch (cmd) { 1253 switch (cmd) {
@@ -1249,8 +1267,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1249 else 1267 else
1250 tun->flags &= ~TUN_NOCHECKSUM; 1268 tun->flags &= ~TUN_NOCHECKSUM;
1251 1269
1252 DBG(KERN_INFO "%s: checksum %s\n", 1270 tun_debug(KERN_INFO, tun, "checksum %s\n",
1253 tun->dev->name, arg ? "disabled" : "enabled"); 1271 arg ? "disabled" : "enabled");
1254 break; 1272 break;
1255 1273
1256 case TUNSETPERSIST: 1274 case TUNSETPERSIST:
@@ -1260,33 +1278,34 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1260 else 1278 else
1261 tun->flags &= ~TUN_PERSIST; 1279 tun->flags &= ~TUN_PERSIST;
1262 1280
1263 DBG(KERN_INFO "%s: persist %s\n", 1281 tun_debug(KERN_INFO, tun, "persist %s\n",
1264 tun->dev->name, arg ? "enabled" : "disabled"); 1282 arg ? "enabled" : "disabled");
1265 break; 1283 break;
1266 1284
1267 case TUNSETOWNER: 1285 case TUNSETOWNER:
1268 /* Set owner of the device */ 1286 /* Set owner of the device */
1269 tun->owner = (uid_t) arg; 1287 tun->owner = (uid_t) arg;
1270 1288
1271 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); 1289 tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
1272 break; 1290 break;
1273 1291
1274 case TUNSETGROUP: 1292 case TUNSETGROUP:
1275 /* Set group of the device */ 1293 /* Set group of the device */
1276 tun->group= (gid_t) arg; 1294 tun->group= (gid_t) arg;
1277 1295
1278 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group); 1296 tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
1279 break; 1297 break;
1280 1298
1281 case TUNSETLINK: 1299 case TUNSETLINK:
1282 /* Only allow setting the type when the interface is down */ 1300 /* Only allow setting the type when the interface is down */
1283 if (tun->dev->flags & IFF_UP) { 1301 if (tun->dev->flags & IFF_UP) {
1284 DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", 1302 tun_debug(KERN_INFO, tun,
1285 tun->dev->name); 1303 "Linktype set failed because interface is up\n");
1286 ret = -EBUSY; 1304 ret = -EBUSY;
1287 } else { 1305 } else {
1288 tun->dev->type = (int) arg; 1306 tun->dev->type = (int) arg;
1289 DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); 1307 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1308 tun->dev->type);
1290 ret = 0; 1309 ret = 0;
1291 } 1310 }
1292 break; 1311 break;
@@ -1318,8 +1337,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1318 1337
1319 case SIOCSIFHWADDR: 1338 case SIOCSIFHWADDR:
1320 /* Set hw address */ 1339 /* Set hw address */
1321 DBG(KERN_DEBUG "%s: set hw address: %pM\n", 1340 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
1322 tun->dev->name, ifr.ifr_hwaddr.sa_data); 1341 ifr.ifr_hwaddr.sa_data);
1323 1342
1324 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 1343 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1325 break; 1344 break;
@@ -1433,7 +1452,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1433 if (!tun) 1452 if (!tun)
1434 return -EBADFD; 1453 return -EBADFD;
1435 1454
1436 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); 1455 tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
1437 1456
1438 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) 1457 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1439 goto out; 1458 goto out;
@@ -1455,7 +1474,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1455{ 1474{
1456 struct tun_file *tfile; 1475 struct tun_file *tfile;
1457 1476
1458 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 1477 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
1459 1478
1460 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); 1479 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1461 if (!tfile) 1480 if (!tfile)
@@ -1476,7 +1495,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
1476 if (tun) { 1495 if (tun) {
1477 struct net_device *dev = tun->dev; 1496 struct net_device *dev = tun->dev;
1478 1497
1479 DBG(KERN_INFO "%s: tun_chr_close\n", dev->name); 1498 tun_debug(KERN_INFO, tun, "tun_chr_close\n");
1480 1499
1481 __tun_detach(tun); 1500 __tun_detach(tun);
1482 1501
@@ -1607,18 +1626,18 @@ static int __init tun_init(void)
1607{ 1626{
1608 int ret = 0; 1627 int ret = 0;
1609 1628
1610 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 1629 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1611 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); 1630 pr_info("%s\n", DRV_COPYRIGHT);
1612 1631
1613 ret = rtnl_link_register(&tun_link_ops); 1632 ret = rtnl_link_register(&tun_link_ops);
1614 if (ret) { 1633 if (ret) {
1615 printk(KERN_ERR "tun: Can't register link_ops\n"); 1634 pr_err("Can't register link_ops\n");
1616 goto err_linkops; 1635 goto err_linkops;
1617 } 1636 }
1618 1637
1619 ret = misc_register(&tun_miscdev); 1638 ret = misc_register(&tun_miscdev);
1620 if (ret) { 1639 if (ret) {
1621 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); 1640 pr_err("Can't register misc device %d\n", TUN_MINOR);
1622 goto err_misc; 1641 goto err_misc;
1623 } 1642 }
1624 return 0; 1643 return 0;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index a3c46f6a15e..7fa5ec2de94 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
123#include <linux/in6.h> 123#include <linux/in6.h>
124#include <linux/dma-mapping.h> 124#include <linux/dma-mapping.h>
125#include <linux/firmware.h> 125#include <linux/firmware.h>
126#include <generated/utsrelease.h>
127 126
128#include "typhoon.h" 127#include "typhoon.h"
129 128
130MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); 129MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
131MODULE_VERSION(UTS_RELEASE); 130MODULE_VERSION("1.0");
132MODULE_LICENSE("GPL"); 131MODULE_LICENSE("GPL");
133MODULE_FIRMWARE(FIRMWARE_NAME); 132MODULE_FIRMWARE(FIRMWARE_NAME);
134MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); 133MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 715e7b47e7e..ef041057d9d 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3740,7 +3740,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
3740#endif 3740#endif
3741}; 3741};
3742 3742
3743static int ucc_geth_probe(struct platform_device* ofdev, const struct of_device_id *match) 3743static int ucc_geth_probe(struct platform_device* ofdev)
3744{ 3744{
3745 struct device *device = &ofdev->dev; 3745 struct device *device = &ofdev->dev;
3746 struct device_node *np = ofdev->dev.of_node; 3746 struct device_node *np = ofdev->dev.of_node;
@@ -3986,7 +3986,7 @@ static struct of_device_id ucc_geth_match[] = {
3986 3986
3987MODULE_DEVICE_TABLE(of, ucc_geth_match); 3987MODULE_DEVICE_TABLE(of, ucc_geth_match);
3988 3988
3989static struct of_platform_driver ucc_geth_driver = { 3989static struct platform_driver ucc_geth_driver = {
3990 .driver = { 3990 .driver = {
3991 .name = DRV_NAME, 3991 .name = DRV_NAME,
3992 .owner = THIS_MODULE, 3992 .owner = THIS_MODULE,
@@ -4008,14 +4008,14 @@ static int __init ucc_geth_init(void)
4008 memcpy(&(ugeth_info[i]), &ugeth_primary_info, 4008 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
4009 sizeof(ugeth_primary_info)); 4009 sizeof(ugeth_primary_info));
4010 4010
4011 ret = of_register_platform_driver(&ucc_geth_driver); 4011 ret = platform_driver_register(&ucc_geth_driver);
4012 4012
4013 return ret; 4013 return ret;
4014} 4014}
4015 4015
4016static void __exit ucc_geth_exit(void) 4016static void __exit ucc_geth_exit(void)
4017{ 4017{
4018 of_unregister_platform_driver(&ucc_geth_driver); 4018 platform_driver_unregister(&ucc_geth_driver);
4019} 4019}
4020 4020
4021module_init(ucc_geth_init); 4021module_init(ucc_geth_init);
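With the conversion from of_platform_driver to platform_driver, the probe callback loses its const struct of_device_id *match argument; matching is driven by the driver's .of_match_table instead. A probe that still needs the matched table entry can look it up itself; a sketch under that assumption (of_match_device() is the usual replacement for the removed argument):

/* Sketch only: recovering the matched of_device_id inside a platform probe. */
static int some_probe(struct platform_device *ofdev)
{
        const struct of_device_id *match;

        match = of_match_device(ucc_geth_match, &ofdev->dev);
        if (!match)
                return -EINVAL;
        /* ... proceed as before, using match->data if needed ... */
        return 0;
}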
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 109751bad3b..f967913e11b 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -328,13 +328,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
328{ 328{
329 static const char ifname[] = "usbpn%d"; 329 static const char ifname[] = "usbpn%d";
330 const struct usb_cdc_union_desc *union_header = NULL; 330 const struct usb_cdc_union_desc *union_header = NULL;
331 const struct usb_cdc_header_desc *phonet_header = NULL;
332 const struct usb_host_interface *data_desc; 331 const struct usb_host_interface *data_desc;
333 struct usb_interface *data_intf; 332 struct usb_interface *data_intf;
334 struct usb_device *usbdev = interface_to_usbdev(intf); 333 struct usb_device *usbdev = interface_to_usbdev(intf);
335 struct net_device *dev; 334 struct net_device *dev;
336 struct usbpn_dev *pnd; 335 struct usbpn_dev *pnd;
337 u8 *data; 336 u8 *data;
337 int phonet = 0;
338 int len, err; 338 int len, err;
339 339
340 data = intf->altsetting->extra; 340 data = intf->altsetting->extra;
@@ -355,10 +355,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
355 (struct usb_cdc_union_desc *)data; 355 (struct usb_cdc_union_desc *)data;
356 break; 356 break;
357 case 0xAB: 357 case 0xAB:
358 if (phonet_header || dlen < 5) 358 phonet = 1;
359 break;
360 phonet_header =
361 (struct usb_cdc_header_desc *)data;
362 break; 359 break;
363 } 360 }
364 } 361 }
@@ -366,7 +363,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
366 len -= dlen; 363 len -= dlen;
367 } 364 }
368 365
369 if (!union_header || !phonet_header) 366 if (!union_header || !phonet)
370 return -EINVAL; 367 return -EINVAL;
371 368
372 data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0); 369 data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0);
@@ -392,7 +389,6 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
392 389
393 pnd = netdev_priv(dev); 390 pnd = netdev_priv(dev);
394 SET_NETDEV_DEV(dev, &intf->dev); 391 SET_NETDEV_DEV(dev, &intf->dev);
395 netif_stop_queue(dev);
396 392
397 pnd->dev = dev; 393 pnd->dev = dev;
398 pnd->usb = usb_get_dev(usbdev); 394 pnd->usb = usb_get_dev(usbdev);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d776c4a8d3c..7113168473c 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cdc_ncm.c 2 * cdc_ncm.c
3 * 3 *
4 * Copyright (C) ST-Ericsson 2010 4 * Copyright (C) ST-Ericsson 2010-2011
5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> 5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> 6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 * 7 *
@@ -54,7 +54,7 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "30-Nov-2010" 57#define DRIVER_VERSION "7-Feb-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -77,6 +77,9 @@
77 */ 77 */
78#define CDC_NCM_DPT_DATAGRAMS_MAX 32 78#define CDC_NCM_DPT_DATAGRAMS_MAX 32
79 79
80/* Maximum amount of IN datagrams in NTB */
81#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */
82
80/* Restart the timer, if amount of datagrams is less than given value */ 83/* Restart the timer, if amount of datagrams is less than given value */
81#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 84#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
82 85
@@ -85,11 +88,6 @@
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ 88 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) 89 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87 90
88struct connection_speed_change {
89 __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
90 __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
91} __attribute__ ((packed));
92
93struct cdc_ncm_data { 91struct cdc_ncm_data {
94 struct usb_cdc_ncm_nth16 nth16; 92 struct usb_cdc_ncm_nth16 nth16;
95 struct usb_cdc_ncm_ndp16 ndp16; 93 struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
198{ 196{
199 struct usb_cdc_notification req; 197 struct usb_cdc_notification req;
200 u32 val; 198 u32 val;
201 __le16 max_datagram_size;
202 u8 flags; 199 u8 flags;
203 u8 iface_no; 200 u8 iface_no;
204 int err; 201 int err;
202 u16 ntb_fmt_supported;
205 203
206 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 204 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
207 205
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
223 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); 221 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
224 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); 222 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
225 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); 223 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
224 /* devices prior to NCM Errata shall set this field to zero */
225 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
226 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
226 227
227 if (ctx->func_desc != NULL) 228 if (ctx->func_desc != NULL)
228 flags = ctx->func_desc->bmNetworkCapabilities; 229 flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
231 232
232 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " 233 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
233 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " 234 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
234 "wNdpOutAlignment=%u flags=0x%x\n", 235 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
235 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, 236 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
236 ctx->tx_ndp_modulus, flags); 237 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
237 238
238 /* max count of tx datagrams without terminating NULL entry */ 239 /* max count of tx datagrams */
239 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; 240 if ((ctx->tx_max_datagrams == 0) ||
241 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
242 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
240 243
241 /* verify maximum size of received NTB in bytes */ 244 /* verify maximum size of received NTB in bytes */
242 if ((ctx->rx_max < 245 if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
243 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 246 pr_debug("Using min receive length=%d\n",
244 (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { 247 USB_CDC_NCM_NTB_MIN_IN_SIZE);
248 ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
249 }
250
251 if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
245 pr_debug("Using default maximum receive length=%d\n", 252 pr_debug("Using default maximum receive length=%d\n",
246 CDC_NCM_NTB_MAX_SIZE_RX); 253 CDC_NCM_NTB_MAX_SIZE_RX);
247 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; 254 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
248 } 255 }
249 256
257 /* inform device about NTB input size changes */
258 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
259 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
260 USB_RECIP_INTERFACE;
261 req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
262 req.wValue = 0;
263 req.wIndex = cpu_to_le16(iface_no);
264
265 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
266 struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
267
268 req.wLength = 8;
269 ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
270 ndp_in_sz.wNtbInMaxDatagrams =
271 cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
272 ndp_in_sz.wReserved = 0;
273 err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
274 1000);
275 } else {
276 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
277
278 req.wLength = 4;
279 err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
280 NULL, 1000);
281 }
282
283 if (err)
284 pr_debug("Setting NTB Input Size failed\n");
285 }
286
250 /* verify maximum size of transmitted NTB in bytes */ 287 /* verify maximum size of transmitted NTB in bytes */
251 if ((ctx->tx_max < 288 if ((ctx->tx_max <
252 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 289 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
297 /* additional configuration */ 334 /* additional configuration */
298 335
299 /* set CRC Mode */ 336 /* set CRC Mode */
300 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 337 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
301 req.bNotificationType = USB_CDC_SET_CRC_MODE; 338 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
302 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); 339 USB_RECIP_INTERFACE;
303 req.wIndex = cpu_to_le16(iface_no); 340 req.bNotificationType = USB_CDC_SET_CRC_MODE;
304 req.wLength = 0; 341 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
305 342 req.wIndex = cpu_to_le16(iface_no);
306 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 343 req.wLength = 0;
307 if (err) 344
308 pr_debug("Setting CRC mode off failed\n"); 345 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
346 if (err)
347 pr_debug("Setting CRC mode off failed\n");
348 }
309 349
310 /* set NTB format */ 350 /* set NTB format, if both formats are supported */
311 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 351 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
312 req.bNotificationType = USB_CDC_SET_NTB_FORMAT; 352 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
313 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); 353 USB_RECIP_INTERFACE;
314 req.wIndex = cpu_to_le16(iface_no); 354 req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
315 req.wLength = 0; 355 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
356 req.wIndex = cpu_to_le16(iface_no);
357 req.wLength = 0;
358
359 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
360 if (err)
361 pr_debug("Setting NTB format to 16-bit failed\n");
362 }
316 363
317 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 364 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
318 if (err)
319 pr_debug("Setting NTB format to 16-bit failed\n");
320 365
321 /* set Max Datagram Size (MTU) */ 366 /* set Max Datagram Size (MTU) */
322 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; 367 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
323 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; 368 __le16 max_datagram_size;
324 req.wValue = 0; 369 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
325 req.wIndex = cpu_to_le16(iface_no); 370
326 req.wLength = cpu_to_le16(2); 371 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
372 USB_RECIP_INTERFACE;
373 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
374 req.wValue = 0;
375 req.wIndex = cpu_to_le16(iface_no);
376 req.wLength = cpu_to_le16(2);
377
378 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
379 1000);
380 if (err) {
381 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
382 CDC_NCM_MIN_DATAGRAM_SIZE);
383 } else {
384 ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
385 /* Check Eth descriptor value */
386 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
387 if (ctx->max_datagram_size > eth_max_sz)
388 ctx->max_datagram_size = eth_max_sz;
389 } else {
390 if (ctx->max_datagram_size >
391 CDC_NCM_MAX_DATAGRAM_SIZE)
392 ctx->max_datagram_size =
393 CDC_NCM_MAX_DATAGRAM_SIZE;
394 }
327 395
328 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); 396 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
329 if (err) { 397 ctx->max_datagram_size =
330 pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", 398 CDC_NCM_MIN_DATAGRAM_SIZE;
331 CDC_NCM_MIN_DATAGRAM_SIZE); 399
332 /* use default */ 400 /* if value changed, update device */
333 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; 401 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
334 } else { 402 USB_RECIP_INTERFACE;
335 ctx->max_datagram_size = le16_to_cpu(max_datagram_size); 403 req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
404 req.wValue = 0;
405 req.wIndex = cpu_to_le16(iface_no);
406 req.wLength = 2;
407 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
408
409 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
410 0, NULL, 1000);
411 if (err)
412 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
413 }
336 414
337 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
338 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
339 else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
340 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
341 } 415 }
342 416
343 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) 417 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
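The reworked setup gates the CRC-mode and max-datagram requests on the advertised bmNetworkCapabilities bits (and the NTB-format request on bmNtbFormatsSupported), and the negotiated max datagram size is now clamped from three directions: the Ethernet functional descriptor's wMaxSegmentSize, the driver ceiling, and the driver floor. The clamping rule restated as a sketch (a paraphrase for clarity, not the driver's code):

static u16 ncm_clamp_datagram_size(u16 reported, u16 eth_max_sz)
{
        u16 size = reported;

        /* honour the Ethernet descriptor when it is the tighter bound */
        if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
                if (size > eth_max_sz)
                        size = eth_max_sz;
        } else if (size > CDC_NCM_MAX_DATAGRAM_SIZE) {
                size = CDC_NCM_MAX_DATAGRAM_SIZE;
        }

        if (size < CDC_NCM_MIN_DATAGRAM_SIZE)
                size = CDC_NCM_MIN_DATAGRAM_SIZE;
        return size;
}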
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
466 540
467 ctx->ether_desc = 541 ctx->ether_desc =
468 (const struct usb_cdc_ether_desc *)buf; 542 (const struct usb_cdc_ether_desc *)buf;
469
470 dev->hard_mtu = 543 dev->hard_mtu =
471 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); 544 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
472 545
473 if (dev->hard_mtu < 546 if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
474 (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) 547 dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
475 dev->hard_mtu = 548 else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
476 CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; 549 dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
477
478 else if (dev->hard_mtu >
479 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
480 dev->hard_mtu =
481 CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
482 break; 550 break;
483 551
484 case USB_CDC_NCM_TYPE: 552 case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
628 u32 offset; 696 u32 offset;
629 u32 last_offset; 697 u32 last_offset;
630 u16 n = 0; 698 u16 n = 0;
631 u8 timeout = 0; 699 u8 ready2send = 0;
632 700
633 /* if there is a remaining skb, it gets priority */ 701 /* if there is a remaining skb, it gets priority */
634 if (skb != NULL) 702 if (skb != NULL)
635 swap(skb, ctx->tx_rem_skb); 703 swap(skb, ctx->tx_rem_skb);
636 else 704 else
637 timeout = 1; 705 ready2send = 1;
638 706
639 /* 707 /*
640 * +----------------+ 708 * +----------------+
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
682 750
683 for (; n < ctx->tx_max_datagrams; n++) { 751 for (; n < ctx->tx_max_datagrams; n++) {
684 /* check if end of transmit buffer is reached */ 752 /* check if end of transmit buffer is reached */
685 if (offset >= ctx->tx_max) 753 if (offset >= ctx->tx_max) {
754 ready2send = 1;
686 break; 755 break;
687 756 }
688 /* compute maximum buffer size */ 757 /* compute maximum buffer size */
689 rem = ctx->tx_max - offset; 758 rem = ctx->tx_max - offset;
690 759
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
711 } 780 }
712 ctx->tx_rem_skb = skb; 781 ctx->tx_rem_skb = skb;
713 skb = NULL; 782 skb = NULL;
714 783 ready2send = 1;
715 /* loop one more time */
716 timeout = 1;
717 } 784 }
718 break; 785 break;
719 } 786 }
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
756 ctx->tx_curr_last_offset = last_offset; 823 ctx->tx_curr_last_offset = last_offset;
757 goto exit_no_skb; 824 goto exit_no_skb;
758 825
759 } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { 826 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
760 /* wait for more frames */ 827 /* wait for more frames */
761 /* push variables */ 828 /* push variables */
762 ctx->tx_curr_skb = skb_out; 829 ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
813 cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); 880 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
814 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); 881 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
815 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); 882 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
816 ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), 883 ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
817 ctx->tx_ndp_modulus); 884 ctx->tx_ndp_modulus);
818 885
819 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); 886 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
825 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * 892 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
826 sizeof(struct usb_cdc_ncm_dpe16)); 893 sizeof(struct usb_cdc_ncm_dpe16));
827 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); 894 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
828 ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ 895 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
829 896
830 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, 897 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
831 &(ctx->tx_ncm.ndp16), 898 &(ctx->tx_ncm.ndp16),
832 sizeof(ctx->tx_ncm.ndp16)); 899 sizeof(ctx->tx_ncm.ndp16));
833 900
834 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + 901 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
835 sizeof(ctx->tx_ncm.ndp16), 902 sizeof(ctx->tx_ncm.ndp16),
836 &(ctx->tx_ncm.dpe16), 903 &(ctx->tx_ncm.dpe16),
837 (ctx->tx_curr_frame_num + 1) * 904 (ctx->tx_curr_frame_num + 1) *
@@ -868,15 +935,19 @@ static void cdc_ncm_tx_timeout(unsigned long arg)
868 if (ctx->tx_timer_pending != 0) { 935 if (ctx->tx_timer_pending != 0) {
869 ctx->tx_timer_pending--; 936 ctx->tx_timer_pending--;
870 restart = 1; 937 restart = 1;
871 } else 938 } else {
872 restart = 0; 939 restart = 0;
940 }
873 941
874 spin_unlock(&ctx->mtx); 942 spin_unlock(&ctx->mtx);
875 943
876 if (restart) 944 if (restart) {
945 spin_lock(&ctx->mtx);
877 cdc_ncm_tx_timeout_start(ctx); 946 cdc_ncm_tx_timeout_start(ctx);
878 else if (ctx->netdev != NULL) 947 spin_unlock(&ctx->mtx);
948 } else if (ctx->netdev != NULL) {
879 usbnet_start_xmit(NULL, ctx->netdev); 949 usbnet_start_xmit(NULL, ctx->netdev);
950 }
880} 951}
881 952
882static struct sk_buff * 953static struct sk_buff *
@@ -900,7 +971,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
900 skb_out = cdc_ncm_fill_tx_frame(ctx, skb); 971 skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
901 if (ctx->tx_curr_skb != NULL) 972 if (ctx->tx_curr_skb != NULL)
902 need_timer = 1; 973 need_timer = 1;
903 spin_unlock(&ctx->mtx);
904 974
905 /* Start timer, if there is a remaining skb */ 975 /* Start timer, if there is a remaining skb */
906 if (need_timer) 976 if (need_timer)
@@ -908,6 +978,8 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
908 978
909 if (skb_out) 979 if (skb_out)
910 dev->net->stats.tx_packets += ctx->tx_curr_frame_num; 980 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
981
982 spin_unlock(&ctx->mtx);
911 return skb_out; 983 return skb_out;
912 984
913error: 985error:
@@ -956,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
956 goto error; 1028 goto error;
957 } 1029 }
958 1030
959 temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); 1031 temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
960 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { 1032 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
961 pr_debug("invalid DPT16 index\n"); 1033 pr_debug("invalid DPT16 index\n");
962 goto error; 1034 goto error;
@@ -1020,8 +1092,8 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
1020 if (((offset + temp) > actlen) || 1092 if (((offset + temp) > actlen) ||
1021 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { 1093 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
1022 pr_debug("invalid frame detected (ignored)" 1094 pr_debug("invalid frame detected (ignored)"
1023 "offset[%u]=%u, length=%u, skb=%p\n", 1095 "offset[%u]=%u, length=%u, skb=%p\n",
1024 x, offset, temp, skb_in); 1096 x, offset, temp, skb_in);
1025 if (!x) 1097 if (!x)
1026 goto error; 1098 goto error;
1027 break; 1099 break;
@@ -1043,10 +1115,10 @@ error:
1043 1115
1044static void 1116static void
1045cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, 1117cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
1046 struct connection_speed_change *data) 1118 struct usb_cdc_speed_change *data)
1047{ 1119{
1048 uint32_t rx_speed = le32_to_cpu(data->USBitRate); 1120 uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
1049 uint32_t tx_speed = le32_to_cpu(data->DSBitRate); 1121 uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
1050 1122
1051 /* 1123 /*
1052 * Currently the USB-NET API does not support reporting the actual 1124 * Currently the USB-NET API does not support reporting the actual
@@ -1087,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1087 /* test for split data in 8-byte chunks */ 1159 /* test for split data in 8-byte chunks */
1088 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { 1160 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
1089 cdc_ncm_speed_change(ctx, 1161 cdc_ncm_speed_change(ctx,
1090 (struct connection_speed_change *)urb->transfer_buffer); 1162 (struct usb_cdc_speed_change *)urb->transfer_buffer);
1091 return; 1163 return;
1092 } 1164 }
1093 1165
@@ -1115,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1115 break; 1187 break;
1116 1188
1117 case USB_CDC_NOTIFY_SPEED_CHANGE: 1189 case USB_CDC_NOTIFY_SPEED_CHANGE:
1118 if (urb->actual_length < 1190 if (urb->actual_length < (sizeof(*event) +
1119 (sizeof(*event) + sizeof(struct connection_speed_change))) 1191 sizeof(struct usb_cdc_speed_change)))
1120 set_bit(EVENT_STS_SPLIT, &dev->flags); 1192 set_bit(EVENT_STS_SPLIT, &dev->flags);
1121 else 1193 else
1122 cdc_ncm_speed_change(ctx, 1194 cdc_ncm_speed_change(ctx,
1123 (struct connection_speed_change *) &event[1]); 1195 (struct usb_cdc_speed_change *) &event[1]);
1124 break; 1196 break;
1125 1197
1126 default: 1198 default:
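
[Note on the cdc_ncm hunks above] Three things happen here: the NTH16/NDP16 fields are renamed to the NCM spec names now exported by <linux/usb/cdc.h> (wNdpIndex, wNextNdpIndex, struct usb_cdc_speed_change, whose fields really are spelled DLBitRRate/ULBitRate); the timer path retakes ctx->mtx around cdc_ncm_tx_timeout_start(); and the spin_unlock in cdc_ncm_tx_fixup moves past the tx_curr_frame_num read so the stats update sees a value consistent with the frame just built. A minimal userspace sketch of that last fix, with a pthread mutex standing in for the spinlock; the struct and field names are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>

struct tx_ctx {
	pthread_mutex_t mtx;
	unsigned tx_curr_frame_num;	/* written by the frame-fill path */
	unsigned long tx_packets;	/* stats accumulator */
};

static void tx_fixup(struct tx_ctx *ctx, int have_frame)
{
	pthread_mutex_lock(&ctx->mtx);
	/* ... fill the frame, updating tx_curr_frame_num ... */
	if (have_frame)
		ctx->tx_packets += ctx->tx_curr_frame_num; /* read under the lock */
	pthread_mutex_unlock(&ctx->mtx);	/* previously dropped too early */
}

int main(void)
{
	struct tx_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 3, 0 };

	tx_fixup(&ctx, 1);
	printf("tx_packets=%lu\n", ctx.tx_packets);
	return 0;
}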
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e3b9f..5002f5be47b 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
651 .driver_info = (unsigned long)&dm9601_info, 651 .driver_info = (unsigned long)&dm9601_info,
652 }, 652 },
653 { 653 {
654 USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
655 .driver_info = (unsigned long)&dm9601_info,
656 },
657 {
654 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ 658 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
655 .driver_info = (unsigned long)&dm9601_info, 659 .driver_info = (unsigned long)&dm9601_info,
656 }, 660 },
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fcedff4..387ca43f26f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -324,7 +324,7 @@ struct hso_device {
324/* Prototypes */ 324/* Prototypes */
325/*****************************************************************************/ 325/*****************************************************************************/
326/* Serial driver functions */ 326/* Serial driver functions */
327static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, 327static int hso_serial_tiocmset(struct tty_struct *tty,
328 unsigned int set, unsigned int clear); 328 unsigned int set, unsigned int clear);
329static void ctrl_callback(struct urb *urb); 329static void ctrl_callback(struct urb *urb);
330static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial); 330static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
@@ -1335,7 +1335,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1335 1335
1336 /* done */ 1336 /* done */
1337 if (result) 1337 if (result)
1338 hso_serial_tiocmset(tty, NULL, TIOCM_RTS | TIOCM_DTR, 0); 1338 hso_serial_tiocmset(tty, TIOCM_RTS | TIOCM_DTR, 0);
1339err_out: 1339err_out:
1340 mutex_unlock(&serial->parent->mutex); 1340 mutex_unlock(&serial->parent->mutex);
1341 return result; 1341 return result;
@@ -1656,7 +1656,7 @@ static int hso_get_count(struct tty_struct *tty,
1656} 1656}
1657 1657
1658 1658
1659static int hso_serial_tiocmget(struct tty_struct *tty, struct file *file) 1659static int hso_serial_tiocmget(struct tty_struct *tty)
1660{ 1660{
1661 int retval; 1661 int retval;
1662 struct hso_serial *serial = get_serial_by_tty(tty); 1662 struct hso_serial *serial = get_serial_by_tty(tty);
@@ -1687,7 +1687,7 @@ static int hso_serial_tiocmget(struct tty_struct *tty, struct file *file)
1687 return retval; 1687 return retval;
1688} 1688}
1689 1689
1690static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, 1690static int hso_serial_tiocmset(struct tty_struct *tty,
1691 unsigned int set, unsigned int clear) 1691 unsigned int set, unsigned int clear)
1692{ 1692{
1693 int val = 0; 1693 int val = 0;
@@ -1730,7 +1730,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
1730 USB_CTRL_SET_TIMEOUT); 1730 USB_CTRL_SET_TIMEOUT);
1731} 1731}
1732 1732
1733static int hso_serial_ioctl(struct tty_struct *tty, struct file *file, 1733static int hso_serial_ioctl(struct tty_struct *tty,
1734 unsigned int cmd, unsigned long arg) 1734 unsigned int cmd, unsigned long arg)
1735{ 1735{
1736 struct hso_serial *serial = get_serial_by_tty(tty); 1736 struct hso_serial *serial = get_serial_by_tty(tty);
@@ -2628,15 +2628,15 @@ exit:
2628 2628
2629static void hso_free_tiomget(struct hso_serial *serial) 2629static void hso_free_tiomget(struct hso_serial *serial)
2630{ 2630{
2631 struct hso_tiocmget *tiocmget = serial->tiocmget; 2631 struct hso_tiocmget *tiocmget;
2632 if (!serial)
2633 return;
2634 tiocmget = serial->tiocmget;
2632 if (tiocmget) { 2635 if (tiocmget) {
2633 if (tiocmget->urb) { 2636 usb_free_urb(tiocmget->urb);
2634 usb_free_urb(tiocmget->urb); 2637 tiocmget->urb = NULL;
2635 tiocmget->urb = NULL;
2636 }
2637 serial->tiocmget = NULL; 2638 serial->tiocmget = NULL;
2638 kfree(tiocmget); 2639 kfree(tiocmget);
2639
2640 } 2640 }
2641} 2641}
2642 2642
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643a4a2..7dc84971f26 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
406 406
407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { 407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
408 err("Firmware too big: %zu", fw->size); 408 err("Firmware too big: %zu", fw->size);
409 release_firmware(fw);
409 return -ENOSPC; 410 return -ENOSPC;
410 } 411 }
411 data_len = fw->size; 412 data_len = fw->size;
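
[Note on the kaweth hunk above] This plugs a firmware leak: once request_firmware() has succeeded, every early return must be preceded by release_firmware(). (The usbnet hunk further down applies the same rule to an URB when usb_autopm_get_interface() fails.) A userspace sketch of the acquire/release-on-all-paths pattern, with malloc/free standing in for the firmware API and illustrative sizes:

#include <stdlib.h>
#include <errno.h>

#define BUF_SIZE 4096

static int download_firmware(size_t fw_size)
{
	void *fw = malloc(fw_size);	/* stands in for request_firmware() */

	if (!fw)
		return -ENOMEM;
	if (fw_size > BUF_SIZE) {
		free(fw);		/* the fix: release before bailing out */
		return -ENOSPC;
	}
	/* ... copy firmware into the device buffer ... */
	free(fw);
	return 0;
}

int main(void)
{
	return download_firmware(8192) == -ENOSPC ? 0 : 1;
}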
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bc86f4b6ecc..727874d9deb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -49,6 +49,8 @@
49 49
50struct smsc95xx_priv { 50struct smsc95xx_priv {
51 u32 mac_cr; 51 u32 mac_cr;
52 u32 hash_hi;
53 u32 hash_lo;
52 spinlock_t mac_cr_lock; 54 spinlock_t mac_cr_lock;
53 bool use_tx_csum; 55 bool use_tx_csum;
54 bool use_rx_csum; 56 bool use_rx_csum;
@@ -370,10 +372,11 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
370{ 372{
371 struct usbnet *dev = netdev_priv(netdev); 373 struct usbnet *dev = netdev_priv(netdev);
372 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 374 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
373 u32 hash_hi = 0;
374 u32 hash_lo = 0;
375 unsigned long flags; 375 unsigned long flags;
376 376
377 pdata->hash_hi = 0;
378 pdata->hash_lo = 0;
379
377 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 380 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
378 381
379 if (dev->net->flags & IFF_PROMISC) { 382 if (dev->net->flags & IFF_PROMISC) {
@@ -394,13 +397,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
394 u32 bitnum = smsc95xx_hash(ha->addr); 397 u32 bitnum = smsc95xx_hash(ha->addr);
395 u32 mask = 0x01 << (bitnum & 0x1F); 398 u32 mask = 0x01 << (bitnum & 0x1F);
396 if (bitnum & 0x20) 399 if (bitnum & 0x20)
397 hash_hi |= mask; 400 pdata->hash_hi |= mask;
398 else 401 else
399 hash_lo |= mask; 402 pdata->hash_lo |= mask;
400 } 403 }
401 404
402 netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n", 405 netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
403 hash_hi, hash_lo); 406 pdata->hash_hi, pdata->hash_lo);
404 } else { 407 } else {
405 netif_dbg(dev, drv, dev->net, "receive own packets only\n"); 408 netif_dbg(dev, drv, dev->net, "receive own packets only\n");
406 pdata->mac_cr &= 409 pdata->mac_cr &=
@@ -410,8 +413,8 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
410 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 413 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
411 414
412 /* Initiate async writes, as we can't wait for completion here */ 415 /* Initiate async writes, as we can't wait for completion here */
413 smsc95xx_write_reg_async(dev, HASHH, &hash_hi); 416 smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
414 smsc95xx_write_reg_async(dev, HASHL, &hash_lo); 417 smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
415 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); 418 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
416} 419}
417 420
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a41643ff..95c41d56631 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
931 if (urb != NULL) { 931 if (urb != NULL) {
932 clear_bit (EVENT_RX_MEMORY, &dev->flags); 932 clear_bit (EVENT_RX_MEMORY, &dev->flags);
933 status = usb_autopm_get_interface(dev->intf); 933 status = usb_autopm_get_interface(dev->intf);
934 if (status < 0) 934 if (status < 0) {
935 usb_free_urb(urb);
935 goto fail_lowmem; 936 goto fail_lowmem;
937 }
936 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) 938 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
937 resched = 0; 939 resched = 0;
938 usb_autopm_put_interface(dev->intf); 940 usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cc83fa71c3f..2de9b90c5f8 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -171,7 +171,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
171 if (skb->ip_summed == CHECKSUM_NONE) 171 if (skb->ip_summed == CHECKSUM_NONE)
172 skb->ip_summed = rcv_priv->ip_summed; 172 skb->ip_summed = rcv_priv->ip_summed;
173 173
174 length = skb->len + ETH_HLEN; 174 length = skb->len;
175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) 175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
176 goto rx_drop; 176 goto rx_drop;
177 177
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
403 if (tb[IFLA_ADDRESS] == NULL) 403 if (tb[IFLA_ADDRESS] == NULL)
404 random_ether_addr(dev->dev_addr); 404 random_ether_addr(dev->dev_addr);
405 405
406 if (tb[IFLA_IFNAME])
407 nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
408 else
409 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
410
411 if (strchr(dev->name, '%')) {
412 err = dev_alloc_name(dev, dev->name);
413 if (err < 0)
414 goto err_alloc_name;
415 }
416
417 err = register_netdevice(dev); 406 err = register_netdevice(dev);
418 if (err < 0) 407 if (err < 0)
419 goto err_register_dev; 408 goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
433 422
434err_register_dev: 423err_register_dev:
435 /* nothing to do */ 424 /* nothing to do */
436err_alloc_name:
437err_configure_peer: 425err_configure_peer:
438 unregister_netdevice(peer); 426 unregister_netdevice(peer);
439 return err; 427 return err;
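
[Note on the veth hunks above] Two independent changes: the open-coded IFLA_IFNAME handling is deleted because the rtnl core now performs it before ->newlink runs, and the byte counter drops the extra ETH_HLEN since at veth_xmit() the skb still carries its Ethernet header, so skb->len already includes those 14 bytes. A trivial sketch of the accounting fix, with illustrative numbers:

#include <stdio.h>

#define ETH_HLEN 14

int main(void)
{
	unsigned payload = 100;
	unsigned skb_len = ETH_HLEN + payload;	/* as seen in veth_xmit() */

	printf("old rx_bytes += %u (header counted twice)\n",
	       skb_len + ETH_HLEN);
	printf("new rx_bytes += %u\n", skb_len);
	return 0;
}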
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 09cac704fdd..0d6fec6b7d9 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2923static int velocity_set_wol(struct velocity_info *vptr) 2923static int velocity_set_wol(struct velocity_info *vptr)
2924{ 2924{
2925 struct mac_regs __iomem *regs = vptr->mac_regs; 2925 struct mac_regs __iomem *regs = vptr->mac_regs;
2926 enum speed_opt spd_dpx = vptr->options.spd_dpx;
2926 static u8 buf[256]; 2927 static u8 buf[256];
2927 int i; 2928 int i;
2928 2929
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
2968 2969
2969 writew(0x0FFF, &regs->WOLSRClr); 2970 writew(0x0FFF, &regs->WOLSRClr);
2970 2971
2972 if (spd_dpx == SPD_DPX_1000_FULL)
2973 goto mac_done;
2974
2975 if (spd_dpx != SPD_DPX_AUTO)
2976 goto advertise_done;
2977
2971 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { 2978 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2972 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 2979 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2973 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); 2980 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
2978 if (vptr->mii_status & VELOCITY_SPEED_1000) 2985 if (vptr->mii_status & VELOCITY_SPEED_1000)
2979 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); 2986 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2980 2987
2988advertise_done:
2981 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR); 2989 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2982 2990
2983 { 2991 {
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
2987 writeb(GCR, &regs->CHIPGCR); 2995 writeb(GCR, &regs->CHIPGCR);
2988 } 2996 }
2989 2997
2998mac_done:
2990 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR); 2999 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2991 /* Turn on SWPTAG just before entering power mode */ 3000 /* Turn on SWPTAG just before entering power mode */
2992 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW); 3001 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
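
[Note on the velocity_set_wol() hunks above] The WOL link programming now respects a user-forced speed/duplex: a forced gigabit link is left alone entirely (mac_done), and any other forced setting skips only the autoneg re-advertisement (advertise_done). A compressed sketch of the resulting control flow, with illustrative enum values:

#include <stdio.h>

enum spd { SPD_AUTO, SPD_100_FULL, SPD_1000_FULL };

static void set_wol(enum spd spd)
{
	if (spd == SPD_1000_FULL)
		goto mac_done;		/* keep the gigabit link as-is */
	if (spd != SPD_AUTO)
		goto advertise_done;	/* user forced a speed: do not
					 * rewrite the advertisement */
	puts("re-advertise 10/100 for WOL");
advertise_done:
	puts("force 100M MAC mode");
mac_done:
	puts("arm WOL");
}

int main(void)
{
	set_wol(SPD_1000_FULL);
	return 0;
}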
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b9ff6..d7227539484 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@ enum velocity_owner {
361#define MAC_REG_CHIPGSR 0x9C 361#define MAC_REG_CHIPGSR 0x9C
362#define MAC_REG_TESTCFG 0x9D 362#define MAC_REG_TESTCFG 0x9D
363#define MAC_REG_DEBUG 0x9E 363#define MAC_REG_DEBUG 0x9E
364#define MAC_REG_CHIPGCR 0x9F 364#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
365#define MAC_REG_WOLCR0_SET 0xA0 365#define MAC_REG_WOLCR0_SET 0xA0
366#define MAC_REG_WOLCR1_SET 0xA1 366#define MAC_REG_WOLCR1_SET 0xA1
367#define MAC_REG_PWCFG_SET 0xA2 367#define MAC_REG_PWCFG_SET 0xA2
@@ -848,10 +848,10 @@ enum velocity_owner {
848 * Bits in CHIPGCR register 848 * Bits in CHIPGCR register
849 */ 849 */
850 850
851#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */ 851#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
852#define CHIPGCR_FCFDX 0x40 852#define CHIPGCR_FCFDX 0x40 /* force full duplex */
853#define CHIPGCR_FCRESV 0x20 853#define CHIPGCR_FCRESV 0x20
854#define CHIPGCR_FCMODE 0x10 854#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
855#define CHIPGCR_LPSOPT 0x08 855#define CHIPGCR_LPSOPT 0x08
856#define CHIPGCR_TM1US 0x04 856#define CHIPGCR_TM1US 0x04
857#define CHIPGCR_TM0US 0x02 857#define CHIPGCR_TM0US 0x02
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e410d1..82dba5aaf42 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
446 } 446 }
447} 447}
448 448
449static void virtnet_napi_enable(struct virtnet_info *vi)
450{
451 napi_enable(&vi->napi);
452
453 /* If all buffers were filled by other side before we napi_enabled, we
454 * won't get another interrupt, so process any outstanding packets
 455 * now. virtnet_poll wants to re-enable the queue, so we disable here.
456 * We synchronize against interrupts via NAPI_STATE_SCHED */
457 if (napi_schedule_prep(&vi->napi)) {
458 virtqueue_disable_cb(vi->rvq);
459 __napi_schedule(&vi->napi);
460 }
461}
462
449static void refill_work(struct work_struct *work) 463static void refill_work(struct work_struct *work)
450{ 464{
451 struct virtnet_info *vi; 465 struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
454 vi = container_of(work, struct virtnet_info, refill.work); 468 vi = container_of(work, struct virtnet_info, refill.work);
455 napi_disable(&vi->napi); 469 napi_disable(&vi->napi);
456 still_empty = !try_fill_recv(vi, GFP_KERNEL); 470 still_empty = !try_fill_recv(vi, GFP_KERNEL);
457 napi_enable(&vi->napi); 471 virtnet_napi_enable(vi);
458 472
459 /* In theory, this can happen: if we don't get any buffers in 473 /* In theory, this can happen: if we don't get any buffers in
460 * we will *never* try to fill again. */ 474 * we will *never* try to fill again. */
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
638{ 652{
639 struct virtnet_info *vi = netdev_priv(dev); 653 struct virtnet_info *vi = netdev_priv(dev);
640 654
641 napi_enable(&vi->napi); 655 virtnet_napi_enable(vi);
642
643 /* If all buffers were filled by other side before we napi_enabled, we
644 * won't get another interrupt, so process any outstanding packets
 645 * now. virtnet_poll wants to re-enable the queue, so we disable here.
646 * We synchronize against interrupts via NAPI_STATE_SCHED */
647 if (napi_schedule_prep(&vi->napi)) {
648 virtqueue_disable_cb(vi->rvq);
649 __napi_schedule(&vi->napi);
650 }
651 return 0; 656 return 0;
652} 657}
653 658
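
[Note on the virtio_net hunks above] virtnet_napi_enable() factors out the "kick after enable" sequence so refill_work() gets it too: if the host filled every buffer while NAPI was off, no further interrupt will arrive, so one poll pass must be scheduled by hand, guarded against double-scheduling by NAPI_STATE_SCHED via napi_schedule_prep(). A userspace sketch of the same idea, with a C11 atomic flag standing in for the NAPI scheduling bit (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sched = ATOMIC_FLAG_INIT;	/* set while a poll is queued */
static int pending = 1;				/* work arrived while disabled */

static void poll_once(void)
{
	printf("processed %d pending buffer(s)\n", pending);
	pending = 0;
	atomic_flag_clear(&sched);
}

static void napi_enable_and_kick(void)
{
	/* re-enable polling here ... then: */
	if (!atomic_flag_test_and_set(&sched))	/* napi_schedule_prep() */
		poll_once();			/* __napi_schedule() */
}

int main(void)
{
	napi_enable_and_kick();
	return 0;
}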
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b72b5..cc14b4a7504 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
48static int enable_mq = 1; 48static int enable_mq = 1;
49static int irq_share_mode; 49static int irq_share_mode;
50 50
51static void
52vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
53
51/* 54/*
52 * Enable/Disable the given intr 55 * Enable/Disable the given intr
53 */ 56 */
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
139{ 142{
140 u32 ret; 143 u32 ret;
141 int i; 144 int i;
145 unsigned long flags;
142 146
147 spin_lock_irqsave(&adapter->cmd_lock, flags);
143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 148 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
144 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 149 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
150 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
151
145 adapter->link_speed = ret >> 16; 152 adapter->link_speed = ret >> 16;
146 if (ret & 1) { /* Link is up. */ 153 if (ret & 1) { /* Link is up. */
147 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", 154 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
183 190
184 /* Check if there is an error on xmit/recv queues */ 191 /* Check if there is an error on xmit/recv queues */
185 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 192 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
193 spin_lock(&adapter->cmd_lock);
186 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 194 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
187 VMXNET3_CMD_GET_QUEUE_STATUS); 195 VMXNET3_CMD_GET_QUEUE_STATUS);
196 spin_unlock(&adapter->cmd_lock);
188 197
189 for (i = 0; i < adapter->num_tx_queues; i++) 198 for (i = 0; i < adapter->num_tx_queues; i++)
190 if (adapter->tqd_start[i].status.stopped) 199 if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
804 skb_transport_header(skb))->doff * 4; 813 skb_transport_header(skb))->doff * 4;
805 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; 814 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
806 } else { 815 } else {
807 unsigned int pull_size;
808
809 if (skb->ip_summed == CHECKSUM_PARTIAL) { 816 if (skb->ip_summed == CHECKSUM_PARTIAL) {
810 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); 817 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
811 818
812 if (ctx->ipv4) { 819 if (ctx->ipv4) {
813 struct iphdr *iph = (struct iphdr *) 820 struct iphdr *iph = (struct iphdr *)
814 skb_network_header(skb); 821 skb_network_header(skb);
815 if (iph->protocol == IPPROTO_TCP) { 822 if (iph->protocol == IPPROTO_TCP)
816 pull_size = ctx->eth_ip_hdr_size +
817 sizeof(struct tcphdr);
818
819 if (unlikely(!pskb_may_pull(skb,
820 pull_size))) {
821 goto err;
822 }
823 ctx->l4_hdr_size = ((struct tcphdr *) 823 ctx->l4_hdr_size = ((struct tcphdr *)
824 skb_transport_header(skb))->doff * 4; 824 skb_transport_header(skb))->doff * 4;
825 } else if (iph->protocol == IPPROTO_UDP) { 825 else if (iph->protocol == IPPROTO_UDP)
826 /*
827 * Use tcp header size so that bytes to
828 * be copied are more than required by
829 * the device.
830 */
826 ctx->l4_hdr_size = 831 ctx->l4_hdr_size =
827 sizeof(struct udphdr); 832 sizeof(struct tcphdr);
828 } else { 833 else
829 ctx->l4_hdr_size = 0; 834 ctx->l4_hdr_size = 0;
830 }
831 } else { 835 } else {
832 /* for simplicity, don't copy L4 headers */ 836 /* for simplicity, don't copy L4 headers */
833 ctx->l4_hdr_size = 0; 837 ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1859 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1863 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1860 struct Vmxnet3_DriverShared *shared = adapter->shared; 1864 struct Vmxnet3_DriverShared *shared = adapter->shared;
1861 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1865 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1866 unsigned long flags;
1862 1867
1863 if (grp) { 1868 if (grp) {
1864 /* add vlan rx stripping. */ 1869 /* add vlan rx stripping. */
1865 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { 1870 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
1866 int i; 1871 int i;
1867 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1868 adapter->vlan_grp = grp; 1872 adapter->vlan_grp = grp;
1869 1873
1870 /* update FEATURES to device */
1871 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1872 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1873 VMXNET3_CMD_UPDATE_FEATURE);
1874 /* 1874 /*
1875 * Clear entire vfTable; then enable untagged pkts. 1875 * Clear entire vfTable; then enable untagged pkts.
1876 * Note: setting one entry in vfTable to non-zero turns 1876 * Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1880 vfTable[i] = 0; 1880 vfTable[i] = 0;
1881 1881
1882 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); 1882 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1883 spin_lock_irqsave(&adapter->cmd_lock, flags);
1883 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1884 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1884 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1885 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1886 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1885 } else { 1887 } else {
1886 printk(KERN_ERR "%s: vlan_rx_register when device has " 1888 printk(KERN_ERR "%s: vlan_rx_register when device has "
1887 "no NETIF_F_HW_VLAN_RX\n", netdev->name); 1889 "no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1900 */ 1902 */
1901 vfTable[i] = 0; 1903 vfTable[i] = 0;
1902 } 1904 }
1905 spin_lock_irqsave(&adapter->cmd_lock, flags);
1903 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1906 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1904 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1907 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1905 1908 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1906 /* update FEATURES to device */
1907 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1908 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1909 VMXNET3_CMD_UPDATE_FEATURE);
1910 } 1909 }
1911 } 1910 }
1912} 1911}
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1939{ 1938{
1940 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1939 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1941 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1940 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1941 unsigned long flags;
1942 1942
1943 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 1943 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1944 spin_lock_irqsave(&adapter->cmd_lock, flags);
1944 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1945 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1945 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1946 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1947 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1946} 1948}
1947 1949
1948 1950
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1951{ 1953{
1952 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1954 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1953 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1955 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1956 unsigned long flags;
1954 1957
1955 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); 1958 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1959 spin_lock_irqsave(&adapter->cmd_lock, flags);
1956 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1960 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1957 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1961 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1962 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1958} 1963}
1959 1964
1960 1965
@@ -1985,6 +1990,7 @@ static void
1985vmxnet3_set_mc(struct net_device *netdev) 1990vmxnet3_set_mc(struct net_device *netdev)
1986{ 1991{
1987 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1992 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1993 unsigned long flags;
1988 struct Vmxnet3_RxFilterConf *rxConf = 1994 struct Vmxnet3_RxFilterConf *rxConf =
1989 &adapter->shared->devRead.rxFilterConf; 1995 &adapter->shared->devRead.rxFilterConf;
1990 u8 *new_table = NULL; 1996 u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2020 rxConf->mfTablePA = 0; 2026 rxConf->mfTablePA = 0;
2021 } 2027 }
2022 2028
2029 spin_lock_irqsave(&adapter->cmd_lock, flags);
2023 if (new_mode != rxConf->rxMode) { 2030 if (new_mode != rxConf->rxMode) {
2024 rxConf->rxMode = cpu_to_le32(new_mode); 2031 rxConf->rxMode = cpu_to_le32(new_mode);
2025 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2032 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2028 2035
2029 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2036 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2030 VMXNET3_CMD_UPDATE_MAC_FILTERS); 2037 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2038 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2031 2039
2032 kfree(new_table); 2040 kfree(new_table);
2033} 2041}
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2080 devRead->misc.uptFeatures |= UPT1_F_LRO; 2088 devRead->misc.uptFeatures |= UPT1_F_LRO;
2081 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); 2089 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2082 } 2090 }
2083 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && 2091 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
2084 adapter->vlan_grp) {
2085 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 2092 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2086 }
2087 2093
2088 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2094 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2089 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2095 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2168 /* rx filter settings */ 2174 /* rx filter settings */
2169 devRead->rxFilterConf.rxMode = 0; 2175 devRead->rxFilterConf.rxMode = 0;
2170 vmxnet3_restore_vlan(adapter); 2176 vmxnet3_restore_vlan(adapter);
2177 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2178
2171 /* the rest are already zeroed */ 2179 /* the rest are already zeroed */
2172} 2180}
2173 2181
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2177{ 2185{
2178 int err, i; 2186 int err, i;
2179 u32 ret; 2187 u32 ret;
2188 unsigned long flags;
2180 2189
2181 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," 2190 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2182 " ring sizes %u %u %u\n", adapter->netdev->name, 2191 " ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2206 adapter->shared_pa)); 2215 adapter->shared_pa));
2207 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( 2216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2208 adapter->shared_pa)); 2217 adapter->shared_pa));
2218 spin_lock_irqsave(&adapter->cmd_lock, flags);
2209 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2219 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2210 VMXNET3_CMD_ACTIVATE_DEV); 2220 VMXNET3_CMD_ACTIVATE_DEV);
2211 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2221 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2222 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2212 2223
2213 if (ret != 0) { 2224 if (ret != 0) {
2214 printk(KERN_ERR "Failed to activate dev %s: error %u\n", 2225 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@ rq_err:
2255void 2266void
2256vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) 2267vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2257{ 2268{
2269 unsigned long flags;
2270 spin_lock_irqsave(&adapter->cmd_lock, flags);
2258 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); 2271 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2272 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2259} 2273}
2260 2274
2261 2275
@@ -2263,12 +2277,15 @@ int
2263vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2277vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2264{ 2278{
2265 int i; 2279 int i;
2280 unsigned long flags;
2266 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2281 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2267 return 0; 2282 return 0;
2268 2283
2269 2284
2285 spin_lock_irqsave(&adapter->cmd_lock, flags);
2270 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2286 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2271 VMXNET3_CMD_QUIESCE_DEV); 2287 VMXNET3_CMD_QUIESCE_DEV);
2288 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2272 vmxnet3_disable_all_intrs(adapter); 2289 vmxnet3_disable_all_intrs(adapter);
2273 2290
2274 for (i = 0; i < adapter->num_rx_queues; i++) 2291 for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2426 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 2443 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2427 ring0_size = adapter->rx_queue[0].rx_ring[0].size; 2444 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2428 ring0_size = (ring0_size + sz - 1) / sz * sz; 2445 ring0_size = (ring0_size + sz - 1) / sz * sz;
2429 ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE / 2446 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2430 sz * sz); 2447 sz * sz);
2431 ring1_size = adapter->rx_queue[0].rx_ring[1].size; 2448 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2432 comp_size = ring0_size + ring1_size; 2449 comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2695 break; 2712 break;
2696 } else { 2713 } else {
 2697 /* If it fails to enable the required number of MSI-X vectors, 2714 /* If it fails to enable the required number of MSI-X vectors,
 2698 * try enabling 3 of them. One each for rx, tx and event 2715 * try enabling the minimum number of vectors required.
2699 */ 2716 */
2700 vectors = vector_threshold; 2717 vectors = vector_threshold;
2701 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" 2718 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2718 u32 cfg; 2735 u32 cfg;
2719 2736
2720 /* intr settings */ 2737 /* intr settings */
2738 spin_lock(&adapter->cmd_lock);
2721 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2739 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2722 VMXNET3_CMD_GET_CONF_INTR); 2740 VMXNET3_CMD_GET_CONF_INTR);
2723 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2741 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2742 spin_unlock(&adapter->cmd_lock);
2724 adapter->intr.type = cfg & 0x3; 2743 adapter->intr.type = cfg & 0x3;
2725 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2744 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2726 2745
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2755 */ 2774 */
2756 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { 2775 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2757 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE 2776 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2758 || adapter->num_rx_queues != 2) { 2777 || adapter->num_rx_queues != 1) {
2759 adapter->share_intr = VMXNET3_INTR_TXSHARE; 2778 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2760 printk(KERN_ERR "Number of rx queues : 1\n"); 2779 printk(KERN_ERR "Number of rx queues : 1\n");
2761 adapter->num_rx_queues = 1; 2780 adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2905 adapter->netdev = netdev; 2924 adapter->netdev = netdev;
2906 adapter->pdev = pdev; 2925 adapter->pdev = pdev;
2907 2926
2927 spin_lock_init(&adapter->cmd_lock);
2908 adapter->shared = pci_alloc_consistent(adapter->pdev, 2928 adapter->shared = pci_alloc_consistent(adapter->pdev,
2909 sizeof(struct Vmxnet3_DriverShared), 2929 sizeof(struct Vmxnet3_DriverShared),
2910 &adapter->shared_pa); 2930 &adapter->shared_pa);
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device)
3108 u8 *arpreq; 3128 u8 *arpreq;
3109 struct in_device *in_dev; 3129 struct in_device *in_dev;
3110 struct in_ifaddr *ifa; 3130 struct in_ifaddr *ifa;
3131 unsigned long flags;
3111 int i = 0; 3132 int i = 0;
3112 3133
3113 if (!netif_running(netdev)) 3134 if (!netif_running(netdev))
3114 return 0; 3135 return 0;
3115 3136
3137 for (i = 0; i < adapter->num_rx_queues; i++)
3138 napi_disable(&adapter->rx_queue[i].napi);
3139
3116 vmxnet3_disable_all_intrs(adapter); 3140 vmxnet3_disable_all_intrs(adapter);
3117 vmxnet3_free_irqs(adapter); 3141 vmxnet3_free_irqs(adapter);
3118 vmxnet3_free_intr_resources(adapter); 3142 vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@ skip_arp:
3188 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3212 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3189 pmConf)); 3213 pmConf));
3190 3214
3215 spin_lock_irqsave(&adapter->cmd_lock, flags);
3191 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3192 VMXNET3_CMD_UPDATE_PMCFG); 3217 VMXNET3_CMD_UPDATE_PMCFG);
3218 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3193 3219
3194 pci_save_state(pdev); 3220 pci_save_state(pdev);
3195 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), 3221 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@ skip_arp:
3204static int 3230static int
3205vmxnet3_resume(struct device *device) 3231vmxnet3_resume(struct device *device)
3206{ 3232{
3207 int err; 3233 int err, i = 0;
3234 unsigned long flags;
3208 struct pci_dev *pdev = to_pci_dev(device); 3235 struct pci_dev *pdev = to_pci_dev(device);
3209 struct net_device *netdev = pci_get_drvdata(pdev); 3236 struct net_device *netdev = pci_get_drvdata(pdev);
3210 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3237 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device)
3232 3259
3233 pci_enable_wake(pdev, PCI_D0, 0); 3260 pci_enable_wake(pdev, PCI_D0, 0);
3234 3261
3262 spin_lock_irqsave(&adapter->cmd_lock, flags);
3235 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3263 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3236 VMXNET3_CMD_UPDATE_PMCFG); 3264 VMXNET3_CMD_UPDATE_PMCFG);
3265 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3237 vmxnet3_alloc_intr_resources(adapter); 3266 vmxnet3_alloc_intr_resources(adapter);
3238 vmxnet3_request_irqs(adapter); 3267 vmxnet3_request_irqs(adapter);
3268 for (i = 0; i < adapter->num_rx_queues; i++)
3269 napi_enable(&adapter->rx_queue[i].napi);
3239 vmxnet3_enable_all_intrs(adapter); 3270 vmxnet3_enable_all_intrs(adapter);
3240 3271
3241 return 0; 3272 return 0;
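
[Note on the vmxnet3_drv.c hunks above] The common thread is the new adapter->cmd_lock: the device command interface is a write-to-VMXNET3_REG_CMD, read-back-from-VMXNET3_REG_CMD handshake, and without serialization two callers (ethtool, link checks, VLAN filter updates, suspend/resume) could interleave so that one reads the other's result. Suspend/resume additionally gain napi_disable/napi_enable bracketing around the quiesce. A sketch of why the lock is needed, with a pthread mutex in place of the spinlock and the register behavior faked:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned cmd_reg;	/* stands in for VMXNET3_REG_CMD */

static unsigned issue_cmd(unsigned cmd)
{
	unsigned ret;

	pthread_mutex_lock(&cmd_lock);
	cmd_reg = cmd;		/* VMXNET3_WRITE_BAR1_REG(..., cmd) */
	ret = cmd_reg ^ 1;	/* fake device answer; read must pair with
				 * *our* write, hence the lock */
	pthread_mutex_unlock(&cmd_lock);
	return ret;
}

int main(void)
{
	printf("result=%u\n", issue_cmd(42));
	return 0;
}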
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe..51f2ef142a5 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) 45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
46{ 46{
47 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 47 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
48 unsigned long flags;
48 49
49 if (adapter->rxcsum != val) { 50 if (adapter->rxcsum != val) {
50 adapter->rxcsum = val; 51 adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
56 adapter->shared->devRead.misc.uptFeatures &= 57 adapter->shared->devRead.misc.uptFeatures &=
57 ~UPT1_F_RXCSUM; 58 ~UPT1_F_RXCSUM;
58 59
60 spin_lock_irqsave(&adapter->cmd_lock, flags);
59 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 61 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
60 VMXNET3_CMD_UPDATE_FEATURE); 62 VMXNET3_CMD_UPDATE_FEATURE);
63 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
61 } 64 }
62 } 65 }
63 return 0; 66 return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
68static const struct vmxnet3_stat_desc 71static const struct vmxnet3_stat_desc
69vmxnet3_tq_dev_stats[] = { 72vmxnet3_tq_dev_stats[] = {
70 /* description, offset */ 73 /* description, offset */
71 { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, 74 { "Tx Queue#", 0 },
72 { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, 75 { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
73 { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, 76 { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
74 { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, 77 { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
75 { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, 78 { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
76 { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, 79 { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
77 { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, 80 { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
78 { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, 81 { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
79 { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, 82 { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
80 { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, 83 { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
84 { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
81}; 85};
82 86
83/* per tq stats maintained by the driver */ 87/* per tq stats maintained by the driver */
84static const struct vmxnet3_stat_desc 88static const struct vmxnet3_stat_desc
85vmxnet3_tq_driver_stats[] = { 89vmxnet3_tq_driver_stats[] = {
86 /* description, offset */ 90 /* description, offset */
87 {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, 91 {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
88 drop_total) }, 92 drop_total) },
89 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, 93 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
90 drop_too_many_frags) }, 94 drop_too_many_frags) },
91 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, 95 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
92 drop_oversized_hdr) }, 96 drop_oversized_hdr) },
93 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, 97 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
94 drop_hdr_inspect_err) }, 98 drop_hdr_inspect_err) },
95 { " tso", offsetof(struct vmxnet3_tq_driver_stats, 99 { " tso", offsetof(struct vmxnet3_tq_driver_stats,
96 drop_tso) }, 100 drop_tso) },
97 { "ring full", offsetof(struct vmxnet3_tq_driver_stats, 101 { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
98 tx_ring_full) }, 102 tx_ring_full) },
99 { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, 103 { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
100 linearized) }, 104 linearized) },
101 { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, 105 { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
102 copy_skb_header) }, 106 copy_skb_header) },
103 { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, 107 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
104 oversized_hdr) }, 108 oversized_hdr) },
105}; 109};
106 110
107/* per rq stats maintained by the device */ 111/* per rq stats maintained by the device */
108static const struct vmxnet3_stat_desc 112static const struct vmxnet3_stat_desc
109vmxnet3_rq_dev_stats[] = { 113vmxnet3_rq_dev_stats[] = {
110 { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, 114 { "Rx Queue#", 0 },
111 { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, 115 { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
112 { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, 116 { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
113 { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, 117 { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
114 { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, 118 { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
115 { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, 119 { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
116 { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, 120 { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
117 { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, 121 { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
118 { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, 122 { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
119 { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, 123 { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
124 { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
120}; 125};
121 126
122/* per rq stats maintained by the driver */ 127/* per rq stats maintained by the driver */
123static const struct vmxnet3_stat_desc 128static const struct vmxnet3_stat_desc
124vmxnet3_rq_driver_stats[] = { 129vmxnet3_rq_driver_stats[] = {
125 /* description, offset */ 130 /* description, offset */
126 { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, 131 { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
127 drop_total) }, 132 drop_total) },
128 { " err", offsetof(struct vmxnet3_rq_driver_stats, 133 { " err", offsetof(struct vmxnet3_rq_driver_stats,
129 drop_err) }, 134 drop_err) },
130 { " fcs", offsetof(struct vmxnet3_rq_driver_stats, 135 { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
131 drop_fcs) }, 136 drop_fcs) },
132 { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, 137 { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
133 rx_buf_alloc_failure) }, 138 rx_buf_alloc_failure) },
134}; 139};
135 140
 136/* global stats maintained by the driver */ 141/* global stats maintained by the driver */
137static const struct vmxnet3_stat_desc 142static const struct vmxnet3_stat_desc
138vmxnet3_global_stats[] = { 143vmxnet3_global_stats[] = {
139 /* description, offset */ 144 /* description, offset */
140 { "tx timeout count", offsetof(struct vmxnet3_adapter, 145 { "tx timeout count", offsetof(struct vmxnet3_adapter,
141 tx_timeout_count) } 146 tx_timeout_count) }
142}; 147};
143 148
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
151 struct UPT1_TxStats *devTxStats; 156 struct UPT1_TxStats *devTxStats;
152 struct UPT1_RxStats *devRxStats; 157 struct UPT1_RxStats *devRxStats;
153 struct net_device_stats *net_stats = &netdev->stats; 158 struct net_device_stats *net_stats = &netdev->stats;
159 unsigned long flags;
154 int i; 160 int i;
155 161
156 adapter = netdev_priv(netdev); 162 adapter = netdev_priv(netdev);
157 163
158 /* Collect the dev stats into the shared area */ 164 /* Collect the dev stats into the shared area */
165 spin_lock_irqsave(&adapter->cmd_lock, flags);
159 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 166 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
167 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
160 168
161 memset(net_stats, 0, sizeof(*net_stats)); 169 memset(net_stats, 0, sizeof(*net_stats));
162 for (i = 0; i < adapter->num_tx_queues; i++) { 170 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
193static int 201static int
194vmxnet3_get_sset_count(struct net_device *netdev, int sset) 202vmxnet3_get_sset_count(struct net_device *netdev, int sset)
195{ 203{
204 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
196 switch (sset) { 205 switch (sset) {
197 case ETH_SS_STATS: 206 case ETH_SS_STATS:
198 return ARRAY_SIZE(vmxnet3_tq_dev_stats) + 207 return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
199 ARRAY_SIZE(vmxnet3_tq_driver_stats) + 208 ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
200 ARRAY_SIZE(vmxnet3_rq_dev_stats) + 209 adapter->num_tx_queues +
201 ARRAY_SIZE(vmxnet3_rq_driver_stats) + 210 (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
211 ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
212 adapter->num_rx_queues +
202 ARRAY_SIZE(vmxnet3_global_stats); 213 ARRAY_SIZE(vmxnet3_global_stats);
203 default: 214 default:
204 return -EOPNOTSUPP; 215 return -EOPNOTSUPP;
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
206} 217}
207 218
208 219
220/* Should be multiple of 4 */
221#define NUM_TX_REGS 8
222#define NUM_RX_REGS 12
223
209static int 224static int
210vmxnet3_get_regs_len(struct net_device *netdev) 225vmxnet3_get_regs_len(struct net_device *netdev)
211{ 226{
212 return 20 * sizeof(u32); 227 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
228 return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
229 adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
213} 230}
214 231
215 232
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
240static void 257static void
241vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) 258vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
242{ 259{
260 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
243 if (stringset == ETH_SS_STATS) { 261 if (stringset == ETH_SS_STATS) {
244 int i; 262 int i, j;
245 263 for (j = 0; j < adapter->num_tx_queues; j++) {
246 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { 264 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
247 memcpy(buf, vmxnet3_tq_dev_stats[i].desc, 265 memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
248 ETH_GSTRING_LEN); 266 ETH_GSTRING_LEN);
249 buf += ETH_GSTRING_LEN; 267 buf += ETH_GSTRING_LEN;
250 } 268 }
251 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { 269 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
252 memcpy(buf, vmxnet3_tq_driver_stats[i].desc, 270 i++) {
253 ETH_GSTRING_LEN); 271 memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
254 buf += ETH_GSTRING_LEN; 272 ETH_GSTRING_LEN);
255 } 273 buf += ETH_GSTRING_LEN;
256 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { 274 }
257 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
258 ETH_GSTRING_LEN);
259 buf += ETH_GSTRING_LEN;
260 } 275 }
261 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { 276
262 memcpy(buf, vmxnet3_rq_driver_stats[i].desc, 277 for (j = 0; j < adapter->num_rx_queues; j++) {
263 ETH_GSTRING_LEN); 278 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
264 buf += ETH_GSTRING_LEN; 279 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
280 ETH_GSTRING_LEN);
281 buf += ETH_GSTRING_LEN;
282 }
283 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
284 i++) {
285 memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
286 ETH_GSTRING_LEN);
287 buf += ETH_GSTRING_LEN;
288 }
265 } 289 }
290
266 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { 291 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
267 memcpy(buf, vmxnet3_global_stats[i].desc, 292 memcpy(buf, vmxnet3_global_stats[i].desc,
268 ETH_GSTRING_LEN); 293 ETH_GSTRING_LEN);
@@ -277,9 +302,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
277 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 302 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
278 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; 303 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
279 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; 304 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
305 unsigned long flags;
280 306
281 if (data & ~ETH_FLAG_LRO) 307 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
282 return -EOPNOTSUPP; 308 return -EINVAL;
283 309
284 if (lro_requested ^ lro_present) { 310 if (lro_requested ^ lro_present) {
285 /* toggle the LRO feature*/ 311 /* toggle the LRO feature*/
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
292 else 318 else
293 adapter->shared->devRead.misc.uptFeatures &= 319 adapter->shared->devRead.misc.uptFeatures &=
294 ~UPT1_F_LRO; 320 ~UPT1_F_LRO;
321 spin_lock_irqsave(&adapter->cmd_lock, flags);
295 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 322 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
296 VMXNET3_CMD_UPDATE_FEATURE); 323 VMXNET3_CMD_UPDATE_FEATURE);
324 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
297 } 325 }
298 return 0; 326 return 0;
299} 327}
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
303 struct ethtool_stats *stats, u64 *buf) 331 struct ethtool_stats *stats, u64 *buf)
304{ 332{
305 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 333 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
334 unsigned long flags;
306 u8 *base; 335 u8 *base;
307 int i; 336 int i;
308 int j = 0; 337 int j = 0;
309 338
339 spin_lock_irqsave(&adapter->cmd_lock, flags);
310 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 340 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
341 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
311 342
312 /* this does assume each counter is 64-bit wide */ 343 /* this does assume each counter is 64-bit wide */
313/* TODO change this for multiple queues */ 344 for (j = 0; j < adapter->num_tx_queues; j++) {
314 345 base = (u8 *)&adapter->tqd_start[j].stats;
315 base = (u8 *)&adapter->tqd_start[j].stats; 346 *buf++ = (u64)j;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 347 for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); 348 *buf++ = *(u64 *)(base +
318 349 vmxnet3_tq_dev_stats[i].offset);
319 base = (u8 *)&adapter->tx_queue[j].stats; 350
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 351 base = (u8 *)&adapter->tx_queue[j].stats;
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); 352 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
322 353 *buf++ = *(u64 *)(base +
323 base = (u8 *)&adapter->rqd_start[j].stats; 354 vmxnet3_tq_driver_stats[i].offset);
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 355 }
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326 356
327 base = (u8 *)&adapter->rx_queue[j].stats; 357 for (j = 0; j < adapter->num_tx_queues; j++) {
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 358 base = (u8 *)&adapter->rqd_start[j].stats;
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); 359 *buf++ = (u64) j;
360 for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
361 *buf++ = *(u64 *)(base +
362 vmxnet3_rq_dev_stats[i].offset);
363
364 base = (u8 *)&adapter->rx_queue[j].stats;
365 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
366 *buf++ = *(u64 *)(base +
367 vmxnet3_rq_driver_stats[i].offset);
368 }
330 369
331 base = (u8 *)adapter; 370 base = (u8 *)adapter;
332 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) 371 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{ 378{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 379 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p; 380 u32 *buf = p;
342 int i = 0; 381 int i = 0, j = 0;
343 382
344 memset(p, 0, vmxnet3_get_regs_len(netdev)); 383 memset(p, 0, vmxnet3_get_regs_len(netdev));
345 384
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
348 /* Update vmxnet3_get_regs_len if we want to dump more registers */ 387 /* Update vmxnet3_get_regs_len if we want to dump more registers */
349 388
350 /* make each ring use multiple of 16 bytes */ 389 /* make each ring use multiple of 16 bytes */
351/* TODO change this for multiple queues */ 390 for (i = 0; i < adapter->num_tx_queues; i++) {
352 buf[0] = adapter->tx_queue[i].tx_ring.next2fill; 391 buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
353 buf[1] = adapter->tx_queue[i].tx_ring.next2comp; 392 buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
354 buf[2] = adapter->tx_queue[i].tx_ring.gen; 393 buf[j++] = adapter->tx_queue[i].tx_ring.gen;
355 buf[3] = 0; 394 buf[j++] = 0;
356 395
357 buf[4] = adapter->tx_queue[i].comp_ring.next2proc; 396 buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
358 buf[5] = adapter->tx_queue[i].comp_ring.gen; 397 buf[j++] = adapter->tx_queue[i].comp_ring.gen;
359 buf[6] = adapter->tx_queue[i].stopped; 398 buf[j++] = adapter->tx_queue[i].stopped;
360 buf[7] = 0; 399 buf[j++] = 0;
361 400 }
362 buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill; 401
363 buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp; 402 for (i = 0; i < adapter->num_rx_queues; i++) {
364 buf[10] = adapter->rx_queue[i].rx_ring[0].gen; 403 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
365 buf[11] = 0; 404 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
366 405 buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
367 buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill; 406 buf[j++] = 0;
368 buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp; 407
369 buf[14] = adapter->rx_queue[i].rx_ring[1].gen; 408 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
370 buf[15] = 0; 409 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
371 410 buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
372 buf[16] = adapter->rx_queue[i].comp_ring.next2proc; 411 buf[j++] = 0;
373 buf[17] = adapter->rx_queue[i].comp_ring.gen; 412
374 buf[18] = 0; 413 buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
375 buf[19] = 0; 414 buf[j++] = adapter->rx_queue[i].comp_ring.gen;
415 buf[j++] = 0;
416 buf[j++] = 0;
417 }
418
376} 419}
377 420
378 421
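With a single queue the old dump could write fixed slots buf[0]..buf[19]; with several queues each pass has to append at a running index j, as the rewritten loops above do. A minimal sketch of the tx side under that scheme (struct and field names are illustrative, not the driver's):

#include <linux/types.h>

struct example_txq {
	u32 next2fill, next2comp, ring_gen;
	u32 next2proc, comp_gen, stopped;
};

/* Returns the number of u32 words written: 8 per queue, so the
 * ethtool regs length must scale with the queue count. */
static int example_dump_tx_regs(u32 *buf, const struct example_txq *q, int nq)
{
	int i, j = 0;

	for (i = 0; i < nq; i++) {
		buf[j++] = q[i].next2fill;
		buf[j++] = q[i].next2comp;
		buf[j++] = q[i].ring_gen;
		buf[j++] = 0;			/* pad each ring to 16 bytes */
		buf[j++] = q[i].next2proc;
		buf[j++] = q[i].comp_gen;
		buf[j++] = q[i].stopped;
		buf[j++] = 0;			/* pad */
	}

	return j;
}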
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
574 const struct ethtool_rxfh_indir *p) 617 const struct ethtool_rxfh_indir *p)
575{ 618{
576 unsigned int i; 619 unsigned int i;
620 unsigned long flags;
577 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 621 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
578 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 622 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
579 623
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
592 for (i = 0; i < rssConf->indTableSize; i++) 636 for (i = 0; i < rssConf->indTableSize; i++)
593 rssConf->indTable[i] = p->ring_index[i]; 637 rssConf->indTable[i] = p->ring_index[i];
594 638
639 spin_lock_irqsave(&adapter->cmd_lock, flags);
595 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 640 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
596 VMXNET3_CMD_UPDATE_RSSIDT); 641 VMXNET3_CMD_UPDATE_RSSIDT);
642 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
597 643
598 return 0; 644 return 0;
599 645
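The cmd_lock taken around the VMXNET3_REG_CMD write above is the pattern this patch applies to every command-register access: commands can be a write followed by a result read, so two contexts must not interleave their pairs. A minimal sketch with generic readl()/writel() and an assumed register offset, rather than the driver's BAR macros:

#include <linux/io.h>
#include <linux/spinlock.h>

#define EXAMPLE_REG_CMD	0x20	/* assumed offset, for illustration only */

static u32 example_issue_cmd(void __iomem *bar1, spinlock_t *cmd_lock, u32 cmd)
{
	unsigned long flags;
	u32 ret;

	spin_lock_irqsave(cmd_lock, flags);	/* safe from any context */
	writel(cmd, bar1 + EXAMPLE_REG_CMD);	/* kick the command */
	ret = readl(bar1 + EXAMPLE_REG_CMD);	/* read back its result */
	spin_unlock_irqrestore(cmd_lock, flags);

	return ret;
}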
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed37f0..fb5d245ac87 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
72 72
73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01001000 74#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
75 75
76#if defined(CONFIG_PCI_MSI) 76#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */ 77 /* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue {
289 289
290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ 290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1) 291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
292#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */ 292#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
293 293
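The arithmetic behind the new minimum: a tx/rx queue pair can now share one vector, so the floor is 1 (shared tx-rx) + 1 (event) = 2 instead of 1 + 1 + 1 = 3, while the maximum above stays VMXNET3_DEVICE_MAX_TX_QUEUES + VMXNET3_DEVICE_MAX_RX_QUEUES + 1.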
294 294
295struct vmxnet3_intr { 295struct vmxnet3_intr {
@@ -317,6 +317,7 @@ struct vmxnet3_adapter {
317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; 317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
318 struct vlan_group *vlan_grp; 318 struct vlan_group *vlan_grp;
319 struct vmxnet3_intr intr; 319 struct vmxnet3_intr intr;
320 spinlock_t cmd_lock;
320 struct Vmxnet3_DriverShared *shared; 321 struct Vmxnet3_DriverShared *shared;
321 struct Vmxnet3_PMConf *pm_conf; 322 struct Vmxnet3_PMConf *pm_conf;
322 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ 323 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f53e2f..e74e4b42592 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
387 data1 = steer_ctrl = 0; 387 data1 = steer_ctrl = 0;
388 388
389 status = vxge_hw_vpath_fw_api(vpath, 389 status = vxge_hw_vpath_fw_api(vpath,
390 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 VXGE_HW_FW_API_GET_EPROM_REV, 390 VXGE_HW_FW_API_GET_EPROM_REV,
391 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
392 0, &data0, &data1, &steer_ctrl); 392 0, &data0, &data1, &steer_ctrl);
393 if (status != VXGE_HW_OK) 393 if (status != VXGE_HW_OK)
394 break; 394 break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2868 ring->rxd_init = attr->rxd_init; 2868 ring->rxd_init = attr->rxd_init;
2869 ring->rxd_term = attr->rxd_term; 2869 ring->rxd_term = attr->rxd_term;
2870 ring->buffer_mode = config->buffer_mode; 2870 ring->buffer_mode = config->buffer_mode;
2871 ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
2872 ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
2871 ring->rxds_limit = config->rxds_limit; 2873 ring->rxds_limit = config->rxds_limit;
2872 2874
2873 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); 2875 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
3511 3513
3512 /* apply "interrupts per txdl" attribute */ 3514 /* apply "interrupts per txdl" attribute */
3513 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; 3515 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
3516 fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
3517 fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
3514 3518
3515 if (fifo->config->intr) 3519 if (fifo->config->intr)
3516 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; 3520 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -3690,7 +3694,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3690 if (status != VXGE_HW_OK) 3694 if (status != VXGE_HW_OK)
3691 goto exit; 3695 goto exit;
3692 3696
3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || 3697 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3694 (rts_table != 3698 (rts_table !=
3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) 3699 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3696 *data1 = 0; 3700 *data1 = 0;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4377 } 4381 }
4378 4382
4379 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); 4383 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4384 vpath->tim_tti_cfg1_saved = val64;
4385
4380 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); 4386 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4381 4387
4382 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { 4388 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4433 } 4439 }
4434 4440
4435 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); 4441 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4442 vpath->tim_tti_cfg3_saved = val64;
4436 } 4443 }
4437 4444
4438 if (config->ring.enable == VXGE_HW_RING_ENABLE) { 4445 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4481 } 4488 }
4482 4489
4483 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); 4490 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4491 vpath->tim_rti_cfg1_saved = val64;
4492
4484 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); 4493 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4485 4494
4486 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { 4495 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4537 } 4546 }
4538 4547
4539 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); 4548 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4549 vpath->tim_rti_cfg3_saved = val64;
4540 } 4550 }
4541 4551
4542 val64 = 0; 4552 val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4555 return status; 4565 return status;
4556} 4566}
4557 4567
4558void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4559{
4560 struct __vxge_hw_virtualpath *vpath;
4561 struct vxge_hw_vpath_reg __iomem *vp_reg;
4562 struct vxge_hw_vp_config *config;
4563 u64 val64;
4564
4565 vpath = &hldev->virtual_paths[vp_id];
4566 vp_reg = vpath->vp_reg;
4567 config = vpath->vp_config;
4568
4569 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
4570 config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4571 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4572 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4573 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4574 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4575 }
4576}
4577
4578/* 4568/*
4579 * __vxge_hw_vpath_initialize 4569 * __vxge_hw_vpath_initialize
4580 * This routine is the final phase of init which initializes the 4570 * This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e288d16..3c53aa732c9 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
682 u32 vsport_number; 682 u32 vsport_number;
683 u32 max_kdfc_db; 683 u32 max_kdfc_db;
684 u32 max_nofl_db; 684 u32 max_nofl_db;
685 u64 tim_tti_cfg1_saved;
686 u64 tim_tti_cfg3_saved;
687 u64 tim_rti_cfg1_saved;
688 u64 tim_rti_cfg3_saved;
685 689
686 struct __vxge_hw_ring *____cacheline_aligned ringh; 690 struct __vxge_hw_ring *____cacheline_aligned ringh;
687 struct __vxge_hw_fifo *____cacheline_aligned fifoh; 691 struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
921 u32 doorbell_cnt; 925 u32 doorbell_cnt;
922 u32 total_db_cnt; 926 u32 total_db_cnt;
923 u64 rxds_limit; 927 u64 rxds_limit;
928 u32 rtimer;
929 u64 tim_rti_cfg1_saved;
930 u64 tim_rti_cfg3_saved;
924 931
925 enum vxge_hw_status (*callback)( 932 enum vxge_hw_status (*callback)(
926 struct __vxge_hw_ring *ringh, 933 struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
1000 u32 per_txdl_space; 1007 u32 per_txdl_space;
1001 u32 vp_id; 1008 u32 vp_id;
1002 u32 tx_intr_num; 1009 u32 tx_intr_num;
1010 u32 rtimer;
1011 u64 tim_tti_cfg1_saved;
1012 u64 tim_tti_cfg3_saved;
1003 1013
1004 enum vxge_hw_status (*callback)( 1014 enum vxge_hw_status (*callback)(
1005 struct __vxge_hw_fifo *fifo_handle, 1015 struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 1dd3a21b3a4..c5eb034107f 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1117,8 +1117,8 @@ static int vxge_set_flags(struct net_device *dev, u32 data)
1117 struct vxgedev *vdev = netdev_priv(dev); 1117 struct vxgedev *vdev = netdev_priv(dev);
1118 enum vxge_hw_status status; 1118 enum vxge_hw_status status;
1119 1119
1120 if (data & ~ETH_FLAG_RXHASH) 1120 if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH))
1121 return -EOPNOTSUPP; 1121 return -EINVAL;
1122 1122
1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) 1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
1124 return 0; 1124 return 0;
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a6512c68..395423aeec0 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
371 struct vxge_hw_ring_rxd_info ext_info; 371 struct vxge_hw_ring_rxd_info ext_info;
372 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 372 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
373 ring->ndev->name, __func__, __LINE__); 373 ring->ndev->name, __func__, __LINE__);
374 ring->pkts_processed = 0;
375
376 vxge_hw_ring_replenish(ringh);
377 374
378 do { 375 do {
379 prefetch((char *)dtr + L1_CACHE_BYTES); 376 prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1588 return ret; 1585 return ret;
1589} 1586}
1590 1587
1588/* Configure CI */
1589static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1590{
1591 int i = 0;
1592
1593 /* Enable CI for RTI */
1594 if (vdev->config.intr_type == MSI_X) {
1595 for (i = 0; i < vdev->no_of_vpath; i++) {
1596 struct __vxge_hw_ring *hw_ring;
1597
1598 hw_ring = vdev->vpaths[i].ring.handle;
1599 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1600 }
1601 }
1602
1603 /* Enable CI for TTI */
1604 for (i = 0; i < vdev->no_of_vpath; i++) {
1605 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1606 vxge_hw_vpath_tti_ci_set(hw_fifo);
1607 /*
1608 * For INTA (with or without NAPI), set CI on only one
1609 * vpath, since there is only one free-running timer.
1610 */
1611 if ((vdev->config.intr_type == INTA) && (i == 0))
1612 break;
1613 }
1614
1615 return;
1616}
1617
1591static int do_vxge_reset(struct vxgedev *vdev, int event) 1618static int do_vxge_reset(struct vxgedev *vdev, int event)
1592{ 1619{
1593 enum vxge_hw_status status; 1620 enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1753 netif_tx_wake_all_queues(vdev->ndev); 1780 netif_tx_wake_all_queues(vdev->ndev);
1754 } 1781 }
1755 1782
1783 /* configure CI */
1784 vxge_config_ci_for_tti_rti(vdev);
1785
1756out: 1786out:
1757 vxge_debug_entryexit(VXGE_TRACE, 1787 vxge_debug_entryexit(VXGE_TRACE,
1758 "%s:%d Exiting...", __func__, __LINE__); 1788 "%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
1793 */ 1823 */
1794static int vxge_poll_msix(struct napi_struct *napi, int budget) 1824static int vxge_poll_msix(struct napi_struct *napi, int budget)
1795{ 1825{
1796 struct vxge_ring *ring = 1826 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1797 container_of(napi, struct vxge_ring, napi); 1827 int pkts_processed;
1798 int budget_org = budget; 1828 int budget_org = budget;
1799 ring->budget = budget;
1800 1829
1830 ring->budget = budget;
1831 ring->pkts_processed = 0;
1801 vxge_hw_vpath_poll_rx(ring->handle); 1832 vxge_hw_vpath_poll_rx(ring->handle);
1833 pkts_processed = ring->pkts_processed;
1802 1834
1803 if (ring->pkts_processed < budget_org) { 1835 if (ring->pkts_processed < budget_org) {
1804 napi_complete(napi); 1836 napi_complete(napi);
1837
1805 /* Re enable the Rx interrupts for the vpath */ 1838 /* Re enable the Rx interrupts for the vpath */
1806 vxge_hw_channel_msix_unmask( 1839 vxge_hw_channel_msix_unmask(
1807 (struct __vxge_hw_channel *)ring->handle, 1840 (struct __vxge_hw_channel *)ring->handle,
1808 ring->rx_vector_no); 1841 ring->rx_vector_no);
1842 mmiowb();
1809 } 1843 }
1810 1844
1811 return ring->pkts_processed; 1845 /* Return the local copy: once the msix interrupt is unmasked
1846 * above, it can fire right away and preempt this NAPI thread,
1847 * changing ring->pkts_processed under us. */
1848 return pkts_processed;
1812} 1849}
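The rewritten poll above follows a general NAPI rule: after napi_complete() and the unmask, the vector may fire at once and a fresh invocation can update the ring's counters, so the return value has to be snapshotted first. A sketch of that shape; the ring layout and the example_* helpers are hypothetical:

#include <linux/netdevice.h>

struct example_ring {
	struct napi_struct napi;
	int pkts_processed;	/* written by the rx completion path */
};

/* Hypothetical helpers, declared only to keep the sketch self-contained. */
static void example_process_rx(struct example_ring *ring, int budget);
static void example_unmask_irq(struct example_ring *ring);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *ring = container_of(napi, struct example_ring, napi);
	int done;

	ring->pkts_processed = 0;
	example_process_rx(ring, budget);	/* fills pkts_processed */
	done = ring->pkts_processed;		/* snapshot before re-arming */

	if (done < budget) {
		napi_complete(napi);
		example_unmask_irq(ring);	/* the IRQ may fire right here */
	}

	return done;	/* never re-read ring->pkts_processed */
}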
1813 1850
1814static int vxge_poll_inta(struct napi_struct *napi, int budget) 1851static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1824 for (i = 0; i < vdev->no_of_vpath; i++) { 1861 for (i = 0; i < vdev->no_of_vpath; i++) {
1825 ring = &vdev->vpaths[i].ring; 1862 ring = &vdev->vpaths[i].ring;
1826 ring->budget = budget; 1863 ring->budget = budget;
1864 ring->pkts_processed = 0;
1827 vxge_hw_vpath_poll_rx(ring->handle); 1865 vxge_hw_vpath_poll_rx(ring->handle);
1828 pkts_processed += ring->pkts_processed; 1866 pkts_processed += ring->pkts_processed;
1829 budget -= ring->pkts_processed; 1867 budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2054 netdev_get_tx_queue(vdev->ndev, 0); 2092 netdev_get_tx_queue(vdev->ndev, 0);
2055 vpath->fifo.indicate_max_pkts = 2093 vpath->fifo.indicate_max_pkts =
2056 vdev->config.fifo_indicate_max_pkts; 2094 vdev->config.fifo_indicate_max_pkts;
2095 vpath->fifo.tx_vector_no = 0;
2057 vpath->ring.rx_vector_no = 0; 2096 vpath->ring.rx_vector_no = 0;
2058 vpath->ring.rx_csum = vdev->rx_csum; 2097 vpath->ring.rx_csum = vdev->rx_csum;
2059 vpath->ring.rx_hwts = vdev->rx_hwts; 2098 vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2079 return VXGE_HW_OK; 2118 return VXGE_HW_OK;
2080} 2119}
2081 2120
2121/**
2122 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2123 * when the interrupt rate is not within a range
2124 * @fifo: pointer to transmit fifo structure
2125 * Description: The function changes the boundary timer and restriction
2126 * timer values depending on the traffic
2127 * Return Value: None
2128 */
2129static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2130{
2131 fifo->interrupt_count++;
2132 if (jiffies > fifo->jiffies + HZ / 100) {
2133 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2134
2135 fifo->jiffies = jiffies;
2136 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2137 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2138 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2139 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2140 } else if (hw_fifo->rtimer != 0) {
2141 hw_fifo->rtimer = 0;
2142 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2143 }
2144 fifo->interrupt_count = 0;
2145 }
2146}
2147
2148/**
2149 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2150 * when the interrupt rate is not within a range
2151 * @ring: pointer to receive ring structure
2152 * Description: The function increases or decreases the restriction timer
2153 * based on traffic utilization, whenever the interrupt rate due to this
2154 * ring is not within a fixed range.
2155 * Return Value: None
2156 */
2157static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2158{
2159 ring->interrupt_count++;
2160 if (jiffies > ring->jiffies + HZ / 100) {
2161 struct __vxge_hw_ring *hw_ring = ring->handle;
2162
2163 ring->jiffies = jiffies;
2164 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2165 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2166 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2167 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2168 } else if (hw_ring->rtimer != 0) {
2169 hw_ring->rtimer = 0;
2170 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2171 }
2172 ring->interrupt_count = 0;
2173 }
2174}
2175
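Both helpers above share one shape: count interrupts, and each time a roughly 10 ms window (HZ / 100 jiffies) closes, either program the adaptive restriction timer or drop it back to zero, touching the hardware only on a change. A generic sketch; note that time_after() is the wraparound-safe way to compare jiffies where these helpers open-code '>':

#include <linux/jiffies.h>

struct example_coal {
	unsigned long window_start;	/* jiffies when the window opened */
	unsigned int irq_count;
	unsigned int rtimer;		/* currently programmed value */
};

static void example_program_rtimer(struct example_coal *c);	/* hypothetical hw write */

static void example_adapt(struct example_coal *c, unsigned int max_irqs,
			  unsigned int adapt_val)
{
	c->irq_count++;
	if (time_after(jiffies, c->window_start + HZ / 100)) {
		/* Window closed: pick the target restriction timer. */
		unsigned int want = c->irq_count > max_irqs ? adapt_val : 0;

		c->window_start = jiffies;
		if (c->rtimer != want) {
			c->rtimer = want;
			example_program_rtimer(c);
		}
		c->irq_count = 0;
	}
}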
2082/* 2176/*
2083 * vxge_isr_napi 2177 * vxge_isr_napi
2084 * @irq: the irq of the device. 2178 * @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2139 2233
2140#ifdef CONFIG_PCI_MSI 2234#ifdef CONFIG_PCI_MSI
2141 2235
2142static irqreturn_t 2236static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2143vxge_tx_msix_handle(int irq, void *dev_id)
2144{ 2237{
2145 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; 2238 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2146 2239
2240 adaptive_coalesce_tx_interrupts(fifo);
2241
2242 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2243 fifo->tx_vector_no);
2244
2245 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2246 fifo->tx_vector_no);
2247
2147 VXGE_COMPLETE_VPATH_TX(fifo); 2248 VXGE_COMPLETE_VPATH_TX(fifo);
2148 2249
2250 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2251 fifo->tx_vector_no);
2252
2253 mmiowb();
2254
2149 return IRQ_HANDLED; 2255 return IRQ_HANDLED;
2150} 2256}
2151 2257
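The handler above is the one-shot MSI-X idiom: mask the vector, clear its one-shot pending bit, drain the work, then unmask, so an event arriving in between is latched and fires on the unmask rather than being lost. The same flow reduced to its ordering; the example_* types and helpers are placeholders for the vxge_hw_channel_msix_* calls:

#include <linux/interrupt.h>

struct example_fifo;	/* placeholder for the driver's fifo */

/* Hypothetical wrappers around the mask/clear/unmask primitives. */
static void example_msix_mask(struct example_fifo *fifo);
static void example_msix_clear(struct example_fifo *fifo);
static void example_msix_unmask(struct example_fifo *fifo);
static void example_drain_completions(struct example_fifo *fifo);

static irqreturn_t example_oneshot_handler(int irq, void *dev_id)
{
	struct example_fifo *fifo = dev_id;

	example_msix_mask(fifo);	/* 1. new events latch, don't re-fire */
	example_msix_clear(fifo);	/* 2. ack the one-shot pending bit */

	example_drain_completions(fifo);	/* 3. the actual work */

	example_msix_unmask(fifo);	/* 4. a latched event fires now */
	mmiowb();	/* flush the posted unmask on weakly-ordered buses */

	return IRQ_HANDLED;
}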
2152static irqreturn_t 2258static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2153vxge_rx_msix_napi_handle(int irq, void *dev_id)
2154{ 2259{
2155 struct vxge_ring *ring = (struct vxge_ring *)dev_id; 2260 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2156 2261
2157 /* MSIX_IDX for Rx is 1 */ 2262 adaptive_coalesce_rx_interrupts(ring);
2263
2158 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, 2264 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2159 ring->rx_vector_no); 2265 ring->rx_vector_no);
2266
2267 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2268 ring->rx_vector_no);
2160 2269
2161 napi_schedule(&ring->napi); 2270 napi_schedule(&ring->napi);
2162 return IRQ_HANDLED; 2271 return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
2173 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; 2282 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2174 2283
2175 for (i = 0; i < vdev->no_of_vpath; i++) { 2284 for (i = 0; i < vdev->no_of_vpath; i++) {
2285 /* Reduce the chance of losing alarm interrupts by masking
2286 * the vector. A pending bit is set if an alarm is generated
2287 * while masked, and the interrupt fires on unmask.
2288 */
2176 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); 2289 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2290 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2291 mmiowb();
2177 2292
2178 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, 2293 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2179 vdev->exec_mode); 2294 vdev->exec_mode);
2180 if (status == VXGE_HW_OK) { 2295 if (status == VXGE_HW_OK) {
2181
2182 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, 2296 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2183 msix_id); 2297 msix_id);
2298 mmiowb();
2184 continue; 2299 continue;
2185 } 2300 }
2186 vxge_debug_intr(VXGE_ERR, 2301 vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
2299 vpath->ring.rx_vector_no = (vpath->device_id * 2414 vpath->ring.rx_vector_no = (vpath->device_id *
2300 VXGE_HW_VPATH_MSIX_ACTIVE) + 1; 2415 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2301 2416
2417 vpath->fifo.tx_vector_no = (vpath->device_id *
2418 VXGE_HW_VPATH_MSIX_ACTIVE);
2419
2302 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, 2420 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2303 VXGE_ALARM_MSIX_ID); 2421 VXGE_ALARM_MSIX_ID);
2304 } 2422 }
@@ -2474,8 +2592,9 @@ INTA_MODE:
2474 "%s:vxge:INTA", vdev->ndev->name); 2592 "%s:vxge:INTA", vdev->ndev->name);
2475 vxge_hw_device_set_intr_type(vdev->devh, 2593 vxge_hw_device_set_intr_type(vdev->devh,
2476 VXGE_HW_INTR_MODE_IRQLINE); 2594 VXGE_HW_INTR_MODE_IRQLINE);
2477 vxge_hw_vpath_tti_ci_set(vdev->devh, 2595
2478 vdev->vpaths[0].device_id); 2596 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2597
2479 ret = request_irq((int) vdev->pdev->irq, 2598 ret = request_irq((int) vdev->pdev->irq,
2480 vxge_isr_napi, 2599 vxge_isr_napi,
2481 IRQF_SHARED, vdev->desc[0], vdev); 2600 IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
2745 } 2864 }
2746 2865
2747 netif_tx_start_all_queues(vdev->ndev); 2866 netif_tx_start_all_queues(vdev->ndev);
2867
2868 /* configure CI */
2869 vxge_config_ci_for_tti_rti(vdev);
2870
2748 goto out0; 2871 goto out0;
2749 2872
2750out2: 2873out2:
@@ -3264,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = {
3264#endif 3387#endif
3265}; 3388};
3266 3389
3267static int __devinit vxge_device_revision(struct vxgedev *vdev)
3268{
3269 int ret;
3270 u8 revision;
3271
3272 ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
3273 if (ret)
3274 return -EIO;
3275
3276 vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
3277 return 0;
3278}
3279
3280static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3390static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3281 struct vxge_config *config, 3391 struct vxge_config *config,
3282 int high_dma, int no_of_vpath, 3392 int high_dma, int no_of_vpath,
@@ -3316,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3316 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3426 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3317 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3427 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3318 vdev->rx_hwts = 0; 3428 vdev->rx_hwts = 0;
3319 3429 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3320 ret = vxge_device_revision(vdev);
3321 if (ret < 0)
3322 goto _out1;
3323 3430
3324 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3431 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3325 3432
@@ -3348,7 +3455,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3348 vxge_debug_init(VXGE_ERR, 3455 vxge_debug_init(VXGE_ERR,
3349 "%s: vpath memory allocation failed", 3456 "%s: vpath memory allocation failed",
3350 vdev->ndev->name); 3457 vdev->ndev->name);
3351 ret = -ENODEV; 3458 ret = -ENOMEM;
3352 goto _out1; 3459 goto _out1;
3353 } 3460 }
3354 3461
@@ -3369,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3369 if (vdev->config.gro_enable) 3476 if (vdev->config.gro_enable)
3370 ndev->features |= NETIF_F_GRO; 3477 ndev->features |= NETIF_F_GRO;
3371 3478
3372 if (register_netdev(ndev)) { 3479 ret = register_netdev(ndev);
3480 if (ret) {
3373 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3481 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3374 "%s: %s : device registration failed!", 3482 "%s: %s : device registration failed!",
3375 ndev->name, __func__); 3483 ndev->name, __func__);
3376 ret = -ENODEV;
3377 goto _out2; 3484 goto _out2;
3378 } 3485 }
3379 3486
@@ -3444,6 +3551,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3444 /* in 2.6 will call stop() if device is up */ 3551 /* in 2.6 will call stop() if device is up */
3445 unregister_netdev(dev); 3552 unregister_netdev(dev);
3446 3553
3554 kfree(vdev->vpaths);
3555
3556 /* we are safe to free it now */
3557 free_netdev(dev);
3558
3447 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", 3559 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3448 buf); 3560 buf);
3449 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, 3561 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
@@ -3799,7 +3911,7 @@ static void __devinit vxge_device_config_init(
3799 break; 3911 break;
3800 3912
3801 case MSI_X: 3913 case MSI_X:
3802 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3914 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3803 break; 3915 break;
3804 } 3916 }
3805 3917
@@ -4335,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4335 goto _exit1; 4447 goto _exit1;
4336 } 4448 }
4337 4449
4338 if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) { 4450 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4451 if (ret) {
4339 vxge_debug_init(VXGE_ERR, 4452 vxge_debug_init(VXGE_ERR,
4340 "%s : request regions failed", __func__); 4453 "%s : request regions failed", __func__);
4341 ret = -ENODEV;
4342 goto _exit1; 4454 goto _exit1;
4343 } 4455 }
4344 4456
@@ -4446,7 +4558,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4446 if (!img[i].is_valid) 4558 if (!img[i].is_valid)
4447 break; 4559 break;
4448 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " 4560 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4449 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i, 4561 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4450 VXGE_EPROM_IMG_MAJOR(img[i].version), 4562 VXGE_EPROM_IMG_MAJOR(img[i].version),
4451 VXGE_EPROM_IMG_MINOR(img[i].version), 4563 VXGE_EPROM_IMG_MINOR(img[i].version),
4452 VXGE_EPROM_IMG_FIX(img[i].version), 4564 VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4755,9 @@ _exit6:
4643_exit5: 4755_exit5:
4644 vxge_device_unregister(hldev); 4756 vxge_device_unregister(hldev);
4645_exit4: 4757_exit4:
4646 pci_disable_sriov(pdev); 4758 pci_set_drvdata(pdev, NULL);
4647 vxge_hw_device_terminate(hldev); 4759 vxge_hw_device_terminate(hldev);
4760 pci_disable_sriov(pdev);
4648_exit3: 4761_exit3:
4649 iounmap(attr.bar0); 4762 iounmap(attr.bar0);
4650_exit2: 4763_exit2:
@@ -4655,7 +4768,7 @@ _exit0:
4655 kfree(ll_config); 4768 kfree(ll_config);
4656 kfree(device_config); 4769 kfree(device_config);
4657 driver_config->config_dev_cnt--; 4770 driver_config->config_dev_cnt--;
4658 pci_set_drvdata(pdev, NULL); 4771 driver_config->total_dev_cnt--;
4659 return ret; 4772 return ret;
4660} 4773}
4661 4774
@@ -4668,45 +4781,34 @@ _exit0:
4668static void __devexit vxge_remove(struct pci_dev *pdev) 4781static void __devexit vxge_remove(struct pci_dev *pdev)
4669{ 4782{
4670 struct __vxge_hw_device *hldev; 4783 struct __vxge_hw_device *hldev;
4671 struct vxgedev *vdev = NULL; 4784 struct vxgedev *vdev;
4672 struct net_device *dev; 4785 int i;
4673 int i = 0;
4674 4786
4675 hldev = pci_get_drvdata(pdev); 4787 hldev = pci_get_drvdata(pdev);
4676
4677 if (hldev == NULL) 4788 if (hldev == NULL)
4678 return; 4789 return;
4679 4790
4680 dev = hldev->ndev; 4791 vdev = netdev_priv(hldev->ndev);
4681 vdev = netdev_priv(dev);
4682 4792
4683 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); 4793 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4684
4685 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", 4794 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4686 __func__); 4795 __func__);
4687 vxge_device_unregister(hldev);
4688 4796
4689 for (i = 0; i < vdev->no_of_vpath; i++) { 4797 for (i = 0; i < vdev->no_of_vpath; i++)
4690 vxge_free_mac_add_list(&vdev->vpaths[i]); 4798 vxge_free_mac_add_list(&vdev->vpaths[i]);
4691 vdev->vpaths[i].mcast_addr_cnt = 0;
4692 vdev->vpaths[i].mac_addr_cnt = 0;
4693 }
4694
4695 kfree(vdev->vpaths);
4696 4799
4800 vxge_device_unregister(hldev);
4801 pci_set_drvdata(pdev, NULL);
4802 /* Do not call pci_disable_sriov here, as it will break child devices */
4803 vxge_hw_device_terminate(hldev);
4697 iounmap(vdev->bar0); 4804 iounmap(vdev->bar0);
4698 4805 pci_release_region(pdev, 0);
4699 /* we are safe to free it now */ 4806 pci_disable_device(pdev);
4700 free_netdev(dev); 4807 driver_config->config_dev_cnt--;
4808 driver_config->total_dev_cnt--;
4701 4809
4702 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", 4810 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4703 __func__, __LINE__); 4811 __func__, __LINE__);
4704
4705 vxge_hw_device_terminate(hldev);
4706
4707 pci_disable_device(pdev);
4708 pci_release_region(pdev, 0);
4709 pci_set_drvdata(pdev, NULL);
4710 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, 4812 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4711 __LINE__); 4813 __LINE__);
4712} 4814}
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fedc356..40474f0da57 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
59#define VXGE_TTI_LTIMER_VAL 1000 59#define VXGE_TTI_LTIMER_VAL 1000
60#define VXGE_T1A_TTI_LTIMER_VAL 80 60#define VXGE_T1A_TTI_LTIMER_VAL 80
61#define VXGE_TTI_RTIMER_VAL 0 61#define VXGE_TTI_RTIMER_VAL 0
62#define VXGE_TTI_RTIMER_ADAPT_VAL 10
62#define VXGE_T1A_TTI_RTIMER_VAL 400 63#define VXGE_T1A_TTI_RTIMER_VAL 400
63#define VXGE_RTI_BTIMER_VAL 250 64#define VXGE_RTI_BTIMER_VAL 250
64#define VXGE_RTI_LTIMER_VAL 100 65#define VXGE_RTI_LTIMER_VAL 100
65#define VXGE_RTI_RTIMER_VAL 0 66#define VXGE_RTI_RTIMER_VAL 0
66#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH 67#define VXGE_RTI_RTIMER_ADAPT_VAL 15
68#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
67#define VXGE_ISR_POLLING_CNT 8 69#define VXGE_ISR_POLLING_CNT 8
68#define VXGE_MAX_CONFIG_DEV 0xFF 70#define VXGE_MAX_CONFIG_DEV 0xFF
69#define VXGE_EXEC_MODE_DISABLE 0 71#define VXGE_EXEC_MODE_DISABLE 0
@@ -107,6 +109,14 @@
107#define RTI_T1A_RX_UFC_C 50 109#define RTI_T1A_RX_UFC_C 50
108#define RTI_T1A_RX_UFC_D 60 110#define RTI_T1A_RX_UFC_D 60
109 111
112/*
113 * The moderation parameters keep the interrupt rate near 3k per second
114 * for most, but not all, traffic. These are the maximum interrupt counts
115 * allowed in a 10 millisecond period, per function with INTA or per
116 * vector with MSI-X. Enabled only for Titan 1A.
117 */
118#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
119#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
110 120
111/* Milli secs timer period */ 121/* Milli secs timer period */
112#define VXGE_TIMER_DELAY 10000 122#define VXGE_TIMER_DELAY 10000
@@ -247,6 +257,11 @@ struct vxge_fifo {
247 int tx_steering_type; 257 int tx_steering_type;
248 int indicate_max_pkts; 258 int indicate_max_pkts;
249 259
260 /* Adaptive interrupt moderation parameters used in T1A */
261 unsigned long interrupt_count;
262 unsigned long jiffies;
263
264 u32 tx_vector_no;
250 /* Tx stats */ 265 /* Tx stats */
251 struct vxge_fifo_stats stats; 266 struct vxge_fifo_stats stats;
252} ____cacheline_aligned; 267} ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
271 */ 286 */
272 int driver_id; 287 int driver_id;
273 288
289 /* Adaptive interrupt moderation parameters used in T1A */
290 unsigned long interrupt_count;
291 unsigned long jiffies;
292
274 /* copy of the flag indicating whether rx_csum is to be used */ 293 /* copy of the flag indicating whether rx_csum is to be used */
275 u32 rx_csum:1, 294 u32 rx_csum:1,
276 rx_hwts:1; 295 rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
286 305
287 int vlan_tag_strip; 306 int vlan_tag_strip;
288 struct vlan_group *vlgrp; 307 struct vlan_group *vlgrp;
289 int rx_vector_no; 308 u32 rx_vector_no;
290 enum vxge_hw_status last_status; 309 enum vxge_hw_status last_status;
291 310
292 /* Rx stats */ 311 /* Rx stats */
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c4075..8674f331311 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@ exit:
218 return status; 218 return status;
219} 219}
220 220
221void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
222{
223 struct vxge_hw_vpath_reg __iomem *vp_reg;
224 struct vxge_hw_vp_config *config;
225 u64 val64;
226
227 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
228 return;
229
230 vp_reg = fifo->vp_reg;
231 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
232
233 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
234 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
235 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
236 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
237 fifo->tim_tti_cfg1_saved = val64;
238 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
239 }
240}
241
242void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
243{
244 u64 val64 = ring->tim_rti_cfg1_saved;
245
246 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
247 ring->tim_rti_cfg1_saved = val64;
248 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
249}
250
251void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
252{
253 u64 val64 = fifo->tim_tti_cfg3_saved;
254 u64 timer = (fifo->rtimer * 1000) / 272;
255
256 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
257 if (timer)
258 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
259 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
260
261 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
262 /* tti_cfg3_saved is not updated again because it is
263 * initialized at one place only - init time.
264 */
265}
266
267void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
268{
269 u64 val64 = ring->tim_rti_cfg3_saved;
270 u64 timer = (ring->rtimer * 1000) / 272;
271
272 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
273 if (timer)
274 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
275 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
276
277 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
278 /* rti_cfg3_saved is not updated again because it is
279 * initialized at one place only - init time.
280 */
281}
282
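Both rtimer helpers above scale the timer with (rtimer * 1000) / 272, which reads as a microseconds-to-hardware-ticks conversion assuming a TIM tick of roughly 272 ns (an assumption; the tick width is not quoted in this patch). Worked out for the two adaptive values:

/* Assuming rtimer is in microseconds and one TIM tick ~= 272 ns:
 *   VXGE_RTI_RTIMER_ADAPT_VAL = 15  ->  (15 * 1000) / 272 == 55 ticks
 *   VXGE_TTI_RTIMER_ADAPT_VAL = 10  ->  (10 * 1000) / 272 == 36 ticks
 */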
221/** 283/**
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector. 284 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channeh: Channel for rx or tx handle 285 * @channeh: Channel for rx or tx handle
@@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
254} 316}
255 317
256/** 318/**
319 * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
320 * @channel: Channel for rx or tx handle
321 * @msix_id: MSI ID
322 *
323 * The function clears the msix interrupt for the given msix_id
324 * when configured in MSIX one-shot mode
325 *
326 * Returns: None
327 */
328void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
329{
330 __vxge_hw_pio_mem_write32_upper(
331 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
332 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
333}
334
335/**
257 * vxge_hw_device_set_intr_type - Updates the configuration 336 * vxge_hw_device_set_intr_type - Updates the configuration
258 * with new interrupt type. 337 * with new interrupt type.
259 * @hldev: HW device handle. 338 * @hldev: HW device handle.
@@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2191 if (vpath->hldev->config.intr_mode == 2270 if (vpath->hldev->config.intr_mode ==
2192 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { 2271 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2193 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( 2272 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2273 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2274 0, 32), &vp_reg->one_shot_vect0_en);
2275 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2194 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, 2276 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2195 0, 32), &vp_reg->one_shot_vect1_en); 2277 0, 32), &vp_reg->one_shot_vect1_en);
2196 }
2197
2198 if (vpath->hldev->config.intr_mode ==
2199 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2200 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( 2278 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2201 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, 2279 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2202 0, 32), &vp_reg->one_shot_vect2_en); 2280 0, 32), &vp_reg->one_shot_vect2_en);
2203
2204 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2205 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2206 0, 32), &vp_reg->one_shot_vect3_en);
2207 } 2281 }
2208} 2282}
2209 2283
@@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2229} 2303}
2230 2304
2231/** 2305/**
2306 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2307 * @vp: Virtual Path handle.
2308 * @msix_id: MSI ID
2309 *
2310 * The function clears the msix interrupt for the given msix_id
2311 *
2312 * Returns: None
2313 *
2314 * See also: vxge_hw_vpath_msix_mask(),
2315 * vxge_hw_vpath_msix_unmask()
2316 */
2317void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2318{
2319 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2320
2321 if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
2322 __vxge_hw_pio_mem_write32_upper(
2323 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2324 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2325 else
2326 __vxge_hw_pio_mem_write32_upper(
2327 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2328 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2329}
2330
2331/**
2232 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. 2332 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2233 * @vp: Virtual Path handle. 2333 * @vp: Virtual Path handle.
2234 * @msix_id: MSI ID 2334 * @msix_id: MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d6afa..9d9dfda4c7a 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
2142 * Virtual Paths 2142 * Virtual Paths
2143 */ 2143 */
2144 2144
2145void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
2146
2147void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
2148
2145u32 vxge_hw_vpath_id( 2149u32 vxge_hw_vpath_id(
2146 struct __vxge_hw_vpath_handle *vpath_handle); 2150 struct __vxge_hw_vpath_handle *vpath_handle);
2147 2151
@@ -2245,6 +2249,8 @@ void
2245vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, 2249vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2246 int msix_id); 2250 int msix_id);
2247 2251
2252void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
2253
2248void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); 2254void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2249 2255
2250void 2256void
@@ -2270,6 +2276,9 @@ void
2270vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); 2276vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2271 2277
2272void 2278void
2279vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
2280
2281void
2273vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, 2282vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2274 void **dtrh); 2283 void **dtrh);
2275 2284
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2282int 2291int
2283vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); 2292vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2284 2293
2285void 2294void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
2286vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); 2295
2296void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
2287 2297
2288#endif 2298#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index ad2f99b9bcf..581e21525e8 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
16 16
17#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "5" 18#define VXGE_VERSION_MINOR "5"
19#define VXGE_VERSION_FIX "1" 19#define VXGE_VERSION_FIX "2"
20#define VXGE_VERSION_BUILD "22082" 20#define VXGE_VERSION_BUILD "22259"
21#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
22 22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) 23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
index dabdcfed4ef..609710d64eb 100644
--- a/drivers/net/wan/lmc/Makefile
+++ b/drivers/net/wan/lmc/Makefile
@@ -14,4 +14,4 @@ lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
14# -DDEBUG \ 14# -DDEBUG \
15# -DLMC_PACKET_LOG 15# -DLMC_PACKET_LOG
16 16
17EXTRA_CFLAGS += -I. $(DBGDEF) 17ccflags-y := -I. $(DBGDEF)
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 515d9b8af01..1c65d1c3387 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -131,9 +131,8 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
131static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); 131static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
132static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char); 132static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char);
133 133
134static int pc300_tiocmset(struct tty_struct *, struct file *, 134static int pc300_tiocmset(struct tty_struct *, unsigned int, unsigned int);
135 unsigned int, unsigned int); 135static int pc300_tiocmget(struct tty_struct *);
136static int pc300_tiocmget(struct tty_struct *, struct file *);
137 136
138/* functions called by PC300 driver */ 137/* functions called by PC300 driver */
139void cpc_tty_init(pc300dev_t *dev); 138void cpc_tty_init(pc300dev_t *dev);
@@ -543,7 +542,7 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
543 return 0; 542 return 0;
544} 543}
545 544
546static int pc300_tiocmset(struct tty_struct *tty, struct file *file, 545static int pc300_tiocmset(struct tty_struct *tty,
547 unsigned int set, unsigned int clear) 546 unsigned int set, unsigned int clear)
548{ 547{
549 st_cpc_tty_area *cpc_tty; 548 st_cpc_tty_area *cpc_tty;
@@ -570,7 +569,7 @@ static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
570 return 0; 569 return 0;
571} 570}
572 571
573static int pc300_tiocmget(struct tty_struct *tty, struct file *file) 572static int pc300_tiocmget(struct tty_struct *tty)
574{ 573{
575 unsigned int result; 574 unsigned int result;
576 unsigned char status; 575 unsigned char status;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 7aeb113cbb9..f354bd4e121 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -284,5 +284,6 @@ source "drivers/net/wireless/rtlwifi/Kconfig"
284source "drivers/net/wireless/wl1251/Kconfig" 284source "drivers/net/wireless/wl1251/Kconfig"
285source "drivers/net/wireless/wl12xx/Kconfig" 285source "drivers/net/wireless/wl12xx/Kconfig"
286source "drivers/net/wireless/zd1211rw/Kconfig" 286source "drivers/net/wireless/zd1211rw/Kconfig"
287source "drivers/net/wireless/mwifiex/Kconfig"
287 288
288endif # WLAN 289endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index ddd3fb6ba1d..7bba6a82b87 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -56,3 +56,5 @@ obj-$(CONFIG_WL12XX) += wl12xx/
56obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/ 56obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
57 57
58obj-$(CONFIG_IWM) += iwmc3200wifi/ 58obj-$(CONFIG_IWM) += iwmc3200wifi/
59
60obj-$(CONFIG_MWIFIEX) += mwifiex/
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index a6c6a466000..6d7105b7e8f 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -119,6 +119,7 @@ struct ath_ops {
119 void (*write)(void *, u32 val, u32 reg_offset); 119 void (*write)(void *, u32 val, u32 reg_offset);
120 void (*enable_write_buffer)(void *); 120 void (*enable_write_buffer)(void *);
121 void (*write_flush) (void *); 121 void (*write_flush) (void *);
122 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
122}; 123};
123 124
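The rmw hook added to struct ath_ops above gives bus backends a single read-modify-write primitive. A naive fallback in terms of the neighbouring read/write ops would look like this; the example_* names are hypothetical, and note that the write op takes the value before the offset:

/* Hypothetical stand-ins for the struct ath_ops read/write members. */
static u32 example_read(void *hw_priv, u32 reg_offset);
static void example_write(void *hw_priv, u32 val, u32 reg_offset);

static u32 example_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	u32 val = example_read(hw_priv, reg_offset);

	val &= ~clr;	/* drop the bits being cleared */
	val |= set;	/* then apply the bits being set */
	example_write(hw_priv, val, reg_offset);

	return val;
}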
124struct ath_common; 125struct ath_common;
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index ffcf44a4058..4361704fe0d 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -44,6 +44,34 @@ static const int m1ThreshExt_off = 127;
44static const int m2ThreshExt_off = 127; 44static const int m2ThreshExt_off = 127;
45 45
46 46
47static void ar5008_rf_bank_setup(u32 *bank, struct ar5416IniArray *array,
48 int col)
49{
50 int i;
51
52 for (i = 0; i < array->ia_rows; i++)
53 bank[i] = INI_RA(array, i, col);
54}
55
56
57#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) \
58 ar5008_write_rf_array(ah, iniarray, regData, &(regWr))
59
60static void ar5008_write_rf_array(struct ath_hw *ah, struct ar5416IniArray *array,
61 u32 *data, unsigned int *writecnt)
62{
63 int r;
64
65 ENABLE_REGWRITE_BUFFER(ah);
66
67 for (r = 0; r < array->ia_rows; r++) {
68 REG_WRITE(ah, INI_RA(array, r, 0), data[r]);
69 DO_DELAY(*writecnt);
70 }
71
72 REGWRITE_BUFFER_FLUSH(ah);
73}
74
47/** 75/**
48 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters 76 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
49 * @rfbuf: 77 * @rfbuf:
@@ -530,16 +558,16 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
530 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); 558 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
531 559
532 /* Setup Bank 0 Write */ 560 /* Setup Bank 0 Write */
533 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1); 561 ar5008_rf_bank_setup(ah->analogBank0Data, &ah->iniBank0, 1);
534 562
535 /* Setup Bank 1 Write */ 563 /* Setup Bank 1 Write */
536 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1); 564 ar5008_rf_bank_setup(ah->analogBank1Data, &ah->iniBank1, 1);
537 565
538 /* Setup Bank 2 Write */ 566 /* Setup Bank 2 Write */
539 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1); 567 ar5008_rf_bank_setup(ah->analogBank2Data, &ah->iniBank2, 1);
540 568
541 /* Setup Bank 6 Write */ 569 /* Setup Bank 6 Write */
542 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3, 570 ar5008_rf_bank_setup(ah->analogBank3Data, &ah->iniBank3,
543 modesIndex); 571 modesIndex);
544 { 572 {
545 int i; 573 int i;
@@ -569,7 +597,7 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
569 } 597 }
570 598
571 /* Setup Bank 7 Setup */ 599 /* Setup Bank 7 Setup */
572 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1); 600 ar5008_rf_bank_setup(ah->analogBank7Data, &ah->iniBank7, 1);
573 601
574 /* Write Analog registers */ 602 /* Write Analog registers */
575 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data, 603 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
@@ -729,6 +757,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
729 struct ath9k_channel *chan) 757 struct ath9k_channel *chan)
730{ 758{
731 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 759 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
760 struct ath_common *common = ath9k_hw_common(ah);
732 int i, regWrites = 0; 761 int i, regWrites = 0;
733 struct ieee80211_channel *channel = chan->chan; 762 struct ieee80211_channel *channel = chan->chan;
734 u32 modesIndex, freqIndex; 763 u32 modesIndex, freqIndex;
@@ -805,7 +834,8 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
805 REG_WRITE(ah, reg, val); 834 REG_WRITE(ah, reg, val);
806 835
807 if (reg >= 0x7800 && reg < 0x78a0 836 if (reg >= 0x7800 && reg < 0x78a0
808 && ah->config.analog_shiftreg) { 837 && ah->config.analog_shiftreg
838 && (common->bus_ops->ath_bus_type != ATH_USB)) {
809 udelay(100); 839 udelay(100);
810 } 840 }
811 841
@@ -835,7 +865,8 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
835 REG_WRITE(ah, reg, val); 865 REG_WRITE(ah, reg, val);
836 866
837 if (reg >= 0x7800 && reg < 0x78a0 867 if (reg >= 0x7800 && reg < 0x78a0
838 && ah->config.analog_shiftreg) { 868 && ah->config.analog_shiftreg
869 && (common->bus_ops->ath_bus_type != ATH_USB)) {
839 udelay(100); 870 udelay(100);
840 } 871 }
841 872
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 76388c6d669..cb611b287b3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -26,6 +26,27 @@ enum ar9002_cal_types {
26 IQ_MISMATCH_CAL = BIT(2), 26 IQ_MISMATCH_CAL = BIT(2),
27}; 27};
28 28
29static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
30 struct ath9k_channel *chan,
31 enum ar9002_cal_types cal_type)
32{
33 bool supported = false;
34 switch (ah->supp_cals & cal_type) {
35 case IQ_MISMATCH_CAL:
36 /* Run IQ Mismatch for non-CCK only */
37 if (!IS_CHAN_B(chan))
38 supported = true;
39 break;
40 case ADC_GAIN_CAL:
41 case ADC_DC_CAL:
42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
43 if (!IS_CHAN_B(chan) &&
44 !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
45 supported = true;
46 break;
47 }
48 return supported;
49}
29 50
30static void ar9002_hw_setup_calibration(struct ath_hw *ah, 51static void ar9002_hw_setup_calibration(struct ath_hw *ah,
31 struct ath9k_cal_list *currCal) 52 struct ath9k_cal_list *currCal)
@@ -858,26 +879,32 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
858 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) { 879 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
859 ah->supp_cals = IQ_MISMATCH_CAL; 880 ah->supp_cals = IQ_MISMATCH_CAL;
860 881
861 if (AR_SREV_9160_10_OR_LATER(ah) && 882 if (AR_SREV_9160_10_OR_LATER(ah))
862 !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) {
863 ah->supp_cals |= ADC_GAIN_CAL | ADC_DC_CAL; 883 ah->supp_cals |= ADC_GAIN_CAL | ADC_DC_CAL;
864 884
885 if (AR_SREV_9287(ah))
886 ah->supp_cals &= ~ADC_GAIN_CAL;
865 887
888 if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) {
866 INIT_CAL(&ah->adcgain_caldata); 889 INIT_CAL(&ah->adcgain_caldata);
867 INSERT_CAL(ah, &ah->adcgain_caldata); 890 INSERT_CAL(ah, &ah->adcgain_caldata);
868 ath_dbg(common, ATH_DBG_CALIBRATE, 891 ath_dbg(common, ATH_DBG_CALIBRATE,
869 "enabling ADC Gain Calibration.\n"); 892 "enabling ADC Gain Calibration.\n");
893 }
870 894
895 if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) {
871 INIT_CAL(&ah->adcdc_caldata); 896 INIT_CAL(&ah->adcdc_caldata);
872 INSERT_CAL(ah, &ah->adcdc_caldata); 897 INSERT_CAL(ah, &ah->adcdc_caldata);
873 ath_dbg(common, ATH_DBG_CALIBRATE, 898 ath_dbg(common, ATH_DBG_CALIBRATE,
874 "enabling ADC DC Calibration.\n"); 899 "enabling ADC DC Calibration.\n");
875 } 900 }
876 901
877 INIT_CAL(&ah->iq_caldata); 902 if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) {
878 INSERT_CAL(ah, &ah->iq_caldata); 903 INIT_CAL(&ah->iq_caldata);
879 ath_dbg(common, ATH_DBG_CALIBRATE, 904 INSERT_CAL(ah, &ah->iq_caldata);
880 "enabling IQ Calibration.\n"); 905 ath_dbg(common, ATH_DBG_CALIBRATE,
906 "enabling IQ Calibration.\n");
907 }
881 908
882 ah->cal_list_curr = ah->cal_list; 909 ah->cal_list_curr = ah->cal_list;
883 910
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 7f5de6e4448..3daf3df0248 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -88,66 +88,6 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
88 ar9485_1_1_pcie_phy_clkreq_disable_L1, 88 ar9485_1_1_pcie_phy_clkreq_disable_L1,
89 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1), 89 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
90 2); 90 2);
91 } else if (AR_SREV_9485(ah)) {
92 /* mac */
93 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
94 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
95 ar9485_1_0_mac_core,
96 ARRAY_SIZE(ar9485_1_0_mac_core), 2);
97 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
98 ar9485_1_0_mac_postamble,
99 ARRAY_SIZE(ar9485_1_0_mac_postamble), 5);
100
101 /* bb */
102 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_0,
103 ARRAY_SIZE(ar9485_1_0), 2);
104 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
105 ar9485_1_0_baseband_core,
106 ARRAY_SIZE(ar9485_1_0_baseband_core), 2);
107 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
108 ar9485_1_0_baseband_postamble,
109 ARRAY_SIZE(ar9485_1_0_baseband_postamble), 5);
110
111 /* radio */
112 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
113 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
114 ar9485_1_0_radio_core,
115 ARRAY_SIZE(ar9485_1_0_radio_core), 2);
116 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
117 ar9485_1_0_radio_postamble,
118 ARRAY_SIZE(ar9485_1_0_radio_postamble), 2);
119
120 /* soc */
121 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
122 ar9485_1_0_soc_preamble,
123 ARRAY_SIZE(ar9485_1_0_soc_preamble), 2);
124 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
125 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
126
127 /* rx/tx gain */
128 INIT_INI_ARRAY(&ah->iniModesRxGain,
129 ar9485Common_rx_gain_1_0,
130 ARRAY_SIZE(ar9485Common_rx_gain_1_0), 2);
131 INIT_INI_ARRAY(&ah->iniModesTxGain,
132 ar9485Modes_lowest_ob_db_tx_gain_1_0,
133 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
134 5);
135
136 /* Load PCIE SERDES settings from INI */
137
138 /* Awake Setting */
139
140 INIT_INI_ARRAY(&ah->iniPcieSerdes,
141 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
142 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
143 2);
144
145 /* Sleep Setting */
146
147 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
148 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
149 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
150 2);
151 } else { 91 } else {
152 /* mac */ 92 /* mac */
153 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 93 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -228,11 +168,6 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
228 ar9485_modes_lowest_ob_db_tx_gain_1_1, 168 ar9485_modes_lowest_ob_db_tx_gain_1_1,
229 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1), 169 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
230 5); 170 5);
231 else if (AR_SREV_9485(ah))
232 INIT_INI_ARRAY(&ah->iniModesTxGain,
233 ar9485Modes_lowest_ob_db_tx_gain_1_0,
234 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
235 5);
236 else 171 else
237 INIT_INI_ARRAY(&ah->iniModesTxGain, 172 INIT_INI_ARRAY(&ah->iniModesTxGain,
238 ar9300Modes_lowest_ob_db_tx_gain_table_2p2, 173 ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
@@ -245,11 +180,6 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
245 ar9485Modes_high_ob_db_tx_gain_1_1, 180 ar9485Modes_high_ob_db_tx_gain_1_1,
246 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1), 181 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
247 5); 182 5);
248 else if (AR_SREV_9485(ah))
249 INIT_INI_ARRAY(&ah->iniModesTxGain,
250 ar9485Modes_high_ob_db_tx_gain_1_0,
251 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0),
252 5);
253 else 183 else
254 INIT_INI_ARRAY(&ah->iniModesTxGain, 184 INIT_INI_ARRAY(&ah->iniModesTxGain,
255 ar9300Modes_high_ob_db_tx_gain_table_2p2, 185 ar9300Modes_high_ob_db_tx_gain_table_2p2,
@@ -262,11 +192,6 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
262 ar9485Modes_low_ob_db_tx_gain_1_1, 192 ar9485Modes_low_ob_db_tx_gain_1_1,
263 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1), 193 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
264 5); 194 5);
265 else if (AR_SREV_9485(ah))
266 INIT_INI_ARRAY(&ah->iniModesTxGain,
267 ar9485Modes_low_ob_db_tx_gain_1_0,
268 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0),
269 5);
270 else 195 else
271 INIT_INI_ARRAY(&ah->iniModesTxGain, 196 INIT_INI_ARRAY(&ah->iniModesTxGain,
272 ar9300Modes_low_ob_db_tx_gain_table_2p2, 197 ar9300Modes_low_ob_db_tx_gain_table_2p2,
@@ -279,11 +204,6 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
279 ar9485Modes_high_power_tx_gain_1_1, 204 ar9485Modes_high_power_tx_gain_1_1,
280 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1), 205 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
281 5); 206 5);
282 else if (AR_SREV_9485(ah))
283 INIT_INI_ARRAY(&ah->iniModesTxGain,
284 ar9485Modes_high_power_tx_gain_1_0,
285 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0),
286 5);
287 else 207 else
288 INIT_INI_ARRAY(&ah->iniModesTxGain, 208 INIT_INI_ARRAY(&ah->iniModesTxGain,
289 ar9300Modes_high_power_tx_gain_table_2p2, 209 ar9300Modes_high_power_tx_gain_table_2p2,
@@ -303,11 +223,6 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
303 ar9485_common_rx_gain_1_1, 223 ar9485_common_rx_gain_1_1,
304 ARRAY_SIZE(ar9485_common_rx_gain_1_1), 224 ARRAY_SIZE(ar9485_common_rx_gain_1_1),
305 2); 225 2);
306 else if (AR_SREV_9485(ah))
307 INIT_INI_ARRAY(&ah->iniModesRxGain,
308 ar9485Common_rx_gain_1_0,
309 ARRAY_SIZE(ar9485Common_rx_gain_1_0),
310 2);
311 else 226 else
312 INIT_INI_ARRAY(&ah->iniModesRxGain, 227 INIT_INI_ARRAY(&ah->iniModesRxGain,
313 ar9300Common_rx_gain_table_2p2, 228 ar9300Common_rx_gain_table_2p2,
@@ -320,11 +235,6 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
320 ar9485Common_wo_xlna_rx_gain_1_1, 235 ar9485Common_wo_xlna_rx_gain_1_1,
321 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1), 236 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
322 2); 237 2);
323 else if (AR_SREV_9485(ah))
324 INIT_INI_ARRAY(&ah->iniModesRxGain,
325 ar9485Common_wo_xlna_rx_gain_1_0,
326 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
327 2);
328 else 238 else
329 INIT_INI_ARRAY(&ah->iniModesRxGain, 239 INIT_INI_ARRAY(&ah->iniModesRxGain,
330 ar9300Common_wo_xlna_rx_gain_table_2p2, 240 ar9300Common_wo_xlna_rx_gain_table_2p2,
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 71cc0a3a29f..f91f73e50d0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -17,931 +17,6 @@
17#ifndef INITVALS_9485_H 17#ifndef INITVALS_9485_H
18#define INITVALS_9485_H 18#define INITVALS_9485_H
19 19
20static const u32 ar9485Common_1_0[][2] = {
21 /* Addr allmodes */
22 {0x00007010, 0x00000022},
23 {0x00007020, 0x00000000},
24 {0x00007034, 0x00000002},
25 {0x00007038, 0x000004c2},
26};
27
28static const u32 ar9485_1_0_mac_postamble[][5] = {
29 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
30 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
31 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
32 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
33 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
34 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
35 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
36 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
37 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
38};
39
40static const u32 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
41 /* Addr allmodes */
42 {0x00018c00, 0x10212e5e},
43 {0x00018c04, 0x000801d8},
44 {0x00018c08, 0x0000580c},
45};
46
47static const u32 ar9485Common_wo_xlna_rx_gain_1_0[][2] = {
48 /* Addr allmodes */
49 {0x0000a000, 0x00010000},
50 {0x0000a004, 0x00030002},
51 {0x0000a008, 0x00050004},
52 {0x0000a00c, 0x00810080},
53 {0x0000a010, 0x01800082},
54 {0x0000a014, 0x01820181},
55 {0x0000a018, 0x01840183},
56 {0x0000a01c, 0x01880185},
57 {0x0000a020, 0x018a0189},
58 {0x0000a024, 0x02850284},
59 {0x0000a028, 0x02890288},
60 {0x0000a02c, 0x03850384},
61 {0x0000a030, 0x03890388},
62 {0x0000a034, 0x038b038a},
63 {0x0000a038, 0x038d038c},
64 {0x0000a03c, 0x03910390},
65 {0x0000a040, 0x03930392},
66 {0x0000a044, 0x03950394},
67 {0x0000a048, 0x00000396},
68 {0x0000a04c, 0x00000000},
69 {0x0000a050, 0x00000000},
70 {0x0000a054, 0x00000000},
71 {0x0000a058, 0x00000000},
72 {0x0000a05c, 0x00000000},
73 {0x0000a060, 0x00000000},
74 {0x0000a064, 0x00000000},
75 {0x0000a068, 0x00000000},
76 {0x0000a06c, 0x00000000},
77 {0x0000a070, 0x00000000},
78 {0x0000a074, 0x00000000},
79 {0x0000a078, 0x00000000},
80 {0x0000a07c, 0x00000000},
81 {0x0000a080, 0x28282828},
82 {0x0000a084, 0x28282828},
83 {0x0000a088, 0x28282828},
84 {0x0000a08c, 0x28282828},
85 {0x0000a090, 0x28282828},
86 {0x0000a094, 0x21212128},
87 {0x0000a098, 0x171c1c1c},
88 {0x0000a09c, 0x02020212},
89 {0x0000a0a0, 0x00000202},
90 {0x0000a0a4, 0x00000000},
91 {0x0000a0a8, 0x00000000},
92 {0x0000a0ac, 0x00000000},
93 {0x0000a0b0, 0x00000000},
94 {0x0000a0b4, 0x00000000},
95 {0x0000a0b8, 0x00000000},
96 {0x0000a0bc, 0x00000000},
97 {0x0000a0c0, 0x001f0000},
98 {0x0000a0c4, 0x111f1100},
99 {0x0000a0c8, 0x111d111e},
100 {0x0000a0cc, 0x111b111c},
101 {0x0000a0d0, 0x22032204},
102 {0x0000a0d4, 0x22012202},
103 {0x0000a0d8, 0x221f2200},
104 {0x0000a0dc, 0x221d221e},
105 {0x0000a0e0, 0x33013302},
106 {0x0000a0e4, 0x331f3300},
107 {0x0000a0e8, 0x4402331e},
108 {0x0000a0ec, 0x44004401},
109 {0x0000a0f0, 0x441e441f},
110 {0x0000a0f4, 0x55015502},
111 {0x0000a0f8, 0x551f5500},
112 {0x0000a0fc, 0x6602551e},
113 {0x0000a100, 0x66006601},
114 {0x0000a104, 0x661e661f},
115 {0x0000a108, 0x7703661d},
116 {0x0000a10c, 0x77017702},
117 {0x0000a110, 0x00007700},
118 {0x0000a114, 0x00000000},
119 {0x0000a118, 0x00000000},
120 {0x0000a11c, 0x00000000},
121 {0x0000a120, 0x00000000},
122 {0x0000a124, 0x00000000},
123 {0x0000a128, 0x00000000},
124 {0x0000a12c, 0x00000000},
125 {0x0000a130, 0x00000000},
126 {0x0000a134, 0x00000000},
127 {0x0000a138, 0x00000000},
128 {0x0000a13c, 0x00000000},
129 {0x0000a140, 0x001f0000},
130 {0x0000a144, 0x111f1100},
131 {0x0000a148, 0x111d111e},
132 {0x0000a14c, 0x111b111c},
133 {0x0000a150, 0x22032204},
134 {0x0000a154, 0x22012202},
135 {0x0000a158, 0x221f2200},
136 {0x0000a15c, 0x221d221e},
137 {0x0000a160, 0x33013302},
138 {0x0000a164, 0x331f3300},
139 {0x0000a168, 0x4402331e},
140 {0x0000a16c, 0x44004401},
141 {0x0000a170, 0x441e441f},
142 {0x0000a174, 0x55015502},
143 {0x0000a178, 0x551f5500},
144 {0x0000a17c, 0x6602551e},
145 {0x0000a180, 0x66006601},
146 {0x0000a184, 0x661e661f},
147 {0x0000a188, 0x7703661d},
148 {0x0000a18c, 0x77017702},
149 {0x0000a190, 0x00007700},
150 {0x0000a194, 0x00000000},
151 {0x0000a198, 0x00000000},
152 {0x0000a19c, 0x00000000},
153 {0x0000a1a0, 0x00000000},
154 {0x0000a1a4, 0x00000000},
155 {0x0000a1a8, 0x00000000},
156 {0x0000a1ac, 0x00000000},
157 {0x0000a1b0, 0x00000000},
158 {0x0000a1b4, 0x00000000},
159 {0x0000a1b8, 0x00000000},
160 {0x0000a1bc, 0x00000000},
161 {0x0000a1c0, 0x00000000},
162 {0x0000a1c4, 0x00000000},
163 {0x0000a1c8, 0x00000000},
164 {0x0000a1cc, 0x00000000},
165 {0x0000a1d0, 0x00000000},
166 {0x0000a1d4, 0x00000000},
167 {0x0000a1d8, 0x00000000},
168 {0x0000a1dc, 0x00000000},
169 {0x0000a1e0, 0x00000000},
170 {0x0000a1e4, 0x00000000},
171 {0x0000a1e8, 0x00000000},
172 {0x0000a1ec, 0x00000000},
173 {0x0000a1f0, 0x00000396},
174 {0x0000a1f4, 0x00000396},
175 {0x0000a1f8, 0x00000396},
176 {0x0000a1fc, 0x00000296},
177};
178
179static const u32 ar9485Modes_high_power_tx_gain_1_0[][5] = {
180 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
181 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
182 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
183 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
184 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
185 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
186 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
187 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
188 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
189 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
190 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
191 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
192 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
193 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
194 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
195 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
196 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
197 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
198 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
199 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
200 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
201 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
202 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
203 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
204 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
205 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
206 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
207 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
208 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
209 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
210 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
211 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
212 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
213 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
214 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
215};
216
217static const u32 ar9485_1_0[][2] = {
218 /* Addr allmodes */
219 {0x0000a580, 0x00000000},
220 {0x0000a584, 0x00000000},
221 {0x0000a588, 0x00000000},
222 {0x0000a58c, 0x00000000},
223 {0x0000a590, 0x00000000},
224 {0x0000a594, 0x00000000},
225 {0x0000a598, 0x00000000},
226 {0x0000a59c, 0x00000000},
227 {0x0000a5a0, 0x00000000},
228 {0x0000a5a4, 0x00000000},
229 {0x0000a5a8, 0x00000000},
230 {0x0000a5ac, 0x00000000},
231 {0x0000a5b0, 0x00000000},
232 {0x0000a5b4, 0x00000000},
233 {0x0000a5b8, 0x00000000},
234 {0x0000a5bc, 0x00000000},
235};
236
237static const u32 ar9485_1_0_radio_core[][2] = {
238 /* Addr allmodes */
239 {0x00016000, 0x36db6db6},
240 {0x00016004, 0x6db6db40},
241 {0x00016008, 0x73800000},
242 {0x0001600c, 0x00000000},
243 {0x00016040, 0x7f80fff8},
244 {0x00016048, 0x6c92426e},
245 {0x0001604c, 0x000f0278},
246 {0x00016050, 0x6db6db6c},
247 {0x00016054, 0x6db60000},
248 {0x00016080, 0x00080000},
249 {0x00016084, 0x0e48048c},
250 {0x00016088, 0x14214514},
251 {0x0001608c, 0x119f081e},
252 {0x00016090, 0x24926490},
253 {0x00016098, 0xd28b3330},
254 {0x000160a0, 0xc2108ffe},
255 {0x000160a4, 0x812fc370},
256 {0x000160a8, 0x423c8000},
257 {0x000160b4, 0x92480040},
258 {0x000160c0, 0x006db6db},
259 {0x000160c4, 0x0186db60},
260 {0x000160c8, 0x6db6db6c},
261 {0x000160cc, 0x6de6fbe0},
262 {0x000160d0, 0xf7dfcf3c},
263 {0x00016100, 0x04cb0001},
264 {0x00016104, 0xfff80015},
265 {0x00016108, 0x00080010},
266 {0x00016144, 0x01884080},
267 {0x00016148, 0x00008040},
268 {0x00016180, 0x08453333},
269 {0x00016184, 0x18e82f01},
270 {0x00016188, 0x00000000},
271 {0x0001618c, 0x00000000},
272 {0x00016240, 0x08400000},
273 {0x00016244, 0x1bf90f00},
274 {0x00016248, 0x00000000},
275 {0x0001624c, 0x00000000},
276 {0x00016280, 0x01000015},
277 {0x00016284, 0x00d30000},
278 {0x00016288, 0x00318000},
279 {0x0001628c, 0x50000000},
280 {0x00016290, 0x4b96210f},
281 {0x00016380, 0x00000000},
282 {0x00016384, 0x00000000},
283 {0x00016388, 0x00800700},
284 {0x0001638c, 0x00800700},
285 {0x00016390, 0x00800700},
286 {0x00016394, 0x00000000},
287 {0x00016398, 0x00000000},
288 {0x0001639c, 0x00000000},
289 {0x000163a0, 0x00000001},
290 {0x000163a4, 0x00000001},
291 {0x000163a8, 0x00000000},
292 {0x000163ac, 0x00000000},
293 {0x000163b0, 0x00000000},
294 {0x000163b4, 0x00000000},
295 {0x000163b8, 0x00000000},
296 {0x000163bc, 0x00000000},
297 {0x000163c0, 0x000000a0},
298 {0x000163c4, 0x000c0000},
299 {0x000163c8, 0x14021402},
300 {0x000163cc, 0x00001402},
301 {0x000163d0, 0x00000000},
302 {0x000163d4, 0x00000000},
303 {0x00016c40, 0x1319c178},
304 {0x00016c44, 0x10000000},
305};
306
307static const u32 ar9485Modes_lowest_ob_db_tx_gain_1_0[][5] = {
308 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
309 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
310 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
311 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
312 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
313 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
314 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
315 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
316 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
317 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
318 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
319 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
320 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
321 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
322 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
323 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
324 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
325 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
326 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
327 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
328 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
329 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
330 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
331 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
332 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
333 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
334 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
335 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
336 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
337 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
338 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
339 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
340 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
341 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
342 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
343};
344
345static const u32 ar9485_1_0_baseband_core[][2] = {
346 /* Addr allmodes */
347 {0x00009800, 0xafe68e30},
348 {0x00009804, 0xfd14e000},
349 {0x00009808, 0x9c0a8f6b},
350 {0x0000980c, 0x04800000},
351 {0x00009814, 0x9280c00a},
352 {0x00009818, 0x00000000},
353 {0x0000981c, 0x00020028},
354 {0x00009834, 0x5f3ca3de},
355 {0x00009838, 0x0108ecff},
356 {0x0000983c, 0x14750600},
357 {0x00009880, 0x201fff00},
358 {0x00009884, 0x00001042},
359 {0x000098a4, 0x00200400},
360 {0x000098b0, 0x52440bbe},
361 {0x000098bc, 0x00000002},
362 {0x000098d0, 0x004b6a8e},
363 {0x000098d4, 0x00000820},
364 {0x000098dc, 0x00000000},
365 {0x000098f0, 0x00000000},
366 {0x000098f4, 0x00000000},
367 {0x00009c04, 0x00000000},
368 {0x00009c08, 0x03200000},
369 {0x00009c0c, 0x00000000},
370 {0x00009c10, 0x00000000},
371 {0x00009c14, 0x00046384},
372 {0x00009c18, 0x05b6b440},
373 {0x00009c1c, 0x00b6b440},
374 {0x00009d00, 0xc080a333},
375 {0x00009d04, 0x40206c10},
376 {0x00009d08, 0x009c4060},
377 {0x00009d0c, 0x1883800a},
378 {0x00009d10, 0x01834061},
379 {0x00009d14, 0x00c00400},
380 {0x00009d18, 0x00000000},
381 {0x00009d1c, 0x00000000},
382 {0x00009e08, 0x0038233c},
383 {0x00009e24, 0x990bb515},
384 {0x00009e28, 0x0a6f0000},
385 {0x00009e30, 0x06336f77},
386 {0x00009e34, 0x6af6532f},
387 {0x00009e38, 0x0cc80c00},
388 {0x00009e40, 0x0d261820},
389 {0x00009e4c, 0x00001004},
390 {0x00009e50, 0x00ff03f1},
391 {0x00009fc0, 0x80be4788},
392 {0x00009fc4, 0x0001efb5},
393 {0x00009fcc, 0x40000014},
394 {0x0000a20c, 0x00000000},
395 {0x0000a210, 0x00000000},
396 {0x0000a220, 0x00000000},
397 {0x0000a224, 0x00000000},
398 {0x0000a228, 0x10002310},
399 {0x0000a23c, 0x00000000},
400 {0x0000a244, 0x0c000000},
401 {0x0000a2a0, 0x00000001},
402 {0x0000a2c0, 0x00000001},
403 {0x0000a2c8, 0x00000000},
404 {0x0000a2cc, 0x18c43433},
405 {0x0000a2d4, 0x00000000},
406 {0x0000a2dc, 0x00000000},
407 {0x0000a2e0, 0x00000000},
408 {0x0000a2e4, 0x00000000},
409 {0x0000a2e8, 0x00000000},
410 {0x0000a2ec, 0x00000000},
411 {0x0000a2f0, 0x00000000},
412 {0x0000a2f4, 0x00000000},
413 {0x0000a2f8, 0x00000000},
414 {0x0000a344, 0x00000000},
415 {0x0000a34c, 0x00000000},
416 {0x0000a350, 0x0000a000},
417 {0x0000a364, 0x00000000},
418 {0x0000a370, 0x00000000},
419 {0x0000a390, 0x00000001},
420 {0x0000a394, 0x00000444},
421 {0x0000a398, 0x001f0e0f},
422 {0x0000a39c, 0x0075393f},
423 {0x0000a3a0, 0xb79f6427},
424 {0x0000a3a4, 0x00000000},
425 {0x0000a3a8, 0xaaaaaaaa},
426 {0x0000a3ac, 0x3c466478},
427 {0x0000a3c0, 0x20202020},
428 {0x0000a3c4, 0x22222220},
429 {0x0000a3c8, 0x20200020},
430 {0x0000a3cc, 0x20202020},
431 {0x0000a3d0, 0x20202020},
432 {0x0000a3d4, 0x20202020},
433 {0x0000a3d8, 0x20202020},
434 {0x0000a3dc, 0x20202020},
435 {0x0000a3e0, 0x20202020},
436 {0x0000a3e4, 0x20202020},
437 {0x0000a3e8, 0x20202020},
438 {0x0000a3ec, 0x20202020},
439 {0x0000a3f0, 0x00000000},
440 {0x0000a3f4, 0x00000006},
441 {0x0000a3f8, 0x0cdbd380},
442 {0x0000a3fc, 0x000f0f01},
443 {0x0000a400, 0x8fa91f01},
444 {0x0000a404, 0x00000000},
445 {0x0000a408, 0x0e79e5c6},
446 {0x0000a40c, 0x00820820},
447 {0x0000a414, 0x1ce739ce},
448 {0x0000a418, 0x2d0011ce},
449 {0x0000a41c, 0x1ce739ce},
450 {0x0000a420, 0x000001ce},
451 {0x0000a424, 0x1ce739ce},
452 {0x0000a428, 0x000001ce},
453 {0x0000a42c, 0x1ce739ce},
454 {0x0000a430, 0x1ce739ce},
455 {0x0000a434, 0x00000000},
456 {0x0000a438, 0x00001801},
457 {0x0000a43c, 0x00000000},
458 {0x0000a440, 0x00000000},
459 {0x0000a444, 0x00000000},
460 {0x0000a448, 0x04000000},
461 {0x0000a44c, 0x00000001},
462 {0x0000a450, 0x00010000},
463 {0x0000a458, 0x00000000},
464 {0x0000a5c4, 0x3fad9d74},
465 {0x0000a5c8, 0x0048060a},
466 {0x0000a5cc, 0x00000637},
467 {0x0000a760, 0x03020100},
468 {0x0000a764, 0x09080504},
469 {0x0000a768, 0x0d0c0b0a},
470 {0x0000a76c, 0x13121110},
471 {0x0000a770, 0x31301514},
472 {0x0000a774, 0x35343332},
473 {0x0000a778, 0x00000036},
474 {0x0000a780, 0x00000838},
475 {0x0000a7c0, 0x00000000},
476 {0x0000a7c4, 0xfffffffc},
477 {0x0000a7c8, 0x00000000},
478 {0x0000a7cc, 0x00000000},
479 {0x0000a7d0, 0x00000000},
480 {0x0000a7d4, 0x00000004},
481 {0x0000a7dc, 0x00000001},
482};
483
484static const u32 ar9485Modes_high_ob_db_tx_gain_1_0[][5] = {
485 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
486 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
487 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
488 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
489 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
490 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
491 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
492 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
493 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
494 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
495 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
496 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
497 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
498 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
499 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
500 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
501 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
502 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
503 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
504 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
505 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
506 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
507 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
508 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
509 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
510 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
511 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
512 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
513 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
514 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
515 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
516 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
517 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
518 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
519 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
520};
521
522static const u32 ar9485Common_rx_gain_1_0[][2] = {
523 /* Addr allmodes */
524 {0x0000a000, 0x00010000},
525 {0x0000a004, 0x00030002},
526 {0x0000a008, 0x00050004},
527 {0x0000a00c, 0x00810080},
528 {0x0000a010, 0x01800082},
529 {0x0000a014, 0x01820181},
530 {0x0000a018, 0x01840183},
531 {0x0000a01c, 0x01880185},
532 {0x0000a020, 0x018a0189},
533 {0x0000a024, 0x02850284},
534 {0x0000a028, 0x02890288},
535 {0x0000a02c, 0x03850384},
536 {0x0000a030, 0x03890388},
537 {0x0000a034, 0x038b038a},
538 {0x0000a038, 0x038d038c},
539 {0x0000a03c, 0x03910390},
540 {0x0000a040, 0x03930392},
541 {0x0000a044, 0x03950394},
542 {0x0000a048, 0x00000396},
543 {0x0000a04c, 0x00000000},
544 {0x0000a050, 0x00000000},
545 {0x0000a054, 0x00000000},
546 {0x0000a058, 0x00000000},
547 {0x0000a05c, 0x00000000},
548 {0x0000a060, 0x00000000},
549 {0x0000a064, 0x00000000},
550 {0x0000a068, 0x00000000},
551 {0x0000a06c, 0x00000000},
552 {0x0000a070, 0x00000000},
553 {0x0000a074, 0x00000000},
554 {0x0000a078, 0x00000000},
555 {0x0000a07c, 0x00000000},
556 {0x0000a080, 0x28282828},
557 {0x0000a084, 0x28282828},
558 {0x0000a088, 0x28282828},
559 {0x0000a08c, 0x28282828},
560 {0x0000a090, 0x28282828},
561 {0x0000a094, 0x21212128},
562 {0x0000a098, 0x171c1c1c},
563 {0x0000a09c, 0x02020212},
564 {0x0000a0a0, 0x00000202},
565 {0x0000a0a4, 0x00000000},
566 {0x0000a0a8, 0x00000000},
567 {0x0000a0ac, 0x00000000},
568 {0x0000a0b0, 0x00000000},
569 {0x0000a0b4, 0x00000000},
570 {0x0000a0b8, 0x00000000},
571 {0x0000a0bc, 0x00000000},
572 {0x0000a0c0, 0x001f0000},
573 {0x0000a0c4, 0x111f1100},
574 {0x0000a0c8, 0x111d111e},
575 {0x0000a0cc, 0x111b111c},
576 {0x0000a0d0, 0x22032204},
577 {0x0000a0d4, 0x22012202},
578 {0x0000a0d8, 0x221f2200},
579 {0x0000a0dc, 0x221d221e},
580 {0x0000a0e0, 0x33013302},
581 {0x0000a0e4, 0x331f3300},
582 {0x0000a0e8, 0x4402331e},
583 {0x0000a0ec, 0x44004401},
584 {0x0000a0f0, 0x441e441f},
585 {0x0000a0f4, 0x55015502},
586 {0x0000a0f8, 0x551f5500},
587 {0x0000a0fc, 0x6602551e},
588 {0x0000a100, 0x66006601},
589 {0x0000a104, 0x661e661f},
590 {0x0000a108, 0x7703661d},
591 {0x0000a10c, 0x77017702},
592 {0x0000a110, 0x00007700},
593 {0x0000a114, 0x00000000},
594 {0x0000a118, 0x00000000},
595 {0x0000a11c, 0x00000000},
596 {0x0000a120, 0x00000000},
597 {0x0000a124, 0x00000000},
598 {0x0000a128, 0x00000000},
599 {0x0000a12c, 0x00000000},
600 {0x0000a130, 0x00000000},
601 {0x0000a134, 0x00000000},
602 {0x0000a138, 0x00000000},
603 {0x0000a13c, 0x00000000},
604 {0x0000a140, 0x001f0000},
605 {0x0000a144, 0x111f1100},
606 {0x0000a148, 0x111d111e},
607 {0x0000a14c, 0x111b111c},
608 {0x0000a150, 0x22032204},
609 {0x0000a154, 0x22012202},
610 {0x0000a158, 0x221f2200},
611 {0x0000a15c, 0x221d221e},
612 {0x0000a160, 0x33013302},
613 {0x0000a164, 0x331f3300},
614 {0x0000a168, 0x4402331e},
615 {0x0000a16c, 0x44004401},
616 {0x0000a170, 0x441e441f},
617 {0x0000a174, 0x55015502},
618 {0x0000a178, 0x551f5500},
619 {0x0000a17c, 0x6602551e},
620 {0x0000a180, 0x66006601},
621 {0x0000a184, 0x661e661f},
622 {0x0000a188, 0x7703661d},
623 {0x0000a18c, 0x77017702},
624 {0x0000a190, 0x00007700},
625 {0x0000a194, 0x00000000},
626 {0x0000a198, 0x00000000},
627 {0x0000a19c, 0x00000000},
628 {0x0000a1a0, 0x00000000},
629 {0x0000a1a4, 0x00000000},
630 {0x0000a1a8, 0x00000000},
631 {0x0000a1ac, 0x00000000},
632 {0x0000a1b0, 0x00000000},
633 {0x0000a1b4, 0x00000000},
634 {0x0000a1b8, 0x00000000},
635 {0x0000a1bc, 0x00000000},
636 {0x0000a1c0, 0x00000000},
637 {0x0000a1c4, 0x00000000},
638 {0x0000a1c8, 0x00000000},
639 {0x0000a1cc, 0x00000000},
640 {0x0000a1d0, 0x00000000},
641 {0x0000a1d4, 0x00000000},
642 {0x0000a1d8, 0x00000000},
643 {0x0000a1dc, 0x00000000},
644 {0x0000a1e0, 0x00000000},
645 {0x0000a1e4, 0x00000000},
646 {0x0000a1e8, 0x00000000},
647 {0x0000a1ec, 0x00000000},
648 {0x0000a1f0, 0x00000396},
649 {0x0000a1f4, 0x00000396},
650 {0x0000a1f8, 0x00000396},
651 {0x0000a1fc, 0x00000296},
652};
653
654static const u32 ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
655 /* Addr allmodes */
656 {0x00018c00, 0x10252e5e},
657 {0x00018c04, 0x000801d8},
658 {0x00018c08, 0x0000580c},
659};
660
661static const u32 ar9485_1_0_pcie_phy_clkreq_enable_L1[][2] = {
662 /* Addr allmodes */
663 {0x00018c00, 0x10253e5e},
664 {0x00018c04, 0x000801d8},
665 {0x00018c08, 0x0000580c},
666};
667
668static const u32 ar9485_1_0_soc_preamble[][2] = {
669 /* Addr allmodes */
670 {0x00004090, 0x00aa10aa},
671 {0x000040a4, 0x00a0c9c9},
672 {0x00007048, 0x00000004},
673};
674
675static const u32 ar9485_fast_clock_1_0_baseband_postamble[][3] = {
676 /* Addr 5G_HT20 5G_HT40 */
677 {0x00009e00, 0x03721821, 0x03721821},
678 {0x0000a230, 0x0000400b, 0x00004016},
679 {0x0000a254, 0x00000898, 0x00001130},
680};
681
682static const u32 ar9485_1_0_baseband_postamble[][5] = {
683 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
684 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
685 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
686 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
687 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
688 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
689 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
690 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
691 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
692 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
693 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
694 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
695 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
696 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
697 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
698 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
699 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
700 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
701 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
702 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
703 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
704 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
705 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
706 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
707 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
708 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
709 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
710 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
711 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
712 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
713 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
714 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
715 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
716 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
717 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
718 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
719 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
720 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
721 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
722 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
723 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
724 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
725};
726
727static const u32 ar9485Modes_low_ob_db_tx_gain_1_0[][5] = {
728 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
729 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
730 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
731 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
732 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
733 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
734 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
735 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
736 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
737 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
738 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
739 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
740 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
741 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
742 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
743 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
744 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
745 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
746 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
747 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
748 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
749 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
750 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
751 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
752 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
753 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
754 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
755 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
756 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
757 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
758 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
759 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
760 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
761 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
762 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
763};
764
765static const u32 ar9485_1_0_pcie_phy_clkreq_disable_L1[][2] = {
766 /* Addr allmodes */
767 {0x00018c00, 0x10213e5e},
768 {0x00018c04, 0x000801d8},
769 {0x00018c08, 0x0000580c},
770};
771
772static const u32 ar9485_1_0_radio_postamble[][2] = {
773 /* Addr allmodes */
774 {0x0001609c, 0x0b283f31},
775 {0x000160ac, 0x24611800},
776 {0x000160b0, 0x03284f3e},
777 {0x0001610c, 0x00170000},
778 {0x00016140, 0x10804008},
779};
780
781static const u32 ar9485_1_0_mac_core[][2] = {
782 /* Addr allmodes */
783 {0x00000008, 0x00000000},
784 {0x00000030, 0x00020085},
785 {0x00000034, 0x00000005},
786 {0x00000040, 0x00000000},
787 {0x00000044, 0x00000000},
788 {0x00000048, 0x00000008},
789 {0x0000004c, 0x00000010},
790 {0x00000050, 0x00000000},
791 {0x00001040, 0x002ffc0f},
792 {0x00001044, 0x002ffc0f},
793 {0x00001048, 0x002ffc0f},
794 {0x0000104c, 0x002ffc0f},
795 {0x00001050, 0x002ffc0f},
796 {0x00001054, 0x002ffc0f},
797 {0x00001058, 0x002ffc0f},
798 {0x0000105c, 0x002ffc0f},
799 {0x00001060, 0x002ffc0f},
800 {0x00001064, 0x002ffc0f},
801 {0x000010f0, 0x00000100},
802 {0x00001270, 0x00000000},
803 {0x000012b0, 0x00000000},
804 {0x000012f0, 0x00000000},
805 {0x0000143c, 0x00000000},
806 {0x0000147c, 0x00000000},
807 {0x00008000, 0x00000000},
808 {0x00008004, 0x00000000},
809 {0x00008008, 0x00000000},
810 {0x0000800c, 0x00000000},
811 {0x00008018, 0x00000000},
812 {0x00008020, 0x00000000},
813 {0x00008038, 0x00000000},
814 {0x0000803c, 0x00000000},
815 {0x00008040, 0x00000000},
816 {0x00008044, 0x00000000},
817 {0x00008048, 0x00000000},
818 {0x0000804c, 0xffffffff},
819 {0x00008054, 0x00000000},
820 {0x00008058, 0x00000000},
821 {0x0000805c, 0x000fc78f},
822 {0x00008060, 0x0000000f},
823 {0x00008064, 0x00000000},
824 {0x00008070, 0x00000310},
825 {0x00008074, 0x00000020},
826 {0x00008078, 0x00000000},
827 {0x0000809c, 0x0000000f},
828 {0x000080a0, 0x00000000},
829 {0x000080a4, 0x02ff0000},
830 {0x000080a8, 0x0e070605},
831 {0x000080ac, 0x0000000d},
832 {0x000080b0, 0x00000000},
833 {0x000080b4, 0x00000000},
834 {0x000080b8, 0x00000000},
835 {0x000080bc, 0x00000000},
836 {0x000080c0, 0x2a800000},
837 {0x000080c4, 0x06900168},
838 {0x000080c8, 0x13881c20},
839 {0x000080cc, 0x01f40000},
840 {0x000080d0, 0x00252500},
841 {0x000080d4, 0x00a00000},
842 {0x000080d8, 0x00400000},
843 {0x000080dc, 0x00000000},
844 {0x000080e0, 0xffffffff},
845 {0x000080e4, 0x0000ffff},
846 {0x000080e8, 0x3f3f3f3f},
847 {0x000080ec, 0x00000000},
848 {0x000080f0, 0x00000000},
849 {0x000080f4, 0x00000000},
850 {0x000080fc, 0x00020000},
851 {0x00008100, 0x00000000},
852 {0x00008108, 0x00000052},
853 {0x0000810c, 0x00000000},
854 {0x00008110, 0x00000000},
855 {0x00008114, 0x000007ff},
856 {0x00008118, 0x000000aa},
857 {0x0000811c, 0x00003210},
858 {0x00008124, 0x00000000},
859 {0x00008128, 0x00000000},
860 {0x0000812c, 0x00000000},
861 {0x00008130, 0x00000000},
862 {0x00008134, 0x00000000},
863 {0x00008138, 0x00000000},
864 {0x0000813c, 0x0000ffff},
865 {0x00008144, 0xffffffff},
866 {0x00008168, 0x00000000},
867 {0x0000816c, 0x00000000},
868 {0x00008170, 0x18486200},
869 {0x00008174, 0x33332210},
870 {0x00008178, 0x00000000},
871 {0x0000817c, 0x00020000},
872 {0x000081c0, 0x00000000},
873 {0x000081c4, 0x33332210},
874 {0x000081c8, 0x00000000},
875 {0x000081cc, 0x00000000},
876 {0x000081d4, 0x00000000},
877 {0x000081ec, 0x00000000},
878 {0x000081f0, 0x00000000},
879 {0x000081f4, 0x00000000},
880 {0x000081f8, 0x00000000},
881 {0x000081fc, 0x00000000},
882 {0x00008240, 0x00100000},
883 {0x00008244, 0x0010f400},
884 {0x00008248, 0x00000800},
885 {0x0000824c, 0x0001e800},
886 {0x00008250, 0x00000000},
887 {0x00008254, 0x00000000},
888 {0x00008258, 0x00000000},
889 {0x0000825c, 0x40000000},
890 {0x00008260, 0x00080922},
891 {0x00008264, 0x9ca00010},
892 {0x00008268, 0xffffffff},
893 {0x0000826c, 0x0000ffff},
894 {0x00008270, 0x00000000},
895 {0x00008274, 0x40000000},
896 {0x00008278, 0x003e4180},
897 {0x0000827c, 0x00000004},
898 {0x00008284, 0x0000002c},
899 {0x00008288, 0x0000002c},
900 {0x0000828c, 0x000000ff},
901 {0x00008294, 0x00000000},
902 {0x00008298, 0x00000000},
903 {0x0000829c, 0x00000000},
904 {0x00008300, 0x00000140},
905 {0x00008314, 0x00000000},
906 {0x0000831c, 0x0000010d},
907 {0x00008328, 0x00000000},
908 {0x0000832c, 0x00000007},
909 {0x00008330, 0x00000302},
910 {0x00008334, 0x00000700},
911 {0x00008338, 0x00ff0000},
912 {0x0000833c, 0x02400000},
913 {0x00008340, 0x000107ff},
914 {0x00008344, 0xa248105b},
915 {0x00008348, 0x008f0000},
916 {0x0000835c, 0x00000000},
917 {0x00008360, 0xffffffff},
918 {0x00008364, 0xffffffff},
919 {0x00008368, 0x00000000},
920 {0x00008370, 0x00000000},
921 {0x00008374, 0x000000ff},
922 {0x00008378, 0x00000000},
923 {0x0000837c, 0x00000000},
924 {0x00008380, 0xffffffff},
925 {0x00008384, 0xffffffff},
926 {0x00008390, 0xffffffff},
927 {0x00008394, 0xffffffff},
928 {0x00008398, 0x00000000},
929 {0x0000839c, 0x00000000},
930 {0x000083a0, 0x00000000},
931 {0x000083a4, 0x0000fa14},
932 {0x000083a8, 0x000f0c00},
933 {0x000083ac, 0x33332210},
934 {0x000083b0, 0x33332210},
935 {0x000083b4, 0x33332210},
936 {0x000083b8, 0x33332210},
937 {0x000083bc, 0x00000000},
938 {0x000083c0, 0x00000000},
939 {0x000083c4, 0x00000000},
940 {0x000083c8, 0x00000000},
941 {0x000083cc, 0x00000200},
942 {0x000083d0, 0x000301ff},
943};
944
945static const u32 ar9485_1_1_mac_core[][2] = { 20static const u32 ar9485_1_1_mac_core[][2] = {
946 /* Addr allmodes */ 21 /* Addr allmodes */
947 {0x00000008, 0x00000000}, 22 {0x00000008, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 099bd4183ad..7c91ba4dce4 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -362,7 +362,7 @@ struct ath_vif {
362 * number of BSSIDs) if a given beacon does not go out even after waiting this 362 * number of BSSIDs) if a given beacon does not go out even after waiting this
363 * number of beacon intervals, the game's up. 363 * number of beacon intervals, the game's up.
364 */ 364 */
365#define BSTUCK_THRESH (9 * ATH_BCBUF) 365#define BSTUCK_THRESH 9
366#define ATH_BCBUF 4 366#define ATH_BCBUF 4
367#define ATH_DEFAULT_BINTVAL 100 /* TU */ 367#define ATH_DEFAULT_BINTVAL 100 /* TU */
368#define ATH_DEFAULT_BMISS_LIMIT 10 368#define ATH_DEFAULT_BMISS_LIMIT 10
@@ -386,7 +386,7 @@ struct ath_beacon {
386 u32 beaconq; 386 u32 beaconq;
387 u32 bmisscnt; 387 u32 bmisscnt;
388 u32 ast_be_xmit; 388 u32 ast_be_xmit;
389 u64 bc_tstamp; 389 u32 bc_tstamp;
390 struct ieee80211_vif *bslot[ATH_BCBUF]; 390 struct ieee80211_vif *bslot[ATH_BCBUF];
391 int slottime; 391 int slottime;
392 int slotupdate; 392 int slotupdate;
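The header change above redefines BSTUCK_THRESH as a per-interface miss budget; as the beacon.c hunks below show, the tasklet now multiplies it by the live count of beaconing interfaces (sc->nbcnvifs) instead of baking in the compile-time slot count. A standalone sketch of the before/after predicate, with assumed values:

#include <stdbool.h>

#define BSTUCK_THRESH 9   /* per-interface miss budget (new definition) */
#define ATH_BCBUF     4   /* compile-time beacon slots */

/* Old behaviour: fixed limit of 9 * ATH_BCBUF = 36 missed beacons,
 * regardless of how many interfaces are actually beaconing. */
static bool beacon_stuck_old(unsigned misses)
{
    return misses >= BSTUCK_THRESH * ATH_BCBUF;
}

/* New behaviour: the limit scales with active beacon interfaces, so a
 * single-vif AP is declared stuck after 9 misses instead of 36. */
static bool beacon_stuck_new(unsigned misses, unsigned nbcnvifs)
{
    return misses >= BSTUCK_THRESH * nbcnvifs;
}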
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 6d2a545fc35..74f33bc193f 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -57,8 +57,8 @@ int ath_beaconq_config(struct ath_softc *sc)
57 57
58/* 58/*
59 * Associates the beacon frame buffer with a transmit descriptor. Will set 59 * Associates the beacon frame buffer with a transmit descriptor. Will set
 60 * up all required antenna switch parameters, rate codes, and channel flags. 60 * up rate codes and channel flags. Beacons are always sent out at the
61 * Beacons are always sent out at the lowest rate, and are not retried. 61 * lowest rate, and are not retried.
62*/ 62*/
63static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp, 63static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
64 struct ath_buf *bf, int rateidx) 64 struct ath_buf *bf, int rateidx)
@@ -68,7 +68,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
68 struct ath_common *common = ath9k_hw_common(ah); 68 struct ath_common *common = ath9k_hw_common(ah);
69 struct ath_desc *ds; 69 struct ath_desc *ds;
70 struct ath9k_11n_rate_series series[4]; 70 struct ath9k_11n_rate_series series[4];
71 int flags, antenna, ctsrate = 0, ctsduration = 0; 71 int flags, ctsrate = 0, ctsduration = 0;
72 struct ieee80211_supported_band *sband; 72 struct ieee80211_supported_band *sband;
73 u8 rate = 0; 73 u8 rate = 0;
74 74
@@ -76,12 +76,6 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
76 flags = ATH9K_TXDESC_NOACK; 76 flags = ATH9K_TXDESC_NOACK;
77 77
78 ds->ds_link = 0; 78 ds->ds_link = 0;
79 /*
80 * Switch antenna every beacon.
81 * Should only switch every beacon period, not for every SWBA
82 * XXX assumes two antennae
83 */
84 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
85 79
86 sband = &sc->sbands[common->hw->conf.channel->band]; 80 sband = &sc->sbands[common->hw->conf.channel->band];
87 rate = sband->bitrates[rateidx].hw_value; 81 rate = sband->bitrates[rateidx].hw_value;
@@ -278,7 +272,7 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
278 return -ENOMEM; 272 return -ENOMEM;
279 273
280 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 274 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
281 sc->beacon.bc_tstamp = le64_to_cpu(tstamp); 275 sc->beacon.bc_tstamp = (u32) le64_to_cpu(tstamp);
282 /* Calculate a TSF adjustment factor required for staggered beacons. */ 276 /* Calculate a TSF adjustment factor required for staggered beacons. */
283 if (avp->av_bslot > 0) { 277 if (avp->av_bslot > 0) {
284 u64 tsfadjust; 278 u64 tsfadjust;
@@ -294,8 +288,8 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
294 * adjustment. Other slots are adjusted to get the timestamp 288 * adjustment. Other slots are adjusted to get the timestamp
295 * close to the TBTT for the BSS. 289 * close to the TBTT for the BSS.
296 */ 290 */
297 tsfadjust = intval * avp->av_bslot / ATH_BCBUF; 291 tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF;
298 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); 292 avp->tsf_adjust = cpu_to_le64(tsfadjust);
299 293
300 ath_dbg(common, ATH_DBG_BEACON, 294 ath_dbg(common, ATH_DBG_BEACON,
301 "stagger beacons, bslot %d intval %u tsfadjust %llu\n", 295 "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
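The tsfadjust change above converts to microseconds before dividing by ATH_BCBUF, instead of truncating in whole TU first and converting afterwards. A quick standalone check with an assumed 101 TU beacon interval shows the difference; the true slot-1 offset is 25.25 TU = 25856 us:

#include <stdint.h>
#include <stdio.h>

#define TU_TO_USEC(x) ((x) * 1024)
#define ATH_BCBUF 4

int main(void)
{
    uint64_t intval = 101, bslot = 1;   /* assumed values */

    /* old: truncate in TU first, losing up to ~1 TU (1024 us) */
    uint64_t old_adj = TU_TO_USEC(intval * bslot / ATH_BCBUF);
    /* new: convert first, then divide in microseconds */
    uint64_t new_adj = TU_TO_USEC(intval * bslot) / ATH_BCBUF;

    printf("old %llu us, new %llu us\n",    /* old 25600 us, new 25856 us */
           (unsigned long long)old_adj, (unsigned long long)new_adj);
    return 0;
}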
@@ -369,12 +363,13 @@ void ath_beacon_tasklet(unsigned long data)
369 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) { 363 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
370 sc->beacon.bmisscnt++; 364 sc->beacon.bmisscnt++;
371 365
372 if (sc->beacon.bmisscnt < BSTUCK_THRESH) { 366 if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
373 ath_dbg(common, ATH_DBG_BSTUCK, 367 ath_dbg(common, ATH_DBG_BSTUCK,
374 "missed %u consecutive beacons\n", 368 "missed %u consecutive beacons\n",
375 sc->beacon.bmisscnt); 369 sc->beacon.bmisscnt);
376 ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq); 370 ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
377 ath9k_hw_bstuck_nfcal(ah); 371 if (sc->beacon.bmisscnt > 3)
372 ath9k_hw_bstuck_nfcal(ah);
378 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 373 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
379 ath_dbg(common, ATH_DBG_BSTUCK, 374 ath_dbg(common, ATH_DBG_BSTUCK,
380 "beacon is officially stuck\n"); 375 "beacon is officially stuck\n");
@@ -385,13 +380,6 @@ void ath_beacon_tasklet(unsigned long data)
385 return; 380 return;
386 } 381 }
387 382
388 if (sc->beacon.bmisscnt != 0) {
389 ath_dbg(common, ATH_DBG_BSTUCK,
390 "resume beacon xmit after %u misses\n",
391 sc->beacon.bmisscnt);
392 sc->beacon.bmisscnt = 0;
393 }
394
395 /* 383 /*
 396 * Generate beacon frames. We are sending frames 384 * Generate beacon frames. We are sending frames
397 * staggered so calculate the slot for this frame based 385 * staggered so calculate the slot for this frame based
@@ -401,8 +389,9 @@ void ath_beacon_tasklet(unsigned long data)
401 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL; 389 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
402 390
403 tsf = ath9k_hw_gettsf64(ah); 391 tsf = ath9k_hw_gettsf64(ah);
404 tsftu = TSF_TO_TU(tsf>>32, tsf); 392 tsf += TU_TO_USEC(ah->config.sw_beacon_response_time);
405 slot = ((tsftu % intval) * ATH_BCBUF) / intval; 393 tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF);
394 slot = (tsftu % (intval * ATH_BCBUF)) / intval;
406 /* 395 /*
407 * Reverse the slot order to get slot 0 on the TBTT offset that does 396 * Reverse the slot order to get slot 0 on the TBTT offset that does
408 * not require TSF adjustment and other slots adding 397 * not require TSF adjustment and other slots adding
@@ -415,7 +404,7 @@ void ath_beacon_tasklet(unsigned long data)
415 404
416 ath_dbg(common, ATH_DBG_BEACON, 405 ath_dbg(common, ATH_DBG_BEACON,
417 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 406 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
418 slot, tsf, tsftu, intval, vif); 407 slot, tsf, tsftu / ATH_BCBUF, intval, vif);
419 408
420 bfaddr = 0; 409 bfaddr = 0;
421 if (vif) { 410 if (vif) {
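The reworked slot computation above does two things: it adds the software beacon response time to the TSF so a SWBA interrupt that fires slightly before the TBTT still lands in the intended interval, and it scales the TSF by ATH_BCBUF before the TU conversion so quarter-TU precision is kept. A standalone model with assumed values (response time 10 TU, SWBA firing 50 us before the 100 TU boundary):

#include <stdint.h>
#include <stdio.h>

#define ATH_BCBUF 4
#define TSF_TO_TU(h, l) ((((uint32_t)(h)) << 22) | (((uint32_t)(l)) >> 10))
#define TU_TO_USEC(x) ((x) * 1024u)

int main(void)
{
    uint64_t tsf = 102350;      /* 50 us before the 100 TU TBTT */
    uint32_t intval = 100;      /* beacon interval in TU */
    uint32_t sw_beacon_response_time = 10;  /* assumed, in TU */

    /* old: raw TSF, whole-TU precision -> maps to slot 3 of the
     * interval that is just ending */
    uint32_t tu_old = TSF_TO_TU(tsf >> 32, tsf);
    uint32_t slot_old = ((tu_old % intval) * ATH_BCBUF) / intval;

    /* new: compensate for the early interrupt, keep quarter-TU bits */
    uint64_t t = tsf + TU_TO_USEC(sw_beacon_response_time);
    uint32_t tu_new = TSF_TO_TU((t * ATH_BCBUF) >> 32, t * ATH_BCBUF);
    uint32_t slot_new = (tu_new % (intval * ATH_BCBUF)) / intval;

    printf("old slot %u, new slot %u\n", slot_old, slot_new);  /* 3, 0 */
    return 0;
}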
@@ -424,6 +413,13 @@ void ath_beacon_tasklet(unsigned long data)
424 bfaddr = bf->bf_daddr; 413 bfaddr = bf->bf_daddr;
425 bc = 1; 414 bc = 1;
426 } 415 }
416
417 if (sc->beacon.bmisscnt != 0) {
418 ath_dbg(common, ATH_DBG_BSTUCK,
419 "resume beacon xmit after %u misses\n",
420 sc->beacon.bmisscnt);
421 sc->beacon.bmisscnt = 0;
422 }
427 } 423 }
428 424
429 /* 425 /*
@@ -463,13 +459,17 @@ static void ath9k_beacon_init(struct ath_softc *sc,
463 u32 next_beacon, 459 u32 next_beacon,
464 u32 beacon_period) 460 u32 beacon_period)
465{ 461{
466 if (beacon_period & ATH9K_BEACON_RESET_TSF) 462 if (sc->sc_flags & SC_OP_TSF_RESET) {
467 ath9k_ps_wakeup(sc); 463 ath9k_ps_wakeup(sc);
464 ath9k_hw_reset_tsf(sc->sc_ah);
465 }
468 466
469 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period); 467 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
470 468
471 if (beacon_period & ATH9K_BEACON_RESET_TSF) 469 if (sc->sc_flags & SC_OP_TSF_RESET) {
472 ath9k_ps_restore(sc); 470 ath9k_ps_restore(sc);
471 sc->sc_flags &= ~SC_OP_TSF_RESET;
472 }
473} 473}
474 474
475/* 475/*
@@ -484,18 +484,14 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
484 u32 nexttbtt, intval; 484 u32 nexttbtt, intval;
485 485
486 /* NB: the beacon interval is kept internally in TU's */ 486 /* NB: the beacon interval is kept internally in TU's */
487 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; 487 intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
488 intval /= ATH_BCBUF; /* for staggered beacons */ 488 intval /= ATH_BCBUF; /* for staggered beacons */
489 nexttbtt = intval; 489 nexttbtt = intval;
490 490
491 if (sc->sc_flags & SC_OP_TSF_RESET)
492 intval |= ATH9K_BEACON_RESET_TSF;
493
494 /* 491 /*
495 * In AP mode we enable the beacon timers and SWBA interrupts to 492 * In AP mode we enable the beacon timers and SWBA interrupts to
496 * prepare beacon frames. 493 * prepare beacon frames.
497 */ 494 */
498 intval |= ATH9K_BEACON_ENA;
499 ah->imask |= ATH9K_INT_SWBA; 495 ah->imask |= ATH9K_INT_SWBA;
500 ath_beaconq_config(sc); 496 ath_beaconq_config(sc);
501 497
@@ -505,11 +501,6 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
505 ath9k_beacon_init(sc, nexttbtt, intval); 501 ath9k_beacon_init(sc, nexttbtt, intval);
506 sc->beacon.bmisscnt = 0; 502 sc->beacon.bmisscnt = 0;
507 ath9k_hw_set_interrupts(ah, ah->imask); 503 ath9k_hw_set_interrupts(ah, ah->imask);
508
509 /* Clear the reset TSF flag, so that subsequent beacon updation
510 will not reset the HW TSF. */
511
512 sc->sc_flags &= ~SC_OP_TSF_RESET;
513} 504}
514 505
515/* 506/*
@@ -643,25 +634,20 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
643{ 634{
644 struct ath_hw *ah = sc->sc_ah; 635 struct ath_hw *ah = sc->sc_ah;
645 struct ath_common *common = ath9k_hw_common(ah); 636 struct ath_common *common = ath9k_hw_common(ah);
646 u64 tsf; 637 u32 tsf, delta, intval, nexttbtt;
647 u32 tsftu, intval, nexttbtt; 638
648 639 tsf = ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE);
649 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; 640 intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
650 641
651 642 if (!sc->beacon.bc_tstamp)
652 /* Pull nexttbtt forward to reflect the current TSF */ 643 nexttbtt = tsf + intval;
653 644 else {
654 nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp); 645 if (tsf > sc->beacon.bc_tstamp)
655 if (nexttbtt == 0) 646 delta = (tsf - sc->beacon.bc_tstamp);
656 nexttbtt = intval; 647 else
657 else if (intval) 648 delta = (tsf + 1 + (~0U - sc->beacon.bc_tstamp));
658 nexttbtt = roundup(nexttbtt, intval); 649 nexttbtt = tsf + roundup(delta, intval);
659 650 }
660 tsf = ath9k_hw_gettsf64(ah);
661 tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
662 do {
663 nexttbtt += intval;
664 } while (nexttbtt < tsftu);
665 651
666 ath_dbg(common, ATH_DBG_BEACON, 652 ath_dbg(common, ATH_DBG_BEACON,
667 "IBSS nexttbtt %u intval %u (%u)\n", 653 "IBSS nexttbtt %u intval %u (%u)\n",
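The IBSS rewrite above replaces the old catch-up loop with a single delta-and-roundup step on 32-bit timestamps, including an explicit branch for TSF wraparound. A standalone model of the computation; roundup_u32() mirrors the kernel's roundup() macro:

#include <stdint.h>
#include <stdio.h>

#define TU_TO_USEC(x) ((x) * 1024u)

static uint32_t roundup_u32(uint32_t x, uint32_t y)
{
    return ((x + y - 1) / y) * y;       /* same as the kernel's roundup() */
}

static uint32_t next_tbtt(uint32_t tsf, uint32_t bc_tstamp, uint32_t intval)
{
    uint32_t delta;

    if (!bc_tstamp)                     /* no beacon heard yet */
        return tsf + intval;

    if (tsf > bc_tstamp)
        delta = tsf - bc_tstamp;
    else                                /* TSF wrapped past 2^32 */
        delta = tsf + 1 + (~0u - bc_tstamp);

    return tsf + roundup_u32(delta, intval);
}

int main(void)
{
    uint32_t intval = TU_TO_USEC(100);
    /* wrap case: beacon heard near the top of the 32-bit range */
    printf("%u\n", next_tbtt(5000, 0xfffff000u, intval));  /* 107400 */
    return 0;
}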
@@ -672,7 +658,6 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
672 * if we need to manually prepare beacon frames. Otherwise we use a 658 * if we need to manually prepare beacon frames. Otherwise we use a
673 * self-linked tx descriptor and let the hardware deal with things. 659 * self-linked tx descriptor and let the hardware deal with things.
674 */ 660 */
675 intval |= ATH9K_BEACON_ENA;
676 ah->imask |= ATH9K_INT_SWBA; 661 ah->imask |= ATH9K_INT_SWBA;
677 662
678 ath_beaconq_config(sc); 663 ath_beaconq_config(sc);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 8df5a92a20f..a762cadb3ab 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1088,67 +1088,43 @@ int ath9k_init_debug(struct ath_hw *ah)
1088 return -ENOMEM; 1088 return -ENOMEM;
1089 1089
1090#ifdef CONFIG_ATH_DEBUG 1090#ifdef CONFIG_ATH_DEBUG
1091 if (!debugfs_create_file("debug", S_IRUSR | S_IWUSR, 1091 debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1092 sc->debug.debugfs_phy, sc, &fops_debug)) 1092 sc, &fops_debug);
1093 goto err;
1094#endif 1093#endif
1095 1094 debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
1096 if (!debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, 1095 &fops_dma);
1097 sc, &fops_dma)) 1096 debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
1098 goto err; 1097 &fops_interrupt);
1099 1098 debugfs_create_file("wiphy", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1100 if (!debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, 1099 sc, &fops_wiphy);
1101 sc, &fops_interrupt)) 1100 debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
1102 goto err; 1101 &fops_xmit);
1103 1102 debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc,
1104 if (!debugfs_create_file("wiphy", S_IRUSR | S_IWUSR, 1103 &fops_stations);
1105 sc->debug.debugfs_phy, sc, &fops_wiphy)) 1104 debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
1106 goto err; 1105 &fops_misc);
1107 1106 debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
1108 if (!debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, 1107 &fops_recv);
1109 sc, &fops_xmit)) 1108 debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
1110 goto err; 1109 sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
1111 1110 debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
1112 if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, 1111 sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
1113 sc, &fops_stations)) 1112 debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1114 goto err; 1113 sc, &fops_regidx);
1115 1114 debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1116 if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, 1115 sc, &fops_regval);
1117 sc, &fops_misc)) 1116 debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
1118 goto err; 1117 sc->debug.debugfs_phy,
1119 1118 &ah->config.cwm_ignore_extcca);
1120 if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, 1119 debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc,
1121 sc, &fops_recv)) 1120 &fops_regdump);
1122 goto err; 1121
1123 1122 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
1124 if (!debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR, 1123 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
1125 sc->debug.debugfs_phy, sc, &fops_rx_chainmask)) 1124
1126 goto err; 1125 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
1127 1126 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
1128 if (!debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
1129 sc->debug.debugfs_phy, sc, &fops_tx_chainmask))
1130 goto err;
1131
1132 if (!debugfs_create_file("regidx", S_IRUSR | S_IWUSR,
1133 sc->debug.debugfs_phy, sc, &fops_regidx))
1134 goto err;
1135
1136 if (!debugfs_create_file("regval", S_IRUSR | S_IWUSR,
1137 sc->debug.debugfs_phy, sc, &fops_regval))
1138 goto err;
1139
1140 if (!debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
1141 sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
1142 goto err;
1143
1144 if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy,
1145 sc, &fops_regdump))
1146 goto err;
1147 1127
1148 sc->debug.regidx = 0; 1128 sc->debug.regidx = 0;
1149 return 0; 1129 return 0;
1150err:
1151 debugfs_remove_recursive(sc->debug.debugfs_phy);
1152 sc->debug.debugfs_phy = NULL;
1153 return -ENOMEM;
1154} 1130}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 8cd8333cc08..2f0712ea49a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -392,6 +392,8 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
392 numXpdGain); 392 numXpdGain);
393 } 393 }
394 394
395 ENABLE_REGWRITE_BUFFER(ah);
396
395 if (i == 0) { 397 if (i == 0) {
396 if (!ath9k_hw_ar9287_get_eeprom(ah, 398 if (!ath9k_hw_ar9287_get_eeprom(ah,
397 EEP_OL_PWRCTRL)) { 399 EEP_OL_PWRCTRL)) {
@@ -442,6 +444,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
442 regOffset += 4; 444 regOffset += 4;
443 } 445 }
444 } 446 }
447 REGWRITE_BUFFER_FLUSH(ah);
445 } 448 }
446 } 449 }
447 450
@@ -757,6 +760,8 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
757 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2; 760 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
758 } 761 }
759 762
763 ENABLE_REGWRITE_BUFFER(ah);
764
760 /* OFDM power per rate */ 765 /* OFDM power per rate */
761 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 766 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
762 ATH9K_POW_SM(ratesArray[rate18mb], 24) 767 ATH9K_POW_SM(ratesArray[rate18mb], 24)
@@ -840,6 +845,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
840 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) 845 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
841 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 846 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
842 } 847 }
848 REGWRITE_BUFFER_FLUSH(ah);
843} 849}
844 850
845static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah, 851static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
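The ENABLE_REGWRITE_BUFFER/REGWRITE_BUFFER_FLUSH pairs added here (and in eeprom_def.c below) batch the long runs of REG_WRITE calls so they go out as one burst rather than one bus transaction each, which matters most on USB targets. A toy standalone model of the pattern; the queue is a stand-in for the driver's real write buffer:

#include <stdint.h>
#include <stdio.h>

static struct { uint32_t reg, val; } wq[64];
static unsigned wq_len;
static int buffered;

static void enable_regwrite_buffer(void) { buffered = 1; }

static void reg_write(uint32_t reg, uint32_t val)
{
    if (buffered && wq_len < 64) {      /* queue while buffering */
        wq[wq_len].reg = reg;
        wq[wq_len].val = val;
        wq_len++;
        return;
    }
    printf("single write %#x <- %#x\n", reg, val);
}

static void regwrite_buffer_flush(void)
{
    printf("flushing %u writes as one burst\n", wq_len);
    wq_len = 0;
    buffered = 0;
}

int main(void)
{
    enable_regwrite_buffer();
    for (uint32_t r = 0; r < 8; r++)
        reg_write(0xa580 + 4 * r, 0);   /* e.g. a power cal table */
    regwrite_buffer_flush();
    return 0;
}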
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index fccd87df730..995949ddd63 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -799,6 +799,8 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 						      pwr_table_offset,
 						      &diff);
 
+	ENABLE_REGWRITE_BUFFER(ah);
+
 	if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
 		if (OLC_FOR_AR9280_20_LATER) {
 			REG_WRITE(ah,
@@ -847,6 +849,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 
 				regOffset += 4;
 			}
+			REGWRITE_BUFFER_FLUSH(ah);
 		}
 	}
 
@@ -1205,6 +1208,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 		}
 	}
 
+	ENABLE_REGWRITE_BUFFER(ah);
+
 	REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
 		  ATH9K_POW_SM(ratesArray[rate18mb], 24)
 		  | ATH9K_POW_SM(ratesArray[rate12mb], 16)
@@ -1291,6 +1296,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 		REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
 			  ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
 			  | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
+
+	REGWRITE_BUFFER_FLUSH(ah);
 }
 
 static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 0fb8f8ac275..44a0a886124 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -41,12 +41,14 @@ void ath_init_leds(struct ath_softc *sc)
 {
 	int ret;
 
-	if (AR_SREV_9287(sc->sc_ah))
-		sc->sc_ah->led_pin = ATH_LED_PIN_9287;
-	else if (AR_SREV_9485(sc->sc_ah))
-		sc->sc_ah->led_pin = ATH_LED_PIN_9485;
-	else
-		sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
+	if (sc->sc_ah->led_pin < 0) {
+		if (AR_SREV_9287(sc->sc_ah))
+			sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+		else if (AR_SREV_9485(sc->sc_ah))
+			sc->sc_ah->led_pin = ATH_LED_PIN_9485;
+		else
+			sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
+	}
 
 	/* Configure gpio 1 for output */
 	ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
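
Note: the gpio.c hunk turns a negative led_pin into an "unset" sentinel, so a board-specific pin supplied via platform data (see the init.c hunk later in this patch) is no longer clobbered by the chip defaults. A minimal sketch of the selection logic; the numeric values here are placeholders, not the real ATH_LED_PIN_* constants:

    #include <stdio.h>

    #define LED_PIN_9287 10     /* placeholder values for illustration */
    #define LED_PIN_9485 6
    #define LED_PIN_DEF  1

    /* Pick a chip default only when the pin was not already set
     * (e.g. by platform data); -1 means "no pin assigned yet". */
    static int pick_led_pin(int current_pin, int is_9287, int is_9485)
    {
            if (current_pin >= 0)
                    return current_pin; /* keep the board-provided pin */
            if (is_9287)
                    return LED_PIN_9287;
            if (is_9485)
                    return LED_PIN_9485;
            return LED_PIN_DEF;
    }

    int main(void)
    {
            printf("%d\n", pick_led_pin(-1, 1, 0)); /* falls back to 10 */
            printf("%d\n", pick_led_pin(3, 1, 0));  /* keeps 3 */
            return 0;
    }
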
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 7dc20489f2e..f1b8af64569 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -222,8 +222,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
 	struct tx_buf *tx_buf = NULL;
 	struct sk_buff *nskb = NULL;
 	int ret = 0, i;
-	u16 *hdr, tx_skb_cnt = 0;
+	u16 tx_skb_cnt = 0;
 	u8 *buf;
+	__le16 *hdr;
 
 	if (hif_dev->tx.tx_skb_cnt == 0)
 		return 0;
@@ -248,9 +249,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
 
 		buf = tx_buf->buf;
 		buf += tx_buf->offset;
-		hdr = (u16 *)buf;
-		*hdr++ = nskb->len;
-		*hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+		hdr = (__le16 *)buf;
+		*hdr++ = cpu_to_le16(nskb->len);
+		*hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
 		buf += 4;
 		memcpy(buf, nskb->data, nskb->len);
 		tx_buf->len = nskb->len + 4;
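
Note: this hif_usb.c change is an endianness fix. The two 16-bit fields of the stream-mode header go on the wire in little-endian byte order, so they must be stored with cpu_to_le16() instead of as raw host-order u16 (a no-op on x86, but required on big-endian hosts). A standalone sketch of the 4-byte framing; the names here are illustrative, not the driver's:

    #include <stdint.h>
    #include <string.h>

    /* Store a host-order u16 as little-endian, portably. */
    static inline void put_le16(uint8_t *p, uint16_t v)
    {
            p[0] = v & 0xff;
            p[1] = v >> 8;
    }

    #define STREAM_TAG 0x697e   /* hypothetical stream-mode tag value */

    /* Prepend the 4-byte <len, tag> header and copy the payload. */
    static size_t frame_packet(uint8_t *buf, const uint8_t *data, uint16_t len)
    {
            put_le16(buf + 0, len);
            put_le16(buf + 2, STREAM_TAG);
            memcpy(buf + 4, data, len);
            return len + 4;
    }
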
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 753a245c5ad..ec47be94b74 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -328,7 +328,7 @@ struct ath9k_debug {
 #endif /* CONFIG_ATH9K_HTC_DEBUGFS */
 
 #define ATH_LED_PIN_DEF			1
-#define ATH_LED_PIN_9287		8
+#define ATH_LED_PIN_9287		10
 #define ATH_LED_PIN_9271		15
 #define ATH_LED_PIN_7010		12
 #define ATH_LED_ON_DURATION_IDLE	350	/* in msecs */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 8d1d8792436..8f56158e588 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -155,7 +155,7 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
 		nexttbtt = intval;
 
 	if (priv->op_flags & OP_TSF_RESET) {
-		intval |= ATH9K_BEACON_RESET_TSF;
+		ath9k_hw_reset_tsf(priv->ah);
 		priv->op_flags &= ~OP_TSF_RESET;
 	} else {
 		/*
@@ -168,8 +168,6 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
 		} while (nexttbtt < tsftu);
 	}
 
-	intval |= ATH9K_BEACON_ENA;
-
 	if (priv->op_flags & OP_ENABLE_BEACON)
 		imask |= ATH9K_INT_SWBA;
 
@@ -178,7 +176,7 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
 		bss_conf->beacon_interval, nexttbtt, imask);
 
 	WMI_CMD(WMI_DISABLE_INTR_CMDID);
-	ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+	ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
 	priv->bmiss_cnt = 0;
 	htc_imask = cpu_to_be32(imask);
 	WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
@@ -207,7 +205,6 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
 		nexttbtt += intval;
 	} while (nexttbtt < tsftu);
 
-	intval |= ATH9K_BEACON_ENA;
 	if (priv->op_flags & OP_ENABLE_BEACON)
 		imask |= ATH9K_INT_SWBA;
 
@@ -216,7 +213,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
 		bss_conf->beacon_interval, nexttbtt, imask);
 
 	WMI_CMD(WMI_DISABLE_INTR_CMDID);
-	ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+	ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
 	priv->bmiss_cnt = 0;
 	htc_imask = cpu_to_be32(imask);
 	WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
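
Note: with the ATH9K_BEACON_ENA/ATH9K_BEACON_RESET_TSF flag bits gone (the TSF reset now happens directly via ath9k_hw_reset_tsf()), ath9k_hw_beaconinit() takes plain microsecond values and the HTC callers convert time units (TU) themselves. A worked example of the conversion, assuming the usual ath9k definition of TU_TO_USEC:

    #include <stdio.h>
    #include <stdint.h>

    #define TU_TO_USEC(_tu) ((_tu) << 10)      /* 1 TU = 1024 microseconds */

    int main(void)
    {
            uint32_t intval = 100;             /* beacon interval in TU */
            uint32_t nexttbtt = 3 * intval;    /* third TBTT, in TU */

            /* What the HTC beacon code now passes to ath9k_hw_beaconinit() */
            printf("next beacon: %u us\n", (unsigned)TU_TO_USEC(nexttbtt)); /* 307200 */
            printf("period:      %u us\n", (unsigned)TU_TO_USEC(intval));   /* 102400 */
            return 0;
    }
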
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index fc67c937e17..8303b34bdc9 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -430,13 +430,16 @@ static void ath9k_regwrite_flush(void *hw_priv)
 	mutex_unlock(&priv->wmi->multi_write_mutex);
 }
 
-static const struct ath_ops ath9k_common_ops = {
-	.read = ath9k_regread,
-	.multi_read = ath9k_multi_regread,
-	.write = ath9k_regwrite,
-	.enable_write_buffer = ath9k_enable_regwrite_buffer,
-	.write_flush = ath9k_regwrite_flush,
-};
+static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+{
+	u32 val;
+
+	val = ath9k_regread(hw_priv, reg_offset);
+	val &= ~clr;
+	val |= set;
+	ath9k_regwrite(hw_priv, val, reg_offset);
+	return val;
+}
 
 static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
 {
@@ -561,13 +564,7 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
 	int i = 0;
 
 	/* Get the hardware key cache size. */
-	common->keymax = priv->ah->caps.keycache_size;
-	if (common->keymax > ATH_KEYMAX) {
-		ath_dbg(common, ATH_DBG_ANY,
-			"Warning, using only %u entries in %u key cache\n",
-			ATH_KEYMAX, common->keymax);
-		common->keymax = ATH_KEYMAX;
-	}
+	common->keymax = AR_KEYTABLE_SIZE;
 
 	if (priv->ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
 		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
@@ -658,10 +655,16 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
 	ah->hw_version.subsysid = 0; /* FIXME */
 	ah->hw_version.usbdev = drv_info;
 	ah->ah_flags |= AH_USE_EEPROM;
+	ah->reg_ops.read = ath9k_regread;
+	ah->reg_ops.multi_read = ath9k_multi_regread;
+	ah->reg_ops.write = ath9k_regwrite;
+	ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer;
+	ah->reg_ops.write_flush = ath9k_regwrite_flush;
+	ah->reg_ops.rmw = ath9k_reg_rmw;
 	priv->ah = ah;
 
 	common = ath9k_hw_common(ah);
-	common->ops = &ath9k_common_ops;
+	common->ops = &ah->reg_ops;
 	common->bus_ops = &ath9k_usb_bus_ops;
 	common->ah = ah;
 	common->hw = priv->hw;
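
Note: the new ath9k_reg_rmw() callback gives each bus path a single read-modify-write primitive; the ordering matters — clear first, then set — so a bit named in both masks ends up set. A host-side sketch of the same semantics:

    #include <stdint.h>
    #include <assert.h>

    /* Same clear-then-set ordering as the rmw callbacks in this patch. */
    static uint32_t rmw(uint32_t old, uint32_t set, uint32_t clr)
    {
            old &= ~clr;
            old |= set;
            return old;
    }

    int main(void)
    {
            assert(rmw(0x0f, 0x10, 0x01) == 0x1e); /* clear bit 0, set bit 4 */
            assert(rmw(0x00, 0x04, 0x04) == 0x04); /* set wins on overlap */
            return 0;
    }
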
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 338b07502f1..298f4d6cbdb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -130,6 +130,20 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
 }
 EXPORT_SYMBOL(ath9k_hw_wait);
 
+void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
+			  int column, unsigned int *writecnt)
+{
+	int r;
+
+	ENABLE_REGWRITE_BUFFER(ah);
+	for (r = 0; r < array->ia_rows; r++) {
+		REG_WRITE(ah, INI_RA(array, r, 0),
+			  INI_RA(array, r, column));
+		DO_DELAY(*writecnt);
+	}
+	REGWRITE_BUFFER_FLUSH(ah);
+}
+
 u32 ath9k_hw_reverse_bits(u32 val, u32 n)
 {
 	u32 retval;
@@ -364,11 +378,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
 		ah->config.spurchans[i][1] = AR_NO_SPUR;
 	}
 
-	if (ah->hw_version.devid != AR2427_DEVID_PCIE)
-		ah->config.ht_enable = 1;
-	else
-		ah->config.ht_enable = 0;
-
 	/* PAPRD needs some more work to be enabled */
 	ah->config.paprd_disable = 1;
 
@@ -410,6 +419,8 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
 	ah->sta_id1_defaults =
 		AR_STA_ID1_CRPT_MIC_ENABLE |
 		AR_STA_ID1_MCAST_KSRCH;
+	if (AR_SREV_9100(ah))
+		ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
 	ah->enable_32kHz_clock = DONT_USE_32KHZ;
 	ah->slottime = 20;
 	ah->globaltxtimeout = (u32) -1;
@@ -673,14 +684,14 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
 
 unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
 {
-	REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
+	REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 	udelay(100);
-	REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
+	REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 
 	while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
 		udelay(100);
 
 	return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
 }
 EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
 
@@ -830,8 +841,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
 		ah->misc_mode);
 
 	if (ah->misc_mode != 0)
-		REG_WRITE(ah, AR_PCU_MISC,
-			  REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
+		REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);
 
 	if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
 		sifstime = 16;
@@ -899,23 +909,19 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
 static inline void ath9k_hw_set_dma(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	u32 regval;
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
 	/*
 	 * set AHB_MODE not to do cacheline prefetches
 	 */
-	if (!AR_SREV_9300_20_OR_LATER(ah)) {
-		regval = REG_READ(ah, AR_AHB_MODE);
-		REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
-	}
+	if (!AR_SREV_9300_20_OR_LATER(ah))
+		REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN);
 
 	/*
 	 * let mac dma reads be in 128 byte chunks
 	 */
-	regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
-	REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
+	REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
@@ -932,8 +938,7 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
 	/*
 	 * let mac dma writes be in 128 byte chunks
 	 */
-	regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
-	REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
+	REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK);
 
 	/*
 	 * Setup receive FIFO threshold to hold off TX activities
@@ -972,30 +977,27 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
 
 static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
 {
-	u32 val;
+	u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
+	u32 set = AR_STA_ID1_KSRCH_MODE;
 
-	val = REG_READ(ah, AR_STA_ID1);
-	val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
 	switch (opmode) {
-	case NL80211_IFTYPE_AP:
-		REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
-			  | AR_STA_ID1_KSRCH_MODE);
-		REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
-		break;
 	case NL80211_IFTYPE_ADHOC:
 	case NL80211_IFTYPE_MESH_POINT:
-		REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
-			  | AR_STA_ID1_KSRCH_MODE);
+		set |= AR_STA_ID1_ADHOC;
 		REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
 		break;
+	case NL80211_IFTYPE_AP:
+		set |= AR_STA_ID1_STA_AP;
+		/* fall through */
 	case NL80211_IFTYPE_STATION:
-		REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+		REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
 		break;
 	default:
-		if (ah->is_monitoring)
-			REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+		if (!ah->is_monitoring)
+			set = 0;
 		break;
 	}
+	REG_RMW(ah, AR_STA_ID1, set, mask);
 }
 
 void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
@@ -1021,10 +1023,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
 	u32 tmpReg;
 
 	if (AR_SREV_9100(ah)) {
-		u32 val = REG_READ(ah, AR_RTC_DERIVED_CLK);
-		val &= ~AR_RTC_DERIVED_CLK_PERIOD;
-		val |= SM(1, AR_RTC_DERIVED_CLK_PERIOD);
-		REG_WRITE(ah, AR_RTC_DERIVED_CLK, val);
+		REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
+			      AR_RTC_DERIVED_CLK_PERIOD, 1);
 		(void)REG_READ(ah, AR_RTC_DERIVED_CLK);
 	}
 
@@ -1212,6 +1212,20 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
 	return true;
 }
 
+static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
+{
+	u32 gpio_mask = ah->gpio_mask;
+	int i;
+
+	for (i = 0; gpio_mask; i++, gpio_mask >>= 1) {
+		if (!(gpio_mask & 1))
+			continue;
+
+		ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+		ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
+	}
+}
+
 bool ath9k_hw_check_alive(struct ath_hw *ah)
 {
 	int count = 50;
@@ -1418,7 +1432,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	REGWRITE_BUFFER_FLUSH(ah);
 
 	ah->intr_txqs = 0;
-	for (i = 0; i < ah->caps.total_queues; i++)
+	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 		ath9k_hw_resettxqueue(ah, i);
 
 	ath9k_hw_init_interrupt_masks(ah, ah->opmode);
@@ -1435,8 +1449,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		ar9002_hw_enable_wep_aggregation(ah);
 	}
 
-	REG_WRITE(ah, AR_STA_ID1,
-		  REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
+	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
 
 	ath9k_hw_set_dma(ah);
 
@@ -1500,6 +1513,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	if (AR_SREV_9300_20_OR_LATER(ah))
 		ar9003_hw_bb_watchdog_config(ah);
 
+	ath9k_hw_apply_gpio_override(ah);
+
 	return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_reset);
@@ -1679,21 +1694,15 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 	case NL80211_IFTYPE_MESH_POINT:
 		REG_SET_BIT(ah, AR_TXCFG,
 			    AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
-		REG_WRITE(ah, AR_NEXT_NDP_TIMER,
-			  TU_TO_USEC(next_beacon +
-				     (ah->atim_window ? ah->
-				      atim_window : 1)));
+		REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
+			  TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
 		flags |= AR_NDP_TIMER_EN;
 	case NL80211_IFTYPE_AP:
-		REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
-		REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
-			  TU_TO_USEC(next_beacon -
-				     ah->config.
-				     dma_beacon_response_time));
-		REG_WRITE(ah, AR_NEXT_SWBA,
-			  TU_TO_USEC(next_beacon -
-				     ah->config.
-				     sw_beacon_response_time));
+		REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
+		REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
+			  TU_TO_USEC(ah->config.dma_beacon_response_time));
+		REG_WRITE(ah, AR_NEXT_SWBA, next_beacon -
+			  TU_TO_USEC(ah->config.sw_beacon_response_time));
 		flags |=
 			AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
 		break;
@@ -1705,18 +1714,13 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 		break;
 	}
 
-	REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
-	REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period));
-	REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
-	REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
+	REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
+	REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
+	REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
+	REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
-	beacon_period &= ~ATH9K_BEACON_ENA;
-	if (beacon_period & ATH9K_BEACON_RESET_TSF) {
-		ath9k_hw_reset_tsf(ah);
-	}
-
 	REG_SET_BIT(ah, AR_TIMER_MODE, flags);
 }
 EXPORT_SYMBOL(ath9k_hw_beaconinit);
@@ -1851,6 +1855,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 	    !(AR_SREV_9271(ah)))
 		/* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
 		pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
+	else if (AR_SREV_9100(ah))
+		pCap->rx_chainmask = 0x7;
 	else
 		/* Use rx_chainmask from EEPROM. */
 		pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
@@ -1869,28 +1875,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 
 	common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
 
-	if (ah->config.ht_enable)
+	if (ah->hw_version.devid != AR2427_DEVID_PCIE)
 		pCap->hw_caps |= ATH9K_HW_CAP_HT;
 	else
 		pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
 
-	if (capField & AR_EEPROM_EEPCAP_MAXQCU)
-		pCap->total_queues =
-			MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
-	else
-		pCap->total_queues = ATH9K_NUM_TX_QUEUES;
-
-	if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
-		pCap->keycache_size =
-			1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
-	else
-		pCap->keycache_size = AR_KEYTABLE_SIZE;
-
-	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
-		pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
-	else
-		pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
-
 	if (AR_SREV_9271(ah))
 		pCap->num_gpio_pins = AR9271_NUM_GPIO;
 	else if (AR_DEVID_7010(ah))
@@ -1909,8 +1898,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 		pCap->rts_aggr_limit = (8 * 1024);
 	}
 
-	pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
-
 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
 	ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
 	if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
@@ -1932,23 +1919,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 	else
 		pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
 
-	if (regulatory->current_rd_ext & (1 << REG_EXT_JAPAN_MIDBAND)) {
-		pCap->reg_cap =
-			AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
-			AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
-			AR_EEPROM_EEREGCAP_EN_KK_U2 |
-			AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
-	} else {
-		pCap->reg_cap =
-			AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
-			AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
-	}
-
-	/* Advertise midband for AR5416 with FCC midband set in eeprom */
-	if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) &&
-	    AR_SREV_5416(ah))
-		pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
-
 	if (AR_SREV_9280_20_OR_LATER(ah) && common->btcoex_enabled) {
 		btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
 		btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
@@ -2195,11 +2165,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
 	REG_WRITE(ah, AR_PHY_ERR, phybits);
 
 	if (phybits)
-		REG_WRITE(ah, AR_RXCFG,
-			  REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
+		REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
 	else
-		REG_WRITE(ah, AR_RXCFG,
-			  REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
+		REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 }
@@ -2375,10 +2343,11 @@ static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
 	return timer_table->gen_timer_index[b];
 }
 
-static u32 ath9k_hw_gettsf32(struct ath_hw *ah)
+u32 ath9k_hw_gettsf32(struct ath_hw *ah)
 {
 	return REG_READ(ah, AR_TSF_L32);
 }
+EXPORT_SYMBOL(ath9k_hw_gettsf32);
 
 struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
 					  void (*trigger)(void *),
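
Note: ath9k_hw_apply_gpio_override(), added above, walks gpio_mask bit by bit and drives each masked pin to the matching bit of gpio_val after every chip reset. A self-contained model of that loop; the two callbacks are stand-ins for the real GPIO helpers:

    #include <stdio.h>
    #include <stdint.h>

    static void cfg_output(int pin)          { printf("pin %d -> output\n", pin); }
    static void set_gpio(int pin, int value) { printf("pin %d = %d\n", pin, value); }

    /* Drive every pin named in 'mask' to the corresponding bit of 'val'. */
    static void apply_gpio_override(uint32_t mask, uint32_t val)
    {
            int i;

            for (i = 0; mask; i++, mask >>= 1) {
                    if (!(mask & 1))
                            continue;
                    cfg_output(i);
                    set_gpio(i, !!(val & (1u << i)));
            }
    }

    int main(void)
    {
            apply_gpio_override(0x06, 0x02); /* pins 1,2; pin 1 high, pin 2 low */
            return 0;
    }
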
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 6650fd48415..4cc320bdf0a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -65,53 +65,49 @@
 
 /* Register read/write primitives */
 #define REG_WRITE(_ah, _reg, _val) \
-	ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
+	(_ah)->reg_ops.write((_ah), (_val), (_reg))
 
 #define REG_READ(_ah, _reg) \
-	ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+	(_ah)->reg_ops.read((_ah), (_reg))
 
 #define REG_READ_MULTI(_ah, _addr, _val, _cnt) \
-	ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt))
+	(_ah)->reg_ops.multi_read((_ah), (_addr), (_val), (_cnt))
+
+#define REG_RMW(_ah, _reg, _set, _clr) \
+	(_ah)->reg_ops.rmw((_ah), (_reg), (_set), (_clr))
 
 #define ENABLE_REGWRITE_BUFFER(_ah)					\
 	do {								\
-		if (ath9k_hw_common(_ah)->ops->enable_write_buffer)	\
-			ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \
+		if ((_ah)->reg_ops.enable_write_buffer)			\
+			(_ah)->reg_ops.enable_write_buffer((_ah));	\
 	} while (0)
 
 #define REGWRITE_BUFFER_FLUSH(_ah)					\
 	do {								\
-		if (ath9k_hw_common(_ah)->ops->write_flush)		\
-			ath9k_hw_common(_ah)->ops->write_flush((_ah));	\
+		if ((_ah)->reg_ops.write_flush)				\
+			(_ah)->reg_ops.write_flush((_ah));		\
 	} while (0)
 
 #define SM(_v, _f)  (((_v) << _f##_S) & _f)
 #define MS(_v, _f)  (((_v) & _f) >> _f##_S)
-#define REG_RMW(_a, _r, _set, _clr)    \
-	REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
 #define REG_RMW_FIELD(_a, _r, _f, _v) \
-	REG_WRITE(_a, _r, \
-	(REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
+	REG_RMW(_a, _r, (((_v) << _f##_S) & _f), (_f))
 #define REG_READ_FIELD(_a, _r, _f) \
 	(((REG_READ(_a, _r) & _f) >> _f##_S))
 #define REG_SET_BIT(_a, _r, _f) \
-	REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f))
+	REG_RMW(_a, _r, (_f), 0)
 #define REG_CLR_BIT(_a, _r, _f) \
-	REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f))
+	REG_RMW(_a, _r, 0, (_f))
 
 #define DO_DELAY(x) do {					\
-		if ((++(x) % 64) == 0)				\
-			udelay(1);				\
+		if (((++(x) % 64) == 0) &&			\
+		    (ath9k_hw_common(ah)->bus_ops->ath_bus_type	\
+			!= ATH_USB))				\
+			udelay(1);				\
 	} while (0)
 
-#define REG_WRITE_ARRAY(iniarray, column, regWr) do {			\
-		int r;							\
-		for (r = 0; r < ((iniarray)->ia_rows); r++) {		\
-			REG_WRITE(ah, INI_RA((iniarray), (r), 0),	\
-				  INI_RA((iniarray), r, (column)));	\
-			DO_DELAY(regWr);				\
-		}							\
-	} while (0)
+#define REG_WRITE_ARRAY(iniarray, column, regWr) \
+	ath9k_hw_write_array(ah, iniarray, column, &(regWr))
 
 #define AR_GPIO_OUTPUT_MUX_AS_OUTPUT             0
 #define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
@@ -178,7 +174,6 @@ enum ath9k_hw_caps {
 	ATH9K_HW_CAP_HT                 = BIT(0),
 	ATH9K_HW_CAP_RFSILENT           = BIT(1),
 	ATH9K_HW_CAP_CST                = BIT(2),
-	ATH9K_HW_CAP_ENHANCEDPM         = BIT(3),
 	ATH9K_HW_CAP_AUTOSLEEP          = BIT(4),
 	ATH9K_HW_CAP_4KB_SPLITTRANS     = BIT(5),
 	ATH9K_HW_CAP_EDMA               = BIT(6),
@@ -195,8 +190,6 @@ enum ath9k_hw_caps {
 
 struct ath9k_hw_capabilities {
 	u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
-	u16 total_queues;
-	u16 keycache_size;
 	u16 low_5ghz_chan, high_5ghz_chan;
 	u16 low_2ghz_chan, high_2ghz_chan;
 	u16 rts_aggr_limit;
@@ -204,8 +197,6 @@ struct ath9k_hw_capabilities {
 	u8 rx_chainmask;
 	u8 max_txchains;
 	u8 max_rxchains;
-	u16 tx_triglevel_max;
-	u16 reg_cap;
 	u8 num_gpio_pins;
 	u8 rx_hp_qdepth;
 	u8 rx_lp_qdepth;
@@ -227,7 +218,6 @@ struct ath9k_ops_config {
 	u8 pcie_clock_req;
 	u32 pcie_waen;
 	u8 analog_shiftreg;
-	u8 ht_enable;
 	u8 paprd_disable;
 	u32 ofdm_trig_low;
 	u32 ofdm_trig_high;
@@ -412,8 +402,6 @@ struct ath9k_beacon_state {
 	u32 bs_nextdtim;
 	u32 bs_intval;
 #define ATH9K_BEACON_PERIOD       0x0000ffff
-#define ATH9K_BEACON_ENA          0x00800000
-#define ATH9K_BEACON_RESET_TSF    0x01000000
 #define ATH9K_TSFOOR_THRESHOLD    0x00004240 /* 16k us */
 	u32 bs_dtimperiod;
 	u16 bs_cfpperiod;
@@ -655,6 +643,8 @@ struct ath_nf_limits {
 #define AH_UNPLUGGED    0x2 /* The card has been physically removed. */
 
 struct ath_hw {
+	struct ath_ops reg_ops;
+
 	struct ieee80211_hw *hw;
 	struct ath_common common;
 	struct ath9k_hw_version hw_version;
@@ -794,7 +784,9 @@ struct ath_hw {
 	u32 originalGain[22];
 	int initPDADC;
 	int PDADCdelta;
-	u8 led_pin;
+	int led_pin;
+	u32 gpio_mask;
+	u32 gpio_val;
 
 	struct ar5416IniArray iniModes;
 	struct ar5416IniArray iniCommon;
@@ -907,6 +899,8 @@ void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
 
 /* General Operation */
 bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
+void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
+			  int column, unsigned int *writecnt);
 u32 ath9k_hw_reverse_bits(u32 val, u32 n);
 bool ath9k_get_channel_edges(struct ath_hw *ah, u16 flags, u16 *low, u16 *high);
 u16 ath9k_hw_computetxtime(struct ath_hw *ah,
@@ -924,6 +918,7 @@ void ath9k_hw_setopmode(struct ath_hw *ah);
 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
 void ath9k_hw_setbssidmask(struct ath_hw *ah);
 void ath9k_hw_write_associd(struct ath_hw *ah);
+u32 ath9k_hw_gettsf32(struct ath_hw *ah);
 u64 ath9k_hw_gettsf64(struct ath_hw *ah);
 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
 void ath9k_hw_reset_tsf(struct ath_hw *ah);
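
Note: after this hw.h hunk every bit-manipulation helper funnels through the per-device rmw hook: REG_SET_BIT(ah, r, f) becomes REG_RMW(ah, r, f, 0), REG_CLR_BIT becomes REG_RMW(ah, r, 0, f), and REG_RMW_FIELD shifts the value into the field before passing both masks. A compile-time sketch of those expansions, with a plain function standing in for the reg_ops.rmw hook:

    #include <stdint.h>
    #include <assert.h>

    /* Stand-in for (_ah)->reg_ops.rmw(); operates on a plain variable. */
    static uint32_t fake_reg;
    static uint32_t reg_rmw(uint32_t set, uint32_t clr)
    {
            fake_reg = (fake_reg & ~clr) | set;
            return fake_reg;
    }

    #define SET_BIT(_f)     reg_rmw((_f), 0)
    #define CLR_BIT(_f)     reg_rmw(0, (_f))
    #define FIELD           0x00f0          /* example 4-bit field */
    #define FIELD_S         4
    #define RMW_FIELD(_v)   reg_rmw((((_v) << FIELD_S) & FIELD), FIELD)

    int main(void)
    {
            fake_reg = 0;
            SET_BIT(0x1);   assert(fake_reg == 0x001);
            RMW_FIELD(0xa); assert(fake_reg == 0x0a1);
            CLR_BIT(0x1);   assert(fake_reg == 0x0a0);
            return 0;
    }
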
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 79aec983279..1ac8318d82a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/ath9k_platform.h>
 
 #include "ath9k.h"
 
@@ -195,10 +196,27 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
 	return val;
 }
 
-static const struct ath_ops ath9k_common_ops = {
-	.read = ath9k_ioread32,
-	.write = ath9k_iowrite32,
-};
+static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+{
+	struct ath_hw *ah = (struct ath_hw *) hw_priv;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+	unsigned long uninitialized_var(flags);
+	u32 val;
+
+	if (ah->config.serialize_regmode == SER_REG_MODE_ON)
+		spin_lock_irqsave(&sc->sc_serial_rw, flags);
+
+	val = ioread32(sc->mem + reg_offset);
+	val &= ~clr;
+	val |= set;
+	iowrite32(val, sc->mem + reg_offset);
+
+	if (ah->config.serialize_regmode == SER_REG_MODE_ON)
+		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+
+	return val;
+}
 
 /**************************/
 /*     Initialization     */
@@ -389,13 +407,7 @@ void ath9k_init_crypto(struct ath_softc *sc)
 	int i = 0;
 
 	/* Get the hardware key cache size. */
-	common->keymax = sc->sc_ah->caps.keycache_size;
-	if (common->keymax > ATH_KEYMAX) {
-		ath_dbg(common, ATH_DBG_ANY,
-			"Warning, using only %u entries in %u key cache\n",
-			ATH_KEYMAX, common->keymax);
-		common->keymax = ATH_KEYMAX;
-	}
+	common->keymax = AR_KEYTABLE_SIZE;
 
 	/*
 	 * Reset the key cache since some parts do not
@@ -537,6 +549,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
 static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
 			    const struct ath_bus_ops *bus_ops)
 {
+	struct ath9k_platform_data *pdata = sc->dev->platform_data;
 	struct ath_hw *ah = NULL;
 	struct ath_common *common;
 	int ret = 0, i;
@@ -549,13 +562,22 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
 	ah->hw = sc->hw;
 	ah->hw_version.devid = devid;
 	ah->hw_version.subsysid = subsysid;
+	ah->reg_ops.read = ath9k_ioread32;
+	ah->reg_ops.write = ath9k_iowrite32;
+	ah->reg_ops.rmw = ath9k_reg_rmw;
 	sc->sc_ah = ah;
 
-	if (!sc->dev->platform_data)
+	if (!pdata) {
 		ah->ah_flags |= AH_USE_EEPROM;
+		sc->sc_ah->led_pin = -1;
+	} else {
+		sc->sc_ah->gpio_mask = pdata->gpio_mask;
+		sc->sc_ah->gpio_val = pdata->gpio_val;
+		sc->sc_ah->led_pin = pdata->led_pin;
+	}
 
 	common = ath9k_hw_common(ah);
-	common->ops = &ath9k_common_ops;
+	common->ops = &ah->reg_ops;
 	common->bus_ops = bus_ops;
 	common->ah = ah;
 	common->hw = sc->hw;
@@ -587,6 +609,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
 	if (ret)
 		goto err_hw;
 
+	if (pdata && pdata->macaddr)
+		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);
+
 	ret = ath9k_init_queues(sc);
 	if (ret)
 		goto err_queues;
@@ -679,6 +704,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 	if (AR_SREV_5416(sc->sc_ah))
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
 	hw->queues = 4;
 	hw->max_rates = 4;
 	hw->channel_change_time = 5000;
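
Note: the init.c changes let embedded boards (typically AR91xx SoCs without an EEPROM) hand the driver a MAC address, GPIO defaults and an LED pin through platform data. A hedged sketch of how a board file might populate such a structure; the field names follow the hunk above, but the struct layout here is illustrative and not copied from linux/ath9k_platform.h:

    #include <stdint.h>
    #include <string.h>

    /* Illustrative subset of the platform data consumed by the hunk above. */
    struct ath9k_platform_data_sketch {
            uint8_t  macaddr[6];
            uint32_t gpio_mask;
            uint32_t gpio_val;
            int      led_pin;
    };

    static struct ath9k_platform_data_sketch board_wifi_data = {
            .gpio_mask = 0x0002,    /* pin 1 under driver control */
            .gpio_val  = 0x0002,    /* ...driven high after reset */
            .led_pin   = 1,
    };

    static void board_init(const uint8_t *mac_from_flash)
    {
            memcpy(board_wifi_data.macaddr, mac_from_flash, 6);
            /* a real board file would register this as dev->platform_data */
    }
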
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index cb5d81426d5..05efcfbeead 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -209,15 +209,8 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
 {
 	u32 cw;
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath9k_tx_queue_info *qi;
 
-	if (q >= pCap->total_queues) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"Set TXQ properties, invalid queue: %u\n", q);
-		return false;
-	}
-
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
 		ath_dbg(common, ATH_DBG_QUEUE,
@@ -280,15 +273,8 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
 			    struct ath9k_tx_queue_info *qinfo)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath9k_tx_queue_info *qi;
 
-	if (q >= pCap->total_queues) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"Get TXQ properties, invalid queue: %u\n", q);
-		return false;
-	}
-
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
 		ath_dbg(common, ATH_DBG_QUEUE,
@@ -320,28 +306,27 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_tx_queue_info *qi;
-	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	int q;
 
 	switch (type) {
 	case ATH9K_TX_QUEUE_BEACON:
-		q = pCap->total_queues - 1;
+		q = ATH9K_NUM_TX_QUEUES - 1;
 		break;
 	case ATH9K_TX_QUEUE_CAB:
-		q = pCap->total_queues - 2;
+		q = ATH9K_NUM_TX_QUEUES - 2;
 		break;
 	case ATH9K_TX_QUEUE_PSPOLL:
 		q = 1;
 		break;
 	case ATH9K_TX_QUEUE_UAPSD:
-		q = pCap->total_queues - 3;
+		q = ATH9K_NUM_TX_QUEUES - 3;
 		break;
 	case ATH9K_TX_QUEUE_DATA:
-		for (q = 0; q < pCap->total_queues; q++)
+		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
 			if (ah->txq[q].tqi_type ==
 			    ATH9K_TX_QUEUE_INACTIVE)
 				break;
-		if (q == pCap->total_queues) {
+		if (q == ATH9K_NUM_TX_QUEUES) {
 			ath_err(common, "No available TX queue\n");
 			return -1;
 		}
@@ -382,15 +367,9 @@ EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
 
 bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
 {
-	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_tx_queue_info *qi;
 
-	if (q >= pCap->total_queues) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"Release TXQ, invalid queue: %u\n", q);
-		return false;
-	}
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
 		ath_dbg(common, ATH_DBG_QUEUE,
@@ -414,18 +393,11 @@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
 
 bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 {
-	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_channel *chan = ah->curchan;
 	struct ath9k_tx_queue_info *qi;
 	u32 cwMin, chanCwMin, value;
 
-	if (q >= pCap->total_queues) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"Reset TXQ, invalid queue: %u\n", q);
-		return false;
-	}
-
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
 		ath_dbg(common, ATH_DBG_QUEUE,
@@ -465,10 +437,9 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 		REG_WRITE(ah, AR_QCBRCFG(q),
 			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
 			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
-		REG_WRITE(ah, AR_QMISC(q),
-			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
-			  (qi->tqi_cbrOverflowLimit ?
-			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
+		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
+			    (qi->tqi_cbrOverflowLimit ?
+			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
 	}
 	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
 		REG_WRITE(ah, AR_QRDYTIMECFG(q),
@@ -481,40 +452,31 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
 
 	if (qi->tqi_burstTime
-	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
-		REG_WRITE(ah, AR_QMISC(q),
-			  REG_READ(ah, AR_QMISC(q)) |
-			  AR_Q_MISC_RDYTIME_EXP_POLICY);
-
-	}
+	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
+		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);
 
-	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
-		REG_WRITE(ah, AR_DMISC(q),
-			  REG_READ(ah, AR_DMISC(q)) |
-			  AR_D_MISC_POST_FR_BKOFF_DIS);
-	}
+	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
+		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
-	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
-		REG_WRITE(ah, AR_DMISC(q),
-			  REG_READ(ah, AR_DMISC(q)) |
-			  AR_D_MISC_FRAG_BKOFF_EN);
-	}
+	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
+		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);
+
 	switch (qi->tqi_type) {
 	case ATH9K_TX_QUEUE_BEACON:
 		ENABLE_REGWRITE_BUFFER(ah);
 
-		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
-			  | AR_Q_MISC_FSP_DBA_GATED
-			  | AR_Q_MISC_BEACON_USE
-			  | AR_Q_MISC_CBR_INCR_DIS1);
+		REG_SET_BIT(ah, AR_QMISC(q),
+			    AR_Q_MISC_FSP_DBA_GATED
+			    | AR_Q_MISC_BEACON_USE
+			    | AR_Q_MISC_CBR_INCR_DIS1);
 
-		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
-			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
-			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
-			  | AR_D_MISC_BEACON_USE
-			  | AR_D_MISC_POST_FR_BKOFF_DIS);
+		REG_SET_BIT(ah, AR_DMISC(q),
+			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
+			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
+			    | AR_D_MISC_BEACON_USE
+			    | AR_D_MISC_POST_FR_BKOFF_DIS);
 
 		REGWRITE_BUFFER_FLUSH(ah);
 
@@ -533,41 +495,38 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 	case ATH9K_TX_QUEUE_CAB:
 		ENABLE_REGWRITE_BUFFER(ah);
 
-		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
-			  | AR_Q_MISC_FSP_DBA_GATED
-			  | AR_Q_MISC_CBR_INCR_DIS1
-			  | AR_Q_MISC_CBR_INCR_DIS0);
+		REG_SET_BIT(ah, AR_QMISC(q),
+			    AR_Q_MISC_FSP_DBA_GATED
+			    | AR_Q_MISC_CBR_INCR_DIS1
+			    | AR_Q_MISC_CBR_INCR_DIS0);
 		value = (qi->tqi_readyTime -
 			 (ah->config.sw_beacon_response_time -
 			  ah->config.dma_beacon_response_time) -
 			 ah->config.additional_swba_backoff) * 1024;
 		REG_WRITE(ah, AR_QRDYTIMECFG(q),
 			  value | AR_Q_RDYTIMECFG_EN);
-		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
-			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
+		REG_SET_BIT(ah, AR_DMISC(q),
+			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
 			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
 
 		REGWRITE_BUFFER_FLUSH(ah);
 
 		break;
 	case ATH9K_TX_QUEUE_PSPOLL:
-		REG_WRITE(ah, AR_QMISC(q),
-			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
+		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
 		break;
 	case ATH9K_TX_QUEUE_UAPSD:
-		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
-			  AR_D_MISC_POST_FR_BKOFF_DIS);
+		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
 		break;
 	default:
 		break;
 	}
 
 	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
-		REG_WRITE(ah, AR_DMISC(q),
-			  REG_READ(ah, AR_DMISC(q)) |
-			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
-			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
-			  AR_D_MISC_POST_FR_BKOFF_DIS);
+		REG_SET_BIT(ah, AR_DMISC(q),
+			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
+			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
+			    AR_D_MISC_POST_FR_BKOFF_DIS);
 	}
 
 	if (AR_SREV_9300_20_OR_LATER(ah))
@@ -866,7 +825,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!(ints & ATH9K_INT_GLOBAL))
-		ath9k_hw_enable_interrupts(ah);
+		ath9k_hw_disable_interrupts(ah);
 
 	ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
 
@@ -944,7 +903,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
 		REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
 	}
 
-	ath9k_hw_enable_interrupts(ah);
+	if (ints & ATH9K_INT_GLOBAL)
+		ath9k_hw_enable_interrupts(ah);
 
 	return;
 }
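
Note: with total_queues gone, the TX queue map is fixed at ATH9K_NUM_TX_QUEUES: beacon, CAB and UAPSD take the top three slots, PS-poll uses queue 1, and data queues search upward from 0 for an inactive slot. A small model of that mapping, assuming ATH9K_NUM_TX_QUEUES == 10 as on this hardware:

    #include <stdio.h>

    #define NUM_TX_QUEUES 10    /* assumption: matches ATH9K_NUM_TX_QUEUES */

    enum qtype { Q_BEACON, Q_CAB, Q_PSPOLL, Q_UAPSD, Q_DATA };

    /* Mirror of the fixed mapping in ath9k_hw_setuptxqueue() after this patch. */
    static int map_queue(enum qtype t, int first_free_data_q)
    {
            switch (t) {
            case Q_BEACON: return NUM_TX_QUEUES - 1;    /* 9 */
            case Q_CAB:    return NUM_TX_QUEUES - 2;    /* 8 */
            case Q_UAPSD:  return NUM_TX_QUEUES - 3;    /* 7 */
            case Q_PSPOLL: return 1;
            case Q_DATA:   return first_free_data_q;    /* lowest inactive */
            }
            return -1;
    }

    int main(void)
    {
            printf("beacon=%d cab=%d uapsd=%d\n",
                   map_queue(Q_BEACON, 0), map_queue(Q_CAB, 0),
                   map_queue(Q_UAPSD, 0));
            return 0;
    }
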
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 115f162c617..3c5de73dcb4 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1845,6 +1845,20 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
 	if (ath9k_modparam_nohwcrypt)
 		return -ENOSPC;
 
+	if (vif->type == NL80211_IFTYPE_ADHOC &&
+	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		/*
+		 * For now, disable hw crypto for the RSN IBSS group keys. This
+		 * could be optimized in the future to use a modified key cache
+		 * design to support per-STA RX GTK, but until that gets
+		 * implemented, use of software crypto for group addressed
+		 * frames is acceptable to allow RSN IBSS to be used.
+		 */
+		return -EOPNOTSUPP;
+	}
+
 	mutex_lock(&sc->mutex);
 	ath9k_ps_wakeup(sc);
 	ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
@@ -2160,6 +2174,8 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 	if (!ath_drain_all_txq(sc, false))
 		ath_reset(sc, false);
 
+	ieee80211_wake_queues(hw);
+
 out:
 	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
 	mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 5e3d7496986..f50e2c29f71 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -38,25 +38,11 @@
 #define AR_PHY_CLC_Q0		0x0000ffd0
 #define AR_PHY_CLC_Q0_S		5
 
-#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do {		\
-		int r;							\
-		for (r = 0; r < ((iniarray)->ia_rows); r++) {		\
-			REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \
-			DO_DELAY(regWr);				\
-		}							\
-	} while (0)
-
 #define ANTSWAP_AB		0x0001
 #define REDUCE_CHAIN_0		0x00000050
 #define REDUCE_CHAIN_1		0x00000051
 #define AR_PHY_CHIP_ID		0x9818
 
-#define RF_BANK_SETUP(_bank, _iniarray, _col) do {			\
-		int i;							\
-		for (i = 0; i < (_iniarray)->ia_rows; i++)		\
-			(_bank)[i] = INI_RA((_iniarray), i, _col);;	\
-	} while (0)
-
 #define AR_PHY_TIMING11_SPUR_FREQ_SD	0x3FF00000
 #define AR_PHY_TIMING11_SPUR_FREQ_SD_S	20
 
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 960d717ca7c..a3241cd089b 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1328,7 +1328,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = hdr->frame_control;
-	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+	for (i = 0; i < sc->hw->max_rates; i++) {
 		struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
 		if (!rate->count)
 			break;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8fa8acfde62..693d543937b 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1396,6 +1396,7 @@ enum {
 #define AR_STA_ID1_PCF			0x00100000
 #define AR_STA_ID1_USE_DEFANT		0x00200000
 #define AR_STA_ID1_DEFANT_UPDATE	0x00400000
+#define AR_STA_ID1_AR9100_BA_FIX	0x00400000
 #define AR_STA_ID1_RTS_USE_DEF		0x00800000
 #define AR_STA_ID1_ACKCTS_6MB		0x01000000
 #define AR_STA_ID1_BASE_RATE_11B	0x02000000
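
Note: the new AR_STA_ID1_AR9100_BA_FIX shares bit 22 (0x00400000) with AR_STA_ID1_DEFANT_UPDATE; the same register bit evidently carries a different meaning on AR9100 parts. A one-line compile-time check of that aliasing, for illustration:

    #define AR_STA_ID1_DEFANT_UPDATE  0x00400000
    #define AR_STA_ID1_AR9100_BA_FIX  0x00400000

    /* Compile-time proof that the two names denote the same bit. */
    typedef char ba_fix_aliases_defant_update
            [(AR_STA_ID1_AR9100_BA_FIX == AR_STA_ID1_DEFANT_UPDATE) ? 1 : -1];
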
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index ef22096d40c..5943bdc4c8f 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,8 +1725,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 	u8 tidno;
 
 	spin_lock_bh(&txctl->txq->axq_lock);
-
-	if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
+	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
+	    ieee80211_is_data_qos(hdr->frame_control)) {
 		tidno = ieee80211_get_qos_ctl(hdr)[0] &
 			IEEE80211_QOS_CTL_TID_MASK;
 		tid = ATH_AN_2_TID(txctl->an, tidno);
@@ -1980,7 +1980,7 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 	if (ieee80211_is_data(hdr->frame_control) &&
 	    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
 			     ATH9K_TX_DELIM_UNDERRUN)) &&
-	    ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
+	    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
 		tx_info->status.rates[tx_rateindex].count =
 			hw->max_rate_tries;
 	}
@@ -2144,33 +2144,6 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
 			} else {
 				txq->axq_tx_inprogress = true;
 			}
-		} else {
-			/* If the queue has pending buffers, then it
-			 * should be doing tx work (and have axq_depth).
-			 * Shouldn't get to this state I think..but
-			 * we do.
-			 */
-			if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
-			    (txq->pending_frames > 0 ||
-			     !list_empty(&txq->axq_acq) ||
-			     txq->stopped)) {
-				ath_err(ath9k_hw_common(sc->sc_ah),
-					"txq: %p axq_qnum: %u,"
-					" mac80211_qnum: %i"
-					" axq_link: %p"
-					" pending frames: %i"
-					" axq_acq empty: %i"
-					" stopped: %i"
-					" axq_depth: 0 Attempting to"
-					" restart tx logic.\n",
-					txq, txq->axq_qnum,
-					txq->mac80211_qnum,
-					txq->axq_link,
-					txq->pending_frames,
-					list_empty(&txq->axq_acq),
-					txq->stopped);
-				ath_txq_schedule(sc, txq);
-			}
 		}
 		spin_unlock_bh(&txq->axq_lock);
 	}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e5964..f82c400be28 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
 	{ USB_DEVICE(0x057c, 0x8402) },
 	/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
 	{ USB_DEVICE(0x1668, 0x1200) },
+	/* Airlive X.USB a/b/g/n */
+	{ USB_DEVICE(0x1b75, 0x9170) },
 
 	/* terminate */
 	{}
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 37b8e115375..0d4f39cbdca 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -23,6 +23,14 @@
23 23
24#define REG_READ (common->ops->read) 24#define REG_READ (common->ops->read)
25#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg) 25#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
26#define ENABLE_REGWRITE_BUFFER(_ah) \
27 if (common->ops->enable_write_buffer) \
28 common->ops->enable_write_buffer((_ah));
29
30#define REGWRITE_BUFFER_FLUSH(_ah) \
31 if (common->ops->write_flush) \
32 common->ops->write_flush((_ah));
33
26 34
27#define IEEE80211_WEP_NKID 4 /* number of key ids */ 35#define IEEE80211_WEP_NKID 4 /* number of key ids */
28 36
@@ -42,6 +50,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
42 50
43 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry)); 51 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
44 52
53 ENABLE_REGWRITE_BUFFER(ah);
54
45 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0); 55 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
46 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0); 56 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
47 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0); 57 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
@@ -66,6 +76,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
66 76
67 } 77 }
68 78
79 REGWRITE_BUFFER_FLUSH(ah);
80
69 return true; 81 return true;
70} 82}
71EXPORT_SYMBOL(ath_hw_keyreset); 83EXPORT_SYMBOL(ath_hw_keyreset);
@@ -104,9 +116,13 @@ static bool ath_hw_keysetmac(struct ath_common *common,
104 } else { 116 } else {
105 macLo = macHi = 0; 117 macLo = macHi = 0;
106 } 118 }
119 ENABLE_REGWRITE_BUFFER(ah);
120
107 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); 121 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
108 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag); 122 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
109 123
124 REGWRITE_BUFFER_FLUSH(ah);
125
110 return true; 126 return true;
111} 127}
112 128
@@ -223,6 +239,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
223 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff; 239 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
224 mic4 = get_unaligned_le32(k->kv_txmic + 4); 240 mic4 = get_unaligned_le32(k->kv_txmic + 4);
225 241
242 ENABLE_REGWRITE_BUFFER(ah);
243
226 /* Write RX[31:0] and TX[31:16] */ 244 /* Write RX[31:0] and TX[31:16] */
227 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); 245 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
228 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1); 246 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
@@ -236,6 +254,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
236 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), 254 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
237 AR_KEYTABLE_TYPE_CLR); 255 AR_KEYTABLE_TYPE_CLR);
238 256
257 REGWRITE_BUFFER_FLUSH(ah);
258
239 } else { 259 } else {
240 /* 260 /*
241 * TKIP uses four key cache entries (two for group 261 * TKIP uses four key cache entries (two for group
@@ -258,6 +278,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
258 mic0 = get_unaligned_le32(k->kv_mic + 0); 278 mic0 = get_unaligned_le32(k->kv_mic + 0);
259 mic2 = get_unaligned_le32(k->kv_mic + 4); 279 mic2 = get_unaligned_le32(k->kv_mic + 4);
260 280
281 ENABLE_REGWRITE_BUFFER(ah);
282
261 /* Write MIC key[31:0] */ 283 /* Write MIC key[31:0] */
262 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); 284 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
263 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); 285 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
@@ -270,8 +292,12 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
270 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0); 292 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
271 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), 293 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
272 AR_KEYTABLE_TYPE_CLR); 294 AR_KEYTABLE_TYPE_CLR);
295
296 REGWRITE_BUFFER_FLUSH(ah);
273 } 297 }
274 298
299 ENABLE_REGWRITE_BUFFER(ah);
300
275 /* MAC address registers are reserved for the MIC entry */ 301 /* MAC address registers are reserved for the MIC entry */
276 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0); 302 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
277 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0); 303 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
@@ -283,7 +309,11 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
283 */ 309 */
284 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); 310 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
285 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); 311 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
312
313 REGWRITE_BUFFER_FLUSH(ah);
286 } else { 314 } else {
315 ENABLE_REGWRITE_BUFFER(ah);
316
287 /* Write key[47:0] */ 317 /* Write key[47:0] */
288 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); 318 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
289 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); 319 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
@@ -296,6 +326,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
296 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); 326 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
297 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); 327 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
298 328
329 REGWRITE_BUFFER_FLUSH(ah);
330
299 /* Write MAC address for the entry */ 331 /* Write MAC address for the entry */
300 (void) ath_hw_keysetmac(common, entry, mac); 332 (void) ath_hw_keysetmac(common, entry, mac);
301 } 333 }
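The two new macros bracket bursts of key-cache writes so that backends which can batch register writes (notably USB targets) see one buffered transfer instead of many round trips, while backends without the hooks fall through to plain writes. The optional-hook shape, reduced to a standalone sketch with hypothetical names:

    /* Call a buffering hook only if the backend provides one. */
    struct example_reg_ops {
            void (*enable_write_buffer)(void *hw);
            void (*write_flush)(void *hw);
    };

    static void example_write_burst(struct example_reg_ops *ops, void *hw)
    {
            if (ops->enable_write_buffer)
                    ops->enable_write_buffer(hw);
            /* ... a run of register writes goes here ... */
            if (ops->write_flush)
                    ops->write_flush(hw);
    }

As defined, ENABLE_REGWRITE_BUFFER and REGWRITE_BUFFER_FLUSH expand to bare if statements; the usual defensive style is to wrap such macro bodies in do { } while (0) so they nest safely under an if/else.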
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index 30acd39d76a..2c8f71f0ed4 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -30,9 +30,9 @@
30 30
31/* Following defines can be used to remove unneeded parts of the driver, e.g., 31/* Following defines can be used to remove unneeded parts of the driver, e.g.,
32 * to limit the size of the kernel module. Definitions can be added here in 32 * to limit the size of the kernel module. Definitions can be added here in
33 * hostap_config.h or they can be added to make command with EXTRA_CFLAGS, 33 * hostap_config.h or they can be added to make command with ccflags-y,
34 * e.g., 34 * e.g.,
35 * 'make pccard EXTRA_CFLAGS="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"' 35 * 'make pccard ccflags-y="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
36 */ 36 */
37 37
38/* Do not include debug messages into the driver */ 38/* Do not include debug messages into the driver */
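kbuild deprecated EXTRA_CFLAGS in favour of ccflags-y, so the comment is updated to the supported spelling; the defines themselves still act as ordinary compile-time switches. For illustration only (the macro below is hypothetical, not hostap's actual debug helper):

    #include <linux/kernel.h>

    #ifndef PRISM2_NO_DEBUG
    #define EXAMPLE_DEBUG(fmt, args...) printk(KERN_DEBUG fmt, ##args)
    #else
    #define EXAMPLE_DEBUG(fmt, args...) do { } while (0)
    #endif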
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index da60faee74f..4b97f918daf 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -706,11 +706,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
706 netif_stop_queue(priv->net_dev); 706 netif_stop_queue(priv->net_dev);
707 priv->status |= STATUS_RESET_PENDING; 707 priv->status |= STATUS_RESET_PENDING;
708 if (priv->reset_backoff) 708 if (priv->reset_backoff)
709 queue_delayed_work(priv->workqueue, &priv->reset_work, 709 schedule_delayed_work(&priv->reset_work,
710 priv->reset_backoff * HZ); 710 priv->reset_backoff * HZ);
711 else 711 else
712 queue_delayed_work(priv->workqueue, &priv->reset_work, 712 schedule_delayed_work(&priv->reset_work, 0);
713 0);
714 713
715 if (priv->reset_backoff < MAX_RESET_BACKOFF) 714 if (priv->reset_backoff < MAX_RESET_BACKOFF)
716 priv->reset_backoff++; 715 priv->reset_backoff++;
@@ -1474,7 +1473,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
1474 1473
1475 if (priv->stop_hang_check) { 1474 if (priv->stop_hang_check) {
1476 priv->stop_hang_check = 0; 1475 priv->stop_hang_check = 0;
1477 queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); 1476 schedule_delayed_work(&priv->hang_check, HZ / 2);
1478 } 1477 }
1479 1478
1480 fail_up: 1479 fail_up:
@@ -1808,8 +1807,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1808 1807
1809 if (priv->stop_rf_kill) { 1808 if (priv->stop_rf_kill) {
1810 priv->stop_rf_kill = 0; 1809 priv->stop_rf_kill = 0;
1811 queue_delayed_work(priv->workqueue, &priv->rf_kill, 1810 schedule_delayed_work(&priv->rf_kill,
1812 round_jiffies_relative(HZ)); 1811 round_jiffies_relative(HZ));
1813 } 1812 }
1814 1813
1815 deferred = 1; 1814 deferred = 1;
@@ -2086,7 +2085,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
2086 priv->status |= STATUS_ASSOCIATING; 2085 priv->status |= STATUS_ASSOCIATING;
2087 priv->connect_start = get_seconds(); 2086 priv->connect_start = get_seconds();
2088 2087
2089 queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10); 2088 schedule_delayed_work(&priv->wx_event_work, HZ / 10);
2090} 2089}
2091 2090
2092static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, 2091static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
@@ -2166,9 +2165,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2166 return; 2165 return;
2167 2166
2168 if (priv->status & STATUS_SECURITY_UPDATED) 2167 if (priv->status & STATUS_SECURITY_UPDATED)
2169 queue_delayed_work(priv->workqueue, &priv->security_work, 0); 2168 schedule_delayed_work(&priv->security_work, 0);
2170 2169
2171 queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); 2170 schedule_delayed_work(&priv->wx_event_work, 0);
2172} 2171}
2173 2172
2174static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) 2173static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -2183,8 +2182,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
2183 /* Make sure the RF Kill check timer is running */ 2182 /* Make sure the RF Kill check timer is running */
2184 priv->stop_rf_kill = 0; 2183 priv->stop_rf_kill = 0;
2185 cancel_delayed_work(&priv->rf_kill); 2184 cancel_delayed_work(&priv->rf_kill);
2186 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2185 schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
2187 round_jiffies_relative(HZ));
2188} 2186}
2189 2187
2190static void send_scan_event(void *data) 2188static void send_scan_event(void *data)
@@ -2219,13 +2217,12 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
2219 /* Only userspace-requested scan completion events go out immediately */ 2217 /* Only userspace-requested scan completion events go out immediately */
2220 if (!priv->user_requested_scan) { 2218 if (!priv->user_requested_scan) {
2221 if (!delayed_work_pending(&priv->scan_event_later)) 2219 if (!delayed_work_pending(&priv->scan_event_later))
2222 queue_delayed_work(priv->workqueue, 2220 schedule_delayed_work(&priv->scan_event_later,
2223 &priv->scan_event_later, 2221 round_jiffies_relative(msecs_to_jiffies(4000)));
2224 round_jiffies_relative(msecs_to_jiffies(4000)));
2225 } else { 2222 } else {
2226 priv->user_requested_scan = 0; 2223 priv->user_requested_scan = 0;
2227 cancel_delayed_work(&priv->scan_event_later); 2224 cancel_delayed_work(&priv->scan_event_later);
2228 queue_work(priv->workqueue, &priv->scan_event_now); 2225 schedule_work(&priv->scan_event_now);
2229 } 2226 }
2230} 2227}
2231 2228
@@ -4329,8 +4326,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
4329 /* Make sure the RF_KILL check timer is running */ 4326 /* Make sure the RF_KILL check timer is running */
4330 priv->stop_rf_kill = 0; 4327 priv->stop_rf_kill = 0;
4331 cancel_delayed_work(&priv->rf_kill); 4328 cancel_delayed_work(&priv->rf_kill);
4332 queue_delayed_work(priv->workqueue, &priv->rf_kill, 4329 schedule_delayed_work(&priv->rf_kill,
4333 round_jiffies_relative(HZ)); 4330 round_jiffies_relative(HZ));
4334 } else 4331 } else
4335 schedule_reset(priv); 4332 schedule_reset(priv);
4336 } 4333 }
@@ -4461,20 +4458,17 @@ static void bd_queue_initialize(struct ipw2100_priv *priv,
4461 IPW_DEBUG_INFO("exit\n"); 4458 IPW_DEBUG_INFO("exit\n");
4462} 4459}
4463 4460
4464static void ipw2100_kill_workqueue(struct ipw2100_priv *priv) 4461static void ipw2100_kill_works(struct ipw2100_priv *priv)
4465{ 4462{
4466 if (priv->workqueue) { 4463 priv->stop_rf_kill = 1;
4467 priv->stop_rf_kill = 1; 4464 priv->stop_hang_check = 1;
4468 priv->stop_hang_check = 1; 4465 cancel_delayed_work_sync(&priv->reset_work);
4469 cancel_delayed_work(&priv->reset_work); 4466 cancel_delayed_work_sync(&priv->security_work);
4470 cancel_delayed_work(&priv->security_work); 4467 cancel_delayed_work_sync(&priv->wx_event_work);
4471 cancel_delayed_work(&priv->wx_event_work); 4468 cancel_delayed_work_sync(&priv->hang_check);
4472 cancel_delayed_work(&priv->hang_check); 4469 cancel_delayed_work_sync(&priv->rf_kill);
4473 cancel_delayed_work(&priv->rf_kill); 4470 cancel_work_sync(&priv->scan_event_now);
4474 cancel_delayed_work(&priv->scan_event_later); 4471 cancel_delayed_work_sync(&priv->scan_event_later);
4475 destroy_workqueue(priv->workqueue);
4476 priv->workqueue = NULL;
4477 }
4478} 4472}
4479 4473
4480static int ipw2100_tx_allocate(struct ipw2100_priv *priv) 4474static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -6046,7 +6040,7 @@ static void ipw2100_hang_check(struct work_struct *work)
6046 priv->last_rtc = rtc; 6040 priv->last_rtc = rtc;
6047 6041
6048 if (!priv->stop_hang_check) 6042 if (!priv->stop_hang_check)
6049 queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); 6043 schedule_delayed_work(&priv->hang_check, HZ / 2);
6050 6044
6051 spin_unlock_irqrestore(&priv->low_lock, flags); 6045 spin_unlock_irqrestore(&priv->low_lock, flags);
6052} 6046}
@@ -6062,8 +6056,8 @@ static void ipw2100_rf_kill(struct work_struct *work)
6062 if (rf_kill_active(priv)) { 6056 if (rf_kill_active(priv)) {
6063 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 6057 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6064 if (!priv->stop_rf_kill) 6058 if (!priv->stop_rf_kill)
6065 queue_delayed_work(priv->workqueue, &priv->rf_kill, 6059 schedule_delayed_work(&priv->rf_kill,
6066 round_jiffies_relative(HZ)); 6060 round_jiffies_relative(HZ));
6067 goto exit_unlock; 6061 goto exit_unlock;
6068 } 6062 }
6069 6063
@@ -6209,8 +6203,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6209 INIT_LIST_HEAD(&priv->fw_pend_list); 6203 INIT_LIST_HEAD(&priv->fw_pend_list);
6210 INIT_STAT(&priv->fw_pend_stat); 6204 INIT_STAT(&priv->fw_pend_stat);
6211 6205
6212 priv->workqueue = create_workqueue(DRV_NAME);
6213
6214 INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); 6206 INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
6215 INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); 6207 INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
6216 INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); 6208 INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
@@ -6410,7 +6402,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6410 if (dev->irq) 6402 if (dev->irq)
6411 free_irq(dev->irq, priv); 6403 free_irq(dev->irq, priv);
6412 6404
6413 ipw2100_kill_workqueue(priv); 6405 ipw2100_kill_works(priv);
6414 6406
6415 /* These are safe to call even if they weren't allocated */ 6407 /* These are safe to call even if they weren't allocated */
6416 ipw2100_queues_free(priv); 6408 ipw2100_queues_free(priv);
@@ -6460,9 +6452,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6460 * first, then close() will crash. */ 6452 * first, then close() will crash. */
6461 unregister_netdev(dev); 6453 unregister_netdev(dev);
6462 6454
6463 /* ipw2100_down will ensure that there is no more pending work 6455 ipw2100_kill_works(priv);
6464 * in the workqueue's, so we can safely remove them now. */
6465 ipw2100_kill_workqueue(priv);
6466 6456
6467 ipw2100_queues_free(priv); 6457 ipw2100_queues_free(priv);
6468 6458
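Every queue_delayed_work(priv->workqueue, ...) in ipw2100 becomes schedule_delayed_work(...) on the shared system workqueue, the private queue disappears, and teardown switches to the _sync cancel variants because there is no private queue left to flush and destroy. A minimal sketch of the resulting lifecycle (names hypothetical):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct example_priv {
            int stop;
            struct delayed_work hang_check;
    };

    static void example_hang_check(struct work_struct *work)
    {
            struct example_priv *priv =
                    container_of(work, struct example_priv, hang_check.work);

            /* ... periodic check ..., then re-arm unless told to stop */
            if (!priv->stop)
                    schedule_delayed_work(&priv->hang_check, HZ / 2);
    }

    static void example_start(struct example_priv *priv)
    {
            INIT_DELAYED_WORK(&priv->hang_check, example_hang_check);
            schedule_delayed_work(&priv->hang_check, HZ / 2);
    }

    static void example_stop(struct example_priv *priv)
    {
            priv->stop = 1;         /* keep the work from re-arming */
            cancel_delayed_work_sync(&priv->hang_check);
    }

Note how ipw2100_kill_works() keeps the stop_rf_kill/stop_hang_check flags: a self-rearming work item is first told to stop, then cancelled synchronously.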
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 838002b4881..99cba968aa5 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -580,7 +580,6 @@ struct ipw2100_priv {
580 580
581 struct tasklet_struct irq_tasklet; 581 struct tasklet_struct irq_tasklet;
582 582
583 struct workqueue_struct *workqueue;
584 struct delayed_work reset_work; 583 struct delayed_work reset_work;
585 struct delayed_work security_work; 584 struct delayed_work security_work;
586 struct delayed_work wx_event_work; 585 struct delayed_work wx_event_work;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index ae438ed80c2..160881f234c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -894,9 +894,8 @@ static void ipw_led_link_on(struct ipw_priv *priv)
894 894
895 /* If we aren't associated, schedule turning the LED off */ 895 /* If we aren't associated, schedule turning the LED off */
896 if (!(priv->status & STATUS_ASSOCIATED)) 896 if (!(priv->status & STATUS_ASSOCIATED))
897 queue_delayed_work(priv->workqueue, 897 schedule_delayed_work(&priv->led_link_off,
898 &priv->led_link_off, 898 LD_TIME_LINK_ON);
899 LD_TIME_LINK_ON);
900 } 899 }
901 900
902 spin_unlock_irqrestore(&priv->lock, flags); 901 spin_unlock_irqrestore(&priv->lock, flags);
@@ -939,8 +938,8 @@ static void ipw_led_link_off(struct ipw_priv *priv)
939 * turning the LED on (blink while unassociated) */ 938 * turning the LED on (blink while unassociated) */
940 if (!(priv->status & STATUS_RF_KILL_MASK) && 939 if (!(priv->status & STATUS_RF_KILL_MASK) &&
941 !(priv->status & STATUS_ASSOCIATED)) 940 !(priv->status & STATUS_ASSOCIATED))
942 queue_delayed_work(priv->workqueue, &priv->led_link_on, 941 schedule_delayed_work(&priv->led_link_on,
943 LD_TIME_LINK_OFF); 942 LD_TIME_LINK_OFF);
944 943
945 } 944 }
946 945
@@ -980,13 +979,11 @@ static void __ipw_led_activity_on(struct ipw_priv *priv)
980 priv->status |= STATUS_LED_ACT_ON; 979 priv->status |= STATUS_LED_ACT_ON;
981 980
982 cancel_delayed_work(&priv->led_act_off); 981 cancel_delayed_work(&priv->led_act_off);
983 queue_delayed_work(priv->workqueue, &priv->led_act_off, 982 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
984 LD_TIME_ACT_ON);
985 } else { 983 } else {
986 /* Reschedule LED off for full time period */ 984 /* Reschedule LED off for full time period */
987 cancel_delayed_work(&priv->led_act_off); 985 cancel_delayed_work(&priv->led_act_off);
988 queue_delayed_work(priv->workqueue, &priv->led_act_off, 986 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
989 LD_TIME_ACT_ON);
990 } 987 }
991} 988}
992 989
@@ -1795,13 +1792,11 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1795 if (disable_radio) { 1792 if (disable_radio) {
1796 priv->status |= STATUS_RF_KILL_SW; 1793 priv->status |= STATUS_RF_KILL_SW;
1797 1794
1798 if (priv->workqueue) { 1795 cancel_delayed_work(&priv->request_scan);
1799 cancel_delayed_work(&priv->request_scan); 1796 cancel_delayed_work(&priv->request_direct_scan);
1800 cancel_delayed_work(&priv->request_direct_scan); 1797 cancel_delayed_work(&priv->request_passive_scan);
1801 cancel_delayed_work(&priv->request_passive_scan); 1798 cancel_delayed_work(&priv->scan_event);
1802 cancel_delayed_work(&priv->scan_event); 1799 schedule_work(&priv->down);
1803 }
1804 queue_work(priv->workqueue, &priv->down);
1805 } else { 1800 } else {
1806 priv->status &= ~STATUS_RF_KILL_SW; 1801 priv->status &= ~STATUS_RF_KILL_SW;
1807 if (rf_kill_active(priv)) { 1802 if (rf_kill_active(priv)) {
@@ -1809,10 +1804,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1809 "disabled by HW switch\n"); 1804 "disabled by HW switch\n");
1810 /* Make sure the RF_KILL check timer is running */ 1805 /* Make sure the RF_KILL check timer is running */
1811 cancel_delayed_work(&priv->rf_kill); 1806 cancel_delayed_work(&priv->rf_kill);
1812 queue_delayed_work(priv->workqueue, &priv->rf_kill, 1807 schedule_delayed_work(&priv->rf_kill,
1813 round_jiffies_relative(2 * HZ)); 1808 round_jiffies_relative(2 * HZ));
1814 } else 1809 } else
1815 queue_work(priv->workqueue, &priv->up); 1810 schedule_work(&priv->up);
1816 } 1811 }
1817 1812
1818 return 1; 1813 return 1;
@@ -2063,7 +2058,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
2063 cancel_delayed_work(&priv->request_passive_scan); 2058 cancel_delayed_work(&priv->request_passive_scan);
2064 cancel_delayed_work(&priv->scan_event); 2059 cancel_delayed_work(&priv->scan_event);
2065 schedule_work(&priv->link_down); 2060 schedule_work(&priv->link_down);
2066 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); 2061 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2067 handled |= IPW_INTA_BIT_RF_KILL_DONE; 2062 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2068 } 2063 }
2069 2064
@@ -2103,7 +2098,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
2103 priv->status &= ~STATUS_HCMD_ACTIVE; 2098 priv->status &= ~STATUS_HCMD_ACTIVE;
2104 wake_up_interruptible(&priv->wait_command_queue); 2099 wake_up_interruptible(&priv->wait_command_queue);
2105 2100
2106 queue_work(priv->workqueue, &priv->adapter_restart); 2101 schedule_work(&priv->adapter_restart);
2107 handled |= IPW_INTA_BIT_FATAL_ERROR; 2102 handled |= IPW_INTA_BIT_FATAL_ERROR;
2108 } 2103 }
2109 2104
@@ -2323,11 +2318,6 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2323 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); 2318 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2324} 2319}
2325 2320
2326/*
2327 * NOTE: This must be executed from our workqueue as it results in udelay
2328 * being called which may corrupt the keyboard if executed on default
2329 * workqueue
2330 */
2331static void ipw_adapter_restart(void *adapter) 2321static void ipw_adapter_restart(void *adapter)
2332{ 2322{
2333 struct ipw_priv *priv = adapter; 2323 struct ipw_priv *priv = adapter;
@@ -2368,13 +2358,13 @@ static void ipw_scan_check(void *data)
2368 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2358 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2369 "adapter after (%dms).\n", 2359 "adapter after (%dms).\n",
2370 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2360 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2371 queue_work(priv->workqueue, &priv->adapter_restart); 2361 schedule_work(&priv->adapter_restart);
2372 } else if (priv->status & STATUS_SCANNING) { 2362 } else if (priv->status & STATUS_SCANNING) {
2373 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan " 2363 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2374 "after (%dms).\n", 2364 "after (%dms).\n",
2375 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2365 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2376 ipw_abort_scan(priv); 2366 ipw_abort_scan(priv);
2377 queue_delayed_work(priv->workqueue, &priv->scan_check, HZ); 2367 schedule_delayed_work(&priv->scan_check, HZ);
2378 } 2368 }
2379} 2369}
2380 2370
@@ -3943,7 +3933,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3943 3933
3944 if (priv->status & STATUS_ASSOCIATING) { 3934 if (priv->status & STATUS_ASSOCIATING) {
3945 IPW_DEBUG_ASSOC("Disassociating while associating.\n"); 3935 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3946 queue_work(priv->workqueue, &priv->disassociate); 3936 schedule_work(&priv->disassociate);
3947 return; 3937 return;
3948 } 3938 }
3949 3939
@@ -4360,8 +4350,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
4360 4350
4361 priv->quality = quality; 4351 priv->quality = quality;
4362 4352
4363 queue_delayed_work(priv->workqueue, &priv->gather_stats, 4353 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4364 IPW_STATS_INTERVAL);
4365} 4354}
4366 4355
4367static void ipw_bg_gather_stats(struct work_struct *work) 4356static void ipw_bg_gather_stats(struct work_struct *work)
@@ -4396,10 +4385,10 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4396 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4385 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4397 IPW_DL_STATE, 4386 IPW_DL_STATE,
4398 "Aborting scan with missed beacon.\n"); 4387 "Aborting scan with missed beacon.\n");
4399 queue_work(priv->workqueue, &priv->abort_scan); 4388 schedule_work(&priv->abort_scan);
4400 } 4389 }
4401 4390
4402 queue_work(priv->workqueue, &priv->disassociate); 4391 schedule_work(&priv->disassociate);
4403 return; 4392 return;
4404 } 4393 }
4405 4394
@@ -4425,8 +4414,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4425 if (!(priv->status & STATUS_ROAMING)) { 4414 if (!(priv->status & STATUS_ROAMING)) {
4426 priv->status |= STATUS_ROAMING; 4415 priv->status |= STATUS_ROAMING;
4427 if (!(priv->status & STATUS_SCANNING)) 4416 if (!(priv->status & STATUS_SCANNING))
4428 queue_delayed_work(priv->workqueue, 4417 schedule_delayed_work(&priv->request_scan, 0);
4429 &priv->request_scan, 0);
4430 } 4418 }
4431 return; 4419 return;
4432 } 4420 }
@@ -4439,7 +4427,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4439 * channels..) */ 4427 * channels..) */
4440 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, 4428 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4441 "Aborting scan with missed beacon.\n"); 4429 "Aborting scan with missed beacon.\n");
4442 queue_work(priv->workqueue, &priv->abort_scan); 4430 schedule_work(&priv->abort_scan);
4443 } 4431 }
4444 4432
4445 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); 4433 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
@@ -4462,8 +4450,8 @@ static void handle_scan_event(struct ipw_priv *priv)
4462 /* Only userspace-requested scan completion events go out immediately */ 4450 /* Only userspace-requested scan completion events go out immediately */
4463 if (!priv->user_requested_scan) { 4451 if (!priv->user_requested_scan) {
4464 if (!delayed_work_pending(&priv->scan_event)) 4452 if (!delayed_work_pending(&priv->scan_event))
4465 queue_delayed_work(priv->workqueue, &priv->scan_event, 4453 schedule_delayed_work(&priv->scan_event,
4466 round_jiffies_relative(msecs_to_jiffies(4000))); 4454 round_jiffies_relative(msecs_to_jiffies(4000)));
4467 } else { 4455 } else {
4468 union iwreq_data wrqu; 4456 union iwreq_data wrqu;
4469 4457
@@ -4516,20 +4504,17 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4516 4504
4517 IPW_DEBUG_ASSOC 4505 IPW_DEBUG_ASSOC
4518 ("queueing adhoc check\n"); 4506 ("queueing adhoc check\n");
4519 queue_delayed_work(priv-> 4507 schedule_delayed_work(
4520 workqueue, 4508 &priv->adhoc_check,
4521 &priv-> 4509 le16_to_cpu(priv->
4522 adhoc_check, 4510 assoc_request.
4523 le16_to_cpu(priv-> 4511 beacon_interval));
4524 assoc_request.
4525 beacon_interval));
4526 break; 4512 break;
4527 } 4513 }
4528 4514
4529 priv->status &= ~STATUS_ASSOCIATING; 4515 priv->status &= ~STATUS_ASSOCIATING;
4530 priv->status |= STATUS_ASSOCIATED; 4516 priv->status |= STATUS_ASSOCIATED;
4531 queue_work(priv->workqueue, 4517 schedule_work(&priv->system_config);
4532 &priv->system_config);
4533 4518
4534#ifdef CONFIG_IPW2200_QOS 4519#ifdef CONFIG_IPW2200_QOS
4535#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ 4520#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
@@ -4792,43 +4777,37 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4792#ifdef CONFIG_IPW2200_MONITOR 4777#ifdef CONFIG_IPW2200_MONITOR
4793 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4778 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4794 priv->status |= STATUS_SCAN_FORCED; 4779 priv->status |= STATUS_SCAN_FORCED;
4795 queue_delayed_work(priv->workqueue, 4780 schedule_delayed_work(&priv->request_scan, 0);
4796 &priv->request_scan, 0);
4797 break; 4781 break;
4798 } 4782 }
4799 priv->status &= ~STATUS_SCAN_FORCED; 4783 priv->status &= ~STATUS_SCAN_FORCED;
4800#endif /* CONFIG_IPW2200_MONITOR */ 4784#endif /* CONFIG_IPW2200_MONITOR */
4801 4785
4802 /* Do queued direct scans first */ 4786 /* Do queued direct scans first */
4803 if (priv->status & STATUS_DIRECT_SCAN_PENDING) { 4787 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4804 queue_delayed_work(priv->workqueue, 4788 schedule_delayed_work(&priv->request_direct_scan, 0);
4805 &priv->request_direct_scan, 0);
4806 }
4807 4789
4808 if (!(priv->status & (STATUS_ASSOCIATED | 4790 if (!(priv->status & (STATUS_ASSOCIATED |
4809 STATUS_ASSOCIATING | 4791 STATUS_ASSOCIATING |
4810 STATUS_ROAMING | 4792 STATUS_ROAMING |
4811 STATUS_DISASSOCIATING))) 4793 STATUS_DISASSOCIATING)))
4812 queue_work(priv->workqueue, &priv->associate); 4794 schedule_work(&priv->associate);
4813 else if (priv->status & STATUS_ROAMING) { 4795 else if (priv->status & STATUS_ROAMING) {
4814 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4796 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4815 /* If a scan completed and we are in roam mode, then 4797 /* If a scan completed and we are in roam mode, then
4816 * the scan that completed was the one requested as a 4798 * the scan that completed was the one requested as a
4817 * result of entering roam... so, schedule the 4799 * result of entering roam... so, schedule the
4818 * roam work */ 4800 * roam work */
4819 queue_work(priv->workqueue, 4801 schedule_work(&priv->roam);
4820 &priv->roam);
4821 else 4802 else
4822 /* Don't schedule if we aborted the scan */ 4803 /* Don't schedule if we aborted the scan */
4823 priv->status &= ~STATUS_ROAMING; 4804 priv->status &= ~STATUS_ROAMING;
4824 } else if (priv->status & STATUS_SCAN_PENDING) 4805 } else if (priv->status & STATUS_SCAN_PENDING)
4825 queue_delayed_work(priv->workqueue, 4806 schedule_delayed_work(&priv->request_scan, 0);
4826 &priv->request_scan, 0);
4827 else if (priv->config & CFG_BACKGROUND_SCAN 4807 else if (priv->config & CFG_BACKGROUND_SCAN
4828 && priv->status & STATUS_ASSOCIATED) 4808 && priv->status & STATUS_ASSOCIATED)
4829 queue_delayed_work(priv->workqueue, 4809 schedule_delayed_work(&priv->request_scan,
4830 &priv->request_scan, 4810 round_jiffies_relative(HZ));
4831 round_jiffies_relative(HZ));
4832 4811
4833 /* Send an empty event to user space. 4812 /* Send an empty event to user space.
4834 * We don't send the received data on the event because 4813 * We don't send the received data on the event because
@@ -5192,7 +5171,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
5192 /* If the pre-allocated buffer pool is dropping low, schedule to 5171 /* If the pre-allocated buffer pool is dropping low, schedule to
5193 * refill it */ 5172 * refill it */
5194 if (rxq->free_count <= RX_LOW_WATERMARK) 5173 if (rxq->free_count <= RX_LOW_WATERMARK)
5195 queue_work(priv->workqueue, &priv->rx_replenish); 5174 schedule_work(&priv->rx_replenish);
5196 5175
5197 /* If we've added more space for the firmware to place data, tell it */ 5176 /* If we've added more space for the firmware to place data, tell it */
5198 if (write != rxq->write) 5177 if (write != rxq->write)
@@ -6133,8 +6112,8 @@ static void ipw_adhoc_check(void *data)
6133 return; 6112 return;
6134 } 6113 }
6135 6114
6136 queue_delayed_work(priv->workqueue, &priv->adhoc_check, 6115 schedule_delayed_work(&priv->adhoc_check,
6137 le16_to_cpu(priv->assoc_request.beacon_interval)); 6116 le16_to_cpu(priv->assoc_request.beacon_interval));
6138} 6117}
6139 6118
6140static void ipw_bg_adhoc_check(struct work_struct *work) 6119static void ipw_bg_adhoc_check(struct work_struct *work)
@@ -6523,8 +6502,7 @@ send_request:
6523 } else 6502 } else
6524 priv->status &= ~STATUS_SCAN_PENDING; 6503 priv->status &= ~STATUS_SCAN_PENDING;
6525 6504
6526 queue_delayed_work(priv->workqueue, &priv->scan_check, 6505 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6527 IPW_SCAN_CHECK_WATCHDOG);
6528done: 6506done:
6529 mutex_unlock(&priv->mutex); 6507 mutex_unlock(&priv->mutex);
6530 return err; 6508 return err;
@@ -6994,8 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6994 !memcmp(network->ssid, 6972 !memcmp(network->ssid,
6995 priv->assoc_network->ssid, 6973 priv->assoc_network->ssid,
6996 network->ssid_len)) { 6974 network->ssid_len)) {
6997 queue_work(priv->workqueue, 6975 schedule_work(&priv->merge_networks);
6998 &priv->merge_networks);
6999 } 6976 }
7000 } 6977 }
7001 6978
@@ -7663,7 +7640,7 @@ static int ipw_associate(void *data)
7663 if (priv->status & STATUS_DISASSOCIATING) { 7640 if (priv->status & STATUS_DISASSOCIATING) {
7664 IPW_DEBUG_ASSOC("Not attempting association (in " 7641 IPW_DEBUG_ASSOC("Not attempting association (in "
7665 "disassociating)\n "); 7642 "disassociating)\n ");
7666 queue_work(priv->workqueue, &priv->associate); 7643 schedule_work(&priv->associate);
7667 return 0; 7644 return 0;
7668 } 7645 }
7669 7646
@@ -7731,12 +7708,10 @@ static int ipw_associate(void *data)
7731 7708
7732 if (!(priv->status & STATUS_SCANNING)) { 7709 if (!(priv->status & STATUS_SCANNING)) {
7733 if (!(priv->config & CFG_SPEED_SCAN)) 7710 if (!(priv->config & CFG_SPEED_SCAN))
7734 queue_delayed_work(priv->workqueue, 7711 schedule_delayed_work(&priv->request_scan,
7735 &priv->request_scan, 7712 SCAN_INTERVAL);
7736 SCAN_INTERVAL);
7737 else 7713 else
7738 queue_delayed_work(priv->workqueue, 7714 schedule_delayed_work(&priv->request_scan, 0);
7739 &priv->request_scan, 0);
7740 } 7715 }
7741 7716
7742 return 0; 7717 return 0;
@@ -8899,7 +8874,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
8899 8874
8900 priv->ieee->iw_mode = wrqu->mode; 8875 priv->ieee->iw_mode = wrqu->mode;
8901 8876
8902 queue_work(priv->workqueue, &priv->adapter_restart); 8877 schedule_work(&priv->adapter_restart);
8903 mutex_unlock(&priv->mutex); 8878 mutex_unlock(&priv->mutex);
8904 return err; 8879 return err;
8905} 8880}
@@ -9598,7 +9573,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
9598 9573
9599 IPW_DEBUG_WX("Start scan\n"); 9574 IPW_DEBUG_WX("Start scan\n");
9600 9575
9601 queue_delayed_work(priv->workqueue, work, 0); 9576 schedule_delayed_work(work, 0);
9602 9577
9603 return 0; 9578 return 0;
9604} 9579}
@@ -9937,7 +9912,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
9937#else 9912#else
9938 priv->net_dev->type = ARPHRD_IEEE80211; 9913 priv->net_dev->type = ARPHRD_IEEE80211;
9939#endif 9914#endif
9940 queue_work(priv->workqueue, &priv->adapter_restart); 9915 schedule_work(&priv->adapter_restart);
9941 } 9916 }
9942 9917
9943 ipw_set_channel(priv, parms[1]); 9918 ipw_set_channel(priv, parms[1]);
@@ -9947,7 +9922,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
9947 return 0; 9922 return 0;
9948 } 9923 }
9949 priv->net_dev->type = ARPHRD_ETHER; 9924 priv->net_dev->type = ARPHRD_ETHER;
9950 queue_work(priv->workqueue, &priv->adapter_restart); 9925 schedule_work(&priv->adapter_restart);
9951 } 9926 }
9952 mutex_unlock(&priv->mutex); 9927 mutex_unlock(&priv->mutex);
9953 return 0; 9928 return 0;
@@ -9961,7 +9936,7 @@ static int ipw_wx_reset(struct net_device *dev,
9961{ 9936{
9962 struct ipw_priv *priv = libipw_priv(dev); 9937 struct ipw_priv *priv = libipw_priv(dev);
9963 IPW_DEBUG_WX("RESET\n"); 9938 IPW_DEBUG_WX("RESET\n");
9964 queue_work(priv->workqueue, &priv->adapter_restart); 9939 schedule_work(&priv->adapter_restart);
9965 return 0; 9940 return 0;
9966} 9941}
9967 9942
@@ -10551,7 +10526,7 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10551 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 10526 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10552 printk(KERN_INFO "%s: Setting MAC to %pM\n", 10527 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10553 priv->net_dev->name, priv->mac_addr); 10528 priv->net_dev->name, priv->mac_addr);
10554 queue_work(priv->workqueue, &priv->adapter_restart); 10529 schedule_work(&priv->adapter_restart);
10555 mutex_unlock(&priv->mutex); 10530 mutex_unlock(&priv->mutex);
10556 return 0; 10531 return 0;
10557} 10532}
@@ -10684,9 +10659,7 @@ static void ipw_rf_kill(void *adapter)
10684 10659
10685 if (rf_kill_active(priv)) { 10660 if (rf_kill_active(priv)) {
10686 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 10661 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10687 if (priv->workqueue) 10662 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10688 queue_delayed_work(priv->workqueue,
10689 &priv->rf_kill, 2 * HZ);
10690 goto exit_unlock; 10663 goto exit_unlock;
10691 } 10664 }
10692 10665
@@ -10697,7 +10670,7 @@ static void ipw_rf_kill(void *adapter)
10697 "device\n"); 10670 "device\n");
10698 10671
10699 /* we can not do an adapter restart while inside an irq lock */ 10672 /* we can not do an adapter restart while inside an irq lock */
10700 queue_work(priv->workqueue, &priv->adapter_restart); 10673 schedule_work(&priv->adapter_restart);
10701 } else 10674 } else
10702 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " 10675 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10703 "enabled\n"); 10676 "enabled\n");
@@ -10735,7 +10708,7 @@ static void ipw_link_up(struct ipw_priv *priv)
10735 notify_wx_assoc_event(priv); 10708 notify_wx_assoc_event(priv);
10736 10709
10737 if (priv->config & CFG_BACKGROUND_SCAN) 10710 if (priv->config & CFG_BACKGROUND_SCAN)
10738 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); 10711 schedule_delayed_work(&priv->request_scan, HZ);
10739} 10712}
10740 10713
10741static void ipw_bg_link_up(struct work_struct *work) 10714static void ipw_bg_link_up(struct work_struct *work)
@@ -10764,7 +10737,7 @@ static void ipw_link_down(struct ipw_priv *priv)
10764 10737
10765 if (!(priv->status & STATUS_EXIT_PENDING)) { 10738 if (!(priv->status & STATUS_EXIT_PENDING)) {
10766 /* Queue up another scan... */ 10739 /* Queue up another scan... */
10767 queue_delayed_work(priv->workqueue, &priv->request_scan, 0); 10740 schedule_delayed_work(&priv->request_scan, 0);
10768 } else 10741 } else
10769 cancel_delayed_work(&priv->scan_event); 10742 cancel_delayed_work(&priv->scan_event);
10770} 10743}
@@ -10782,7 +10755,6 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10782{ 10755{
10783 int ret = 0; 10756 int ret = 0;
10784 10757
10785 priv->workqueue = create_workqueue(DRV_NAME);
10786 init_waitqueue_head(&priv->wait_command_queue); 10758 init_waitqueue_head(&priv->wait_command_queue);
10787 init_waitqueue_head(&priv->wait_state); 10759 init_waitqueue_head(&priv->wait_state);
10788 10760
@@ -11339,8 +11311,7 @@ static int ipw_up(struct ipw_priv *priv)
11339 IPW_WARNING("Radio Frequency Kill Switch is On:\n" 11311 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11340 "Kill switch must be turned off for " 11312 "Kill switch must be turned off for "
11341 "wireless networking to work.\n"); 11313 "wireless networking to work.\n");
11342 queue_delayed_work(priv->workqueue, &priv->rf_kill, 11314 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11343 2 * HZ);
11344 return 0; 11315 return 0;
11345 } 11316 }
11346 11317
@@ -11350,8 +11321,7 @@ static int ipw_up(struct ipw_priv *priv)
11350 11321
11351 /* If configure to try and auto-associate, kick 11322 /* If configure to try and auto-associate, kick
11352 * off a scan. */ 11323 * off a scan. */
11353 queue_delayed_work(priv->workqueue, 11324 schedule_delayed_work(&priv->request_scan, 0);
11354 &priv->request_scan, 0);
11355 11325
11356 return 0; 11326 return 0;
11357 } 11327 }
@@ -11817,7 +11787,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11817 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); 11787 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11818 if (err) { 11788 if (err) {
11819 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); 11789 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11820 goto out_destroy_workqueue; 11790 goto out_iounmap;
11821 } 11791 }
11822 11792
11823 SET_NETDEV_DEV(net_dev, &pdev->dev); 11793 SET_NETDEV_DEV(net_dev, &pdev->dev);
@@ -11885,9 +11855,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11885 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11855 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11886 out_release_irq: 11856 out_release_irq:
11887 free_irq(pdev->irq, priv); 11857 free_irq(pdev->irq, priv);
11888 out_destroy_workqueue:
11889 destroy_workqueue(priv->workqueue);
11890 priv->workqueue = NULL;
11891 out_iounmap: 11858 out_iounmap:
11892 iounmap(priv->hw_base); 11859 iounmap(priv->hw_base);
11893 out_pci_release_regions: 11860 out_pci_release_regions:
@@ -11930,18 +11897,31 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11930 kfree(priv->cmdlog); 11897 kfree(priv->cmdlog);
11931 priv->cmdlog = NULL; 11898 priv->cmdlog = NULL;
11932 } 11899 }
11933 /* ipw_down will ensure that there is no more pending work 11900
11934 * in the workqueue's, so we can safely remove them now. */ 11901 /* make sure all works are inactive */
11935 cancel_delayed_work(&priv->adhoc_check); 11902 cancel_delayed_work_sync(&priv->adhoc_check);
11936 cancel_delayed_work(&priv->gather_stats); 11903 cancel_work_sync(&priv->associate);
11937 cancel_delayed_work(&priv->request_scan); 11904 cancel_work_sync(&priv->disassociate);
11938 cancel_delayed_work(&priv->request_direct_scan); 11905 cancel_work_sync(&priv->system_config);
11939 cancel_delayed_work(&priv->request_passive_scan); 11906 cancel_work_sync(&priv->rx_replenish);
11940 cancel_delayed_work(&priv->scan_event); 11907 cancel_work_sync(&priv->adapter_restart);
11941 cancel_delayed_work(&priv->rf_kill); 11908 cancel_delayed_work_sync(&priv->rf_kill);
11942 cancel_delayed_work(&priv->scan_check); 11909 cancel_work_sync(&priv->up);
11943 destroy_workqueue(priv->workqueue); 11910 cancel_work_sync(&priv->down);
11944 priv->workqueue = NULL; 11911 cancel_delayed_work_sync(&priv->request_scan);
11912 cancel_delayed_work_sync(&priv->request_direct_scan);
11913 cancel_delayed_work_sync(&priv->request_passive_scan);
11914 cancel_delayed_work_sync(&priv->scan_event);
11915 cancel_delayed_work_sync(&priv->gather_stats);
11916 cancel_work_sync(&priv->abort_scan);
11917 cancel_work_sync(&priv->roam);
11918 cancel_delayed_work_sync(&priv->scan_check);
11919 cancel_work_sync(&priv->link_up);
11920 cancel_work_sync(&priv->link_down);
11921 cancel_delayed_work_sync(&priv->led_link_on);
11922 cancel_delayed_work_sync(&priv->led_link_off);
11923 cancel_delayed_work_sync(&priv->led_act_off);
11924 cancel_work_sync(&priv->merge_networks);
11945 11925
11946 /* Free MAC hash list for ADHOC */ 11926 /* Free MAC hash list for ADHOC */
11947 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) { 11927 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
@@ -12029,7 +12009,7 @@ static int ipw_pci_resume(struct pci_dev *pdev)
12029 priv->suspend_time = get_seconds() - priv->suspend_at; 12009 priv->suspend_time = get_seconds() - priv->suspend_at;
12030 12010
12031 /* Bring the device back up */ 12011 /* Bring the device back up */
12032 queue_work(priv->workqueue, &priv->up); 12012 schedule_work(&priv->up);
12033 12013
12034 return 0; 12014 return 0;
12035} 12015}
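ipw2200 gets the same workqueue conversion, and with the private queue gone the probe error path no longer has anything to destroy, so a request_irq() failure now unwinds directly to out_iounmap. The goto-unwind idiom that path follows, as a self-contained sketch (all stubs hypothetical):

    static int example_acquire_a(void) { return 0; }   /* e.g. ioremap */
    static void example_release_a(void) { }
    static int example_acquire_b(void) { return -1; }  /* e.g. request_irq */

    static int example_probe(void)
    {
            int err;

            err = example_acquire_a();
            if (err)
                    goto out;
            err = example_acquire_b();
            if (err)
                    goto out_release_a;     /* undo only what succeeded */
            return 0;

    out_release_a:
            example_release_a();
    out:
            return err;
    }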
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index d9e1d9bad58..91795b5a93c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -1299,8 +1299,6 @@ struct ipw_priv {
1299 u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; 1299 u8 direct_scan_ssid[IW_ESSID_MAX_SIZE];
1300 u8 direct_scan_ssid_len; 1300 u8 direct_scan_ssid_len;
1301 1301
1302 struct workqueue_struct *workqueue;
1303
1304 struct delayed_work adhoc_check; 1302 struct delayed_work adhoc_check;
1305 struct work_struct associate; 1303 struct work_struct associate;
1306 struct work_struct disassociate; 1304 struct work_struct disassociate;
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index 8359594839e..d096dc28204 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -897,13 +897,11 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
897{ 897{
898 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; 898 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
899 unsigned long flags; 899 unsigned long flags;
900 u8 rev_id = 0; 900 u8 rev_id = priv->pci_dev->revision;
901 901
902 spin_lock_irqsave(&priv->lock, flags); 902 spin_lock_irqsave(&priv->lock, flags);
903 903
904 /* Determine HW type */ 904 /* Determine HW type */
905 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
906
907 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); 905 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
908 906
909 if (rev_id & PCI_CFG_REV_ID_BIT_RTP) 907 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
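The PCI core caches the revision byte in struct pci_dev when it enumerates the device, so the driver can read pci_dev->revision instead of issuing its own config-space access; as a side effect the read also moves out from under the spinlock here. Side by side, as a sketch:

    #include <linux/pci.h>

    static u8 example_read_revision(struct pci_dev *pdev)
    {
            u8 rev_id;

            /* Old style: explicit config-space read. */
            pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

            /* New style: the core cached the same byte at probe time. */
            return pdev->revision;
    }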
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 9631d8fa802..2601b552c6f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -49,7 +49,7 @@
49#include "iwl-agn-debugfs.h" 49#include "iwl-agn-debugfs.h"
50 50
51/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
52#define IWL1000_UCODE_API_MAX 3 52#define IWL1000_UCODE_API_MAX 5
53#define IWL100_UCODE_API_MAX 5 53#define IWL100_UCODE_API_MAX 5
54 54
55/* Lowest firmware API version supported */ 55/* Lowest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 4064490e3b9..8cab3571047 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
51#include "iwl-agn-debugfs.h" 51#include "iwl-agn-debugfs.h"
52 52
53/* Highest firmware API version supported */ 53/* Highest firmware API version supported */
54#define IWL5000_UCODE_API_MAX 2 54#define IWL5000_UCODE_API_MAX 5
55#define IWL5150_UCODE_API_MAX 2 55#define IWL5150_UCODE_API_MAX 2
56 56
57/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 8163a0efdc8..3a02bea4632 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -2264,7 +2264,7 @@ signed long iwlagn_wait_notification(struct iwl_priv *priv,
2264 int ret; 2264 int ret;
2265 2265
2266 ret = wait_event_timeout(priv->_agn.notif_waitq, 2266 ret = wait_event_timeout(priv->_agn.notif_waitq,
2267 &wait_entry->triggered, 2267 wait_entry->triggered,
2268 timeout); 2268 timeout);
2269 2269
2270 spin_lock_bh(&priv->_agn.notif_wait_lock); 2270 spin_lock_bh(&priv->_agn.notif_wait_lock);
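This one-character fix matters: wait_event_timeout() evaluates its second argument as the wake-up condition, and &wait_entry->triggered is the address of a struct member, which is always non-zero, so the old code returned immediately instead of sleeping on the notification. Reduced to a sketch:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
    static int example_triggered;

    static long example_wait(long timeout)
    {
            /* Correct: the flag itself is re-tested on every wakeup. */
            return wait_event_timeout(example_waitq, example_triggered,
                                      timeout);
            /*
             * Buggy form fixed above:
             *     wait_event_timeout(example_waitq, &example_triggered, ...);
             * The address is a compile-time-true condition, so the macro
             * returns at once without waiting.
             */
    }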
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 39a05e32c34..28ac0d44555 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2343,7 +2343,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv);
2343static void __iwl_down(struct iwl_priv *priv) 2343static void __iwl_down(struct iwl_priv *priv)
2344{ 2344{
2345 unsigned long flags; 2345 unsigned long flags;
2346 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 2346 int exit_pending;
2347 2347
2348 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2348 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2349 2349
@@ -2815,14 +2815,17 @@ static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
2815 2815
2816 mutex_lock(&priv->mutex); 2816 mutex_lock(&priv->mutex);
2817 2817
2818 if (!priv->_agn.offchan_tx_skb) 2818 if (!priv->_agn.offchan_tx_skb) {
2819 return -EINVAL; 2819 ret = -EINVAL;
2820 goto unlock;
2821 }
2820 2822
2821 priv->_agn.offchan_tx_skb = NULL; 2823 priv->_agn.offchan_tx_skb = NULL;
2822 2824
2823 ret = iwl_scan_cancel_timeout(priv, 200); 2825 ret = iwl_scan_cancel_timeout(priv, 200);
2824 if (ret) 2826 if (ret)
2825 ret = -EIO; 2827 ret = -EIO;
2828unlock:
2826 mutex_unlock(&priv->mutex); 2829 mutex_unlock(&priv->mutex);
2827 2830
2828 return ret; 2831 return ret;
@@ -3714,7 +3717,7 @@ static void iwl_hw_detect(struct iwl_priv *priv)
3714{ 3717{
3715 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV); 3718 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
3716 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG); 3719 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
3717 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); 3720 priv->rev_id = priv->pci_dev->revision;
3718 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); 3721 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
3719} 3722}
3720 3723
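Two small correctness fixes land in iwl-agn.c: the exit_pending initialization moves out of the declaration in __iwl_down() (the bit is now tested later, outside this hunk), and iwl_mac_offchannel_tx_cancel_wait() no longer returns with priv->mutex held on the -EINVAL path. The lock-balance shape of the second fix, sketched:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);
    static void *example_resource;

    static int example_cancel(void)
    {
            int ret = 0;

            mutex_lock(&example_lock);
            if (!example_resource) {
                    ret = -EINVAL;
                    goto unlock;    /* never return with the mutex held */
            }
            example_resource = NULL;
            /* ... remaining teardown under the lock ... */
    unlock:
            mutex_unlock(&example_lock);
            return ret;
    }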
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index f6c2cd665f4..078ef43d957 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -57,6 +57,7 @@ struct if_spi_card {
57 /* Handles all SPI communication (except for FW load) */ 57 /* Handles all SPI communication (except for FW load) */
58 struct workqueue_struct *workqueue; 58 struct workqueue_struct *workqueue;
59 struct work_struct packet_work; 59 struct work_struct packet_work;
60 struct work_struct resume_work;
60 61
61 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE]; 62 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
62 63
@@ -68,6 +69,9 @@ struct if_spi_card {
68 69
69 /* Protects cmd_packet_list and data_packet_list */ 70 /* Protects cmd_packet_list and data_packet_list */
70 spinlock_t buffer_lock; 71 spinlock_t buffer_lock;
72
 73 /* True if the card is suspended */
74 u8 suspended;
71}; 75};
72 76
73static void free_if_spi_card(struct if_spi_card *card) 77static void free_if_spi_card(struct if_spi_card *card)
@@ -1057,6 +1061,28 @@ out:
1057 return err; 1061 return err;
1058} 1062}
1059 1063
1064static void if_spi_resume_worker(struct work_struct *work)
1065{
1066 struct if_spi_card *card;
1067
1068 card = container_of(work, struct if_spi_card, resume_work);
1069
1070 if (card->suspended) {
1071 if (card->pdata->setup)
1072 card->pdata->setup(card->spi);
1073
1074 /* Init card ... */
1075 if_spi_init_card(card);
1076
1077 enable_irq(card->spi->irq);
1078
1079 /* And resume it ... */
1080 lbs_resume(card->priv);
1081
1082 card->suspended = 0;
1083 }
1084}
1085
1060static int __devinit if_spi_probe(struct spi_device *spi) 1086static int __devinit if_spi_probe(struct spi_device *spi)
1061{ 1087{
1062 struct if_spi_card *card; 1088 struct if_spi_card *card;
@@ -1107,6 +1133,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1107 goto free_card; 1133 goto free_card;
1108 } 1134 }
1109 card->priv = priv; 1135 card->priv = priv;
1136 priv->setup_fw_on_resume = 1;
1110 priv->card = card; 1137 priv->card = card;
1111 priv->hw_host_to_card = if_spi_host_to_card; 1138 priv->hw_host_to_card = if_spi_host_to_card;
1112 priv->enter_deep_sleep = NULL; 1139 priv->enter_deep_sleep = NULL;
@@ -1117,6 +1144,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1117 /* Initialize interrupt handling stuff. */ 1144 /* Initialize interrupt handling stuff. */
1118 card->workqueue = create_workqueue("libertas_spi"); 1145 card->workqueue = create_workqueue("libertas_spi");
1119 INIT_WORK(&card->packet_work, if_spi_host_to_card_worker); 1146 INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
1147 INIT_WORK(&card->resume_work, if_spi_resume_worker);
1120 1148
1121 err = request_irq(spi->irq, if_spi_host_interrupt, 1149 err = request_irq(spi->irq, if_spi_host_interrupt,
1122 IRQF_TRIGGER_FALLING, "libertas_spi", card); 1150 IRQF_TRIGGER_FALLING, "libertas_spi", card);
@@ -1161,6 +1189,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
1161 lbs_deb_spi("libertas_spi_remove\n"); 1189 lbs_deb_spi("libertas_spi_remove\n");
1162 lbs_deb_enter(LBS_DEB_SPI); 1190 lbs_deb_enter(LBS_DEB_SPI);
1163 1191
1192 cancel_work_sync(&card->resume_work);
1193
1164 lbs_stop_card(priv); 1194 lbs_stop_card(priv);
1165 lbs_remove_card(priv); /* will call free_netdev */ 1195 lbs_remove_card(priv); /* will call free_netdev */
1166 1196
@@ -1174,6 +1204,40 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
1174 return 0; 1204 return 0;
1175} 1205}
1176 1206
1207static int if_spi_suspend(struct device *dev)
1208{
1209 struct spi_device *spi = to_spi_device(dev);
1210 struct if_spi_card *card = spi_get_drvdata(spi);
1211
1212 if (!card->suspended) {
1213 lbs_suspend(card->priv);
1214 flush_workqueue(card->workqueue);
1215 disable_irq(spi->irq);
1216
1217 if (card->pdata->teardown)
1218 card->pdata->teardown(spi);
1219 card->suspended = 1;
1220 }
1221
1222 return 0;
1223}
1224
1225static int if_spi_resume(struct device *dev)
1226{
1227 struct spi_device *spi = to_spi_device(dev);
1228 struct if_spi_card *card = spi_get_drvdata(spi);
1229
1230 /* Defer the heavy re-init to the resume worker */
1231 schedule_work(&card->resume_work);
1232
1233 return 0;
1234}
1235
1236static const struct dev_pm_ops if_spi_pm_ops = {
1237 .suspend = if_spi_suspend,
1238 .resume = if_spi_resume,
1239};
1240
1177static struct spi_driver libertas_spi_driver = { 1241static struct spi_driver libertas_spi_driver = {
1178 .probe = if_spi_probe, 1242 .probe = if_spi_probe,
1179 .remove = __devexit_p(libertas_spi_remove), 1243 .remove = __devexit_p(libertas_spi_remove),
@@ -1181,6 +1245,7 @@ static struct spi_driver libertas_spi_driver = {
1181 .name = "libertas_spi", 1245 .name = "libertas_spi",
1182 .bus = &spi_bus_type, 1246 .bus = &spi_bus_type,
1183 .owner = THIS_MODULE, 1247 .owner = THIS_MODULE,
1248 .pm = &if_spi_pm_ops,
1184 }, 1249 },
1185}; 1250};
1186 1251
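The suspend path quiesces the card synchronously (suspend driver state, flush the I/O workqueue, mask the IRQ, run the platform teardown hook), while resume only schedules resume_work so the slow firmware reload happens in a worker rather than on the PM path; both callbacks are attached through dev_pm_ops. The wiring, skeletal and with hypothetical names:

    #include <linux/device.h>
    #include <linux/pm.h>

    static int example_suspend(struct device *dev)
    {
            /* stop traffic, disable the IRQ, power the device down */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            /* defer the heavy re-init to a worker and return quickly */
            return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
            .suspend = example_suspend,
            .resume  = example_resume,
    };
    /* hooked up via .driver.pm = &example_pm_ops in the bus driver */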
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
new file mode 100644
index 00000000000..0e04a21be0a
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -0,0 +1,922 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * Fills HT capability information field, AMPDU Parameters field, HT extended
30 * capability field, and supported MCS set fields.
31 *
32 * Only the following HT capability information fields are used, all other
33 * fields are always turned off.
34 *
35 * Bit 1 : Supported channel width (0: 20 MHz, 1: Both 20 and 40 MHz)
36 * Bit 4 : Greenfield support (0: Not supported, 1: Supported)
37 * Bit 5 : Short GI for 20 MHz support (0: Not supported, 1: Supported)
38 * Bit 6 : Short GI for 40 MHz support (0: Not supported, 1: Supported)
39 * Bit 7 : Tx STBC (0: Not supported, 1: Supported)
40 * Bit 8-9 : Rx STBC (0: Not supported, X: Support for up to X spatial streams)
41 * Bit 10 : Delayed BA support (0: Not supported, 1: Supported)
42 * Bit 11 : Maximum AMSDU length (0: 3839 octets, 1: 7935 octets)
43 * Bit 14 : 40-MHz intolerant support (0: Not supported, 1: Supported)
44 *
45 * In addition, the following AMPDU Parameters are set -
46 * - Maximum AMPDU length exponent (set to 3)
47 * - Minimum AMPDU start spacing (set to 0 - No restrictions)
48 *
49 * MCS is set for 1x1, with MCS32 for infra mode or ad-hoc mode with 40 MHz
50 * support.
51 *
52 * The RD responder bit is cleared in the extended capability header.
53 */
54void
55mwifiex_fill_cap_info(struct mwifiex_private *priv,
56 struct mwifiex_ie_types_htcap *ht_cap)
57{
58 struct mwifiex_adapter *adapter = priv->adapter;
59 u8 *mcs;
60 int rx_mcs_supp;
61 uint16_t ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
62 uint16_t ht_ext_cap = le16_to_cpu(ht_cap->ht_cap.extended_ht_cap_info);
63
64 if (ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap) &&
65 ISSUPP_CHANWIDTH40(adapter->usr_dot_11n_dev_cap))
66 SETHT_SUPPCHANWIDTH(ht_cap_info);
67 else
68 RESETHT_SUPPCHANWIDTH(ht_cap_info);
69
70 if (ISSUPP_GREENFIELD(adapter->hw_dot_11n_dev_cap) &&
71 ISSUPP_GREENFIELD(adapter->usr_dot_11n_dev_cap))
72 SETHT_GREENFIELD(ht_cap_info);
73 else
74 RESETHT_GREENFIELD(ht_cap_info);
75
76 if (ISSUPP_SHORTGI20(adapter->hw_dot_11n_dev_cap) &&
77 ISSUPP_SHORTGI20(adapter->usr_dot_11n_dev_cap))
78 SETHT_SHORTGI20(ht_cap_info);
79 else
80 RESETHT_SHORTGI20(ht_cap_info);
81
82 if (ISSUPP_SHORTGI40(adapter->hw_dot_11n_dev_cap) &&
83 ISSUPP_SHORTGI40(adapter->usr_dot_11n_dev_cap))
84 SETHT_SHORTGI40(ht_cap_info);
85 else
86 RESETHT_SHORTGI40(ht_cap_info);
87
88 /* No user config for RX STBC yet */
89 if (ISSUPP_RXSTBC(adapter->hw_dot_11n_dev_cap)
90 && ISSUPP_RXSTBC(adapter->usr_dot_11n_dev_cap))
91 SETHT_RXSTBC(ht_cap_info, 1);
92 else
93 RESETHT_RXSTBC(ht_cap_info);
94
95 /* No user config for TX STBC yet */
96 if (ISSUPP_TXSTBC(adapter->hw_dot_11n_dev_cap))
97 SETHT_TXSTBC(ht_cap_info);
98 else
99 RESETHT_TXSTBC(ht_cap_info);
100
101 /* No user config for Delayed BACK yet */
102 if (GET_DELAYEDBACK(adapter->hw_dot_11n_dev_cap))
103 SETHT_DELAYEDBACK(ht_cap_info);
104 else
105 RESETHT_DELAYEDBACK(ht_cap_info);
106
107 if (ISENABLED_40MHZ_INTOLARENT(adapter->usr_dot_11n_dev_cap))
108 SETHT_40MHZ_INTOLARANT(ht_cap_info);
109 else
110 RESETHT_40MHZ_INTOLARANT(ht_cap_info);
111
112 SETAMPDU_SIZE(ht_cap->ht_cap.ampdu_params_info, AMPDU_FACTOR_64K);
113 SETAMPDU_SPACING(ht_cap->ht_cap.ampdu_params_info, 0);
114
115 /* Needs to be changed to support 8k AMSDU receive */
116 RESETHT_MAXAMSDU(ht_cap_info);
117
118 rx_mcs_supp = GET_RXMCSSUPP(adapter->hw_dev_mcs_support);
119
120 mcs = (u8 *)&ht_cap->ht_cap.mcs;
121
122 /* Set MCS for 1x1 */
123 memset(mcs, 0xff, rx_mcs_supp);
124
125 /* Clear all the other values */
126 memset(&mcs[rx_mcs_supp], 0,
127 sizeof(struct ieee80211_mcs_info) - rx_mcs_supp);
128
129 if (priv->bss_mode == MWIFIEX_BSS_MODE_INFRA ||
130 (ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap) &&
131 ISSUPP_CHANWIDTH40(adapter->usr_dot_11n_dev_cap)))
132 /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
133 SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
134
135 /* Clear RD responder bit */
136 RESETHT_EXTCAP_RDG(ht_ext_cap);
137
138 ht_cap->ht_cap.cap_info = cpu_to_le16(ht_cap_info);
139 ht_cap->ht_cap.extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
140}
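
The SETHT_*/RESETHT_* helpers used above come from the driver's private headers, which are not part of this hunk. As a rough sketch of the pattern, assuming the cap_info bit layout documented in the function comment (names and definitions here are illustrative, not the driver's):

	/* Illustrative only -- bit position per the comment above (bit 1:
	 * supported channel width); the real macros live in the mwifiex headers. */
	#define HT_CAP_INFO_CHAN_WIDTH		(1 << 1)
	#define SETHT_SUPPCHANWIDTH(cap_info)	((cap_info) |= HT_CAP_INFO_CHAN_WIDTH)
	#define RESETHT_SUPPCHANWIDTH(cap_info)	((cap_info) &= ~HT_CAP_INFO_CHAN_WIDTH)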
141
142/*
143 * Shows HT capability information fields.
144 *
145 * The following HT capability information fields are supported.
146 * - Maximum AMSDU length (3839 bytes or 7935 bytes)
147 * - Beam forming support
148 * - Greenfield preamble support
149 * - AMPDU support
150 * - MIMO Power Save support
151 * - Rx STBC support
152 * - Tx STBC support
153 * - Short GI for 20 MHz support
154 * - Short GI for 40 MHz support
155 * - LDPC coded packets receive support
156 * - Number of delayed BA streams
157 * - Number of immediate BA streams
158 * - 10 MHz channel width support
159 * - 20 MHz channel width support
160 * - 40 MHz channel width support
161 * - Presence of Tx antenna A/B/C/D
162 * - Presence of Rx antenna A/B/C/D
163 */
164void
165mwifiex_show_dot_11n_dev_cap(struct mwifiex_adapter *adapter, u32 cap)
166{
167 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Max MSDU len = %s octets\n",
168 (ISSUPP_MAXAMSDU(cap) ? "7935" : "3839"));
169 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Beam forming %s\n",
170 (ISSUPP_BEAMFORMING(cap) ? "supported" : "not supported"));
171 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Greenfield preamble %s\n",
172 (ISSUPP_GREENFIELD(cap) ? "supported" : "not supported"));
173 dev_dbg(adapter->dev, "info: GET_HW_SPEC: AMPDU %s\n",
174 (ISSUPP_AMPDU(cap) ? "supported" : "not supported"));
175 dev_dbg(adapter->dev, "info: GET_HW_SPEC: MIMO Power Save %s\n",
176 (ISSUPP_MIMOPS(cap) ? "supported" : "not supported"));
177 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Rx STBC %s\n",
178 (ISSUPP_RXSTBC(cap) ? "supported" : "not supported"));
179 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Tx STBC %s\n",
180 (ISSUPP_TXSTBC(cap) ? "supported" : "not supported"));
181 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Short GI for 40 MHz %s\n",
182 (ISSUPP_SHORTGI40(cap) ? "supported" : "not supported"));
183 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Short GI for 20 MHz %s\n",
184 (ISSUPP_SHORTGI20(cap) ? "supported" : "not supported"));
185 dev_dbg(adapter->dev, "info: GET_HW_SPEC: LDPC coded packet receive %s\n",
186 (ISSUPP_RXLDPC(cap) ? "supported" : "not supported"));
187 dev_dbg(adapter->dev,
188 "info: GET_HW_SPEC: Number of Delayed Block Ack streams = %d\n",
189 GET_DELAYEDBACK(cap));
190 dev_dbg(adapter->dev,
191 "info: GET_HW_SPEC: Number of Immediate Block Ack streams = %d\n",
192 GET_IMMEDIATEBACK(cap));
193 dev_dbg(adapter->dev, "info: GET_HW_SPEC: 40 MHz channel width %s\n",
194 (ISSUPP_CHANWIDTH40(cap) ? "supported" : "not supported"));
195 dev_dbg(adapter->dev, "info: GET_HW_SPEC: 20 MHz channel width %s\n",
196 (ISSUPP_CHANWIDTH20(cap) ? "supported" : "not supported"));
197 dev_dbg(adapter->dev, "info: GET_HW_SPEC: 10 MHz channel width %s\n",
198 (ISSUPP_CHANWIDTH10(cap) ? "supported" : "not supported"));
199
200 if (ISSUPP_RXANTENNAA(cap))
201 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Presence of Rx antenna A\n");
202 
203 if (ISSUPP_RXANTENNAB(cap))
204 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Presence of Rx antenna B\n");
205 
206 if (ISSUPP_RXANTENNAC(cap))
207 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Presence of Rx antenna C\n");
208 
209 if (ISSUPP_RXANTENNAD(cap))
210 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Presence of Rx antenna D\n");
211 
212 if (ISSUPP_TXANTENNAA(cap))
213 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Presence of Tx antenna A\n");
214 
215 if (ISSUPP_TXANTENNAB(cap))
216 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Presence of Tx antenna B\n");
217 
218 if (ISSUPP_TXANTENNAC(cap))
219 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Presence of Tx antenna C\n");
220 
221 if (ISSUPP_TXANTENNAD(cap))
222 dev_dbg(adapter->dev, "info: GET_HW_SPEC: Presence of Tx antenna D\n");
223
224 return;
225}
226
227/*
228 * Shows HT MCS support field.
229 */
230void
231mwifiex_show_dev_mcs_support(struct mwifiex_adapter *adapter, u8 support)
232{
233 dev_dbg(adapter->dev, "info: GET_HW_SPEC: MCSs for %dx%d MIMO\n",
234 GET_RXMCSSUPP(support), GET_TXMCSSUPP(support));
235 return;
236}
237
238/*
239 * This function returns the pointer to an entry in BA Stream
240 * table which matches the requested BA status.
241 */
242static struct mwifiex_tx_ba_stream_tbl *
243mwifiex_11n_get_tx_ba_stream_status(struct mwifiex_private *priv,
244 enum mwifiex_ba_status ba_status)
245{
246 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
247 unsigned long flags;
248
249 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
250 list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
251 if (tx_ba_tsr_tbl->ba_status == ba_status) {
252 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
253 flags);
254 return tx_ba_tsr_tbl;
255 }
256 }
257 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
258 return NULL;
259}
260
261/*
262 * This function handles the command response of a delete block
263 * ack (DELBA) request.
264 *
265 * The function checks the response success status and takes action
266 * accordingly (send an add BA request in case of success, or recreate
267 * the deleted stream in case of failure, if the add BA was also
268 * initiated by us).
269 */
270int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
271 struct host_cmd_ds_command *resp)
272{
273 int tid;
274 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
275 struct host_cmd_ds_11n_delba *del_ba =
276 (struct host_cmd_ds_11n_delba *) &resp->params.del_ba;
277 uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set);
278
279 tid = del_ba_param_set >> DELBA_TID_POS;
280 if (del_ba->del_result == BA_RESULT_SUCCESS) {
281 mwifiex_11n_delete_ba_stream_tbl(priv, tid,
282 del_ba->peer_mac_addr, TYPE_DELBA_SENT,
283 INITIATOR_BIT(del_ba_param_set));
284
285 tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_status(priv,
286 BA_STREAM_SETUP_INPROGRESS);
287 if (tx_ba_tbl)
288 mwifiex_send_addba(priv, tx_ba_tbl->tid,
289 tx_ba_tbl->ra);
290 } else { /*
291 * In case of failure, recreate the deleted stream in case
292 * we initiated the ADDBA
293 */
294 if (INITIATOR_BIT(del_ba_param_set)) {
295 mwifiex_11n_create_tx_ba_stream_tbl(priv,
296 del_ba->peer_mac_addr, tid,
297 BA_STREAM_SETUP_INPROGRESS);
298
299 tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_status(priv,
300 BA_STREAM_SETUP_INPROGRESS);
301 if (tx_ba_tbl)
302 mwifiex_11n_delete_ba_stream_tbl(priv,
303 tx_ba_tbl->tid, tx_ba_tbl->ra,
304 TYPE_DELBA_SENT, true);
305 }
306 }
307
308 return 0;
309}
310
311/*
312 * This function handles the command response of add a block
313 * ack request.
314 *
315 * Handling includes changing the header fields to CPU formats, checking
316 * the response success status and taking actions accordingly (delete the
317 * BA stream table in case of failure).
318 */
319int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
320 struct host_cmd_ds_command *resp)
321{
322 int tid;
323 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
324 (struct host_cmd_ds_11n_addba_rsp *) &resp->params.add_ba_rsp;
325 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
326
327 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
328 & SSN_MASK);
329
330 tid = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
331 & IEEE80211_ADDBA_PARAM_TID_MASK)
332 >> BLOCKACKPARAM_TID_POS;
333 if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
334 tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid,
335 add_ba_rsp->peer_mac_addr);
336 if (tx_ba_tbl) {
337 dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
338 tx_ba_tbl->ba_status = BA_STREAM_SETUP_COMPLETE;
339 } else {
340 dev_err(priv->adapter->dev, "BA stream not created\n");
341 }
342 } else {
343 mwifiex_11n_delete_ba_stream_tbl(priv, tid,
344 add_ba_rsp->peer_mac_addr,
345 TYPE_DELBA_SENT, true);
346 if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
347 priv->aggr_prio_tbl[tid].ampdu_ap =
348 BA_STREAM_NOT_ALLOWED;
349 }
350
351 return 0;
352}
353
354/*
355 * This function handles the command response of 11n configuration request.
356 *
357 * Handling includes changing the header fields into CPU format.
358 */
359int mwifiex_ret_11n_cfg(struct mwifiex_private *priv,
360 struct host_cmd_ds_command *resp,
361 void *data_buf)
362{
363 struct mwifiex_ds_11n_tx_cfg *tx_cfg = NULL;
364 struct host_cmd_ds_11n_cfg *htcfg = &resp->params.htcfg;
365
366 if (data_buf) {
367 tx_cfg = (struct mwifiex_ds_11n_tx_cfg *) data_buf;
368 tx_cfg->tx_htcap = le16_to_cpu(htcfg->ht_tx_cap);
369 tx_cfg->tx_htinfo = le16_to_cpu(htcfg->ht_tx_info);
370 }
371 return 0;
372}
373
374/*
375 * This function prepares command of reconfigure Tx buffer.
376 *
377 * Preparation includes -
378 * - Setting command ID, action and proper size
379 * - Setting Tx buffer size (for SET only)
380 * - Ensuring correct endian-ness
381 */
382int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
383 struct host_cmd_ds_command *cmd, int cmd_action,
384 void *data_buf)
385{
386 struct host_cmd_ds_txbuf_cfg *tx_buf = &cmd->params.tx_buf;
387 u16 action = (u16) cmd_action;
388 u16 buf_size = *((u16 *) data_buf);
389
390 cmd->command = cpu_to_le16(HostCmd_CMD_RECONFIGURE_TX_BUFF);
391 cmd->size =
392 cpu_to_le16(sizeof(struct host_cmd_ds_txbuf_cfg) + S_DS_GEN);
393 tx_buf->action = cpu_to_le16(action);
394 switch (action) {
395 case HostCmd_ACT_GEN_SET:
396 dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", buf_size);
397 tx_buf->buff_size = cpu_to_le16(buf_size);
398 break;
399 case HostCmd_ACT_GEN_GET:
400 default:
401 tx_buf->buff_size = 0;
402 break;
403 }
404 return 0;
405}
406
407/*
408 * This function prepares command of AMSDU aggregation control.
409 *
410 * Preparation includes -
411 * - Setting command ID, action and proper size
412 * - Setting AMSDU control parameters (for SET only)
413 * - Ensuring correct endian-ness
414 */
415int mwifiex_cmd_amsdu_aggr_ctrl(struct mwifiex_private *priv,
416 struct host_cmd_ds_command *cmd,
417 int cmd_action, void *data_buf)
418{
419 struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
420 &cmd->params.amsdu_aggr_ctrl;
421 u16 action = (u16) cmd_action;
422 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl =
423 (struct mwifiex_ds_11n_amsdu_aggr_ctrl *) data_buf;
424
425 cmd->command = cpu_to_le16(HostCmd_CMD_AMSDU_AGGR_CTRL);
426 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_amsdu_aggr_ctrl)
427 + S_DS_GEN);
428 amsdu_ctrl->action = cpu_to_le16(action);
429 switch (action) {
430 case HostCmd_ACT_GEN_SET:
431 amsdu_ctrl->enable = cpu_to_le16(aa_ctrl->enable);
432 amsdu_ctrl->curr_buf_size = 0;
433 break;
434 case HostCmd_ACT_GEN_GET:
435 default:
436 amsdu_ctrl->curr_buf_size = 0;
437 break;
438 }
439 return 0;
440}
441
442/*
443 * This function handles the command response of AMSDU aggregation
444 * control request.
445 *
446 * Handling includes changing the header fields into CPU format.
447 */
448int mwifiex_ret_amsdu_aggr_ctrl(struct mwifiex_private *priv,
449 struct host_cmd_ds_command *resp,
450 void *data_buf)
451{
452 struct mwifiex_ds_11n_amsdu_aggr_ctrl *amsdu_aggr_ctrl = NULL;
453 struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
454 &resp->params.amsdu_aggr_ctrl;
455
456 if (data_buf) {
457 amsdu_aggr_ctrl =
458 (struct mwifiex_ds_11n_amsdu_aggr_ctrl *) data_buf;
459 amsdu_aggr_ctrl->enable = le16_to_cpu(amsdu_ctrl->enable);
460 amsdu_aggr_ctrl->curr_buf_size =
461 le16_to_cpu(amsdu_ctrl->curr_buf_size);
462 }
463 return 0;
464}
465
466/*
467 * This function prepares 11n configuration command.
468 *
469 * Preparation includes -
470 * - Setting command ID, action and proper size
471 * - Setting HT Tx capability and HT Tx information fields
472 * - Ensuring correct endian-ness
473 */
474int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
475 struct host_cmd_ds_command *cmd,
476 u16 cmd_action, void *data_buf)
477{
478 struct host_cmd_ds_11n_cfg *htcfg = &cmd->params.htcfg;
479 struct mwifiex_ds_11n_tx_cfg *txcfg =
480 (struct mwifiex_ds_11n_tx_cfg *) data_buf;
481
482 cmd->command = cpu_to_le16(HostCmd_CMD_11N_CFG);
483 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_11n_cfg) + S_DS_GEN);
484 htcfg->action = cpu_to_le16(cmd_action);
485 htcfg->ht_tx_cap = cpu_to_le16(txcfg->tx_htcap);
486 htcfg->ht_tx_info = cpu_to_le16(txcfg->tx_htinfo);
487 return 0;
488}
489
490/*
491 * This function appends an 11n TLV to a buffer.
492 *
493 * Buffer allocation is the responsibility of the calling
494 * function. No size validation is done here.
495 *
496 * The function fills up the following sections, if applicable -
497 * - HT capability IE
498 * - HT information IE (with channel list)
499 * - 20/40 BSS Coexistence IE
500 * - HT Extended Capabilities IE
501 */
502int
503mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
504 struct mwifiex_bssdescriptor *bss_desc,
505 u8 **buffer)
506{
507 struct mwifiex_ie_types_htcap *ht_cap;
508 struct mwifiex_ie_types_htinfo *ht_info;
509 struct mwifiex_ie_types_chan_list_param_set *chan_list;
510 struct mwifiex_ie_types_2040bssco *bss_co_2040;
511 struct mwifiex_ie_types_extcap *ext_cap;
512 int ret_len = 0;
513
514 if (!buffer || !*buffer)
515 return ret_len;
516
517 if (bss_desc->bcn_ht_cap) {
518 ht_cap = (struct mwifiex_ie_types_htcap *) *buffer;
519 memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
520 ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
521 ht_cap->header.len =
522 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
523 memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
524 (u8 *) bss_desc->bcn_ht_cap +
525 sizeof(struct ieee_types_header),
526 le16_to_cpu(ht_cap->header.len));
527
528 mwifiex_fill_cap_info(priv, ht_cap);
529
530 *buffer += sizeof(struct mwifiex_ie_types_htcap);
531 ret_len += sizeof(struct mwifiex_ie_types_htcap);
532 }
533
534 if (bss_desc->bcn_ht_info) {
535 if (priv->bss_mode == MWIFIEX_BSS_MODE_IBSS) {
536 ht_info = (struct mwifiex_ie_types_htinfo *) *buffer;
537 memset(ht_info, 0,
538 sizeof(struct mwifiex_ie_types_htinfo));
539 ht_info->header.type =
540 cpu_to_le16(WLAN_EID_HT_INFORMATION);
541 ht_info->header.len =
542 cpu_to_le16(sizeof(struct ieee80211_ht_info));
543
544 memcpy((u8 *) ht_info +
545 sizeof(struct mwifiex_ie_types_header),
546 (u8 *) bss_desc->bcn_ht_info +
547 sizeof(struct ieee_types_header),
548 le16_to_cpu(ht_info->header.len));
549
550 if (!ISSUPP_CHANWIDTH40
551 (priv->adapter->hw_dot_11n_dev_cap)
552 || !ISSUPP_CHANWIDTH40(priv->adapter->
553 usr_dot_11n_dev_cap))
554 RESET_CHANWIDTH40(ht_info->ht_info.ht_param);
555
556 *buffer += sizeof(struct mwifiex_ie_types_htinfo);
557 ret_len += sizeof(struct mwifiex_ie_types_htinfo);
558 }
559
560 chan_list =
561 (struct mwifiex_ie_types_chan_list_param_set *) *buffer;
562 memset(chan_list, 0,
563 sizeof(struct mwifiex_ie_types_chan_list_param_set));
564 chan_list->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
565 chan_list->header.len = cpu_to_le16(
566 sizeof(struct mwifiex_ie_types_chan_list_param_set) -
567 sizeof(struct mwifiex_ie_types_header));
568 chan_list->chan_scan_param[0].chan_number =
569 bss_desc->bcn_ht_info->control_chan;
570 chan_list->chan_scan_param[0].radio_type =
571 mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
572
573 if ((ISSUPP_CHANWIDTH40(priv->adapter->hw_dot_11n_dev_cap) &&
574 ISSUPP_CHANWIDTH40(priv->adapter->usr_dot_11n_dev_cap))
575 && ISALLOWED_CHANWIDTH40(bss_desc->bcn_ht_info->ht_param))
576 SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
577 radio_type,
578 GET_SECONDARYCHAN(bss_desc->
579 bcn_ht_info->ht_param));
580
581 *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
582 ret_len += sizeof(struct mwifiex_ie_types_chan_list_param_set);
583 }
584
585 if (bss_desc->bcn_bss_co_2040) {
586 bss_co_2040 = (struct mwifiex_ie_types_2040bssco *) *buffer;
587 memset(bss_co_2040, 0,
588 sizeof(struct mwifiex_ie_types_2040bssco));
589 bss_co_2040->header.type = cpu_to_le16(WLAN_EID_BSS_COEX_2040);
590 bss_co_2040->header.len =
591 cpu_to_le16(sizeof(bss_co_2040->bss_co_2040));
592
593 memcpy((u8 *) bss_co_2040 +
594 sizeof(struct mwifiex_ie_types_header),
595 (u8 *) bss_desc->bcn_bss_co_2040 +
596 sizeof(struct ieee_types_header),
597 le16_to_cpu(bss_co_2040->header.len));
598
599 *buffer += sizeof(struct mwifiex_ie_types_2040bssco);
600 ret_len += sizeof(struct mwifiex_ie_types_2040bssco);
601 }
602
603 if (bss_desc->bcn_ext_cap) {
604 ext_cap = (struct mwifiex_ie_types_extcap *) *buffer;
605 memset(ext_cap, 0, sizeof(struct mwifiex_ie_types_extcap));
606 ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
607 ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
608
609 memcpy((u8 *) ext_cap +
610 sizeof(struct mwifiex_ie_types_header),
611 (u8 *) bss_desc->bcn_ext_cap +
612 sizeof(struct ieee_types_header),
613 le16_to_cpu(ext_cap->header.len));
614
615 *buffer += sizeof(struct mwifiex_ie_types_extcap);
616 ret_len += sizeof(struct mwifiex_ie_types_extcap);
617 }
618
619 return ret_len;
620}
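
The append pattern used for each IE above is the usual type/length/value walk: write a header, copy the body, advance the caller's cursor, and account for the bytes consumed. A generic sketch of the idiom (the helper name and header struct are illustrative, not driver API):

	/* Sketch of the TLV-append idiom; assumes a 2+2 byte little-endian header. */
	struct tlv_hdr {
		__le16 type;
		__le16 len;
	};

	static int append_tlv(u8 **buf, u16 type, const void *body, u16 len)
	{
		struct tlv_hdr *hdr = (struct tlv_hdr *) *buf;

		hdr->type = cpu_to_le16(type);
		hdr->len = cpu_to_le16(len);
		memcpy(*buf + sizeof(*hdr), body, len);
		*buf += sizeof(*hdr) + len;	/* advance the caller's cursor */
		return sizeof(*hdr) + len;
	}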
621
622/*
623 * This function reconfigures the Tx buffer size in firmware.
624 *
625 * This function prepares a firmware command and issues it, if
626 * the current Tx buffer size is different from the one requested.
627 * Maximum configurable Tx buffer size is limited by the HT capability
628 * field value.
629 */
630void
631mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
632 struct mwifiex_bssdescriptor *bss_desc)
633{
634 u16 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_2K;
635 u16 tx_buf = 0;
636 u16 curr_tx_buf_size = 0;
637
638 if (bss_desc->bcn_ht_cap) {
639 if (GETHT_MAXAMSDU(le16_to_cpu(bss_desc->bcn_ht_cap->cap_info)))
640 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_8K;
641 else
642 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_4K;
643 }
644
645 tx_buf = min(priv->adapter->max_tx_buf_size, max_amsdu);
646
647 dev_dbg(priv->adapter->dev, "info: max_amsdu=%d, max_tx_buf=%d\n",
648 max_amsdu, priv->adapter->max_tx_buf_size);
649
650 if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_2K)
651 curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
652 else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_4K)
653 curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
654 else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_8K)
655 curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_8K;
656 if (curr_tx_buf_size != tx_buf)
657 mwifiex_prepare_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
658 HostCmd_ACT_GEN_SET, 0,
659 NULL, &tx_buf);
660
661 return;
662}
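
A worked example of the clamp above, assuming the usual 2K/4K/8K values for the buffer-size constants: if the peer's HT capability advertises the 7935-octet maximum A-MSDU, max_amsdu becomes the 8K constant, but with an adapter max_tx_buf_size of 4K the min() resolves tx_buf to 4K; the reconfigure command is then issued only if the quantized current size differs.

	/* Sketch, assuming MWIFIEX_TX_DATA_BUF_SIZE_{2K,4K,8K} = 2048/4096/8192 */
	u16 max_amsdu = 8192;			/* peer advertises 7935-octet A-MSDU */
	u16 tx_buf = min((u16) 4096, max_amsdu);	/* adapter limit wins: 4096 */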
663
664/*
665 * This function checks if the given pointer is valid entry of
666 * Tx BA Stream table.
667 */
668static int mwifiex_is_tx_ba_stream_ptr_valid(struct mwifiex_private *priv,
669 struct mwifiex_tx_ba_stream_tbl *tx_tbl_ptr)
670{
671 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
672
673 list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
674 if (tx_ba_tsr_tbl == tx_tbl_ptr)
675 return true;
676 }
677
678 return false;
679}
680
681/*
682 * This function deletes the given entry in Tx BA Stream table.
683 *
684 * The function also performs a validity check on the supplied
685 * pointer before trying to delete.
686 */
687void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
688 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl)
689{
690 if (!tx_ba_tsr_tbl ||
691 !mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl))
692 return;
693
694 dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
695
696 list_del(&tx_ba_tsr_tbl->list);
697
698 kfree(tx_ba_tsr_tbl);
699
700 return;
701}
702
703/*
704 * This function deletes all the entries in Tx BA Stream table.
705 */
706void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv)
707{
708 int i;
709 struct mwifiex_tx_ba_stream_tbl *del_tbl_ptr, *tmp_node;
710 unsigned long flags;
711
712 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
713 list_for_each_entry_safe(del_tbl_ptr, tmp_node,
714 &priv->tx_ba_stream_tbl_ptr, list)
715 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, del_tbl_ptr);
716 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
717
718 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
719
720 for (i = 0; i < MAX_NUM_TID; ++i)
721 priv->aggr_prio_tbl[i].ampdu_ap =
722 priv->aggr_prio_tbl[i].ampdu_user;
723}
724
725/*
726 * This function returns the pointer to an entry in BA Stream
727 * table which matches the given RA/TID pair.
728 */
729struct mwifiex_tx_ba_stream_tbl *
730mwifiex_11n_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
731 int tid, u8 *ra)
732{
733 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
734 unsigned long flags;
735
736 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
737 list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
738 if ((!memcmp(tx_ba_tsr_tbl->ra, ra, ETH_ALEN))
739 && (tx_ba_tsr_tbl->tid == tid)) {
740 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
741 flags);
742 return tx_ba_tsr_tbl;
743 }
744 }
745 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
746 return NULL;
747}
748
749/*
750 * This function creates an entry in Tx BA stream table for the
751 * given RA/TID pair.
752 */
753void mwifiex_11n_create_tx_ba_stream_tbl(struct mwifiex_private *priv,
754 u8 *ra, int tid,
755 enum mwifiex_ba_status ba_status)
756{
757 struct mwifiex_tx_ba_stream_tbl *new_node;
758 unsigned long flags;
759
760 if (!mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, ra)) {
761 new_node = kzalloc(sizeof(struct mwifiex_tx_ba_stream_tbl),
762 GFP_ATOMIC);
763 if (!new_node) {
764 dev_err(priv->adapter->dev,
765 "%s: failed to alloc new_node\n", __func__);
766 return;
767 }
768
769 INIT_LIST_HEAD(&new_node->list);
770
771 new_node->tid = tid;
772 new_node->ba_status = ba_status;
773 memcpy(new_node->ra, ra, ETH_ALEN);
774
775 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
776 list_add_tail(&new_node->list, &priv->tx_ba_stream_tbl_ptr);
777 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
778 }
779
780 return;
781}
782
783/*
784 * This function sends an add BA request to the given TID/RA pair.
785 */
786int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
787{
788 struct host_cmd_ds_11n_addba_req add_ba_req;
789 static u8 dialog_tok;
790 int ret;
791
792 dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
793
794 add_ba_req.block_ack_param_set = cpu_to_le16(
795 (u16) ((tid << BLOCKACKPARAM_TID_POS) |
796 (priv->add_ba_param.
797 tx_win_size << BLOCKACKPARAM_WINSIZE_POS) |
798 IMMEDIATE_BLOCK_ACK));
799 add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout);
800
801 ++dialog_tok;
802
803 if (dialog_tok == 0)
804 dialog_tok = 1;
805
806 add_ba_req.dialog_token = dialog_tok;
807 memcpy(&add_ba_req.peer_mac_addr, peer_mac, ETH_ALEN);
808
809 /* We don't wait for the response of this command */
810 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_11N_ADDBA_REQ,
811 0, 0, NULL, &add_ba_req);
812
813 return ret;
814}
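
The block_ack_param_set composed above is a plain bitfield: the TID and window size are shifted into place and OR'd with the immediate-BA policy flag, while the dialog token is a free-running counter that skips zero so a token of 0 never goes on the air. A sketch with assumed shift values (the real BLOCKACKPARAM_* constants are in the driver headers; 802.11 places the TID at bits 2-5 and the buffer size at bits 6-15):

	/* Sketch only: shift values assumed per the 802.11 ADDBA parameter layout. */
	#define BA_POLICY_IMMEDIATE	(1 << 1)
	#define BA_TID_SHIFT		2
	#define BA_WINSIZE_SHIFT	6

	static u16 pack_addba_params(int tid, int win_size)
	{
		return (u16) ((tid << BA_TID_SHIFT) |
			      (win_size << BA_WINSIZE_SHIFT) |
			      BA_POLICY_IMMEDIATE);
	}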
815
816/*
817 * This function sends a delete BA request to the given TID/RA pair.
818 */
819int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
820 int initiator)
821{
822 struct host_cmd_ds_11n_delba delba;
823 int ret;
824 uint16_t del_ba_param_set;
825
826 memset(&delba, 0, sizeof(delba));
827 delba.del_ba_param_set = cpu_to_le16(tid << DELBA_TID_POS);
828
829 del_ba_param_set = le16_to_cpu(delba.del_ba_param_set);
830 if (initiator)
831 del_ba_param_set |= IEEE80211_DELBA_PARAM_INITIATOR_MASK;
832 else
833 del_ba_param_set &= ~IEEE80211_DELBA_PARAM_INITIATOR_MASK;
834
835 memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN);
836
837 /* We don't wait for the response of this command */
838 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_11N_DELBA,
839 HostCmd_ACT_GEN_SET, 0, NULL, &delba);
840
841 return ret;
842}
843
844/*
845 * This function handles the command response of a delete BA request.
846 */
847void mwifiex_11n_delete_ba_stream(struct mwifiex_private *priv, u8 *del_ba)
848{
849 struct host_cmd_ds_11n_delba *cmd_del_ba =
850 (struct host_cmd_ds_11n_delba *) del_ba;
851 uint16_t del_ba_param_set = le16_to_cpu(cmd_del_ba->del_ba_param_set);
852 int tid;
853
854 tid = del_ba_param_set >> DELBA_TID_POS;
855
856 mwifiex_11n_delete_ba_stream_tbl(priv, tid, cmd_del_ba->peer_mac_addr,
857 TYPE_DELBA_RECEIVE,
858 INITIATOR_BIT(del_ba_param_set));
859}
860
861/*
862 * This function retrieves the Rx reordering table.
863 */
864int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
865 struct mwifiex_ds_rx_reorder_tbl *buf)
866{
867 int i;
868 struct mwifiex_ds_rx_reorder_tbl *rx_reo_tbl = buf;
869 struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr;
870 int count = 0;
871 unsigned long flags;
872
873 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
874 list_for_each_entry(rx_reorder_tbl_ptr, &priv->rx_reorder_tbl_ptr,
875 list) {
876 rx_reo_tbl->tid = (u16) rx_reorder_tbl_ptr->tid;
877 memcpy(rx_reo_tbl->ta, rx_reorder_tbl_ptr->ta, ETH_ALEN);
878 rx_reo_tbl->start_win = rx_reorder_tbl_ptr->start_win;
879 rx_reo_tbl->win_size = rx_reorder_tbl_ptr->win_size;
880 for (i = 0; i < rx_reorder_tbl_ptr->win_size; ++i) {
881 if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
882 rx_reo_tbl->buffer[i] = true;
883 else
884 rx_reo_tbl->buffer[i] = false;
885 }
886 rx_reo_tbl++;
887 count++;
888
889 if (count >= MWIFIEX_MAX_RX_BASTREAM_SUPPORTED)
890 break;
891 }
892 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
893
894 return count;
895}
896
897/*
898 * This function retrieves the Tx BA stream table.
899 */
900int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
901 struct mwifiex_ds_tx_ba_stream_tbl *buf)
902{
903 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
904 struct mwifiex_ds_tx_ba_stream_tbl *tx_ba_tbl = buf;
905 int count = 0;
906 unsigned long flags;
907
908 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
909 list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
910 tx_ba_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
911 dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
912 __func__, tx_ba_tbl->tid);
913 memcpy(tx_ba_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
914 tx_ba_tbl++;
915 count++;
916 if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
917 break;
918 }
919 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
920
921 return count;
922}
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
new file mode 100644
index 00000000000..769a27f2b2c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -0,0 +1,178 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_11N_H_
21#define _MWIFIEX_11N_H_
22
23#include "11n_aggr.h"
24#include "11n_rxreorder.h"
25#include "wmm.h"
26
27void mwifiex_show_dot_11n_dev_cap(struct mwifiex_adapter *adapter, u32 cap);
28void mwifiex_show_dev_mcs_support(struct mwifiex_adapter *adapter, u8 support);
29int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
30 struct host_cmd_ds_command *resp);
31int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
32 struct host_cmd_ds_command *resp);
33int mwifiex_ret_11n_cfg(struct mwifiex_private *priv,
34 struct host_cmd_ds_command *resp,
35 void *data_buf);
36int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
37 struct host_cmd_ds_command *cmd,
38 u16 cmd_action, void *data_buf);
39
44int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
45 struct mwifiex_bssdescriptor *bss_desc,
46 u8 **buffer);
47void mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
48 struct mwifiex_bssdescriptor *bss_desc);
49void mwifiex_fill_cap_info(struct mwifiex_private *,
50 struct mwifiex_ie_types_htcap *);
51int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
52 u16 action, int *htcap_cfg);
53void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
54 struct mwifiex_tx_ba_stream_tbl
55 *tx_tbl);
56void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv);
57struct mwifiex_tx_ba_stream_tbl *mwifiex_11n_get_tx_ba_stream_tbl(struct
58 mwifiex_private
59 *priv, int tid,
60 u8 *ra);
61void mwifiex_11n_create_tx_ba_stream_tbl(struct mwifiex_private *priv, u8 *ra,
62 int tid,
63 enum mwifiex_ba_status ba_status);
64int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac);
65int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
66 int initiator);
67void mwifiex_11n_delete_ba_stream(struct mwifiex_private *priv, u8 *del_ba);
68int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
69 struct mwifiex_ds_rx_reorder_tbl *buf);
70int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
71 struct mwifiex_ds_tx_ba_stream_tbl *buf);
72int mwifiex_ret_amsdu_aggr_ctrl(struct mwifiex_private *priv,
73 struct host_cmd_ds_command
74 *resp,
75 void *data_buf);
76int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
77 struct host_cmd_ds_command *cmd,
78 int cmd_action, void *data_buf);
79int mwifiex_cmd_amsdu_aggr_ctrl(struct mwifiex_private *priv,
80 struct host_cmd_ds_command *cmd,
81 int cmd_action,
82 void *data_buf);
83
84/*
85 * This function checks whether AMPDU is allowed or not for a particular TID.
86 */
87static inline u8
88mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
89 struct mwifiex_ra_list_tbl *ptr, int tid)
90{
91 return ((priv->aggr_prio_tbl[tid].ampdu_ap != BA_STREAM_NOT_ALLOWED)
92 ? true : false);
93}
94
95/*
96 * This function checks whether AMSDU is allowed or not for a particular TID.
97 */
98static inline u8
99mwifiex_is_amsdu_allowed(struct mwifiex_private *priv,
100 struct mwifiex_ra_list_tbl *ptr, int tid)
101{
102 return (((priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED)
103 && ((priv->is_data_rate_auto)
104 || !((priv->bitmap_rates[2]) & 0x03)))
105 ? true : false);
106}
107
108/*
109 * This function checks whether a BA stream is available or not.
110 */
111static inline u8
112mwifiex_is_ba_stream_avail(struct mwifiex_private *priv)
113{
114 struct mwifiex_private *pmpriv = NULL;
115 u8 i = 0;
116 u32 ba_stream_num = 0;
117
118 for (i = 0; i < priv->adapter->priv_num; i++) {
119 pmpriv = priv->adapter->priv[i];
120 if (pmpriv)
121 ba_stream_num +=
122 mwifiex_wmm_list_len(priv->adapter,
123 (struct list_head
124 *) &pmpriv->
125 tx_ba_stream_tbl_ptr);
126 }
127
128 return ((ba_stream_num <
129 MWIFIEX_MAX_TX_BASTREAM_SUPPORTED) ? true : false);
130}
131
132/*
133 * This function finds the correct Tx BA stream to delete.
134 *
135 * Upon successfully locating one, both the TID and the RA are returned.
136 */
137static inline u8
138mwifiex_find_stream_to_delete(struct mwifiex_private *priv,
139 struct mwifiex_ra_list_tbl *ptr, int ptr_tid,
140 int *ptid, u8 *ra)
141{
142 int tid;
143 u8 ret = false;
144 struct mwifiex_tx_ba_stream_tbl *tx_tbl;
145 unsigned long flags;
146
147 tid = priv->aggr_prio_tbl[ptr_tid].ampdu_user;
148
149 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
150 list_for_each_entry(tx_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
151 if (tid > priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user) {
152 tid = priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user;
153 *ptid = tx_tbl->tid;
154 memcpy(ra, tx_tbl->ra, ETH_ALEN);
155 ret = true;
156 }
157 }
158 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
159
160 return ret;
161}
162
163/*
164 * This function checks whether BA stream is set up or not.
165 */
166static inline int
167mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
168 struct mwifiex_ra_list_tbl *ptr, int tid)
169{
170 struct mwifiex_tx_ba_stream_tbl *tx_tbl;
171
172 tx_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, ptr->ra);
173 if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl))
174 return true;
175
176 return false;
177}
178#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
new file mode 100644
index 00000000000..c2abced6695
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -0,0 +1,423 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n Aggregation
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27#include "11n_aggr.h"
28
29/*
30 * Creates an AMSDU subframe for aggregation into one AMSDU packet.
31 *
32 * The resultant AMSDU subframe format is -
33 *
34 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
35 * | DA | SA | Length | SNAP header | MSDU |
36 * | data[0..5] | data[6..11] | | | data[14..] |
37 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
38 * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes-->
39 *
40 * This function also computes the amount of padding required to make the
41 * buffer length multiple of 4 bytes.
42 *
43 * Data => |DA|SA|SNAP-TYPE|........ .|
44 * MSDU => |DA|SA|Length|SNAP|...... ..|
45 */
46static int
47mwifiex_11n_form_amsdu_pkt(struct mwifiex_adapter *adapter,
48 struct sk_buff *skb_aggr,
49 struct sk_buff *skb_src, int *pad)
50
51{
52 int dt_offset;
53 struct rfc_1042_hdr snap = {
54 0xaa, /* LLC DSAP */
55 0xaa, /* LLC SSAP */
56 0x03, /* LLC CTRL */
57 {0x00, 0x00, 0x00}, /* SNAP OUI */
58 0x0000 /* SNAP type */
59 /*
60 * This field will be overwritten
61 * later with ethertype
62 */
63 };
64 struct tx_packet_hdr *tx_header = NULL;
65
66 skb_put(skb_aggr, sizeof(*tx_header));
67
68 tx_header = (struct tx_packet_hdr *) skb_aggr->data;
69
70 /* Copy DA and SA */
71 dt_offset = 2 * ETH_ALEN;
72 memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
73
74 /* Copy SNAP header */
75 snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset);
76 dt_offset += sizeof(u16);
77
78 memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
79
80 skb_pull(skb_src, dt_offset);
81
82 /* Update Length field */
83 tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
84
85 /* Add payload */
86 skb_put(skb_aggr, skb_src->len);
87 memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
88 skb_src->len);
89 *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
90 LLC_SNAP_LEN)) & 3)) : 0;
91 skb_put(skb_aggr, *pad);
92
93 return skb_aggr->len + *pad;
94}
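
The *pad computation above rounds each subframe up to a 4-byte boundary, as required between A-MSDU subframes. An equivalent, more conventional spelling of the same arithmetic (a sketch, not part of the patch):

	int body_len = skb_src->len + LLC_SNAP_LEN;
	int pad = (4 - (body_len & 3)) & 3;	/* 0..3 bytes to the next 4-byte boundary */
	/* or, with the kernel helper: pad = ALIGN(body_len, 4) - body_len; */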
95
96/*
97 * Adds TxPD to AMSDU header.
98 *
99 * Each AMSDU packet will contain one TxPD at the beginning,
100 * followed by multiple AMSDU subframes.
101 */
102static void
103mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
104 struct sk_buff *skb)
105{
106 struct txpd *local_tx_pd;
107
108 skb_push(skb, sizeof(*local_tx_pd));
109
110 local_tx_pd = (struct txpd *) skb->data;
111 memset(local_tx_pd, 0, sizeof(struct txpd));
112
113 /* Original priority has been overwritten */
114 local_tx_pd->priority = (u8) skb->priority;
115 local_tx_pd->pkt_delay_2ms =
116 mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
117 local_tx_pd->bss_num = priv->bss_num;
118 local_tx_pd->bss_type = priv->bss_type;
119 /* The data payload starts immediately after struct txpd */
120 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
121 local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
122 local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
123 sizeof(*local_tx_pd));
124
125 if (local_tx_pd->tx_control == 0)
126 /* TxCtrl set by user or default */
127 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
128
129 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
130 (priv->adapter->pps_uapsd_mode)) {
131 if (mwifiex_check_last_packet_indication(priv)) {
132 priv->adapter->tx_lock_flag = true;
133 local_tx_pd->flags =
134 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
135 }
136 }
137}
138
139/*
140 * Counts the number of subframes in an aggregate packet.
141 *
142 * This function parses an aggregate packet buffer, looking for
143 * subframes and counting the number of such subframe found. The
144 * function automatically skips the DA/SA fields at the beginning
145 * of each subframe and padding at the end.
146 */
147static int
148mwifiex_11n_get_num_aggr_pkts(u8 *data, int total_pkt_len)
149{
150 int pkt_count = 0, pkt_len, pad;
151
152 while (total_pkt_len > 0) {
153 /* Length will be in network format, change it to host */
154 pkt_len = ntohs((*(__be16 *)(data + 2 * ETH_ALEN)));
155 pad = (((pkt_len + sizeof(struct ethhdr)) & 3)) ?
156 (4 - ((pkt_len + sizeof(struct ethhdr)) & 3)) : 0;
157 data += pkt_len + pad + sizeof(struct ethhdr);
158 total_pkt_len -= pkt_len + pad + sizeof(struct ethhdr);
159 ++pkt_count;
160 }
161
162 return pkt_count;
163}
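
A worked example of the walk above, for one subframe whose 802.3 length field reads 71:

	/* raw  = 71 + sizeof(struct ethhdr) = 85
	 * pad  = (85 & 3) ? 4 - (85 & 3) : 0 = 3
	 * step = 71 + 3 + 14                 = 88 bytes to the next subframe
	 */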
164
165/*
166 * De-aggregate received packets.
167 *
168 * This function parses the received aggregate buffer, extracts each subframe,
169 * strips off the SNAP header from them and sends the data portion for further
170 * processing.
171 *
172 * Each subframe body is copied onto a separate buffer, which are freed by
173 * upper layer after processing. The function also performs sanity tests on
174 * the received buffer.
175 */
176int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
177 struct sk_buff *skb)
178{
179 u16 pkt_len;
180 int total_pkt_len;
181 u8 *data;
182 int pad;
183 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
184 struct rxpd *local_rx_pd = (struct rxpd *) skb->data;
185 struct sk_buff *skb_daggr;
186 struct mwifiex_rxinfo *rx_info_daggr = NULL;
187 int ret = -1;
188 struct rx_packet_hdr *rx_pkt_hdr;
189 struct mwifiex_adapter *adapter = priv->adapter;
190 u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
191
192 data = (u8 *) local_rx_pd + local_rx_pd->rx_pkt_offset;
193 total_pkt_len = local_rx_pd->rx_pkt_length;
194
195 /* Sanity test */
196 if (total_pkt_len > MWIFIEX_RX_DATA_BUF_SIZE) {
197 dev_err(adapter->dev, "total pkt len greater than buffer"
198 " size %d\n", total_pkt_len);
199 return -1;
200 }
201
202 rx_info->use_count = mwifiex_11n_get_num_aggr_pkts(data, total_pkt_len);
203
204 while (total_pkt_len > 0) {
205 rx_pkt_hdr = (struct rx_packet_hdr *) data;
206 /* Length will be in network format, change it to host */
207 pkt_len = ntohs((*(__be16 *) (data + 2 * ETH_ALEN)));
208 if (pkt_len > total_pkt_len) {
209 dev_err(adapter->dev, "pkt_len %d > total_pkt_len %d\n",
210 pkt_len, total_pkt_len);
211 break;
212 }
213
214 pad = (((pkt_len + sizeof(struct ethhdr)) & 3)) ?
215 (4 - ((pkt_len + sizeof(struct ethhdr)) & 3)) : 0;
216
217 total_pkt_len -= pkt_len + pad + sizeof(struct ethhdr);
218
219 if (memcmp(&rx_pkt_hdr->rfc1042_hdr,
220 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)) == 0) {
221 memmove(data + LLC_SNAP_LEN, data, 2 * ETH_ALEN);
222 data += LLC_SNAP_LEN;
223 pkt_len += sizeof(struct ethhdr) - LLC_SNAP_LEN;
224 } else {
225 *(u16 *) (data + 2 * ETH_ALEN) = (u16) 0;
226 pkt_len += sizeof(struct ethhdr);
227 }
228
229 skb_daggr = dev_alloc_skb(pkt_len);
230 if (!skb_daggr) {
231 dev_err(adapter->dev, "%s: failed to alloc skb_daggr\n",
232 __func__);
233 return -1;
234 }
235 rx_info_daggr = MWIFIEX_SKB_RXCB(skb_daggr);
236
237 rx_info_daggr->bss_index = rx_info->bss_index;
238 skb_daggr->tstamp = skb->tstamp;
239 rx_info_daggr->parent = skb;
240 skb_daggr->priority = skb->priority;
241 skb_put(skb_daggr, pkt_len);
242 memcpy(skb_daggr->data, data, pkt_len);
243
244 ret = mwifiex_recv_packet(adapter, skb_daggr);
245
246 switch (ret) {
247 case -EINPROGRESS:
248 break;
249 case -1:
250 dev_err(adapter->dev, "deaggr: host_to_card failed\n"); /* fall through */
251 case 0:
252 mwifiex_recv_packet_complete(adapter, skb_daggr, ret);
253 break;
254 default:
255 break;
256 }
257
258 data += pkt_len + pad;
259 }
260
261 return ret;
262}
263
264/*
265 * Create aggregated packet.
266 *
267 * This function creates an aggregated MSDU packet, by combining buffers
268 * from the RA list. Each individual buffer is encapsulated as an AMSDU
269 * subframe and all such subframes are concatenated together to form the
270 * AMSDU packet.
271 *
272 * A TxPD is also added to the front of the resultant AMSDU packets for
273 * transmission. The resultant packets format is -
274 *
275 * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+
276 * | TxPD |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame|
277 * | | 1 | 2 | .. | n |
278 * +---- ~ ----+------ ~ ------+------ ~ ------+ .. +------ ~ ------+
279 */
280int
281mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
282 struct mwifiex_ra_list_tbl *pra_list, int headroom,
283 int ptrindex, unsigned long ra_list_flags)
284 __releases(&priv->wmm.ra_list_spinlock)
285{
286 struct mwifiex_adapter *adapter = priv->adapter;
287 struct sk_buff *skb_aggr, *skb_src;
288 struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
289 int pad = 0;
290 int ret = 0;
291 struct mwifiex_tx_param tx_param;
292 struct txpd *ptx_pd = NULL;
293
294 if (skb_queue_empty(&pra_list->skb_head)) {
295 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
296 ra_list_flags);
297 return 0;
298 }
299 skb_src = skb_peek(&pra_list->skb_head);
300 tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
301 skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
302 if (!skb_aggr) {
303 dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
304 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
305 ra_list_flags);
306 return -1;
307 }
308 skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
309 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
310
311 tx_info_aggr->bss_index = tx_info_src->bss_index;
312 skb_aggr->priority = skb_src->priority;
313
314 while (skb_src && ((skb_headroom(skb_aggr) + skb_src->len
315 + LLC_SNAP_LEN)
316 <= adapter->tx_buf_size)) {
317
318 if (!skb_queue_empty(&pra_list->skb_head))
319 skb_src = skb_dequeue(&pra_list->skb_head);
320 else
321 break;	/* queue drained; nothing left to aggregate */
322 
323 pra_list->total_pkts_size -= skb_src->len;
324
325 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
326 ra_list_flags);
327 mwifiex_11n_form_amsdu_pkt(adapter, skb_aggr, skb_src, &pad);
328
329 mwifiex_write_data_complete(adapter, skb_src, 0);
330
331 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
332
333 if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
334 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
335 ra_list_flags);
336 return -1;
337 }
338
339 if (!skb_queue_empty(&pra_list->skb_head))
340 skb_src = skb_peek(&pra_list->skb_head);
341 else
342 skb_src = NULL;
343 }
344
345 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
346
347 /* Last AMSDU packet does not need padding */
348 skb_trim(skb_aggr, skb_aggr->len - pad);
349
350 /* Form AMSDU */
351 mwifiex_11n_form_amsdu_txpd(priv, skb_aggr);
352 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
353 ptx_pd = (struct txpd *)skb_aggr->data;
354
355 skb_push(skb_aggr, headroom);
356
357 tx_param.next_pkt_len = ((pra_list->total_pkts_size) ?
358 (((pra_list->total_pkts_size) >
359 adapter->tx_buf_size) ? adapter->
360 tx_buf_size : pra_list->total_pkts_size +
361 LLC_SNAP_LEN + sizeof(struct txpd)) : 0);
362 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
363 skb_aggr->data,
364 skb_aggr->len, &tx_param);
365 switch (ret) {
366 case -EBUSY:
367 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
368 if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
369 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
370 ra_list_flags);
371 mwifiex_write_data_complete(adapter, skb_aggr, -1);
372 return -1;
373 }
374 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
375 (adapter->pps_uapsd_mode) &&
376 (adapter->tx_lock_flag)) {
377 priv->adapter->tx_lock_flag = false;
378 ptx_pd->flags = 0;
379 }
380
381 skb_queue_tail(&pra_list->skb_head, skb_aggr);
382
383 pra_list->total_pkts_size += skb_aggr->len;
384
385 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
386 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
387 ra_list_flags);
388 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
389 break;
390 case -1:
391 adapter->data_sent = false;
392 dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
393 __func__, ret);
394 adapter->dbg.num_tx_host_to_card_failure++;
395 mwifiex_write_data_complete(adapter, skb_aggr, ret);
396 return 0;
397 case -EINPROGRESS:
398 adapter->data_sent = false;
399 break;
400 case 0:
401 mwifiex_write_data_complete(adapter, skb_aggr, ret);
402 break;
403 default:
404 break;
405 }
406 if (ret != -EBUSY) {
407 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
408 if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
409 priv->wmm.packets_out[ptrindex]++;
410 priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list;
411 }
412 /* Now bss_prio_cur pointer points to next node */
413 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
414 list_first_entry(
415 &adapter->bss_prio_tbl[priv->bss_priority]
416 .bss_prio_cur->list,
417 struct mwifiex_bss_prio_node, list);
418 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
419 ra_list_flags);
420 }
421
422 return 0;
423}
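
The nested ternary that computes tx_param.next_pkt_len above is hard to read; it is equivalent to this flattened form (a sketch of the same logic, not part of the patch):

	u32 next_len = 0;

	if (pra_list->total_pkts_size) {
		if (pra_list->total_pkts_size > adapter->tx_buf_size)
			next_len = adapter->tx_buf_size;
		else
			next_len = pra_list->total_pkts_size +
				   LLC_SNAP_LEN + sizeof(struct txpd);
	}
	tx_param.next_pkt_len = next_len;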
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
new file mode 100644
index 00000000000..9c6dca7ab02
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -0,0 +1,32 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n Aggregation
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_11N_AGGR_H_
21#define _MWIFIEX_11N_AGGR_H_
22
23#define PKT_TYPE_AMSDU 0xE6
24
25int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
26 struct sk_buff *skb);
27int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
28 struct mwifiex_ra_list_tbl *ptr, int headroom,
29 int ptr_index, unsigned long flags)
30 __releases(&priv->wmm.ra_list_spinlock);
31
32#endif /* !_MWIFIEX_11N_AGGR_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
new file mode 100644
index 00000000000..8e94e620e6f
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -0,0 +1,637 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27#include "11n_rxreorder.h"
28
29/*
30 * This function processes a received packet and forwards
31 * it to the kernel/upper layer.
32 */
33static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
34{
35 int ret = 0;
36 struct mwifiex_adapter *adapter = priv->adapter;
37
38 ret = mwifiex_process_rx_packet(adapter, (struct sk_buff *) payload);
39 return ret;
40}
41
42/*
43 * This function dispatches all packets in the Rx reorder table.
44 *
45 * There could be holes in the buffer, which are skipped by the function.
46 * Since the buffer is linear, the function uses rotation to simulate
47 * circular buffer.
48 */
49static int
50mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
51 struct mwifiex_rx_reorder_tbl
52 *rx_reor_tbl_ptr, int start_win)
53{
54 int no_pkt_to_send, i, xchg;
55 void *rx_tmp_ptr = NULL;
56 unsigned long flags;
57
58 no_pkt_to_send = (start_win > rx_reor_tbl_ptr->start_win) ?
59 min((start_win - rx_reor_tbl_ptr->start_win),
60 rx_reor_tbl_ptr->win_size) : rx_reor_tbl_ptr->win_size;
61
62 for (i = 0; i < no_pkt_to_send; ++i) {
63 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
64 rx_tmp_ptr = NULL;
65 if (rx_reor_tbl_ptr->rx_reorder_ptr[i]) {
66 rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i];
67 rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL;
68 }
69 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
70 if (rx_tmp_ptr)
71 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
72 }
73
74 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
75 /*
76 * We don't have a circular buffer, hence use rotation to simulate
77 * circular buffer
78 */
79 xchg = rx_reor_tbl_ptr->win_size - no_pkt_to_send;
80 for (i = 0; i < xchg; ++i) {
81 rx_reor_tbl_ptr->rx_reorder_ptr[i] =
82 rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i];
83 rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i] = NULL;
84 }
85
86 rx_reor_tbl_ptr->start_win = start_win;
87 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
88
89 return 0;
90}
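
The rotation mentioned in the comments is a left shift of the window array once the leading slots have been dispatched, so that index 0 always tracks start_win. The same idea as a standalone sketch:

	/* Sketch: slide the window left by n slots after dispatching the
	 * first n entries (which are already NULL), keeping slot 0 == head. */
	static void window_rotate_left(void *win[], int win_size, int n)
	{
		int i;

		for (i = 0; i < win_size - n; i++) {
			win[i] = win[i + n];
			win[i + n] = NULL;
		}
	}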
91
92/*
93 * This function dispatches all packets in the Rx reorder table until
94 * a hole is found.
95 *
96 * The start window is adjusted automatically when a hole is located.
97 * Since the buffer is linear, the function uses rotation to simulate
98 * circular buffer.
99 */
100static int
101mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
102 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
103{
104 int i, j, xchg;
105 void *rx_tmp_ptr = NULL;
106 unsigned long flags;
107
108 for (i = 0; i < rx_reor_tbl_ptr->win_size; ++i) {
109 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
110 if (!rx_reor_tbl_ptr->rx_reorder_ptr[i]) {
111 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
112 break;
113 }
114 rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i];
115 rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL;
116 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
117 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
118 }
119
120 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
121 /*
122 * We don't have a circular buffer, hence use rotation to simulate
123 * circular buffer
124 */
125 if (i > 0) {
126 xchg = rx_reor_tbl_ptr->win_size - i;
127 for (j = 0; j < xchg; ++j) {
128 rx_reor_tbl_ptr->rx_reorder_ptr[j] =
129 rx_reor_tbl_ptr->rx_reorder_ptr[i + j];
130 rx_reor_tbl_ptr->rx_reorder_ptr[i + j] = NULL;
131 }
132 }
133 rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
134 &(MAX_TID_VALUE - 1);
135 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
136 return 0;
137}
138
139/*
140 * This function deletes the Rx reorder table and frees the memory.
141 *
142 * The function stops the associated timer and dispatches all the
143 * pending packets in the Rx reorder table before deletion.
144 */
145static void
146mwifiex_11n_delete_rx_reorder_tbl_entry(struct mwifiex_private *priv,
147 struct mwifiex_rx_reorder_tbl
148 *rx_reor_tbl_ptr)
149{
150 unsigned long flags;
151
152 if (!rx_reor_tbl_ptr)
153 return;
154
155 mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr,
156 (rx_reor_tbl_ptr->start_win +
157 rx_reor_tbl_ptr->win_size)
158 &(MAX_TID_VALUE - 1));
159
160 del_timer(&rx_reor_tbl_ptr->timer_context.timer);
161
162 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
163 list_del(&rx_reor_tbl_ptr->list);
164 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
165
166 kfree(rx_reor_tbl_ptr->rx_reorder_ptr);
167 kfree(rx_reor_tbl_ptr);
168}
169
170/*
171 * This function returns the pointer to an entry in Rx reordering
172 * table which matches the given TA/TID pair.
173 */
174static struct mwifiex_rx_reorder_tbl *
175mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
176{
177 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
178 unsigned long flags;
179
180 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
181 list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
182 if ((!memcmp(rx_reor_tbl_ptr->ta, ta, ETH_ALEN))
183 && (rx_reor_tbl_ptr->tid == tid)) {
184 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
185 flags);
186 return rx_reor_tbl_ptr;
187 }
188 }
189 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
190
191 return NULL;
192}
193
194/*
195 * This function finds the last sequence number used in the packets
196 * buffered in Rx reordering table.
197 */
198static int
199mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr)
200{
201 int i;
202
203 for (i = (rx_reorder_tbl_ptr->win_size - 1); i >= 0; --i)
204 if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
205 return i;
206
207 return -1;
208}
209
210/*
211 * This function flushes all the packets in Rx reordering table.
212 *
213 * The function checks if any packets are currently buffered in the
214 * table or not. In case there are packets available, it dispatches
215 * them and then dumps the Rx reordering table.
216 */
217static void
218mwifiex_flush_data(unsigned long context)
219{
220 struct reorder_tmr_cnxt *reorder_cnxt =
221 (struct reorder_tmr_cnxt *) context;
222 int start_win;
223
224 start_win = mwifiex_11n_find_last_seq_num(reorder_cnxt->ptr);
225 if (start_win >= 0) {
226 dev_dbg(reorder_cnxt->priv->adapter->dev,
227 "info: flush data %d\n", start_win);
228 mwifiex_11n_dispatch_pkt_until_start_win(reorder_cnxt->priv,
229 reorder_cnxt->ptr,
230 ((reorder_cnxt->ptr->start_win +
231 start_win + 1) & (MAX_TID_VALUE - 1)));
232 }
233}
234
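/*
 * Illustrative example: if slots 0-4 of the window are empty but a
 * packet is buffered in slot 5, the flush timer dispatches everything
 * up to and including slot 5 (start_win + last index + 1), so a single
 * lost frame cannot stall the receive path indefinitely.
 */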
235/*
236 * This function creates an entry in Rx reordering table for the
237 * given TA/TID.
238 *
239 * The function also initializes the entry with sequence number, window
240 * size as well as initializes the timer.
241 *
242 * If the received TA/TID pair is already present, all the pending
243 * packets are dispatched and the window start is moved up to the SSN.
244 */
245static void
246mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
247 int tid, int win_size, int seq_num)
248{
250 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr, *new_node;
251 u16 last_seq = 0;
252 unsigned long flags;
253
254 /*
255	 * If we get a TID/TA pair which is already present, dispatch all
256	 * the packets and move the window start up to the SSN
257 */
258 rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
259 if (rx_reor_tbl_ptr) {
260 mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr,
261 seq_num);
262 return;
263 }
264 /* if !rx_reor_tbl_ptr then create one */
265 new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
266 if (!new_node) {
267 dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n",
268 __func__);
269 return;
270 }
271
272 INIT_LIST_HEAD(&new_node->list);
273 new_node->tid = tid;
274 memcpy(new_node->ta, ta, ETH_ALEN);
275 new_node->start_win = seq_num;
276 if (mwifiex_queuing_ra_based(priv))
277 /* TODO for adhoc */
278 dev_dbg(priv->adapter->dev,
279 "info: ADHOC:last_seq=%d start_win=%d\n",
280 last_seq, new_node->start_win);
281 else
282 last_seq = priv->rx_seq[tid];
283
284 if (last_seq >= new_node->start_win)
285 new_node->start_win = last_seq + 1;
286
287 new_node->win_size = win_size;
288
289	new_node->rx_reorder_ptr = kcalloc(win_size, sizeof(void *),
290					   GFP_KERNEL);
291 if (!new_node->rx_reorder_ptr) {
292		kfree(new_node);
293 dev_err(priv->adapter->dev,
294 "%s: failed to alloc reorder_ptr\n", __func__);
295 return;
296 }
297
298 new_node->timer_context.ptr = new_node;
299 new_node->timer_context.priv = priv;
300
301 init_timer(&new_node->timer_context.timer);
302 new_node->timer_context.timer.function = mwifiex_flush_data;
303 new_node->timer_context.timer.data =
304 (unsigned long) &new_node->timer_context;
305
308
309 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
310 list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
311 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
314}
315
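/*
 * Example (illustrative): if the ADDBA SSN is 95 but the last frame
 * already delivered for this TID carried sequence number 100, the
 * last_seq check above bumps start_win to 101 so that frames 96-100,
 * which went up before aggregation started, are not buffered or
 * delivered twice.
 */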
316/*
317 * This function prepares command for adding a BA request.
318 *
319 * Preparation includes -
320 * - Setting command ID and proper size
321 * - Setting add BA request buffer
322 * - Ensuring correct endian-ness
323 */
324int mwifiex_cmd_11n_addba_req(struct mwifiex_private *priv,
325 struct host_cmd_ds_command *cmd, void *data_buf)
326{
327 struct host_cmd_ds_11n_addba_req *add_ba_req =
328 (struct host_cmd_ds_11n_addba_req *)
329 &cmd->params.add_ba_req;
330
331 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
332 cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
333 memcpy(add_ba_req, data_buf, sizeof(*add_ba_req));
334
335 return 0;
336}
337
338/*
339 * This function prepares command for adding a BA response.
340 *
341 * Preparation includes -
342 * - Setting command ID and proper size
343 * - Setting add BA response buffer
344 * - Ensuring correct endian-ness
345 */
346int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
347 struct host_cmd_ds_command *cmd,
348 void *data_buf)
349{
350 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
351 (struct host_cmd_ds_11n_addba_rsp *)
352 &cmd->params.add_ba_rsp;
353 struct host_cmd_ds_11n_addba_req *cmd_addba_req =
354 (struct host_cmd_ds_11n_addba_req *) data_buf;
355 u8 tid = 0;
356 int win_size = 0;
357 uint16_t block_ack_param_set;
358
359 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
360 cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
361
362 memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr,
363 ETH_ALEN);
364 add_ba_rsp->dialog_token = cmd_addba_req->dialog_token;
365 add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo;
366 add_ba_rsp->ssn = cmd_addba_req->ssn;
367
368 block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set);
369 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
370 >> BLOCKACKPARAM_TID_POS;
371 add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
372 block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
373	/* We do not support A-MSDU inside A-MPDU, hence reset the bit */
374 block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
375 block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
376 BLOCKACKPARAM_WINSIZE_POS);
377 add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
378 win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
379 & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
380 >> BLOCKACKPARAM_WINSIZE_POS;
381 cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);
382
383 mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
384 tid, win_size, le16_to_cpu(cmd_addba_req->ssn));
385 return 0;
386}
387
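/*
 * Worked example (illustrative): for an incoming ADDBA request with
 * block_ack_param_set = 0x0406, bits 2-5 carry the TID
 * ((0x0406 & 0x3C) >> 2 = 1), bit 1 is the immediate block-ack policy
 * and bits 6-15 carry the buffer size (0x0406 >> 6 = 16). The response
 * built above clears the A-MSDU bit (bit 0) and replaces the buffer
 * size field with priv->add_ba_param.rx_win_size.
 */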
388/*
389 * This function prepares command for deleting a BA request.
390 *
391 * Preparation includes -
392 * - Setting command ID and proper size
393 * - Setting del BA request buffer
394 * - Ensuring correct endian-ness
395 */
396int mwifiex_cmd_11n_delba(struct mwifiex_private *priv,
397 struct host_cmd_ds_command *cmd, void *data_buf)
398{
399 struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *)
400 &cmd->params.del_ba;
401
402 cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
403 cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
404 memcpy(del_ba, data_buf, sizeof(*del_ba));
405
406 return 0;
407}
408
409/*
410 * This function identifies if Rx reordering is needed for a received packet.
411 *
412 * In case reordering is required, the function will do the reordering
413 * before sending it to kernel.
414 *
415 * The Rx reorder table is checked first with the received TID/TA pair. If
416 * not found, the received packet is dispatched immediately. But if found,
417 * the packet is reordered and all the packets in the updated Rx reordering
418 * table are dispatched until a hole is found.
419 *
420 * For sequence number less than the starting window, the packet is dropped.
421 */
422int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
423 u16 seq_num, u16 tid,
424 u8 *ta, u8 pkt_type, void *payload)
425{
426 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
427 int start_win, end_win, win_size;
428 int ret = 0;
429 u16 pkt_index = 0;
430
431 rx_reor_tbl_ptr =
432 mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
433 tid, ta);
434 if (!rx_reor_tbl_ptr) {
435 if (pkt_type != PKT_TYPE_BAR)
436 mwifiex_11n_dispatch_pkt(priv, payload);
437 return 0;
438 }
439 start_win = rx_reor_tbl_ptr->start_win;
440 win_size = rx_reor_tbl_ptr->win_size;
441 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
442	mod_timer(&rx_reor_tbl_ptr->timer_context.timer, jiffies +
443		  msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
445
446 /*
447	 * If seq_num is less than the starting window, ignore and drop
448	 * the packet
449 */
450	if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {	/* Wrap */
451 if (seq_num >= ((start_win + (TWOPOW11)) & (MAX_TID_VALUE - 1))
452 && (seq_num < start_win))
453 return -1;
454 } else if ((seq_num < start_win)
455 || (seq_num > (start_win + (TWOPOW11)))) {
456 return -1;
457 }
458
459 /*
460 * If this packet is a BAR we adjust seq_num as
461 * WinStart = seq_num
462 */
463 if (pkt_type == PKT_TYPE_BAR)
464 seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
465
466 if (((end_win < start_win)
467 && (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win)))
468 && (seq_num > end_win)) || ((end_win > start_win)
469 && ((seq_num > end_win) || (seq_num < start_win)))) {
470 end_win = seq_num;
471 if (((seq_num - win_size) + 1) >= 0)
472 start_win = (end_win - win_size) + 1;
473 else
474 start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
475 ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
476 rx_reor_tbl_ptr, start_win);
477
478 if (ret)
479 return ret;
480 }
481
482 if (pkt_type != PKT_TYPE_BAR) {
483 if (seq_num >= start_win)
484 pkt_index = seq_num - start_win;
485 else
486			pkt_index = (seq_num + MAX_TID_VALUE) - start_win;
487
488 if (rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index])
489 return -1;
490
491 rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index] = payload;
492 }
493
494 /*
495 * Dispatch all packets sequentially from start_win until a
496 * hole is found and adjust the start_win appropriately
497 */
498 ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
499
500 return ret;
501}
502
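/*
 * Illustrative example: with start_win = 10 and win_size = 8, end_win
 * is 17. A frame with seq_num 13 is simply buffered at pkt_index 3. A
 * frame with seq_num 20 lies outside the window, so the code above
 * slides the window (start_win becomes 20 - 8 + 1 = 13), dispatching
 * frames 10-12 first, and only then buffers the new frame at index 7.
 */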
503/*
504 * This function deletes an entry for a given TID/TA pair.
505 *
506 * The TID/TA are taken from del BA event body.
507 */
508void
509mwifiex_11n_delete_ba_stream_tbl(struct mwifiex_private *priv, int tid,
510 u8 *peer_mac, u8 type, int initiator)
511{
512 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
513 struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
514 u8 cleanup_rx_reorder_tbl;
515 unsigned long flags;
516
517 if (type == TYPE_DELBA_RECEIVE)
518 cleanup_rx_reorder_tbl = (initiator) ? true : false;
519 else
520 cleanup_rx_reorder_tbl = (initiator) ? false : true;
521
522 dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d, "
523 "initiator=%d\n", peer_mac, tid, initiator);
524
525 if (cleanup_rx_reorder_tbl) {
526 rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
527 peer_mac);
528 if (!rx_reor_tbl_ptr) {
529 dev_dbg(priv->adapter->dev,
530 "event: TID, TA not found in table\n");
531 return;
532 }
533 mwifiex_11n_delete_rx_reorder_tbl_entry(priv, rx_reor_tbl_ptr);
534 } else {
535 ptx_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, peer_mac);
536 if (!ptx_tbl) {
537 dev_dbg(priv->adapter->dev,
538 "event: TID, RA not found in table\n");
539 return;
540 }
541
542 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
543 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
544 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
545 }
546}
547
548/*
549 * This function handles the command response of an add BA response.
550 *
551 * Handling includes changing the header fields into CPU format and
552 * creating the stream, provided the add BA is accepted.
553 */
554int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
555 struct host_cmd_ds_command *resp)
556{
557 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
558 (struct host_cmd_ds_11n_addba_rsp *)
559 &resp->params.add_ba_rsp;
560 int tid, win_size;
561 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr = NULL;
562 uint16_t block_ack_param_set;
563
564 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
565
566 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
567 >> BLOCKACKPARAM_TID_POS;
568 /*
569 * Check if we had rejected the ADDBA, if yes then do not create
570 * the stream
571 */
572 if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
573 win_size = (block_ack_param_set &
574 IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
575 >> BLOCKACKPARAM_WINSIZE_POS;
576
577 dev_dbg(priv->adapter->dev, "cmd: ADDBA RSP: %pM"
578 " tid=%d ssn=%d win_size=%d\n",
579 add_ba_rsp->peer_mac_addr,
580 tid, add_ba_rsp->ssn, win_size);
581 } else {
582		dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d\n",
583 add_ba_rsp->peer_mac_addr, tid);
584
585 rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv,
586 tid, add_ba_rsp->peer_mac_addr);
587 if (rx_reor_tbl_ptr)
588 mwifiex_11n_delete_rx_reorder_tbl_entry(priv,
589 rx_reor_tbl_ptr);
590 }
591
592 return 0;
593}
594
595/*
596 * This function handles BA stream timeout event by preparing and sending
597 * a command to the firmware.
598 */
599void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
600 struct host_cmd_ds_11n_batimeout *event)
601{
602 struct host_cmd_ds_11n_delba delba;
603
604 memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba));
605 memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN);
606
607 delba.del_ba_param_set |=
608 cpu_to_le16((u16) event->tid << DELBA_TID_POS);
609 delba.del_ba_param_set |= cpu_to_le16(
610 (u16) event->origninator << DELBA_INITIATOR_POS);
611 delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
612 mwifiex_prepare_cmd(priv, HostCmd_CMD_11N_DELBA, 0, 0, NULL, &delba);
615}
616
617/*
618 * This function cleans up the Rx reorder table by deleting all the entries
619 * and re-initializing.
620 */
621void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
622{
623 struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
624 unsigned long flags;
625
626 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
627 list_for_each_entry_safe(del_tbl_ptr, tmp_node,
628 &priv->rx_reorder_tbl_ptr, list) {
629 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
630 mwifiex_11n_delete_rx_reorder_tbl_entry(priv, del_tbl_ptr);
631 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
632 }
633 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
634
635 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
636 memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
637}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
new file mode 100644
index 00000000000..42f56903574
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -0,0 +1,67 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_11N_RXREORDER_H_
21#define _MWIFIEX_11N_RXREORDER_H_
22
23#define MIN_FLUSH_TIMER_MS 50
24
25#define PKT_TYPE_BAR 0xE7
26#define MAX_TID_VALUE (2 << 11)
27#define TWOPOW11 (2 << 10)
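/* MAX_TID_VALUE (4096) is the size of the 802.11 sequence number space;
 * TWOPOW11 (2048) is half of it, used for the wrap-around check in
 * mwifiex_11n_rx_reorder_pkt().
 */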
28
29#define BLOCKACKPARAM_TID_POS 2
30#define BLOCKACKPARAM_AMSDU_SUPP_MASK 0x1
31#define BLOCKACKPARAM_WINSIZE_POS 6
32#define DELBA_TID_POS 12
33#define DELBA_INITIATOR_POS 11
34#define TYPE_DELBA_SENT 1
35#define TYPE_DELBA_RECEIVE 2
36#define IMMEDIATE_BLOCK_ACK 0x2
37
38#define ADDBA_RSP_STATUS_ACCEPT 0
39
40int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
41			       u16 seq_num,
42			       u16 tid, u8 *ta,
43			       u8 pkt_type, void *payload);
44void mwifiex_11n_delete_ba_stream_tbl(struct mwifiex_private *priv, int tid,
45				      u8 *peer_mac, u8 type,
46 int initiator);
47void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
48 struct host_cmd_ds_11n_batimeout *event);
49int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
50 struct host_cmd_ds_command
51 *resp);
52int mwifiex_cmd_11n_delba(struct mwifiex_private *priv,
53 struct host_cmd_ds_command *cmd,
54 void *data_buf);
55int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
56 struct host_cmd_ds_command
57 *cmd, void *data_buf);
58int mwifiex_cmd_11n_addba_req(struct mwifiex_private *priv,
59 struct host_cmd_ds_command *cmd,
60 void *data_buf);
61void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv);
62struct mwifiex_rx_reorder_tbl *
63mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv,
64			       int tid,
65			       u8 *ta);
66
67#endif /* _MWIFIEX_11N_RXREORDER_H_ */
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
new file mode 100644
index 00000000000..86962920cef
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -0,0 +1,21 @@
1config MWIFIEX
2 tristate "Marvell WiFi-Ex Driver"
3 depends on CFG80211
4 select LIB80211
5 ---help---
6 This adds support for wireless adapters based on Marvell
7 802.11n chipsets.
8
9 If you choose to build it as a module, it will be called
10 mwifiex.
11
12config MWIFIEX_SDIO
13 tristate "Marvell WiFi-Ex Driver for SD8787"
14 depends on MWIFIEX && MMC
15 select FW_LOADER
16 ---help---
17 This adds support for wireless adapters based on Marvell
18 8787 chipset with SDIO interface.
19
20 If you choose to build it as a module, it will be called
21 mwifiex_sdio.
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
new file mode 100644
index 00000000000..42cb733ea33
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -0,0 +1,41 @@
1#
2# Copyright (C) 2011, Marvell International Ltd.
3#
4# This software file (the "File") is distributed by Marvell International
5# Ltd. under the terms of the GNU General Public License Version 2, June 1991
6# (the "License"). You may use, redistribute and/or modify this File in
7# accordance with the terms and conditions of the License, a copy of which
8# is available by writing to the Free Software Foundation, Inc.,
9# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
10# worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
11#
12# THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
13# IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
14# ARE EXPRESSLY DISCLAIMED. The License provides additional details about
15# this warranty disclaimer.
16
17
18mwifiex-y += main.o
19mwifiex-y += init.o
20mwifiex-y += cfp.o
21mwifiex-y += cmdevt.o
22mwifiex-y += util.o
23mwifiex-y += txrx.o
24mwifiex-y += wmm.o
25mwifiex-y += 11n.o
26mwifiex-y += 11n_aggr.o
27mwifiex-y += 11n_rxreorder.o
28mwifiex-y += scan.o
29mwifiex-y += join.o
30mwifiex-y += sta_ioctl.o
31mwifiex-y += sta_cmd.o
32mwifiex-y += sta_cmdresp.o
33mwifiex-y += sta_event.o
34mwifiex-y += sta_tx.o
35mwifiex-y += sta_rx.o
36mwifiex-y += cfg80211.o
37mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
38obj-$(CONFIG_MWIFIEX) += mwifiex.o
39
40mwifiex_sdio-y += sdio.o
41obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
new file mode 100644
index 00000000000..338377f7093
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/README
@@ -0,0 +1,204 @@
1# Copyright (C) 2011, Marvell International Ltd.
2#
3# This software file (the "File") is distributed by Marvell International
4# Ltd. under the terms of the GNU General Public License Version 2, June 1991
5# (the "License"). You may use, redistribute and/or modify this File in
6# accordance with the terms and conditions of the License, a copy of which
7# is available by writing to the Free Software Foundation, Inc.,
8# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
9# worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
10#
11# THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
12# IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
13# ARE EXPRESSLY DISCLAIMED. The License provides additional details about
14# this warranty disclaimer.
15
16
17===============================================================================
18 U S E R M A N U A L
19
201) FOR DRIVER INSTALL
21
22 a) Copy sd8787.bin to /lib/firmware/mrvl/ directory,
23 create the directory if it doesn't exist.
24 b) Install WLAN driver,
25 insmod mwifiex.ko
26 c) Uninstall WLAN driver,
27 ifconfig mlanX down
28 rmmod mwifiex
29
30
312) FOR DRIVER CONFIGURATION AND INFO
32 The configurations can be done either using the 'iw' user space
33 utility or debugfs.
34
35 a) 'iw' utility commands
36
37 The following are some useful iw commands:
38
39iw dev mlan0 scan
40
41 This command will trigger a scan.
42 The command will then display the scan table entries.
43
44iw dev mlan0 connect -w <SSID> [<freq in MHz>] [<bssid>] [key 0:abcde d:1123456789a]
45 The above command can be used to connect to an AP with a particular SSID.
46 The AP's operating frequency, or even the bssid, can be specified. If the AP is using
47 WEP encryption, WEP keys can be specified in the command.
48 Note: A scan command (iw dev mlan0 scan) should be issued before every connection attempt.
49
50iw dev mlan0 disconnect
51 This command will be used to disconnect from an AP.
52
53
54iw dev mlan0 ibss join <SSID> <freq in MHz> [fixed-freq] [fixed-bssid] [key 0:abcde]
55 The command will be used to join or create an ibss. Optionally, the operating frequency,
56 bssid and the security related parameters can be specified while joining/creating
57 an ibss.
58
59iw dev mlan0 ibss leave
60 The command will be used to leave an ibss network.
61
62iw dev mlan0 link
63 The command will be used to get the connection status. It returns parameters
64 such as SSID, operating frequency, rx/tx packets, signal strength and tx bitrate.
65
66 Apart from the 'iw' utility, all standard configurations using the 'iwconfig' utility are also supported.
67
68 b) Debugfs interface
69
70 The debugfs interface can be used for configurations and for getting
71 some useful information from the driver.
72 The section below explains the configurations that can be
73 done.
74
75 Mount debugfs to /debugfs mount point:
76
77 mkdir /debugfs
78 mount -t debugfs debugfs /debugfs
79
80 The information is provided in /debugfs/mwifiex/mlanX/:
81
82iw reg set <country code>
83 The command will be used to change the regulatory domain.
84
85iw reg get
86 The command will be used to get current regulatory domain.
87
88info
89 This command is used to get driver info.
90
91 Usage:
92 cat info
93
94 driver_name = "mwifiex"
95 driver_version = <driver_name, driver_version, (firmware_version)>
96 interface_name = "mlanX"
97 bss_mode = "Ad-hoc" | "Managed" | "Auto" | "Unknown"
98 media_state = "Disconnected" | "Connected"
99 mac_address = <6-byte adapter MAC address>
100	multicast_count = <multicast address count>
101 essid = <current SSID>
102 bssid = <current BSSID>
103 channel = <current channel>
104 region_code = <current region code>
105	multicast_address[n] = <multicast address>
106 num_tx_bytes = <number of bytes sent to device>
107 num_rx_bytes = <number of bytes received from device and sent to kernel>
108 num_tx_pkts = <number of packets sent to device>
109 num_rx_pkts = <number of packets received from device and sent to kernel>
110 num_tx_pkts_dropped = <number of Tx packets dropped by driver>
111 num_rx_pkts_dropped = <number of Rx packets dropped by driver>
112 num_tx_pkts_err = <number of Tx packets failed to send to device>
113 num_rx_pkts_err = <number of Rx packets failed to receive from device>
114 carrier "on" | "off"
115 tx queue "stopped" | "started"
116
117 The following debug info are provided in /debugfs/mwifiex/mlanX/debug:
118
119 int_counter = <interrupt count, cleared when interrupt handled>
120 wmm_ac_vo = <number of packets sent to device from WMM AcVo queue>
121 wmm_ac_vi = <number of packets sent to device from WMM AcVi queue>
122 wmm_ac_be = <number of packets sent to device from WMM AcBE queue>
123 wmm_ac_bk = <number of packets sent to device from WMM AcBK queue>
124 max_tx_buf_size = <maximum Tx buffer size>
125 tx_buf_size = <current Tx buffer size>
126 curr_tx_buf_size = <current Tx buffer size>
127 ps_mode = <0/1, CAM mode/PS mode>
128 ps_state = <0/1/2/3, full power state/awake state/pre-sleep state/sleep state>
129 is_deep_sleep = <0/1, not deep sleep state/deep sleep state>
130 wakeup_dev_req = <0/1, wakeup device not required/required>
131 wakeup_tries = <wakeup device count, cleared when device awake>
132 hs_configured = <0/1, host sleep not configured/configured>
133 hs_activated = <0/1, extended host sleep not activated/activated>
134 num_tx_timeout = <number of Tx timeout>
135 num_cmd_timeout = <number of timeout commands>
136 timeout_cmd_id = <command id of the last timeout command>
137 timeout_cmd_act = <command action of the last timeout command>
138 last_cmd_id = <command id of the last several commands sent to device>
139 last_cmd_act = <command action of the last several commands sent to device>
140 last_cmd_index = <0 based last command index>
141 last_cmd_resp_id = <command id of the last several command responses received from device>
142 last_cmd_resp_index = <0 based last command response index>
143 last_event = <event id of the last several events received from device>
144 last_event_index = <0 based last event index>
145 num_cmd_h2c_fail = <number of commands failed to send to device>
146 num_cmd_sleep_cfm_fail = <number of sleep confirm failed to send to device>
147 num_tx_h2c_fail = <number of data packets failed to send to device>
148 num_evt_deauth = <number of deauthenticated events received from device>
149 num_evt_disassoc = <number of disassociated events received from device>
150 num_evt_link_lost = <number of link lost events received from device>
151 num_cmd_deauth = <number of deauthenticate commands sent to device>
152 num_cmd_assoc_ok = <number of associate commands with success return>
153 num_cmd_assoc_fail = <number of associate commands with failure return>
154 cmd_sent = <0/1, send command resources available/sending command to device>
155 data_sent = <0/1, send data resources available/sending data to device>
156 mp_rd_bitmap = <SDIO multi-port read bitmap>
157 mp_wr_bitmap = <SDIO multi-port write bitmap>
158 cmd_resp_received = <0/1, no cmd response to process/response received and yet to process>
159 event_received = <0/1, no event to process/event received and yet to process>
160 ioctl_pending = <number of ioctl pending>
161 tx_pending = <number of Tx packet pending>
162 rx_pending = <number of Rx packet pending>
163
164
1653) FOR DRIVER CONFIGURATION
166
167regrdwr
168 This command is used to read/write the adapter register.
169
170 Usage:
171 echo " <type> <offset> [value]" > regrdwr
172 cat regrdwr
173
174 where the parameters are,
175 <type>: 1:MAC/SOC, 2:BBP, 3:RF, 4:PMIC, 5:CAU
176 <offset>: offset of register
177 [value]: value to be written
178
179 Examples:
180 echo "1 0xa060" > regrdwr : Read the MAC register
181 echo "1 0xa060 0x12" > regrdwr : Write the MAC register
182 echo "1 0xa794 0x80000000" > regrdwr
183 : Write 0x80000000 to MAC register
184rdeeprom
185 This command is used to read the EEPROM contents of the card.
186
187 Usage:
188 echo "<offset> <length>" > rdeeprom
189 cat rdeeprom
190
191 where the parameters are,
192 <offset>: multiples of 4
193 <length>: 4-20, multiples of 4
194
195 Example:
196 echo "0 20" > rdeeprom : Read 20 bytes of EEPROM data from offset 0
197
198getlog
199 This command is used to get the statistics available in the station.
200 Usage:
201
202 cat getlog
203
204===============================================================================
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
new file mode 100644
index 00000000000..80f367f27ef
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -0,0 +1,1517 @@
1/*
2 * Marvell Wireless LAN device driver: CFG80211
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "cfg80211.h"
21#include "main.h"
22
23/*
24 * This function maps the nl802.11 channel type into driver channel type.
25 *
26 * The mapping is as follows -
27 * NL80211_CHAN_NO_HT -> NO_SEC_CHANNEL
28 * NL80211_CHAN_HT20 -> NO_SEC_CHANNEL
29 * NL80211_CHAN_HT40PLUS -> SEC_CHANNEL_ABOVE
30 * NL80211_CHAN_HT40MINUS -> SEC_CHANNEL_BELOW
31 * Others -> NO_SEC_CHANNEL
32 */
33static int
34mwifiex_cfg80211_channel_type_to_mwifiex_channels(enum nl80211_channel_type
35 channel_type)
36{
37 int channel;
38 switch (channel_type) {
39 case NL80211_CHAN_NO_HT:
40 case NL80211_CHAN_HT20:
41 channel = NO_SEC_CHANNEL;
42 break;
43 case NL80211_CHAN_HT40PLUS:
44 channel = SEC_CHANNEL_ABOVE;
45 break;
46 case NL80211_CHAN_HT40MINUS:
47 channel = SEC_CHANNEL_BELOW;
48 break;
49 default:
50 channel = NO_SEC_CHANNEL;
51 }
52 return channel;
53}
54
55/*
56 * This function maps the driver channel type into nl802.11 channel type.
57 *
58 * The mapping is as follows -
59 * NO_SEC_CHANNEL -> NL80211_CHAN_HT20
60 * SEC_CHANNEL_ABOVE -> NL80211_CHAN_HT40PLUS
61 * SEC_CHANNEL_BELOW -> NL80211_CHAN_HT40MINUS
62 * Others -> NL80211_CHAN_HT20
63 */
64static enum nl80211_channel_type
65mwifiex_channels_to_cfg80211_channel_type(int channel_type)
66{
67 int channel;
68 switch (channel_type) {
69 case NO_SEC_CHANNEL:
70 channel = NL80211_CHAN_HT20;
71 break;
72 case SEC_CHANNEL_ABOVE:
73 channel = NL80211_CHAN_HT40PLUS;
74 break;
75 case SEC_CHANNEL_BELOW:
76 channel = NL80211_CHAN_HT40MINUS;
77 break;
78 default:
79 channel = NL80211_CHAN_HT20;
80 }
81 return channel;
82}
83
84/*
85 * This function checks whether WEP is set.
86 */
87static int
88mwifiex_is_alg_wep(u32 cipher)
89{
90 int alg = 0;
91
92 switch (cipher) {
93 case MWIFIEX_ENCRYPTION_MODE_WEP40:
94 case MWIFIEX_ENCRYPTION_MODE_WEP104:
95 alg = 1;
96 break;
97 default:
98 alg = 0;
99 break;
100 }
101 return alg;
102}
103
104/*
105 * This function maps the given cipher type into driver specific type.
106 *
107 * It also sets a flag to indicate whether WPA is enabled or not.
108 *
109 * The mapping table is -
110 * Input cipher Driver cipher type WPA enabled?
111 * ------------ ------------------ ------------
112 * IW_AUTH_CIPHER_NONE MWIFIEX_ENCRYPTION_MODE_NONE No
113 * WLAN_CIPHER_SUITE_WEP40 MWIFIEX_ENCRYPTION_MODE_WEP40 No
114 * WLAN_CIPHER_SUITE_WEP104 MWIFIEX_ENCRYPTION_MODE_WEP104 No
115 * WLAN_CIPHER_SUITE_TKIP MWIFIEX_ENCRYPTION_MODE_TKIP Yes
116 * WLAN_CIPHER_SUITE_CCMP MWIFIEX_ENCRYPTION_MODE_CCMP Yes
117 * Others -1 No
118 */
119static int
120mwifiex_get_mwifiex_cipher(u32 cipher, int *wpa_enabled)
121{
122 int encrypt_mode;
123
124 if (wpa_enabled)
125 *wpa_enabled = 0;
126 switch (cipher) {
127 case IW_AUTH_CIPHER_NONE:
128 encrypt_mode = MWIFIEX_ENCRYPTION_MODE_NONE;
129 break;
130 case WLAN_CIPHER_SUITE_WEP40:
131 encrypt_mode = MWIFIEX_ENCRYPTION_MODE_WEP40;
132 break;
133 case WLAN_CIPHER_SUITE_WEP104:
134 encrypt_mode = MWIFIEX_ENCRYPTION_MODE_WEP104;
135 break;
136 case WLAN_CIPHER_SUITE_TKIP:
137 encrypt_mode = MWIFIEX_ENCRYPTION_MODE_TKIP;
138 if (wpa_enabled)
139 *wpa_enabled = 1;
140 break;
141 case WLAN_CIPHER_SUITE_CCMP:
142 encrypt_mode = MWIFIEX_ENCRYPTION_MODE_CCMP;
143 if (wpa_enabled)
144 *wpa_enabled = 1;
145 break;
146 default:
147 encrypt_mode = -1;
148 }
149
150 return encrypt_mode;
151}
152
153/*
154 * This function retrieves the private structure from kernel wiphy structure.
155 */
156static void *mwifiex_cfg80211_get_priv(struct wiphy *wiphy)
157{
158 return (void *) (*(unsigned long *) wiphy_priv(wiphy));
159}
160
161/*
162 * CFG802.11 operation handler to delete a network key.
163 */
164static int
165mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
166 u8 key_index, bool pairwise, const u8 *mac_addr)
167{
168 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
169 int ret = 0;
170
171 ret = mwifiex_set_encode(priv, NULL, 0, key_index, 1);
172 if (ret) {
173		wiphy_err(wiphy, "failed to delete crypto keys\n");
174 return -EFAULT;
175 }
176
177 wiphy_dbg(wiphy, "info: crypto keys deleted\n");
178 return 0;
179}
180
181/*
182 * CFG802.11 operation handler to set Tx power.
183 */
184static int
185mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
186 enum nl80211_tx_power_setting type,
187 int dbm)
188{
189 int ret = 0;
190 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
191
192 ret = mwifiex_set_tx_power(priv, type, dbm);
193
194 return ret;
195}
196
197/*
198 * CFG802.11 operation handler to set Power Save option.
199 *
200 * The timeout value, if provided, is currently ignored.
201 */
202static int
203mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
204 struct net_device *dev,
205 bool enabled, int timeout)
206{
207 int ret = 0;
208 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
209
210 if (timeout)
211 wiphy_dbg(wiphy,
212 "info: ignoring the timeout value"
213 " for IEEE power save\n");
214
215 ret = mwifiex_drv_set_power(priv, enabled);
216
217 return ret;
218}
219
220/*
221 * CFG802.11 operation handler to set the default network key.
222 */
223static int
224mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
225 u8 key_index, bool unicast,
226 bool multicast)
227{
228 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
229 int ret;
230
231 ret = mwifiex_set_encode(priv, NULL, 0, key_index, 0);
232
233 wiphy_dbg(wiphy, "info: set default Tx key index\n");
234
235 if (ret)
236 return -EFAULT;
237
238 return 0;
239}
240
241/*
242 * CFG802.11 operation handler to add a network key.
243 */
244static int
245mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
246 u8 key_index, bool pairwise, const u8 *mac_addr,
247 struct key_params *params)
248{
249 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
250 int ret = 0;
251 int encrypt_mode;
252
253 encrypt_mode = mwifiex_get_mwifiex_cipher(params->cipher, NULL);
254
255 if (encrypt_mode != -1)
256 ret = mwifiex_set_encode(priv, params->key, params->key_len,
257 key_index, 0);
258
259 wiphy_dbg(wiphy, "info: crypto keys added\n");
260
261 if (ret)
262 return -EFAULT;
263
264 return 0;
265}
266
267/*
268 * This function sends domain information to the firmware.
269 *
270 * The following information is passed to the firmware -
271 * - Country codes
272 * - Sub bands (first channel, number of channels, maximum Tx power)
273 */
274static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
275{
276 u8 no_of_triplet = 0;
277 struct ieee80211_country_ie_triplet *t;
278 u8 no_of_parsed_chan = 0;
279 u8 first_chan = 0, next_chan = 0, max_pwr = 0;
280 u8 i, flag = 0;
281 enum ieee80211_band band;
282 struct ieee80211_supported_band *sband;
283 struct ieee80211_channel *ch;
284 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
285 struct mwifiex_adapter *adapter = priv->adapter;
286 struct mwifiex_802_11d_domain_reg *domain_info = &adapter->domain_reg;
287 int ret = 0;
288
289 /* Set country code */
290 domain_info->country_code[0] = priv->country_code[0];
291 domain_info->country_code[1] = priv->country_code[1];
292 domain_info->country_code[2] = ' ';
293
294 band = mwifiex_band_to_radio_type(adapter->config_bands);
295 if (!wiphy->bands[band]) {
296		wiphy_err(wiphy, "11D: failed to set domain info in FW\n");
297 return -1;
298 }
299
300 sband = wiphy->bands[band];
301
302 for (i = 0; i < sband->n_channels ; i++) {
303 ch = &sband->channels[i];
304 if (ch->flags & IEEE80211_CHAN_DISABLED)
305 continue;
306
307 if (!flag) {
308 flag = 1;
309 first_chan = (u32) ch->hw_value;
310 next_chan = first_chan;
311 max_pwr = ch->max_power;
312 no_of_parsed_chan = 1;
313 continue;
314 }
315
316 if (ch->hw_value == next_chan + 1 &&
317 ch->max_power == max_pwr) {
318 next_chan++;
319 no_of_parsed_chan++;
320 } else {
321 t = &domain_info->triplet[no_of_triplet];
322 t->chans.first_channel = first_chan;
323 t->chans.num_channels = no_of_parsed_chan;
324 t->chans.max_power = max_pwr;
325 no_of_triplet++;
326 first_chan = (u32) ch->hw_value;
327 next_chan = first_chan;
328 max_pwr = ch->max_power;
329 no_of_parsed_chan = 1;
330 }
331 }
332
333 if (flag) {
334 t = &domain_info->triplet[no_of_triplet];
335 t->chans.first_channel = first_chan;
336 t->chans.num_channels = no_of_parsed_chan;
337 t->chans.max_power = max_pwr;
338 no_of_triplet++;
339 }
340
341 domain_info->no_of_triplet = no_of_triplet;
342 /* Send cmd to FW to set domain info */
343 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
344 HostCmd_ACT_GEN_SET, 0, NULL, NULL);
345 if (ret)
346		wiphy_err(wiphy, "11D: failed to set domain info in FW\n");
347
348 return ret;
349}
350
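/*
 * Illustrative example: for a 2.4 GHz domain allowing channels 1-11 at
 * a uniform 20 dBm, the loop above collapses the channel list into a
 * single country IE triplet (first_channel = 1, num_channels = 11,
 * max_power = 20); any gap in channel numbers or change in max power
 * starts a new triplet.
 */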
351/*
352 * CFG802.11 regulatory domain callback function.
353 *
354 * This function is called when the regulatory domain is changed due to the
355 * following reasons -
356 * - Set by driver
357 * - Set by system core
358 * - Set by user
359 * - Set by Country IE
360 */
361static int mwifiex_reg_notifier(struct wiphy *wiphy,
362 struct regulatory_request *request)
363{
364 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
365
366 wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for domain"
367 " %c%c\n", request->alpha2[0], request->alpha2[1]);
368
369 memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
370
371 switch (request->initiator) {
372 case NL80211_REGDOM_SET_BY_DRIVER:
373 case NL80211_REGDOM_SET_BY_CORE:
374 case NL80211_REGDOM_SET_BY_USER:
375 break;
376 /* Todo: apply driver specific changes in channel flags based
377 on the request initiator if necessary. */
378 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
379 break;
380 }
381 mwifiex_send_domain_info_cmd_fw(wiphy);
382
383 return 0;
384}
385
386/*
387 * This function sets the RF channel.
388 *
389 * This function creates multiple IOCTL requests, populates them accordingly
390 * and issues them to set the band/channel and frequency.
391 */
392static int
393mwifiex_set_rf_channel(struct mwifiex_private *priv,
394 struct ieee80211_channel *chan,
395 enum nl80211_channel_type channel_type)
396{
397 struct mwifiex_chan_freq_power cfp;
398 int ret = 0;
399 int status = 0;
400 struct mwifiex_ds_band_cfg band_cfg;
401 int mode;
402 u8 wait_option = MWIFIEX_IOCTL_WAIT;
403 u32 config_bands = 0;
404 struct wiphy *wiphy = priv->wdev->wiphy;
405
406 mode = mwifiex_drv_get_mode(priv, wait_option);
407
408 if (chan) {
409 memset(&band_cfg, 0, sizeof(band_cfg));
410 /* Set appropriate bands */
411 if (chan->band == IEEE80211_BAND_2GHZ)
412 config_bands = BAND_B | BAND_G | BAND_GN;
413 else
414 config_bands = BAND_AN | BAND_A;
415 if (mode == MWIFIEX_BSS_MODE_INFRA
416 || mode == MWIFIEX_BSS_MODE_AUTO) {
417 band_cfg.config_bands = config_bands;
418 } else if (mode == MWIFIEX_BSS_MODE_IBSS) {
419 band_cfg.config_bands = config_bands;
420 band_cfg.adhoc_start_band = config_bands;
421 }
422 /* Set channel offset */
423 band_cfg.sec_chan_offset =
424 mwifiex_cfg80211_channel_type_to_mwifiex_channels
425 (channel_type);
426 status = mwifiex_radio_ioctl_band_cfg(priv, HostCmd_ACT_GEN_SET,
427 &band_cfg);
428
429 if (status)
430 return -EFAULT;
431 mwifiex_send_domain_info_cmd_fw(wiphy);
432 }
433
434 wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and "
435 "mode %d\n", config_bands, band_cfg.sec_chan_offset, mode);
436 if (!chan)
437 return ret;
438
439 memset(&cfp, 0, sizeof(cfp));
440 cfp.freq = chan->center_freq;
441 /* Convert frequency to channel */
442 cfp.channel = ieee80211_frequency_to_channel(chan->center_freq);
443
444 status = mwifiex_bss_ioctl_channel(priv, HostCmd_ACT_GEN_SET, &cfp);
445 if (status)
446 return -EFAULT;
447
448 ret = mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
449
450 return ret;
451}
452
453/*
454 * CFG802.11 operation handler to set channel.
455 *
456 * This function can only be used when station is not connected.
457 */
458static int
459mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
460 struct ieee80211_channel *chan,
461 enum nl80211_channel_type channel_type)
462{
463 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
464
465 if (priv->media_connected) {
466 wiphy_err(wiphy, "This setting is valid only when station "
467 "is not connected\n");
468 return -EINVAL;
469 }
470
471 return mwifiex_set_rf_channel(priv, chan, channel_type);
472}
473
474/*
475 * This function sets the fragmentation threshold.
476 *
477 * This function creates an IOCTL request, populates it accordingly
478 * and issues an IOCTL.
479 *
480 * The fragmentation threshold value must lie between MWIFIEX_FRAG_MIN_VALUE
481 * and MWIFIEX_FRAG_MAX_VALUE.
482 */
483static int
484mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
485{
486 int ret = 0;
487 int status = 0;
488 struct mwifiex_wait_queue *wait = NULL;
489 u8 wait_option = MWIFIEX_IOCTL_WAIT;
490
491 if (frag_thr < MWIFIEX_FRAG_MIN_VALUE
492 || frag_thr > MWIFIEX_FRAG_MAX_VALUE)
493 return -EINVAL;
494
495 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
496 if (!wait)
497 return -ENOMEM;
498
499 status = mwifiex_snmp_mib_ioctl(priv, wait, FRAG_THRESH_I,
500 HostCmd_ACT_GEN_SET, &frag_thr);
501
502 if (mwifiex_request_ioctl(priv, wait, status, wait_option))
503 ret = -EFAULT;
504
505 kfree(wait);
506 return ret;
507}
508
509/*
510 * This function sets the RTS threshold.
511 *
512 * This function creates an IOCTL request, populates it accordingly
513 * and issues an IOCTL.
514 */
515static int
516mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
517{
518 int ret = 0;
519 struct mwifiex_wait_queue *wait = NULL;
520 int status = 0;
521 u8 wait_option = MWIFIEX_IOCTL_WAIT;
522
523 if (rts_thr < MWIFIEX_RTS_MIN_VALUE || rts_thr > MWIFIEX_RTS_MAX_VALUE)
524 rts_thr = MWIFIEX_RTS_MAX_VALUE;
525
526 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
527 if (!wait)
528 return -ENOMEM;
529
530 status = mwifiex_snmp_mib_ioctl(priv, wait, RTS_THRESH_I,
531 HostCmd_ACT_GEN_SET, &rts_thr);
532
533 if (mwifiex_request_ioctl(priv, wait, status, wait_option))
534 ret = -EFAULT;
535
536 kfree(wait);
537 return ret;
538}
539
540/*
541 * CFG802.11 operation handler to set wiphy parameters.
542 *
543 * This function can be used to set the RTS threshold and the
544 * Fragmentation threshold of the driver.
545 */
546static int
547mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
548{
549 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
550
551 int ret = 0;
552
553 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
554 ret = mwifiex_set_rts(priv, wiphy->rts_threshold);
555
556 if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
557 ret = mwifiex_set_frag(priv, wiphy->frag_threshold);
558
559 return ret;
560}
561
562/*
563 * CFG802.11 operation handler to change interface type.
564 *
565 * This function creates an IOCTL request, populates it accordingly
566 * and issues an IOCTL.
567 *
568 * The function also maps the CFG802.11 mode type into driver mode type.
569 * NL80211_IFTYPE_ADHOC -> MWIFIEX_BSS_MODE_IBSS
570 * NL80211_IFTYPE_STATION -> MWIFIEX_BSS_MODE_INFRA
571 * NL80211_IFTYPE_UNSPECIFIED -> MWIFIEX_BSS_MODE_AUTO
572 */
573static int
574mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
575 struct net_device *dev,
576 enum nl80211_iftype type, u32 *flags,
577 struct vif_params *params)
578{
579 int ret = 0;
580 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
581 int mode = -1;
582 struct mwifiex_wait_queue *wait = NULL;
583 int status = 0;
584
585 wait = mwifiex_alloc_fill_wait_queue(priv, MWIFIEX_IOCTL_WAIT);
586 if (!wait)
587 return -ENOMEM;
588
589 switch (type) {
590 case NL80211_IFTYPE_ADHOC:
591 mode = MWIFIEX_BSS_MODE_IBSS;
592 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_ADHOC;
593 wiphy_dbg(wiphy, "info: setting interface type to adhoc\n");
594 break;
595 case NL80211_IFTYPE_STATION:
596 mode = MWIFIEX_BSS_MODE_INFRA;
597 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
598 wiphy_dbg(wiphy, "info: Setting interface type to managed\n");
599 break;
600 case NL80211_IFTYPE_UNSPECIFIED:
601 mode = MWIFIEX_BSS_MODE_AUTO;
602 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
603 wiphy_dbg(wiphy, "info: setting interface type to auto\n");
604 break;
605 default:
606 ret = -EINVAL;
607 }
608 if (ret)
609 goto done;
610 status = mwifiex_bss_ioctl_mode(priv, wait, HostCmd_ACT_GEN_SET, &mode);
611
612 if (mwifiex_request_ioctl(priv, wait, status, MWIFIEX_IOCTL_WAIT))
613 ret = -EFAULT;
614
615done:
616 kfree(wait);
617 return ret;
618}
619
620/*
621 * This function dumps the station information on a buffer.
622 *
623 * The following information is shown -
624 * - Total bytes transmitted
625 * - Total bytes received
626 * - Total packets transmitted
627 * - Total packets received
628 * - Signal quality level
629 * - Transmission rate
630 */
631static int
632mwifiex_dump_station_info(struct mwifiex_private *priv,
633 struct station_info *sinfo)
634{
635 struct mwifiex_ds_get_signal signal;
636 struct mwifiex_rate_cfg rate;
637 int ret = 0;
638
639 sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES |
640 STATION_INFO_RX_PACKETS |
641 STATION_INFO_TX_PACKETS
642 | STATION_INFO_SIGNAL | STATION_INFO_TX_BITRATE;
643
644 /* Get signal information from the firmware */
645 memset(&signal, 0, sizeof(struct mwifiex_ds_get_signal));
646 if (mwifiex_get_signal_info(priv, MWIFIEX_IOCTL_WAIT, &signal)) {
647		dev_err(priv->adapter->dev, "failed to get signal information\n");
648 ret = -EFAULT;
649 }
650
651 if (mwifiex_drv_get_data_rate(priv, &rate)) {
652		dev_err(priv->adapter->dev, "failed to get data rate\n");
653 ret = -EFAULT;
654 }
655
656 sinfo->rx_bytes = priv->stats.rx_bytes;
657 sinfo->tx_bytes = priv->stats.tx_bytes;
658 sinfo->rx_packets = priv->stats.rx_packets;
659 sinfo->tx_packets = priv->stats.tx_packets;
660 sinfo->signal = priv->w_stats.qual.level;
661 sinfo->txrate.legacy = rate.rate;
662
663 return ret;
664}
665
666/*
667 * CFG802.11 operation handler to get station information.
668 *
669 * This function only works in connected mode, and dumps the
670 * requested station information, if available.
671 */
672static int
673mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
674 u8 *mac, struct station_info *sinfo)
675{
676 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
677 int ret = 0;
678
681 if (!priv->media_connected)
682 return -ENOENT;
683 if (memcmp(mac, priv->cfg_bssid, ETH_ALEN))
684 return -ENOENT;
685
687 ret = mwifiex_dump_station_info(priv, sinfo);
688
689 return ret;
690}
691
692/* Supported rates to be advertised to cfg80211 */
693
694static struct ieee80211_rate mwifiex_rates[] = {
695 {.bitrate = 10, .hw_value = 2, },
696 {.bitrate = 20, .hw_value = 4, },
697 {.bitrate = 55, .hw_value = 11, },
698 {.bitrate = 110, .hw_value = 22, },
699 {.bitrate = 220, .hw_value = 44, },
700 {.bitrate = 60, .hw_value = 12, },
701 {.bitrate = 90, .hw_value = 18, },
702 {.bitrate = 120, .hw_value = 24, },
703 {.bitrate = 180, .hw_value = 36, },
704 {.bitrate = 240, .hw_value = 48, },
705 {.bitrate = 360, .hw_value = 72, },
706 {.bitrate = 480, .hw_value = 96, },
707 {.bitrate = 540, .hw_value = 108, },
708 {.bitrate = 720, .hw_value = 144, },
709};
710
711/* Channel definitions to be advertised to cfg80211 */
712
713static struct ieee80211_channel mwifiex_channels_2ghz[] = {
714 {.center_freq = 2412, .hw_value = 1, },
715 {.center_freq = 2417, .hw_value = 2, },
716 {.center_freq = 2422, .hw_value = 3, },
717 {.center_freq = 2427, .hw_value = 4, },
718 {.center_freq = 2432, .hw_value = 5, },
719 {.center_freq = 2437, .hw_value = 6, },
720 {.center_freq = 2442, .hw_value = 7, },
721 {.center_freq = 2447, .hw_value = 8, },
722 {.center_freq = 2452, .hw_value = 9, },
723 {.center_freq = 2457, .hw_value = 10, },
724 {.center_freq = 2462, .hw_value = 11, },
725 {.center_freq = 2467, .hw_value = 12, },
726 {.center_freq = 2472, .hw_value = 13, },
727 {.center_freq = 2484, .hw_value = 14, },
728};
729
730static struct ieee80211_supported_band mwifiex_band_2ghz = {
731 .channels = mwifiex_channels_2ghz,
732 .n_channels = ARRAY_SIZE(mwifiex_channels_2ghz),
733 .bitrates = mwifiex_rates,
734 .n_bitrates = 14,
735};
736
737static struct ieee80211_channel mwifiex_channels_5ghz[] = {
738 {.center_freq = 5040, .hw_value = 8, },
739 {.center_freq = 5060, .hw_value = 12, },
740 {.center_freq = 5080, .hw_value = 16, },
741 {.center_freq = 5170, .hw_value = 34, },
742 {.center_freq = 5190, .hw_value = 38, },
743 {.center_freq = 5210, .hw_value = 42, },
744 {.center_freq = 5230, .hw_value = 46, },
745 {.center_freq = 5180, .hw_value = 36, },
746 {.center_freq = 5200, .hw_value = 40, },
747 {.center_freq = 5220, .hw_value = 44, },
748 {.center_freq = 5240, .hw_value = 48, },
749 {.center_freq = 5260, .hw_value = 52, },
750 {.center_freq = 5280, .hw_value = 56, },
751 {.center_freq = 5300, .hw_value = 60, },
752 {.center_freq = 5320, .hw_value = 64, },
753 {.center_freq = 5500, .hw_value = 100, },
754 {.center_freq = 5520, .hw_value = 104, },
755 {.center_freq = 5540, .hw_value = 108, },
756 {.center_freq = 5560, .hw_value = 112, },
757 {.center_freq = 5580, .hw_value = 116, },
758 {.center_freq = 5600, .hw_value = 120, },
759 {.center_freq = 5620, .hw_value = 124, },
760 {.center_freq = 5640, .hw_value = 128, },
761 {.center_freq = 5660, .hw_value = 132, },
762 {.center_freq = 5680, .hw_value = 136, },
763 {.center_freq = 5700, .hw_value = 140, },
764 {.center_freq = 5745, .hw_value = 149, },
765 {.center_freq = 5765, .hw_value = 153, },
766 {.center_freq = 5785, .hw_value = 157, },
767 {.center_freq = 5805, .hw_value = 161, },
768 {.center_freq = 5825, .hw_value = 165, },
769};
770
771static struct ieee80211_supported_band mwifiex_band_5ghz = {
772 .channels = mwifiex_channels_5ghz,
773 .n_channels = ARRAY_SIZE(mwifiex_channels_5ghz),
774	.bitrates = mwifiex_rates + 4,	/* start past the four lowest (CCK) rates */
775	.n_bitrates = ARRAY_SIZE(mwifiex_rates) - 4,
776};
777
778
779/* Supported crypto cipher suites to be advertised to cfg80211 */
780
781static const u32 mwifiex_cipher_suites[] = {
782 WLAN_CIPHER_SUITE_WEP40,
783 WLAN_CIPHER_SUITE_WEP104,
784 WLAN_CIPHER_SUITE_TKIP,
785 WLAN_CIPHER_SUITE_CCMP,
786};
787
788/*
789 * CFG802.11 operation handler for disconnection request.
790 *
791 * This function does not work when there is already a disconnection
792 * procedure going on.
793 */
794static int
795mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
796 u16 reason_code)
797{
798 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
799
800 if (priv->disconnect)
801 return -EBUSY;
802
803 priv->disconnect = 1;
804 if (mwifiex_disconnect(priv, MWIFIEX_IOCTL_WAIT, NULL))
805 return -EFAULT;
806
807 wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
808 " reason code %d\n", priv->cfg_bssid, reason_code);
809
810 queue_work(priv->workqueue, &priv->cfg_workqueue);
811
812 return 0;
813}
814
815/*
816 * This function informs the CFG802.11 subsystem of a new IBSS.
817 *
818 * The following information is sent to the CFG802.11 subsystem
819 * to register the new IBSS. If we do not register the new IBSS,
820 * a kernel panic will result.
821 * - SSID
822 * - SSID length
823 * - BSSID
824 * - Channel
825 */
826static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
827{
828 int ret = 0;
829 struct ieee80211_channel *chan;
830 struct mwifiex_bss_info bss_info;
831 int ie_len = 0;
832 u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
833
834 ret = mwifiex_get_bss_info(priv, &bss_info);
835 if (ret)
836 return ret;
837
838 ie_buf[0] = WLAN_EID_SSID;
839 ie_buf[1] = bss_info.ssid.ssid_len;
840
841 memcpy(&ie_buf[sizeof(struct ieee_types_header)],
842 &bss_info.ssid.ssid,
843 bss_info.ssid.ssid_len);
844 ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
845
846 chan = __ieee80211_get_channel(priv->wdev->wiphy,
847 ieee80211_channel_to_frequency(bss_info.bss_chan,
848 priv->curr_bss_params.band));
849
850 cfg80211_inform_bss(priv->wdev->wiphy, chan,
851 bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
852 0, ie_buf, ie_len, 0, GFP_KERNEL);
853 memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
854
855 return ret;
856}
857
858/*
859 * This function informs the CFG802.11 subsystem of a new BSS connection.
860 *
861 * The following information is sent to the CFG802.11 subsystem
862 * to register the new BSS connection. If we do not register the new BSS,
863 * a kernel panic will result.
864 * - MAC address
865 * - Capabilities
866 * - Beacon period
867 * - RSSI value
868 * - Channel
869 * - Supported rates IE
870 * - Extended capabilities IE
871 * - DS parameter set IE
872 * - HT Capability IE
873 * - Vendor Specific IE (221)
874 * - WPA IE
875 * - RSN IE
876 */
877static int mwifiex_inform_bss_from_scan_result(struct mwifiex_private *priv,
878 struct mwifiex_802_11_ssid *ssid)
879{
880 struct mwifiex_scan_resp scan_resp;
881 struct mwifiex_bssdescriptor *scan_table;
882 int i, j;
883 struct ieee80211_channel *chan;
884 u8 *ie, *tmp, *ie_buf;
885 u32 ie_len;
886 u64 ts = 0;
887 u8 *beacon;
888 int beacon_size;
889 u8 element_id, element_len;
890
891 memset(&scan_resp, 0, sizeof(scan_resp));
892 if (mwifiex_get_scan_table(priv, MWIFIEX_IOCTL_WAIT, &scan_resp))
893 return -EFAULT;
894
895#define MAX_IE_BUF 2048
896 ie_buf = kzalloc(MAX_IE_BUF, GFP_KERNEL);
897 if (!ie_buf) {
898 dev_err(priv->adapter->dev, "%s: failed to alloc ie_buf\n",
899 __func__);
900 return -ENOMEM;
901 }
902
903 scan_table = (struct mwifiex_bssdescriptor *) scan_resp.scan_table;
904 for (i = 0; i < scan_resp.num_in_scan_table; i++) {
905 if (ssid) {
906 /* Inform specific BSS only */
907 if (memcmp(ssid->ssid, scan_table[i].ssid.ssid,
908 ssid->ssid_len))
909 continue;
910 }
911 memset(ie_buf, 0, MAX_IE_BUF);
912 ie_buf[0] = WLAN_EID_SSID;
913 ie_buf[1] = scan_table[i].ssid.ssid_len;
914 memcpy(&ie_buf[sizeof(struct ieee_types_header)],
915 scan_table[i].ssid.ssid, ie_buf[1]);
916
917 ie = ie_buf + ie_buf[1] + sizeof(struct ieee_types_header);
918 ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
919
920 ie[0] = WLAN_EID_SUPP_RATES;
921
922 for (j = 0; j < sizeof(scan_table[i].supported_rates); j++) {
923 if (!scan_table[i].supported_rates[j])
924 break;
925 else
926 ie[j + sizeof(struct ieee_types_header)] =
927 scan_table[i].supported_rates[j];
928 }
929
930 ie[1] = j;
931 ie_len += ie[1] + sizeof(struct ieee_types_header);
932
933 beacon = scan_table[i].beacon_buf;
934 beacon_size = scan_table[i].beacon_buf_size;
935
936 /* Skip time stamp, beacon interval and capability */
937
938 if (beacon) {
939			beacon += sizeof(scan_table[i].beacon_period)
940				+ sizeof(scan_table[i].time_stamp)
941				+ sizeof(scan_table[i].cap_info_bitmap);
942
943 beacon_size -= sizeof(scan_table[i].beacon_period)
944 + sizeof(scan_table[i].time_stamp)
945 + sizeof(scan_table[i].cap_info_bitmap);
946 }
947
948 while (beacon_size >= sizeof(struct ieee_types_header)) {
949 ie = ie_buf + ie_len;
950 element_id = *beacon;
951 element_len = *(beacon + 1);
952 if (beacon_size < (int) element_len +
953 sizeof(struct ieee_types_header)) {
954 dev_err(priv->adapter->dev, "%s: in processing"
955 " IE, bytes left < IE length\n",
956 __func__);
957 break;
958 }
959 switch (element_id) {
960 case WLAN_EID_EXT_CAPABILITY:
961 case WLAN_EID_DS_PARAMS:
962 case WLAN_EID_HT_CAPABILITY:
963 case WLAN_EID_VENDOR_SPECIFIC:
964 case WLAN_EID_RSN:
965 case WLAN_EID_BSS_AC_ACCESS_DELAY:
966 ie[0] = element_id;
967 ie[1] = element_len;
968 tmp = (u8 *) beacon;
969 memcpy(&ie[sizeof(struct ieee_types_header)],
970 tmp + sizeof(struct ieee_types_header),
971 element_len);
972 ie_len += ie[1] +
973 sizeof(struct ieee_types_header);
974 break;
975 default:
976 break;
977 }
978 beacon += element_len +
979 sizeof(struct ieee_types_header);
980 beacon_size -= element_len +
981 sizeof(struct ieee_types_header);
982 }
983 chan = ieee80211_get_channel(priv->wdev->wiphy,
984 scan_table[i].freq);
985 cfg80211_inform_bss(priv->wdev->wiphy, chan,
986 scan_table[i].mac_address,
987 ts, scan_table[i].cap_info_bitmap,
988 scan_table[i].beacon_period,
989 ie_buf, ie_len,
990 scan_table[i].rssi, GFP_KERNEL);
991 }
992
993 kfree(ie_buf);
994 return 0;
995}
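
/*
 * For reference, the ie_buf assembled above is a flat run of
 * information elements, each one a 2-byte struct ieee_types_header
 * (element ID, payload length) followed by the payload. A minimal
 * walk over such a buffer, mirroring the parsing loop above, would
 * look like this (illustration only, not driver code):
 *
 *	u8 *pos = ie_buf, *end = ie_buf + ie_len;
 *
 *	while (pos + sizeof(struct ieee_types_header) <= end) {
 *		u8 id = pos[0], len = pos[1];
 *
 *		if (pos + sizeof(struct ieee_types_header) + len > end)
 *			break;
 *		... consume element: id, len-byte payload at pos + 2 ...
 *		pos += sizeof(struct ieee_types_header) + len;
 *	}
 */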
996
997/*
998 * This function connects with a BSS.
999 *
1000 * This function handles both Infra and Ad-Hoc modes. It also performs
1001 * validity checking on the provided parameters, disconnects from the
1002 * current BSS (if any), sets up the association/scan parameters,
1003 * including security settings, and performs specific SSID scan before
1004 * trying to connect.
1005 *
1006 * For Infra mode, the function returns failure if the specified SSID
1007 * is not found in scan table. However, for Ad-Hoc mode, it can create
1008 * the IBSS if it does not exist. On successful completion in either case,
1009 * the function notifies the CFG802.11 subsystem of the new BSS connection;
1010 * without this notification the kernel will panic.
1011 */
1012static int
1013mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1014 u8 *bssid, int mode, struct ieee80211_channel *channel,
1015 struct cfg80211_connect_params *sme, bool privacy)
1016{
1017 struct mwifiex_802_11_ssid req_ssid;
1018 struct mwifiex_ssid_bssid ssid_bssid;
1019 int ret = 0;
1020 int auth_type = 0, pairwise_encrypt_mode = 0, wpa_enabled = 0;
1021 int group_encrypt_mode = 0;
1022 int alg_is_wep = 0;
1023
1024 memset(&req_ssid, 0, sizeof(struct mwifiex_802_11_ssid));
1025 memset(&ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
1026
1027 req_ssid.ssid_len = ssid_len;
1028 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
1029 dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
1030 return -EINVAL;
1031 }
1032
1033 memcpy(req_ssid.ssid, ssid, ssid_len);
1034 if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) {
1035 dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
1036 return -EINVAL;
1037 }
1038
1039	/* disconnect before trying to associate */
1040 mwifiex_disconnect(priv, MWIFIEX_IOCTL_WAIT, NULL);
1041
1042 if (channel)
1043 ret = mwifiex_set_rf_channel(priv, channel,
1044 mwifiex_channels_to_cfg80211_channel_type
1045 (priv->adapter->chan_offset));
1046
1047 ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); /* Disable keys */
1048
1049 if (mode == MWIFIEX_BSS_MODE_IBSS) {
1050 /* "privacy" is set only for ad-hoc mode */
1051 if (privacy) {
1052 /*
1053 * Keep MWIFIEX_ENCRYPTION_MODE_WEP104 for now so that
1054 * the firmware can find a matching network from the
1055 * scan. The cfg80211 does not give us the encryption
1056 * mode at this stage so just setting it to WEP here.
1057 */
1058 wpa_enabled = 0;
1059 auth_type = MWIFIEX_AUTH_MODE_OPEN;
1060 ret = mwifiex_set_auth(priv,
1061 MWIFIEX_ENCRYPTION_MODE_WEP104,
1062 auth_type, wpa_enabled);
1063 }
1064
1065 goto done;
1066 }
1067
1068 /* Now handle infra mode. "sme" is valid for infra mode only */
1069 if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC
1070 || sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
1071 auth_type = MWIFIEX_AUTH_MODE_OPEN;
1072 else if (sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY)
1073 auth_type = MWIFIEX_AUTH_MODE_SHARED;
1074
1075 if (sme->crypto.n_ciphers_pairwise) {
1076 pairwise_encrypt_mode = mwifiex_get_mwifiex_cipher(sme->crypto.
1077 ciphers_pairwise[0], &wpa_enabled);
1078 ret = mwifiex_set_auth(priv, pairwise_encrypt_mode, auth_type,
1079 wpa_enabled);
1080 }
1081
1082 if (sme->crypto.cipher_group) {
1083 group_encrypt_mode = mwifiex_get_mwifiex_cipher(sme->crypto.
1084 cipher_group, &wpa_enabled);
1085 ret = mwifiex_set_auth(priv, group_encrypt_mode, auth_type,
1086 wpa_enabled);
1087 }
1088 if (sme->ie)
1089 ret = mwifiex_set_gen_ie(priv, sme->ie, sme->ie_len);
1090
1091 if (sme->key) {
1092 alg_is_wep = mwifiex_is_alg_wep(pairwise_encrypt_mode)
1093 | mwifiex_is_alg_wep(group_encrypt_mode);
1094 if (alg_is_wep) {
1095 dev_dbg(priv->adapter->dev,
1096 "info: setting wep encryption"
1097 " with key len %d\n", sme->key_len);
1098 ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
1099 sme->key_idx, 0);
1100 }
1101 }
1102done:
1103 /* Do specific SSID scanning */
1104 if (mwifiex_request_scan(priv, MWIFIEX_IOCTL_WAIT, &req_ssid)) {
1105 dev_err(priv->adapter->dev, "scan error\n");
1106 return -EFAULT;
1107 }
1108
1109
1110 memcpy(&ssid_bssid.ssid, &req_ssid, sizeof(struct mwifiex_802_11_ssid));
1111
1112 if (mode != MWIFIEX_BSS_MODE_IBSS) {
1113 if (mwifiex_find_best_bss(priv, MWIFIEX_IOCTL_WAIT,
1114 &ssid_bssid))
1115 return -EFAULT;
1116		/* Inform the kernel of the BSS information; otherwise
1117		 * the kernel will panic after a successful association */
1118 if (mwifiex_inform_bss_from_scan_result(priv, &req_ssid))
1119 return -EFAULT;
1120 }
1121
1122 dev_dbg(priv->adapter->dev, "info: trying to associate to %s and bssid %pM\n",
1123 (char *) req_ssid.ssid, ssid_bssid.bssid);
1124
1125	memcpy(&priv->cfg_bssid, ssid_bssid.bssid, ETH_ALEN);
1126
1127 /* Connect to BSS by ESSID */
1128 memset(&ssid_bssid.bssid, 0, ETH_ALEN);
1129
1130 if (mwifiex_bss_start(priv, MWIFIEX_IOCTL_WAIT, &ssid_bssid))
1131 return -EFAULT;
1132
1133 if (mode == MWIFIEX_BSS_MODE_IBSS) {
1134		/* Inform the kernel of the BSS information; otherwise
1135		 * the kernel will panic after a successful association */
1136 if (mwifiex_cfg80211_inform_ibss_bss(priv))
1137 return -EFAULT;
1138 }
1139
1140 return ret;
1141}
1142
1143/*
1144 * CFG802.11 operation handler for association request.
1145 *
1146 * This function does not work when the current mode is set to Ad-Hoc, or
1147 * when an association procedure is already in progress. The given BSS
1148 * information is used to associate.
1149 */
1150static int
1151mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
1152 struct cfg80211_connect_params *sme)
1153{
1154 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1155 int ret = 0;
1156 int mode = 0;
1157
1158 if (priv->assoc_request)
1159 return -EBUSY;
1160
1161 mode = mwifiex_drv_get_mode(priv, MWIFIEX_IOCTL_WAIT);
1162
1163 if (mode == MWIFIEX_BSS_MODE_IBSS) {
1164 wiphy_err(wiphy, "received infra assoc request "
1165 "when station is in ibss mode\n");
1166 goto done;
1167 }
1168
1169 priv->assoc_request = 1;
1170
1171 wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
1172 (char *) sme->ssid, sme->bssid);
1173
1174 ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
1175 mode, sme->channel, sme, 0);
1176
1177done:
1178 priv->assoc_result = ret;
1179 queue_work(priv->workqueue, &priv->cfg_workqueue);
1180 return ret;
1181}
1182
1183/*
1184 * CFG802.11 operation handler to join an IBSS.
1185 *
1186 * This function does not work in any mode other than Ad-Hoc, or if
1187 * a join operation is already in progress.
1188 */
1189static int
1190mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1191 struct cfg80211_ibss_params *params)
1192{
1193 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
1194 int ret = 0;
1195 int mode = 0;
1196
1197 if (priv->ibss_join_request)
1198 return -EBUSY;
1199
1200 mode = mwifiex_drv_get_mode(priv, MWIFIEX_IOCTL_WAIT);
1201 if (mode != MWIFIEX_BSS_MODE_IBSS) {
1202 wiphy_err(wiphy, "request to join ibss received "
1203 "when station is not in ibss mode\n");
1204 goto done;
1205 }
1206
1207 priv->ibss_join_request = 1;
1208
1209	wiphy_dbg(wiphy, "info: trying to join %s with bssid %pM\n",
1210 (char *) params->ssid, params->bssid);
1211
1212 ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
1213 params->bssid, mode, params->channel, NULL,
1214 params->privacy);
1215done:
1216 priv->ibss_join_result = ret;
1217 queue_work(priv->workqueue, &priv->cfg_workqueue);
1218 return ret;
1219}
1220
1221/*
1222 * CFG802.11 operation handler to leave an IBSS.
1223 *
1224 * This function does not work if a leave operation is
1225 * already in progress.
1226 */
1227static int
1228mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
1229{
1230 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
1231
1232 if (priv->disconnect)
1233 return -EBUSY;
1234
1235 priv->disconnect = 1;
1236
1237	wiphy_dbg(wiphy, "info: disconnecting from bssid %pM\n",
1238 priv->cfg_bssid);
1239 if (mwifiex_disconnect(priv, MWIFIEX_IOCTL_WAIT, NULL))
1240 return -EFAULT;
1241
1242 queue_work(priv->workqueue, &priv->cfg_workqueue);
1243
1244 return 0;
1245}
1246
1247/*
1248 * CFG802.11 operation handler for scan request.
1249 *
1250 * This function issues a scan request to the firmware based upon
1251 * the user specified scan configuration. On successful completion,
1252 * it also reports the results to the CFG802.11 subsystem.
1253 */
1254static int
1255mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1256 struct cfg80211_scan_request *request)
1257{
1258 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1259
1260 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
1261
1262 if (priv->scan_request && priv->scan_request != request)
1263 return -EBUSY;
1264
1265 priv->scan_request = request;
1266
1267 queue_work(priv->workqueue, &priv->cfg_workqueue);
1268 return 0;
1269}
1270
1271/*
1272 * This function sets up the CFG802.11 specific HT capability fields
1273 * with default values.
1274 *
1275 * The following default values are set -
1276 * - HT Supported = True
1277 * - Maximum AMPDU length factor = 0x3
1278 * - Minimum AMPDU spacing = 0x6
1279 * - HT Capabilities map = IEEE80211_HT_CAP_SUP_WIDTH_20_40 (0x0002)
1280 * - MCS information, Rx mask = 0xff
1281 * - MCS information, Tx parameters = IEEE80211_HT_MCS_TX_DEFINED (0x01)
1282 */
1283static void
1284mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
1285 struct mwifiex_private *priv)
1286{
1287 int rx_mcs_supp;
1288 struct ieee80211_mcs_info mcs_set;
1289 u8 *mcs = (u8 *)&mcs_set;
1290 struct mwifiex_adapter *adapter = priv->adapter;
1291
1292 ht_info->ht_supported = true;
1293 ht_info->ampdu_factor = 0x3;
1294 ht_info->ampdu_density = 0x6;
1295
1296 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
1297 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1298
1299 rx_mcs_supp = GET_RXMCSSUPP(priv->adapter->hw_dev_mcs_support);
1300 /* Set MCS for 1x1 */
1301 memset(mcs, 0xff, rx_mcs_supp);
1302 /* Clear all the other values */
1303 memset(&mcs[rx_mcs_supp], 0,
1304 sizeof(struct ieee80211_mcs_info) - rx_mcs_supp);
1305 if (priv->bss_mode == MWIFIEX_BSS_MODE_INFRA ||
1306 (ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap) &&
1307 ISSUPP_CHANWIDTH40(adapter->usr_dot_11n_dev_cap)))
1308 /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
1309 SETHT_MCS32(mcs_set.rx_mask);
1310
1311 memcpy((u8 *) &ht_info->mcs, mcs, sizeof(struct ieee80211_mcs_info));
1312
1313 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1314}
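
/*
 * To illustrate the rx_mask arithmetic above: GET_RXMCSSUPP() yields
 * the number of supported receive spatial streams, and each stream
 * contributes eight MCS rates, i.e. one 0xff byte in mcs.rx_mask.
 * For a 1x1 device (rx_mcs_supp == 1) the memset pair leaves:
 *
 *	mcs_set.rx_mask[0] == 0xff	MCS 0-7 supported
 *	mcs_set.rx_mask[1] == 0x00	MCS 8-15 not supported
 *
 * A 2x2 device (rx_mcs_supp == 2) would also have rx_mask[1] == 0xff.
 * MCS 32, set via SETHT_MCS32() when applicable, corresponds to bit 0
 * of rx_mask[4] (bit 32 of the overall MCS bitmap).
 */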
1315
1316/* station cfg80211 operations */
1317static struct cfg80211_ops mwifiex_cfg80211_ops = {
1318 .change_virtual_intf = mwifiex_cfg80211_change_virtual_intf,
1319 .scan = mwifiex_cfg80211_scan,
1320 .connect = mwifiex_cfg80211_connect,
1321 .disconnect = mwifiex_cfg80211_disconnect,
1322 .get_station = mwifiex_cfg80211_get_station,
1323 .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
1324 .set_channel = mwifiex_cfg80211_set_channel,
1325 .join_ibss = mwifiex_cfg80211_join_ibss,
1326 .leave_ibss = mwifiex_cfg80211_leave_ibss,
1327 .add_key = mwifiex_cfg80211_add_key,
1328 .del_key = mwifiex_cfg80211_del_key,
1329 .set_default_key = mwifiex_cfg80211_set_default_key,
1330 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
1331 .set_tx_power = mwifiex_cfg80211_set_tx_power,
1332};
1333
1334/*
1335 * This function registers the device with CFG802.11 subsystem.
1336 *
1337 * The function creates the wireless device/wiphy, populates it with
1338 * default parameters and handler function pointers, and finally
1339 * registers the device.
1340 */
1341int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
1342 struct mwifiex_private *priv)
1343{
1344 int ret = 0;
1345 void *wdev_priv = NULL;
1346 struct wireless_dev *wdev;
1347
1348 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
1349 if (!wdev) {
1350		dev_err(priv->adapter->dev, "%s: failed to allocate wireless device\n",
1351 __func__);
1352 return -ENOMEM;
1353 }
1354	wdev->wiphy = wiphy_new(&mwifiex_cfg80211_ops,
1355				sizeof(struct mwifiex_private *));
1356	if (!wdev->wiphy) {
1357		kfree(wdev);
1358		return -ENOMEM;
	}
1359 wdev->iftype = NL80211_IFTYPE_STATION;
1360 wdev->wiphy->max_scan_ssids = 10;
1361 wdev->wiphy->interface_modes =
1362 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
1363 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
1364 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
1365
1366	/* Initialize cipher suites */
1367 wdev->wiphy->cipher_suites = mwifiex_cipher_suites;
1368 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
1369
1370	/* Initialize HT capabilities for the 2.4GHz and 5GHz bands */
1371
1372 mwifiex_setup_ht_caps(&wdev->wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap,
1373 priv);
1374 mwifiex_setup_ht_caps(&wdev->wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap,
1375 priv);
1376
1377	memcpy(wdev->wiphy->perm_addr, mac, ETH_ALEN);
1378 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1379
1380 /* We are using custom domains */
1381 wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1382
1383 wdev->wiphy->reg_notifier = mwifiex_reg_notifier;
1384
1385 /* Set struct mwifiex_private pointer in wiphy_priv */
1386 wdev_priv = wiphy_priv(wdev->wiphy);
1387
1388 *(unsigned long *) wdev_priv = (unsigned long) priv;
1389
1390 ret = wiphy_register(wdev->wiphy);
1391 if (ret < 0) {
1392		dev_err(priv->adapter->dev, "%s: failed to register cfg80211 device\n",
1393 __func__);
1394 wiphy_free(wdev->wiphy);
1395 return ret;
1396	}
1397
1398	dev_dbg(priv->adapter->dev,
1399		"info: successfully registered wiphy device\n");
1400
1401 dev_net_set(dev, wiphy_net(wdev->wiphy));
1402 dev->ieee80211_ptr = wdev;
1403	memcpy(dev->dev_addr, wdev->wiphy->perm_addr, ETH_ALEN);
1404	memcpy(dev->perm_addr, wdev->wiphy->perm_addr, ETH_ALEN);
1405 SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
1406 priv->wdev = wdev;
1407
1408 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1409 dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
1410 dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
1411
1412 return ret;
1413}
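
/*
 * The wiphy private area set up above stores the struct
 * mwifiex_private pointer as an unsigned long. A matching accessor,
 * along the lines of what mwifiex_cfg80211_get_priv() (not shown in
 * this hunk) is expected to do, is sketched below; the helper name is
 * illustrative only:
 *
 *	static struct mwifiex_private *priv_from_wiphy(struct wiphy *wiphy)
 *	{
 *		return (struct mwifiex_private *)
 *			(*(unsigned long *) wiphy_priv(wiphy));
 *	}
 */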
1414
1415/*
1416 * This function handles the result of different pending network operations.
1417 *
1418 * The following operations are handled and CFG802.11 subsystem is
1419 * notified accordingly -
1420 * - Scan request completion
1421 * - Association request completion
1422 * - IBSS join request completion
1423 * - Disconnect request completion
1424 */
1425void
1426mwifiex_cfg80211_results(struct work_struct *work)
1427{
1428 struct mwifiex_private *priv =
1429 container_of(work, struct mwifiex_private, cfg_workqueue);
1430 struct mwifiex_user_scan_cfg *scan_req;
1431 int ret = 0, i;
1432 struct ieee80211_channel *chan;
1433
1434 if (priv->scan_request) {
1435 scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
1436 GFP_KERNEL);
1437 if (!scan_req) {
1438 dev_err(priv->adapter->dev, "failed to alloc "
1439 "scan_req\n");
1440 return;
1441 }
1442 for (i = 0; i < priv->scan_request->n_ssids; i++) {
1443 memcpy(scan_req->ssid_list[i].ssid,
1444 priv->scan_request->ssids[i].ssid,
1445 priv->scan_request->ssids[i].ssid_len);
1446 scan_req->ssid_list[i].max_len =
1447 priv->scan_request->ssids[i].ssid_len;
1448 }
1449 for (i = 0; i < priv->scan_request->n_channels; i++) {
1450 chan = priv->scan_request->channels[i];
1451 scan_req->chan_list[i].chan_number = chan->hw_value;
1452 scan_req->chan_list[i].radio_type = chan->band;
1453 if (chan->flags & IEEE80211_CHAN_DISABLED)
1454 scan_req->chan_list[i].scan_type =
1455 MWIFIEX_SCAN_TYPE_PASSIVE;
1456 else
1457 scan_req->chan_list[i].scan_type =
1458 MWIFIEX_SCAN_TYPE_ACTIVE;
1459 scan_req->chan_list[i].scan_time = 0;
1460 }
1461 if (mwifiex_set_user_scan_ioctl(priv, scan_req)) {
1462 ret = -EFAULT;
1463 goto done;
1464 }
1465 if (mwifiex_inform_bss_from_scan_result(priv, NULL))
1466 ret = -EFAULT;
1467done:
1468 priv->scan_result_status = ret;
1469 dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
1470 __func__);
1471 cfg80211_scan_done(priv->scan_request,
1472 (priv->scan_result_status < 0));
1473 priv->scan_request = NULL;
1474 kfree(scan_req);
1475 }
1476
1477 if (priv->assoc_request) {
1478 if (!priv->assoc_result) {
1479 cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
1480 NULL, 0, NULL, 0,
1481 WLAN_STATUS_SUCCESS,
1482 GFP_KERNEL);
1483 dev_dbg(priv->adapter->dev,
1484 "info: associated to bssid %pM successfully\n",
1485 priv->cfg_bssid);
1486 } else {
1487 dev_dbg(priv->adapter->dev,
1488 "info: association to bssid %pM failed\n",
1489 priv->cfg_bssid);
1490 memset(priv->cfg_bssid, 0, ETH_ALEN);
1491 }
1492 priv->assoc_request = 0;
1493 priv->assoc_result = 0;
1494 }
1495
1496 if (priv->ibss_join_request) {
1497 if (!priv->ibss_join_result) {
1498 cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
1499 GFP_KERNEL);
1500 dev_dbg(priv->adapter->dev,
1501 "info: joined/created adhoc network with bssid"
1502 " %pM successfully\n", priv->cfg_bssid);
1503 } else {
1504 dev_dbg(priv->adapter->dev,
1505 "info: failed creating/joining adhoc network\n");
1506 }
1507 priv->ibss_join_request = 0;
1508 priv->ibss_join_result = 0;
1509 }
1510
1511 if (priv->disconnect) {
1512 memset(priv->cfg_bssid, 0, ETH_ALEN);
1513 priv->disconnect = 0;
1514 }
1515
1516 return;
1517}
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
new file mode 100644
index 00000000000..c4db8f36aa1
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -0,0 +1,31 @@
1/*
2 * Marvell Wireless LAN device driver: CFG80211
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef __MWIFIEX_CFG80211__
21#define __MWIFIEX_CFG80211__
22
23#include <net/cfg80211.h>
24
25#include "main.h"
26
27int mwifiex_register_cfg80211(struct net_device *, u8 *,
28 struct mwifiex_private *);
29
30void mwifiex_cfg80211_results(struct work_struct *work);
31#endif
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
new file mode 100644
index 00000000000..999ed81512f
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -0,0 +1,368 @@
1/*
2 * Marvell Wireless LAN device driver: Channel, Frequency and Power
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "cfg80211.h"
26
27/* 100mW */
28#define MWIFIEX_TX_PWR_DEFAULT 20
29/* 100mW */
30#define MWIFIEX_TX_PWR_US_DEFAULT 20
31/* 50mW */
32#define MWIFIEX_TX_PWR_JP_DEFAULT 16
33/* 100mW */
34#define MWIFIEX_TX_PWR_FR_100MW 20
35/* 10mW */
36#define MWIFIEX_TX_PWR_FR_10MW 10
37/* 100mW */
38#define MWIFIEX_TX_PWR_EMEA_DEFAULT 20
39
40static u8 adhoc_rates_b[B_SUPPORTED_RATES] = { 0x82, 0x84, 0x8b, 0x96, 0 };
41
42static u8 adhoc_rates_g[G_SUPPORTED_RATES] = { 0x8c, 0x12, 0x98, 0x24,
43 0xb0, 0x48, 0x60, 0x6c, 0 };
44
45static u8 adhoc_rates_bg[BG_SUPPORTED_RATES] = { 0x82, 0x84, 0x8b, 0x96,
46 0x0c, 0x12, 0x18, 0x24,
47 0x30, 0x48, 0x60, 0x6c, 0 };
48
49static u8 adhoc_rates_a[A_SUPPORTED_RATES] = { 0x8c, 0x12, 0x98, 0x24,
50 0xb0, 0x48, 0x60, 0x6c, 0 };
51u8 supported_rates_a[A_SUPPORTED_RATES] = { 0x0c, 0x12, 0x18, 0x24,
52 0xb0, 0x48, 0x60, 0x6c, 0 };
53static u16 mwifiex_data_rates[MWIFIEX_SUPPORTED_RATES_EXT] = { 0x02, 0x04,
54 0x0B, 0x16, 0x00, 0x0C, 0x12, 0x18,
55 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90,
56 0x0D, 0x1A, 0x27, 0x34, 0x4E, 0x68,
57 0x75, 0x82, 0x0C, 0x1B, 0x36, 0x51,
58 0x6C, 0xA2, 0xD8, 0xF3, 0x10E, 0x00 };
59
60u8 supported_rates_b[B_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x16, 0 };
61
62u8 supported_rates_g[G_SUPPORTED_RATES] = { 0x0c, 0x12, 0x18, 0x24,
63 0x30, 0x48, 0x60, 0x6c, 0 };
64
65u8 supported_rates_bg[BG_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x0c,
66 0x12, 0x16, 0x18, 0x24, 0x30, 0x48,
67 0x60, 0x6c, 0 };
68
69u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
70 0x32, 0x40, 0x41, 0xff };
71
72u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
73
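/*
 * In the adhoc_rates_* tables above, the high bit (0x80) marks a rate
 * as a basic (mandatory) rate per IEEE 802.11; the low seven bits give
 * the rate in 500 kbps units. For example:
 *
 *	0x82 & 0x7f == 0x02	->  1 Mbps, basic
 *	0x96 & 0x7f == 0x16	-> 11 Mbps, basic
 *	0x24			-> 18 Mbps, not basic
 */
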
74/*
75 * This function maps an index in supported rates table into
76 * the corresponding data rate.
77 */
78u32 mwifiex_index_to_data_rate(struct mwifiex_adapter *adapter, u8 index,
79 u8 ht_info)
80{
81	u16 mcs_rate[4][8] = {
82		{0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e},
83						/* LG 40M */
84		{0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c},
85						/* SG 40M */
86		{0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82},
87						/* LG 20M */
88		{0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90}
89	};					/* SG 20M */
90
91 u32 rate;
92
93 if (ht_info & BIT(0)) {
94 if (index == MWIFIEX_RATE_BITMAP_MCS0) {
95 if (ht_info & BIT(2))
96 rate = 0x0D; /* MCS 32 SGI rate */
97 else
98 rate = 0x0C; /* MCS 32 LGI rate */
99 } else if (index < 8) {
100 if (ht_info & BIT(1)) {
101 if (ht_info & BIT(2))
102 /* SGI, 40M */
103 rate = mcs_rate[1][index];
104 else
105 /* LGI, 40M */
106 rate = mcs_rate[0][index];
107 } else {
108 if (ht_info & BIT(2))
109 /* SGI, 20M */
110 rate = mcs_rate[3][index];
111 else
112 /* LGI, 20M */
113 rate = mcs_rate[2][index];
114 }
115 } else
116 rate = mwifiex_data_rates[0];
117 } else {
118 if (index >= MWIFIEX_SUPPORTED_RATES_EXT)
119 index = 0;
120 rate = mwifiex_data_rates[index];
121 }
122 return rate;
123}
124
125/*
126 * This function maps a data rate value into corresponding index in supported
127 * rates table.
128 */
129u8 mwifiex_data_rate_to_index(struct mwifiex_adapter *adapter, u32 rate)
130{
131 u16 *ptr;
132
133 if (rate) {
134 ptr = memchr(mwifiex_data_rates, rate,
135 sizeof(mwifiex_data_rates));
136 if (ptr)
137 return (u8) (ptr - mwifiex_data_rates);
138 }
139 return 0;
140}
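
/*
 * The two helpers above are inverses over the legacy entries of
 * mwifiex_data_rates[]. For example, for 11 Mbps (0x16 in 500 kbps
 * units) with HT disabled (ht_info == 0):
 *
 *	u8 idx = mwifiex_data_rate_to_index(adapter, 0x16);	(idx == 3)
 *	u32 r = mwifiex_index_to_data_rate(adapter, idx, 0);	(r == 0x16)
 *
 * Note that the reverse lookup does a byte-wise memchr() over a u16
 * table, so it relies on a little-endian layout and on the rate value
 * fitting in the low byte of an even-offset entry.
 */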
141
142/*
143 * This function returns the current active data rates.
144 *
145 * The result may vary depending upon connection status.
146 */
147u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv, u8 *rates)
148{
149 u32 k;
150
151 if (!priv->media_connected)
152 k = mwifiex_get_supported_rates(priv, rates);
153 else
154 k = mwifiex_copy_rates(rates, 0,
155 priv->curr_bss_params.data_rates,
156 priv->curr_bss_params.num_of_rates);
157
158 return k;
159}
160
161/*
162 * This function locates the Channel-Frequency-Power triplet based upon
163 * band and channel parameters.
164 */
165struct mwifiex_chan_freq_power *
166mwifiex_get_cfp_by_band_and_channel_from_cfg80211(struct mwifiex_private
167 *priv, u8 band, u16 channel)
168{
169 struct mwifiex_chan_freq_power *cfp = NULL;
170 struct ieee80211_supported_band *sband;
171 struct ieee80211_channel *ch;
172 int i;
173
174 if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
175 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
176 else
177 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
178
179 if (!sband) {
180 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
181 " & channel %d\n", __func__, band, channel);
182 return cfp;
183 }
184
185 for (i = 0; i < sband->n_channels; i++) {
186 ch = &sband->channels[i];
187 if (((ch->hw_value == channel) ||
188 (channel == FIRST_VALID_CHANNEL))
189 && !(ch->flags & IEEE80211_CHAN_DISABLED)) {
190 priv->cfp.channel = channel;
191 priv->cfp.freq = ch->center_freq;
192 priv->cfp.max_tx_power = ch->max_power;
193 cfp = &priv->cfp;
194 break;
195 }
196 }
197 if (i == sband->n_channels)
198 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
199 " & channel %d\n", __func__, band, channel);
200
201 return cfp;
202}
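
/*
 * For example, with a 2.4GHz band (such as BAND_G) and channel 6, the
 * loop above matches the channel whose hw_value is 6 and fills in:
 *
 *	priv->cfp.channel      = 6;
 *	priv->cfp.freq         = 2437;	(ch->center_freq, in MHz)
 *	priv->cfp.max_tx_power = ch->max_power;
 *
 * assuming the channel is not flagged IEEE80211_CHAN_DISABLED by the
 * current regulatory settings.
 */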
203
204/*
205 * This function locates the Channel-Frequency-Power triplet based upon
206 * band and frequency parameters.
207 */
208struct mwifiex_chan_freq_power *
209mwifiex_get_cfp_by_band_and_freq_from_cfg80211(struct mwifiex_private *priv,
210 u8 band, u32 freq)
211{
212 struct mwifiex_chan_freq_power *cfp = NULL;
213 struct ieee80211_supported_band *sband;
214 struct ieee80211_channel *ch;
215 int i;
216
217 if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
218 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
219 else
220 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
221
222 if (!sband) {
223 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
224 " & freq %d\n", __func__, band, freq);
225 return cfp;
226 }
227
228 for (i = 0; i < sband->n_channels; i++) {
229 ch = &sband->channels[i];
230 if ((ch->center_freq == freq) &&
231 !(ch->flags & IEEE80211_CHAN_DISABLED)) {
232 priv->cfp.channel = ch->hw_value;
233 priv->cfp.freq = freq;
234 priv->cfp.max_tx_power = ch->max_power;
235 cfp = &priv->cfp;
236 break;
237 }
238 }
239 if (i == sband->n_channels)
240 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
241 " & freq %d\n", __func__, band, freq);
242
243 return cfp;
244}
245
246/*
247 * This function checks if the data rate is set to auto.
248 */
249u8
250mwifiex_is_rate_auto(struct mwifiex_private *priv)
251{
252 u32 i;
253 int rate_num = 0;
254
255 for (i = 0; i < ARRAY_SIZE(priv->bitmap_rates); i++)
256 if (priv->bitmap_rates[i])
257 rate_num++;
258
259 if (rate_num > 1)
260 return true;
261 else
262 return false;
263}
264
265/*
266 * This function converts rate bitmap into rate index.
267 */
268int
269mwifiex_get_rate_index(struct mwifiex_adapter *adapter, u16 *rate_bitmap,
270 int size)
271{
272 int i;
273
274 for (i = 0; i < size * 8; i++)
275 if (rate_bitmap[i / 16] & (1 << (i % 16)))
276 return i;
277
278 return 0;
279}
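
/*
 * Example of the bitmap arithmetic above: the rate bitmap is an array
 * of 16-bit words, with bit i of the overall map living in word i / 16
 * at bit position i % 16. So, given
 *
 *	u16 bitmap[2] = { 0x0000, 0x0004 };
 *
 * mwifiex_get_rate_index(adapter, bitmap, sizeof(bitmap)) scans 32
 * bits and returns 18 (word 1, bit 2), the index of the first set bit.
 */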
280
281/*
282 * This function gets the supported data rates.
283 *
284 * The function works in both Ad-Hoc and infra modes. It checks the
285 * configured band and returns the corresponding supported data rates.
286 */
287u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
288{
289 u32 k = 0;
290 struct mwifiex_adapter *adapter = priv->adapter;
291 if (priv->bss_mode == MWIFIEX_BSS_MODE_INFRA) {
292 /* Infra. mode */
293 switch (adapter->config_bands) {
294 case BAND_B:
295 dev_dbg(adapter->dev, "info: infra band=%d "
296 "supported_rates_b\n", adapter->config_bands);
297 k = mwifiex_copy_rates(rates, k, supported_rates_b,
298 sizeof(supported_rates_b));
299 break;
300 case BAND_G:
301 case BAND_G | BAND_GN:
302 dev_dbg(adapter->dev, "info: infra band=%d "
303 "supported_rates_g\n", adapter->config_bands);
304 k = mwifiex_copy_rates(rates, k, supported_rates_g,
305 sizeof(supported_rates_g));
306 break;
307 case BAND_B | BAND_G:
308 case BAND_A | BAND_B | BAND_G:
309 case BAND_A | BAND_B:
310 case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
311 case BAND_B | BAND_G | BAND_GN:
312 dev_dbg(adapter->dev, "info: infra band=%d "
313 "supported_rates_bg\n", adapter->config_bands);
314 k = mwifiex_copy_rates(rates, k, supported_rates_bg,
315 sizeof(supported_rates_bg));
316 break;
317 case BAND_A:
318 case BAND_A | BAND_G:
319 dev_dbg(adapter->dev, "info: infra band=%d "
320 "supported_rates_a\n", adapter->config_bands);
321 k = mwifiex_copy_rates(rates, k, supported_rates_a,
322 sizeof(supported_rates_a));
323 break;
324 case BAND_A | BAND_AN:
325 case BAND_A | BAND_G | BAND_AN | BAND_GN:
326 dev_dbg(adapter->dev, "info: infra band=%d "
327 "supported_rates_a\n", adapter->config_bands);
328 k = mwifiex_copy_rates(rates, k, supported_rates_a,
329 sizeof(supported_rates_a));
330 break;
331 case BAND_GN:
332 dev_dbg(adapter->dev, "info: infra band=%d "
333 "supported_rates_n\n", adapter->config_bands);
334 k = mwifiex_copy_rates(rates, k, supported_rates_n,
335 sizeof(supported_rates_n));
336 break;
337 }
338 } else {
339 /* Ad-hoc mode */
340 switch (adapter->adhoc_start_band) {
341 case BAND_B:
342 dev_dbg(adapter->dev, "info: adhoc B\n");
343 k = mwifiex_copy_rates(rates, k, adhoc_rates_b,
344 sizeof(adhoc_rates_b));
345 break;
346 case BAND_G:
347 case BAND_G | BAND_GN:
348 dev_dbg(adapter->dev, "info: adhoc G only\n");
349 k = mwifiex_copy_rates(rates, k, adhoc_rates_g,
350 sizeof(adhoc_rates_g));
351 break;
352 case BAND_B | BAND_G:
353 case BAND_B | BAND_G | BAND_GN:
354 dev_dbg(adapter->dev, "info: adhoc BG\n");
355 k = mwifiex_copy_rates(rates, k, adhoc_rates_bg,
356 sizeof(adhoc_rates_bg));
357 break;
358 case BAND_A:
359 case BAND_A | BAND_AN:
360 dev_dbg(adapter->dev, "info: adhoc A\n");
361 k = mwifiex_copy_rates(rates, k, adhoc_rates_a,
362 sizeof(adhoc_rates_a));
363 break;
364 }
365 }
366
367 return k;
368}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
new file mode 100644
index 00000000000..3a8fe1e122f
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -0,0 +1,1463 @@
1/*
2 * Marvell Wireless LAN device driver: commands and events
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * This function initializes a command node.
30 *
31 * The actual allocation of the node is not done by this function. It only
32 * initializes the node by filling in default parameters. Similarly,
33 * allocation of the different buffers used (IOCTL buffer, data buffer) is
34 * not done by this function either.
35 */
36static void
37mwifiex_init_cmd_node(struct mwifiex_private *priv,
38 struct cmd_ctrl_node *cmd_node,
39 u32 cmd_oid, void *wait_queue, void *data_buf)
40{
41 cmd_node->priv = priv;
42 cmd_node->cmd_oid = cmd_oid;
43 cmd_node->wq_buf = wait_queue;
44 cmd_node->data_buf = data_buf;
45 cmd_node->cmd_skb = cmd_node->skb;
46}
47
48/*
49 * This function returns a command node from the free queue depending upon
50 * availability.
51 */
52static struct cmd_ctrl_node *
53mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
54{
55 struct cmd_ctrl_node *cmd_node;
56 unsigned long flags;
57
58 spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
59 if (list_empty(&adapter->cmd_free_q)) {
60 dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n");
61 spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
62 return NULL;
63 }
64 cmd_node = list_first_entry(&adapter->cmd_free_q,
65 struct cmd_ctrl_node, list);
66 list_del(&cmd_node->list);
67 spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
68
69 return cmd_node;
70}
71
72/*
73 * This function cleans up a command node.
74 *
75 * The function resets the fields including the buffer pointers.
76 * This function does not try to free the buffers. They must be
77 * freed before calling this function.
78 *
79 * However, if a response buffer is still attached to the node, this
80 * function calls the receive completion callback on it before
81 * resetting the pointer.
82 */
83static void
84mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
85 struct cmd_ctrl_node *cmd_node)
86{
87 cmd_node->cmd_oid = 0;
88 cmd_node->cmd_flag = 0;
89 cmd_node->wq_buf = NULL;
90 cmd_node->data_buf = NULL;
91
92 if (cmd_node->resp_skb) {
93 mwifiex_recv_complete(adapter, cmd_node->resp_skb, 0);
94 cmd_node->resp_skb = NULL;
95 }
96
97 return;
98}
99
100/*
101 * This function returns a command node from the pending queue which
102 * matches the given IOCTL request.
103 */
104static struct cmd_ctrl_node *
105mwifiex_get_pending_ioctl_cmd(struct mwifiex_adapter *adapter,
106 struct mwifiex_wait_queue *wait_queue)
107{
108 unsigned long flags;
109 struct cmd_ctrl_node *cmd_node;
110
111 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
112 list_for_each_entry(cmd_node, &adapter->cmd_pending_q, list) {
113 if (cmd_node->wq_buf == wait_queue) {
114 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
115 flags);
116 return cmd_node;
117 }
118 }
119 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
120
121 return NULL;
122}
123
124/*
125 * This function sends a host command to the firmware.
126 *
127 * The function copies the host command into the driver command
128 * buffer, which will be transferred to the firmware later by the
129 * main thread.
130 */
131static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
132 struct host_cmd_ds_command *cmd, void *data_buf)
133{
134 struct mwifiex_ds_misc_cmd *pcmd_ptr =
135 (struct mwifiex_ds_misc_cmd *) data_buf;
136
137 /* Copy the HOST command to command buffer */
138 memcpy((void *) cmd, pcmd_ptr->cmd, pcmd_ptr->len);
139 dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len);
140 return 0;
141}
142
143/*
144 * This function downloads a command to the firmware.
145 *
146 * The function performs sanity tests, sets the command sequence
147 * number and size, converts the header fields to CPU format before
148 * sending. Afterwards, it logs the command ID and action for debugging
149 * and sets up the command timeout timer.
150 */
151static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
152 struct cmd_ctrl_node *cmd_node)
153{
154
155 struct mwifiex_adapter *adapter = priv->adapter;
156 int ret = 0;
157 struct host_cmd_ds_command *host_cmd;
158 struct mwifiex_wait_queue *wait_queue = NULL;
159 uint16_t cmd_code;
160 uint16_t cmd_size;
161 struct timeval tstamp;
162 unsigned long flags;
163
164 if (!adapter || !cmd_node)
165 return -1;
166
167 host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
168 if (cmd_node->wq_buf)
169 wait_queue = (struct mwifiex_wait_queue *) cmd_node->wq_buf;
170
171 /* Sanity test */
172 if (host_cmd == NULL || host_cmd->size == 0) {
173 dev_err(adapter->dev, "DNLD_CMD: host_cmd is null"
174 " or cmd size is 0, not sending\n");
175 if (wait_queue)
176 wait_queue->status = MWIFIEX_ERROR_CMD_DNLD_FAIL;
177 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
178 return -1;
179 }
180
181 /* Set command sequence number */
182 adapter->seq_num++;
183 host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
184 (adapter->seq_num, cmd_node->priv->bss_num,
185 cmd_node->priv->bss_type));
186
187 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
188 adapter->curr_cmd = cmd_node;
189 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
190
191 cmd_code = le16_to_cpu(host_cmd->command);
192 cmd_size = le16_to_cpu(host_cmd->size);
193
194 skb_trim(cmd_node->cmd_skb, cmd_size);
195
196 do_gettimeofday(&tstamp);
197 dev_dbg(adapter->dev, "cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d,"
198 " seqno %#x\n",
199 tstamp.tv_sec, tstamp.tv_usec, cmd_code,
200 le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
201 le16_to_cpu(host_cmd->seq_num));
202
203 skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
204
205 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
206 cmd_node->cmd_skb->data,
207 cmd_node->cmd_skb->len, NULL);
208
209 if (ret == -1) {
210 dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
211 if (wait_queue)
212 wait_queue->status = MWIFIEX_ERROR_CMD_DNLD_FAIL;
213 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
214
215 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
216 adapter->curr_cmd = NULL;
217 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
218
219 adapter->dbg.num_cmd_host_to_card_failure++;
220 return -1;
221 }
222
223 /* Save the last command id and action to debug log */
224 adapter->dbg.last_cmd_index =
225 (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM;
226 adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code;
227 adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
228 le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN));
229
230 /* Clear BSS_NO_BITS from HostCmd */
231 cmd_code &= HostCmd_CMD_ID_MASK;
232
233 /* Setup the timer after transmit command */
234 mod_timer(&adapter->cmd_timer,
235 jiffies + (MWIFIEX_TIMER_10S * HZ) / 1000);
236
237 return 0;
238}
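
/*
 * The timeout armed above converts a millisecond constant to jiffies
 * by hand: assuming MWIFIEX_TIMER_10S is expressed in milliseconds
 * (10000), jiffies + (MWIFIEX_TIMER_10S * HZ) / 1000 fires 10 seconds
 * from now regardless of the kernel's HZ setting. The equivalent
 * idiomatic form would be jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S).
 */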
239
240/*
241 * This function downloads a sleep confirm command to the firmware.
242 *
243 * The function performs sanity tests, sets the command sequence
244 * number and size, converts the header fields to CPU format before
245 * sending.
246 *
247 * No responses are needed for sleep confirm command.
248 */
249static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
250{
251 int ret = 0;
252 u16 cmd_len = 0;
253 struct mwifiex_private *priv;
254 struct mwifiex_opt_sleep_confirm_buffer *sleep_cfm_buf =
255 (struct mwifiex_opt_sleep_confirm_buffer *)
256 adapter->sleep_cfm->data;
257 cmd_len = sizeof(struct mwifiex_opt_sleep_confirm);
258 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
259
260 sleep_cfm_buf->ps_cfm_sleep.seq_num =
261 cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
262 (adapter->seq_num, priv->bss_num,
263 priv->bss_type)));
264 adapter->seq_num++;
265
266 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
267 adapter->sleep_cfm->data,
268 adapter->sleep_cfm->len +
269 INTF_HEADER_LEN, NULL);
270
271 if (ret == -1) {
272 dev_err(adapter->dev, "SLEEP_CFM: failed\n");
273 adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
274 return -1;
275 }
276 if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY))
277 == MWIFIEX_BSS_ROLE_STA) {
278 if (!sleep_cfm_buf->ps_cfm_sleep.sleep_cfm.resp_ctrl)
279 /* Response is not needed for sleep
280 confirm command */
281 adapter->ps_state = PS_STATE_SLEEP;
282 else
283 adapter->ps_state = PS_STATE_SLEEP_CFM;
284
285 if (!sleep_cfm_buf->ps_cfm_sleep.sleep_cfm.resp_ctrl
286 && (adapter->is_hs_configured
287 && !adapter->sleep_period.period)) {
288 adapter->pm_wakeup_card_req = true;
289 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
290 MWIFIEX_BSS_ROLE_STA), true);
291 }
292 }
293
294 return ret;
295}
296
297/*
298 * This function allocates the command buffers and links them to
299 * the command free queue.
300 *
301 * The driver uses a pre-allocated pool of command buffers, which
302 * are created at driver initialization and freed at driver cleanup.
303 * Every command needs to obtain a command buffer from this pool before
304 * it can be issued. The command free queue lists the command buffers
305 * currently free to use, while the command pending queue lists the
306 * command buffers already in use and awaiting handling. Command buffers
307 * are returned to the free queue after use.
308 */
309int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
310{
311 struct cmd_ctrl_node *cmd_array;
312 u32 buf_size;
313 u32 i;
314
315 /* Allocate and initialize struct cmd_ctrl_node */
316 buf_size = sizeof(struct cmd_ctrl_node) * MWIFIEX_NUM_OF_CMD_BUFFER;
317 cmd_array = kzalloc(buf_size, GFP_KERNEL);
318 if (!cmd_array) {
319 dev_err(adapter->dev, "%s: failed to alloc cmd_array\n",
320 __func__);
321 return -1;
322 }
323
324 adapter->cmd_pool = cmd_array;
325 memset(adapter->cmd_pool, 0, buf_size);
326
327 /* Allocate and initialize command buffers */
328 for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
329 cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
330 if (!cmd_array[i].skb) {
331 dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n");
332 return -1;
333 }
334 }
335
336 for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++)
337 mwifiex_insert_cmd_to_free_q(adapter, &cmd_array[i]);
338
339 return 0;
340}
341
342/*
343 * This function frees the command buffers.
344 *
345 * The function calls the completion callback for all the command
346 * buffers that still have response buffers associated with them.
347 */
348int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
349{
350 struct cmd_ctrl_node *cmd_array;
351 u32 i;
352
353 /* Need to check if cmd pool is allocated or not */
354 if (!adapter->cmd_pool) {
355 dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n");
356 return 0;
357 }
358
359 cmd_array = adapter->cmd_pool;
360
361 /* Release shared memory buffers */
362 for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
363 if (cmd_array[i].skb) {
364 dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i);
365 dev_kfree_skb_any(cmd_array[i].skb);
366 }
367 if (!cmd_array[i].resp_skb)
368 continue;
369 mwifiex_recv_complete(adapter, cmd_array[i].resp_skb, 0);
370 }
371 /* Release struct cmd_ctrl_node */
372 if (adapter->cmd_pool) {
373 dev_dbg(adapter->dev, "cmd: free cmd pool\n");
374 kfree(adapter->cmd_pool);
375 adapter->cmd_pool = NULL;
376 }
377
378 return 0;
379}
380
381/*
382 * This function handles events generated by firmware.
383 *
384 * The event body of events received from firmware is not used (though it is
385 * saved); only the event ID is used. Some events are re-invoked by
386 * the driver, with a new event body.
387 *
388 * After processing, the function calls the completion callback
389 * for cleanup.
390 */
391int mwifiex_process_event(struct mwifiex_adapter *adapter)
392{
393 int ret = 0;
394 struct mwifiex_private *priv =
395 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
396 struct sk_buff *skb = adapter->event_skb;
397 u32 eventcause = adapter->event_cause;
398 struct timeval tstamp;
399 struct mwifiex_rxinfo *rx_info = NULL;
400
401 /* Save the last event to debug log */
402 adapter->dbg.last_event_index =
403 (adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
404 adapter->dbg.last_event[adapter->dbg.last_event_index] =
405 (u16) eventcause;
406
407 /* Get BSS number and corresponding priv */
408 priv = mwifiex_get_priv_by_id(adapter, EVENT_GET_BSS_NUM(eventcause),
409 EVENT_GET_BSS_TYPE(eventcause));
410 if (!priv)
411 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
412 /* Clear BSS_NO_BITS from event */
413 eventcause &= EVENT_ID_MASK;
414 adapter->event_cause = eventcause;
415
416 if (skb) {
417 rx_info = MWIFIEX_SKB_RXCB(skb);
418 rx_info->bss_index = priv->bss_index;
419 }
420
421 if (eventcause != EVENT_PS_SLEEP && eventcause != EVENT_PS_AWAKE) {
422 do_gettimeofday(&tstamp);
423 dev_dbg(adapter->dev, "event: %lu.%lu: cause: %#x\n",
424 tstamp.tv_sec, tstamp.tv_usec, eventcause);
425 }
426
427 ret = mwifiex_process_sta_event(priv);
428
429 adapter->event_cause = 0;
430 adapter->event_skb = NULL;
431
432 mwifiex_recv_complete(adapter, skb, 0);
433
434 return ret;
435}
436
437/*
438 * This function prepares a command before sending it to the firmware.
439 *
440 * Preparation includes -
441 * - Sanity tests to make sure the card is still present or the FW
442 * is not reset
443 * - Getting a new command node from the command free queue
444 * - Initializing the command node with default parameters
445 * - Filling in the non-default parameters and buffer pointers
446 * - Adding the command to the pending queue
447 */
448int mwifiex_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
449 u16 cmd_action, u32 cmd_oid,
450 void *wait_queue, void *data_buf)
451{
452 int ret = 0;
453 struct mwifiex_adapter *adapter = priv->adapter;
454 struct cmd_ctrl_node *cmd_node = NULL;
455 struct host_cmd_ds_command *cmd_ptr = NULL;
456
457 if (!adapter) {
458 pr_err("PREP_CMD: adapter is NULL\n");
459 return -1;
460 }
461
462 if (adapter->is_suspended) {
463 dev_err(adapter->dev, "PREP_CMD: device in suspended state\n");
464 return -1;
465 }
466
467 if (adapter->surprise_removed) {
468 dev_err(adapter->dev, "PREP_CMD: card is removed\n");
469 return -1;
470 }
471
472 if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
473 if (cmd_no != HostCmd_CMD_FUNC_INIT) {
474 dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
475 return -1;
476 }
477 }
478
479 /* Get a new command node */
480 cmd_node = mwifiex_get_cmd_node(adapter);
481
482 if (!cmd_node) {
483 dev_err(adapter->dev, "PREP_CMD: no free cmd node\n");
484 return -1;
485 }
486
487 /* Initialize the command node */
488 mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, wait_queue, data_buf);
489
490 if (!cmd_node->cmd_skb) {
491 dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
492 return -1;
493 }
494
495 memset(skb_put(cmd_node->cmd_skb, sizeof(struct host_cmd_ds_command)),
496 0, sizeof(struct host_cmd_ds_command));
497
498 cmd_ptr = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
499 cmd_ptr->command = cpu_to_le16(cmd_no);
500 cmd_ptr->result = 0;
501
502 /* Prepare command */
503 if (cmd_no) {
504 ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action,
505 cmd_oid, data_buf, cmd_ptr);
506 } else {
507 ret = mwifiex_cmd_host_cmd(priv, cmd_ptr, data_buf);
508 cmd_node->cmd_flag |= CMD_F_HOSTCMD;
509 }
510
511 /* Return error, since the command preparation failed */
512 if (ret) {
513 dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n",
514 cmd_no);
515 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
516 return -1;
517 }
518
519 /* Send command */
520 if (cmd_no == HostCmd_CMD_802_11_SCAN)
521 mwifiex_queue_scan_cmd(priv, cmd_node);
522 else
523 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
524
525 return ret;
526}
527
528/*
529 * This function returns a command to the command free queue.
530 *
531 * The function also calls the completion callback if required, before
532 * cleaning the command node and re-inserting it into the free queue.
533 */
534void
535mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
536 struct cmd_ctrl_node *cmd_node)
537{
538 struct mwifiex_wait_queue *wait_queue = NULL;
539 unsigned long flags;
540
541 if (cmd_node == NULL)
542 return;
543 if (cmd_node->wq_buf) {
544 wait_queue = (struct mwifiex_wait_queue *) cmd_node->wq_buf;
545 if (wait_queue->status != MWIFIEX_ERROR_NO_ERROR)
546 mwifiex_ioctl_complete(adapter, wait_queue, -1);
547 else
548 mwifiex_ioctl_complete(adapter, wait_queue, 0);
549 }
550 /* Clean the node */
551 mwifiex_clean_cmd_node(adapter, cmd_node);
552
553 /* Insert node into cmd_free_q */
554 spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
555 list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
556 spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
557
558 return;
559}
560
561/*
562 * This function queues a command to the command pending queue.
563 *
564 * This in effect adds the command to the command list to be executed.
565 * Exit PS command is handled specially, by placing it always to the
566 * front of the command queue.
567 */
568void
569mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
570 struct cmd_ctrl_node *cmd_node, u32 add_tail)
571{
572 struct host_cmd_ds_command *host_cmd = NULL;
573 u16 command;
574 unsigned long flags;
575
576 host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
577 if (!host_cmd) {
578 dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n");
579 return;
580 }
581
582 command = le16_to_cpu(host_cmd->command);
583
584	/* Exit_PS command needs to be queued at the head of the queue always. */
585 if (command == HostCmd_CMD_802_11_PS_MODE_ENH) {
586 struct host_cmd_ds_802_11_ps_mode_enh *pm =
587 &host_cmd->params.psmode_enh;
588 if ((le16_to_cpu(pm->action) == DIS_PS)
589 || (le16_to_cpu(pm->action) == DIS_AUTO_PS)) {
590 if (adapter->ps_state != PS_STATE_AWAKE)
591 add_tail = false;
592 }
593 }
594
595 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
596 if (add_tail)
597 list_add_tail(&cmd_node->list, &adapter->cmd_pending_q);
598 else
599 list_add(&cmd_node->list, &adapter->cmd_pending_q);
600 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
601
602 dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x is queued\n", command);
603
604 return;
605}
606
607/*
608 * This function executes the next command in command pending queue.
609 *
610 * This function will fail if a command is already in processing stage,
611 * otherwise it will dequeue the first command from the command pending
612 * queue and send to the firmware.
613 *
614 * If the device is currently in host sleep mode, any command, except the
615 * host sleep configuration command, will de-activate the host sleep. For PS
616 * mode, the function will put the firmware back to sleep if applicable.
617 */
618int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
619{
620 struct mwifiex_private *priv = NULL;
621 struct cmd_ctrl_node *cmd_node = NULL;
622 int ret = 0;
623 struct host_cmd_ds_command *host_cmd;
624 unsigned long cmd_flags;
625 unsigned long cmd_pending_q_flags;
626
627 /* Check if already in processing */
628 if (adapter->curr_cmd) {
629 dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n");
630 return -1;
631 }
632
633 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
634 /* Check if any command is pending */
635 spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
636 if (list_empty(&adapter->cmd_pending_q)) {
637 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
638 cmd_pending_q_flags);
639 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
640 return 0;
641 }
642 cmd_node = list_first_entry(&adapter->cmd_pending_q,
643 struct cmd_ctrl_node, list);
644 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
645 cmd_pending_q_flags);
646
647 host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
648 priv = cmd_node->priv;
649
650 if (adapter->ps_state != PS_STATE_AWAKE) {
651 dev_err(adapter->dev, "%s: cannot send cmd in sleep state,"
652 " this should not happen\n", __func__);
653 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
654 return ret;
655 }
656
657 spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
658 list_del(&cmd_node->list);
659 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
660 cmd_pending_q_flags);
661
662 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
663 ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node);
664 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
665 /* Any command sent to the firmware when host is in sleep
666 * mode should de-configure host sleep. We should skip the
667 * host sleep configuration command itself though
668 */
669 if (priv && (host_cmd->command !=
670 cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
671 if (adapter->hs_activated) {
672 adapter->is_hs_configured = false;
673 mwifiex_hs_activated_event(priv, false);
674 }
675 }
676
677 return ret;
678}
679
680/*
681 * This function handles the command response.
682 *
683 * After processing, the function cleans the command node and puts
684 * it back to the command free queue.
685 */
686int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
687{
688 struct host_cmd_ds_command *resp = NULL;
689 struct mwifiex_private *priv =
690 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
691 int ret = 0;
692 uint16_t orig_cmdresp_no;
693 uint16_t cmdresp_no;
694 uint16_t cmdresp_result;
695 struct mwifiex_wait_queue *wait_queue = NULL;
696 struct timeval tstamp;
697 unsigned long flags;
698
699	/* We got a response from FW, so cancel the command timer */
700 del_timer(&adapter->cmd_timer);
701
702 if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
703 resp = (struct host_cmd_ds_command *) adapter->upld_buf;
704 dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n",
705 le16_to_cpu(resp->command));
706 return -1;
707 }
708
709 if (adapter->curr_cmd->wq_buf)
710 wait_queue = (struct mwifiex_wait_queue *)
711 adapter->curr_cmd->wq_buf;
712
713 adapter->num_cmd_timeout = 0;
714
715 resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
716 if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
717 dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
718 le16_to_cpu(resp->command));
719 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
720 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
721 adapter->curr_cmd = NULL;
722 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
723 return -1;
724 }
725
726 if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
727 /* Copy original response back to response buffer */
728 struct mwifiex_ds_misc_cmd *hostcmd = NULL;
729 uint16_t size = le16_to_cpu(resp->size);
730 dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size);
731 size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
732 if (adapter->curr_cmd->data_buf) {
733 hostcmd = (struct mwifiex_ds_misc_cmd *)
734 adapter->curr_cmd->data_buf;
735 hostcmd->len = size;
736 memcpy(hostcmd->cmd, (void *) resp, size);
737 }
738 }
739 orig_cmdresp_no = le16_to_cpu(resp->command);
740
741 /* Get BSS number and corresponding priv */
742 priv = mwifiex_get_priv_by_id(adapter,
743 HostCmd_GET_BSS_NO(le16_to_cpu(resp->seq_num)),
744 HostCmd_GET_BSS_TYPE(le16_to_cpu(resp->seq_num)));
745 if (!priv)
746 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
747 /* Clear RET_BIT from HostCmd */
748 resp->command = cpu_to_le16(orig_cmdresp_no & HostCmd_CMD_ID_MASK);
749
750 cmdresp_no = le16_to_cpu(resp->command);
751 cmdresp_result = le16_to_cpu(resp->result);
752
753 /* Save the last command response to debug log */
754 adapter->dbg.last_cmd_resp_index =
755 (adapter->dbg.last_cmd_resp_index + 1) % DBG_CMD_NUM;
756 adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
757 orig_cmdresp_no;
758
759 do_gettimeofday(&tstamp);
760 dev_dbg(adapter->dev, "cmd: CMD_RESP: (%lu.%lu): 0x%x, result %d,"
761 " len %d, seqno 0x%x\n",
762 tstamp.tv_sec, tstamp.tv_usec, orig_cmdresp_no, cmdresp_result,
763 le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
764
765 if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
766 dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n");
767 if (wait_queue)
768 wait_queue->status = MWIFIEX_ERROR_FW_CMDRESP;
769
770 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
771 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
772 adapter->curr_cmd = NULL;
773 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
774 return -1;
775 }
776
777 if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
778 adapter->curr_cmd->cmd_flag &= ~CMD_F_HOSTCMD;
779 if ((cmdresp_result == HostCmd_RESULT_OK)
780 && (cmdresp_no == HostCmd_CMD_802_11_HS_CFG_ENH))
781 ret = mwifiex_ret_802_11_hs_cfg(priv, resp);
782 } else {
783 /* handle response */
784 ret = mwifiex_process_sta_cmdresp(priv, cmdresp_no, resp,
785 wait_queue);
786 }
787
788 /* Check init command response */
789 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
790 if (ret == -1) {
791 dev_err(adapter->dev, "%s: cmd %#x failed during "
792 "initialization\n", __func__, cmdresp_no);
793 mwifiex_init_fw_complete(adapter);
794 return -1;
795 } else if (adapter->last_init_cmd == cmdresp_no)
796 adapter->hw_status = MWIFIEX_HW_STATUS_INIT_DONE;
797 }
798
799 if (adapter->curr_cmd) {
800 if (wait_queue && (!ret))
801 wait_queue->status = MWIFIEX_ERROR_NO_ERROR;
802 else if (wait_queue && (ret == -1))
803 wait_queue->status = MWIFIEX_ERROR_CMD_RESP_FAIL;
804
805 /* Clean up and put current command back to cmd_free_q */
806 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
807
808 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
809 adapter->curr_cmd = NULL;
810 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
811 }
812
813 return ret;
814}
815
816/*
817 * This function handles the timeout of command sending.
818 *
 819 * It logs the timed-out command and debug statistics; the command is not re-sent.
820 */
821void
822mwifiex_cmd_timeout_func(unsigned long function_context)
823{
824 struct mwifiex_adapter *adapter =
825 (struct mwifiex_adapter *) function_context;
826 struct cmd_ctrl_node *cmd_node = NULL;
827 struct mwifiex_wait_queue *wait_queue = NULL;
828 struct timeval tstamp;
829
830 adapter->num_cmd_timeout++;
831 adapter->dbg.num_cmd_timeout++;
832 if (!adapter->curr_cmd) {
833 dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
834 return;
835 }
836 cmd_node = adapter->curr_cmd;
837 if (cmd_node->wq_buf) {
838 wait_queue = (struct mwifiex_wait_queue *) cmd_node->wq_buf;
839 wait_queue->status = MWIFIEX_ERROR_CMD_TIMEOUT;
840 }
841
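	/* Note: cmd_node is always non-NULL here since curr_cmd was
	 * checked above, so this test is defensive only.
	 */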
842 if (cmd_node) {
843 adapter->dbg.timeout_cmd_id =
844 adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
845 adapter->dbg.timeout_cmd_act =
846 adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
847 do_gettimeofday(&tstamp);
848 dev_err(adapter->dev, "%s: Timeout cmd id (%lu.%lu) = %#x,"
849 " act = %#x\n", __func__,
850 tstamp.tv_sec, tstamp.tv_usec,
851 adapter->dbg.timeout_cmd_id,
852 adapter->dbg.timeout_cmd_act);
853
854 dev_err(adapter->dev, "num_data_h2c_failure = %d\n",
855 adapter->dbg.num_tx_host_to_card_failure);
856 dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
857 adapter->dbg.num_cmd_host_to_card_failure);
858
859 dev_err(adapter->dev, "num_cmd_timeout = %d\n",
860 adapter->dbg.num_cmd_timeout);
861 dev_err(adapter->dev, "num_tx_timeout = %d\n",
862 adapter->dbg.num_tx_timeout);
863
864 dev_err(adapter->dev, "last_cmd_index = %d\n",
865 adapter->dbg.last_cmd_index);
866 print_hex_dump_bytes("last_cmd_id: ", DUMP_PREFIX_OFFSET,
867 adapter->dbg.last_cmd_id, DBG_CMD_NUM);
868 print_hex_dump_bytes("last_cmd_act: ", DUMP_PREFIX_OFFSET,
869 adapter->dbg.last_cmd_act, DBG_CMD_NUM);
870
871 dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
872 adapter->dbg.last_cmd_resp_index);
873 print_hex_dump_bytes("last_cmd_resp_id: ", DUMP_PREFIX_OFFSET,
874 adapter->dbg.last_cmd_resp_id, DBG_CMD_NUM);
875
876 dev_err(adapter->dev, "last_event_index = %d\n",
877 adapter->dbg.last_event_index);
878 print_hex_dump_bytes("last_event: ", DUMP_PREFIX_OFFSET,
879 adapter->dbg.last_event, DBG_CMD_NUM);
880
881 dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
882 adapter->data_sent, adapter->cmd_sent);
883
884 dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
885 adapter->ps_mode, adapter->ps_state);
886 }
887 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
888 mwifiex_init_fw_complete(adapter);
889
890 return;
891}
892
893/*
894 * This function cancels all the pending commands.
895 *
 896 * The current command, all commands in the command pending queue, and all
 897 * scan commands in the scan pending queue are cancelled. All the completion
 898 * callbacks are called with failure status to ensure cleanup.
899 */
900void
901mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
902{
903 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
904 struct mwifiex_wait_queue *wait_queue = NULL;
905 unsigned long flags;
906
907 /* Cancel current cmd */
908 if ((adapter->curr_cmd) && (adapter->curr_cmd->wq_buf)) {
909 wait_queue =
910 (struct mwifiex_wait_queue *) adapter->curr_cmd->wq_buf;
911 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
912 adapter->curr_cmd->wq_buf = NULL;
913 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
914 wait_queue->status = MWIFIEX_ERROR_CMD_CANCEL;
915 mwifiex_ioctl_complete(adapter, wait_queue, -1);
916 }
917 /* Cancel all pending command */
918 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
919 list_for_each_entry_safe(cmd_node, tmp_node,
920 &adapter->cmd_pending_q, list) {
921 list_del(&cmd_node->list);
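		/* The queue lock is dropped across the completion call,
		 * presumably so the callback need not run under the
		 * spinlock; it is re-acquired before the next iteration.
		 */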
922 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
923
924 if (cmd_node->wq_buf) {
925 wait_queue =
926 (struct mwifiex_wait_queue *) cmd_node->wq_buf;
927 wait_queue->status = MWIFIEX_ERROR_CMD_CANCEL;
928 mwifiex_ioctl_complete(adapter, wait_queue, -1);
929 cmd_node->wq_buf = NULL;
930 }
931 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
932 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
933 }
934 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
935
936 /* Cancel all pending scan command */
937 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
938 list_for_each_entry_safe(cmd_node, tmp_node,
939 &adapter->scan_pending_q, list) {
940 list_del(&cmd_node->list);
941 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
942
943 cmd_node->wq_buf = NULL;
944 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
945 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
946 }
947 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
948
949 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
950 adapter->scan_processing = false;
951 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
952}
953
954/*
 955 * This function cancels all pending commands that match
956 * the given IOCTL request.
957 *
958 * Both the current command buffer and the pending command queue are
 959 * searched for a matching IOCTL request. The completion callback of
960 * the matched command is called with failure status to ensure cleanup.
961 * In case of scan commands, all pending commands in scan pending queue
962 * are cancelled.
963 */
964void
965mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter,
966 struct mwifiex_wait_queue *wait_queue)
967{
968 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
969 unsigned long cmd_flags;
970 unsigned long cmd_pending_q_flags;
971 unsigned long scan_pending_q_flags;
 972	bool cancel_scan_cmd = false;
973
974 if ((adapter->curr_cmd) &&
975 (adapter->curr_cmd->wq_buf == wait_queue)) {
976 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
977 cmd_node = adapter->curr_cmd;
978 cmd_node->wq_buf = NULL;
979 cmd_node->cmd_flag |= CMD_F_CANCELED;
980 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
981 }
982
983 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
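	/* Drain every pending command attached to this wait queue. */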
984 while (1) {
985 cmd_node = mwifiex_get_pending_ioctl_cmd(adapter, wait_queue);
986 if (!cmd_node)
987 break;
988
989 spin_lock_irqsave(&adapter->cmd_pending_q_lock,
990 cmd_pending_q_flags);
991 list_del(&cmd_node->list);
992 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
993 cmd_pending_q_flags);
994
995 cmd_node->wq_buf = NULL;
996 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
997 }
998 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
999 /* Cancel all pending scan command */
1000 spin_lock_irqsave(&adapter->scan_pending_q_lock,
1001 scan_pending_q_flags);
1002 list_for_each_entry_safe(cmd_node, tmp_node,
1003 &adapter->scan_pending_q, list) {
1004 if (cmd_node->wq_buf == wait_queue) {
1005 list_del(&cmd_node->list);
1006 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1007 scan_pending_q_flags);
1008 cmd_node->wq_buf = NULL;
1009 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
1010 spin_lock_irqsave(&adapter->scan_pending_q_lock,
1011 scan_pending_q_flags);
1012 cancel_scan_cmd = true;
1013 }
1014 }
1015 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1016 scan_pending_q_flags);
1017
1018 if (cancel_scan_cmd) {
1019 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
1020 adapter->scan_processing = false;
1021 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
1022 }
1023 wait_queue->status = MWIFIEX_ERROR_CMD_CANCEL;
1024 mwifiex_ioctl_complete(adapter, wait_queue, -1);
1025
1026 return;
1027}
1028
1029/*
1030 * This function sends the sleep confirm command to the firmware, if
1031 * possible.
1032 *
1033 * The sleep confirm command cannot be issued while a command is being
1034 * sent to the firmware, while a command response is still expected, or
1035 * while received data is awaiting handling. The debug message flags
1036 * these blocking conditions as D, C and R respectively.
1037 */
1038void
1039mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
1040{
1041 if (!adapter->cmd_sent &&
1042 !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
1043 mwifiex_dnld_sleep_confirm_cmd(adapter);
1044 else
1045 dev_dbg(adapter->dev,
1046 "cmd: Delay Sleep Confirm (%s%s%s)\n",
1047 (adapter->cmd_sent) ? "D" : "",
1048 (adapter->curr_cmd) ? "C" : "",
1049 (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
1050}
1051
1052/*
1053 * This function sends a Host Sleep activated event to applications.
1054 *
1055 * This event is generated by the driver, with a blank event body.
1056 */
1057void
1058mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
1059{
1060 if (activated) {
1061 if (priv->adapter->is_hs_configured) {
1062 priv->adapter->hs_activated = true;
1063 dev_dbg(priv->adapter->dev, "event: hs_activated\n");
1064 priv->adapter->hs_activate_wait_q_woken = true;
1065 wake_up_interruptible(
1066 &priv->adapter->hs_activate_wait_q);
1067 } else {
1068 dev_dbg(priv->adapter->dev, "event: HS not configured\n");
1069 }
1070 } else {
1071 dev_dbg(priv->adapter->dev, "event: hs_deactivated\n");
1072 priv->adapter->hs_activated = false;
1073 }
1074}
1075
1076/*
1077 * This function handles the command response of a Host Sleep configuration
1078 * command.
1079 *
1080 * Handling includes changing the header fields into CPU format
1081 * and setting the current host sleep activation status in the driver.
1082 *
1083 * If the host sleep status changes, the function generates an event to
1084 * notify the applications.
1085 */
1086int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1087 struct host_cmd_ds_command *resp)
1088{
1089 struct mwifiex_adapter *adapter = priv->adapter;
1090 struct host_cmd_ds_802_11_hs_cfg_enh *phs_cfg =
1091 &resp->params.opt_hs_cfg;
1092 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1093
1094 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE)) {
1095 mwifiex_hs_activated_event(priv, true);
1096 return 0;
1097 } else {
1098 dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply"
1099 " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
1100 resp->result, conditions,
1101 phs_cfg->params.hs_config.gpio,
1102 phs_cfg->params.hs_config.gap);
1103 }
1104 if (conditions != HOST_SLEEP_CFG_CANCEL) {
1105 adapter->is_hs_configured = true;
1106 } else {
1107 adapter->is_hs_configured = false;
1108 if (adapter->hs_activated)
1109 mwifiex_hs_activated_event(priv, false);
1110 }
1111
1112 return 0;
1113}
1114
1115/*
1116 * This function wakes up the adapter and generates a Host Sleep
1117 * cancel event on receiving the power up interrupt.
1118 */
1119void
1120mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
1121{
1122 dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep"
1123 " since there is interrupt from the firmware\n", __func__);
1124
1125 adapter->if_ops.wakeup(adapter);
1126 adapter->hs_activated = false;
1127 adapter->is_hs_configured = false;
1128 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
1129 MWIFIEX_BSS_ROLE_ANY), false);
1130 return;
1131}
1132
1133/*
1134 * This function handles the command response of a sleep confirm command.
1135 *
1136 * The function sets the card state to SLEEP if the response indicates success.
1137 */
1138void
1139mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
1140 u8 *pbuf, u32 upld_len)
1141{
1142 struct host_cmd_ds_command *cmd = (struct host_cmd_ds_command *) pbuf;
1143 struct mwifiex_private *priv =
1144 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
1145 uint16_t result = le16_to_cpu(cmd->result);
1146 uint16_t command = le16_to_cpu(cmd->command);
1147 uint16_t seq_num = le16_to_cpu(cmd->seq_num);
1148
1149 if (!upld_len) {
1150 dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
1151 return;
1152 }
1153
1154 /* Get BSS number and corresponding priv */
1155 priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
1156 HostCmd_GET_BSS_TYPE(seq_num));
1157 if (!priv)
1158 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
1159
1160 /* Update sequence number */
1161 seq_num = HostCmd_GET_SEQ_NO(seq_num);
1162 /* Clear RET_BIT from HostCmd */
1163 command &= HostCmd_CMD_ID_MASK;
1164
1165 if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
1166 dev_err(adapter->dev, "%s: received unexpected response for"
1167 " cmd %x, result = %x\n", __func__, command, result);
1168 return;
1169 }
1170
1171 if (result) {
1172 dev_err(adapter->dev, "%s: sleep confirm cmd failed\n",
1173 __func__);
1174 adapter->pm_wakeup_card_req = false;
1175 adapter->ps_state = PS_STATE_AWAKE;
1176 return;
1177 }
1178 adapter->pm_wakeup_card_req = true;
1179 if (adapter->is_hs_configured)
1180 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
1181 MWIFIEX_BSS_ROLE_ANY), true);
1182 adapter->ps_state = PS_STATE_SLEEP;
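	/* Write the masked command ID and sequence number back to the
	 * buffer in little-endian form, presumably for later users of
	 * this response buffer.
	 */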
1183 cmd->command = cpu_to_le16(command);
1184 cmd->seq_num = cpu_to_le16(seq_num);
1185}
1186EXPORT_SYMBOL_GPL(mwifiex_process_sleep_confirm_resp);
1187
1188/*
1189 * This function prepares an enhanced power mode command.
1190 *
1191 * This function can be used to disable power save or to configure
1192 * power save with auto PS or STA PS or auto deep sleep.
1193 *
1194 * Preparation includes -
1195 * - Setting command ID, action and proper size
1196 * - Setting Power Save bitmap, PS parameters TLV, PS mode TLV,
1197 * auto deep sleep TLV (as required)
1198 * - Ensuring correct endian-ness
1199 */
1200int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
1201 struct host_cmd_ds_command *cmd,
1202 u16 cmd_action, uint16_t ps_bitmap,
1203 void *data_buf)
1204{
1205 struct host_cmd_ds_802_11_ps_mode_enh *psmode_enh =
1206 &cmd->params.psmode_enh;
1207 u8 *tlv = NULL;
1208 u16 cmd_size = 0;
1209
1210 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
1211 if (cmd_action == DIS_AUTO_PS) {
1212 psmode_enh->action = cpu_to_le16(DIS_AUTO_PS);
1213 psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
1214 cmd->size = cpu_to_le16(S_DS_GEN + AUTO_PS_FIX_SIZE);
1215 } else if (cmd_action == GET_PS) {
1216 psmode_enh->action = cpu_to_le16(GET_PS);
1217 psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
1218 cmd->size = cpu_to_le16(S_DS_GEN + AUTO_PS_FIX_SIZE);
1219 } else if (cmd_action == EN_AUTO_PS) {
1220 psmode_enh->action = cpu_to_le16(EN_AUTO_PS);
1221 psmode_enh->params.auto_ps.ps_bitmap = cpu_to_le16(ps_bitmap);
1222 cmd_size = S_DS_GEN + AUTO_PS_FIX_SIZE;
1223 tlv = (u8 *) cmd + cmd_size;
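		/* TLVs are appended directly after the fixed part of the
		 * command; cmd_size and tlv advance in step as each TLV
		 * is added below.
		 */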
1224 if (ps_bitmap & BITMAP_STA_PS) {
1225 struct mwifiex_adapter *adapter = priv->adapter;
1226 struct mwifiex_ie_types_ps_param *ps_tlv =
1227 (struct mwifiex_ie_types_ps_param *) tlv;
1228 struct mwifiex_ps_param *ps_mode = &ps_tlv->param;
1229 ps_tlv->header.type = cpu_to_le16(TLV_TYPE_PS_PARAM);
1230 ps_tlv->header.len = cpu_to_le16(sizeof(*ps_tlv) -
1231 sizeof(struct mwifiex_ie_types_header));
1232 cmd_size += sizeof(*ps_tlv);
1233 tlv += sizeof(*ps_tlv);
1234 dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n");
1235 ps_mode->null_pkt_interval =
1236 cpu_to_le16(adapter->null_pkt_interval);
1237 ps_mode->multiple_dtims =
1238 cpu_to_le16(adapter->multiple_dtim);
1239 ps_mode->bcn_miss_timeout =
1240 cpu_to_le16(adapter->bcn_miss_time_out);
1241 ps_mode->local_listen_interval =
1242 cpu_to_le16(adapter->local_listen_interval);
1243 ps_mode->adhoc_wake_period =
1244 cpu_to_le16(adapter->adhoc_awake_period);
1245 ps_mode->delay_to_ps =
1246 cpu_to_le16(adapter->delay_to_ps);
1247 ps_mode->mode =
1248 cpu_to_le16(adapter->enhanced_ps_mode);
1249
1250 }
1251 if (ps_bitmap & BITMAP_AUTO_DS) {
1252 struct mwifiex_ie_types_auto_ds_param *auto_ps_tlv =
1253 (struct mwifiex_ie_types_auto_ds_param *) tlv;
1254 struct mwifiex_auto_ds_param *auto_ds =
1255 &auto_ps_tlv->param;
1256 u16 idletime = 0;
1257 auto_ps_tlv->header.type =
1258 cpu_to_le16(TLV_TYPE_AUTO_DS_PARAM);
1259 auto_ps_tlv->header.len =
1260 cpu_to_le16(sizeof(*auto_ps_tlv) -
1261 sizeof(struct mwifiex_ie_types_header));
1262 cmd_size += sizeof(*auto_ps_tlv);
1263 tlv += sizeof(*auto_ps_tlv);
1264 if (data_buf)
1265 idletime = ((struct mwifiex_ds_auto_ds *)
1266 data_buf)->idle_time;
1267 dev_dbg(priv->adapter->dev,
1268 "cmd: PS Command: Enter Auto Deep Sleep\n");
1269 auto_ds->deep_sleep_timeout = cpu_to_le16(idletime);
1270 }
1271 cmd->size = cpu_to_le16(cmd_size);
1272 }
1273 return 0;
1274}
1275
1276/*
1277 * This function handles the command response of an enhanced power mode
1278 * command.
1279 *
1280 * Handling includes changing the header fields into CPU format
1281 * and setting the current enhanced power mode in the driver.
1282 */
1283int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
1284 struct host_cmd_ds_command *resp,
1285 void *data_buf)
1286{
1287 struct mwifiex_adapter *adapter = priv->adapter;
1288 struct host_cmd_ds_802_11_ps_mode_enh *ps_mode =
1289 &resp->params.psmode_enh;
1290 uint16_t action = le16_to_cpu(ps_mode->action);
1291 uint16_t ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap);
1292 uint16_t auto_ps_bitmap =
1293 le16_to_cpu(ps_mode->params.auto_ps.ps_bitmap);
1294
1295 dev_dbg(adapter->dev, "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
1296 __func__, resp->result, action);
1297 if (action == EN_AUTO_PS) {
1298 if (auto_ps_bitmap & BITMAP_AUTO_DS) {
1299 dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n");
1300 priv->adapter->is_deep_sleep = true;
1301 }
1302 if (auto_ps_bitmap & BITMAP_STA_PS) {
1303 dev_dbg(adapter->dev, "cmd: Enabled STA power save\n");
1304 if (adapter->sleep_period.period)
1305 dev_dbg(adapter->dev, "cmd: set to uapsd/pps mode\n");
1306 }
1307 } else if (action == DIS_AUTO_PS) {
1308 if (ps_bitmap & BITMAP_AUTO_DS) {
1309 priv->adapter->is_deep_sleep = false;
1310 dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n");
1311 }
1312 if (ps_bitmap & BITMAP_STA_PS) {
1313 dev_dbg(adapter->dev, "cmd: Disabled STA power save\n");
1314 if (adapter->sleep_period.period) {
1315 adapter->delay_null_pkt = false;
1316 adapter->tx_lock_flag = false;
1317 adapter->pps_uapsd_mode = false;
1318 }
1319 }
1320 } else if (action == GET_PS) {
1321 if (ps_bitmap & (BITMAP_STA_PS | BITMAP_UAP_INACT_PS
1322 | BITMAP_UAP_DTIM_PS))
1323 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
1324 else
1325 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
1326
1327 dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap);
1328
1329 if (data_buf) {
1330 /* This section is for get power save mode */
1331 struct mwifiex_ds_pm_cfg *pm_cfg =
1332 (struct mwifiex_ds_pm_cfg *)data_buf;
1333 if (ps_bitmap & BITMAP_STA_PS)
1334 pm_cfg->param.ps_mode = 1;
1335 else
1336 pm_cfg->param.ps_mode = 0;
1337 }
1338 }
1339 return 0;
1340}
1341
1342/*
1343 * This function prepares command to get hardware specifications.
1344 *
1345 * Preparation includes -
1346 * - Setting command ID, action and proper size
1347 * - Setting permanent address parameter
1348 * - Ensuring correct endian-ness
1349 */
1350int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv,
1351 struct host_cmd_ds_command *cmd)
1352{
1353 struct host_cmd_ds_get_hw_spec *hw_spec = &cmd->params.hw_spec;
1354
1355 cmd->command = cpu_to_le16(HostCmd_CMD_GET_HW_SPEC);
1356 cmd->size =
1357 cpu_to_le16(sizeof(struct host_cmd_ds_get_hw_spec) + S_DS_GEN);
1358 memcpy(hw_spec->permanent_addr, priv->curr_addr, ETH_ALEN);
1359
1360 return 0;
1361}
1362
1363/*
1364 * This function handles the command response of get hardware
1365 * specifications.
1366 *
1367 * Handling includes changing the header fields into CPU format
1368 * and saving/updating the following parameters in the driver -
1369 * - Firmware capability information
1370 * - Firmware band settings
1371 * - Ad-hoc start band and channel
1372 * - Ad-hoc 11n activation status
1373 * - Firmware release number
1374 * - Number of antennas
1375 * - Hardware address
1376 * - Hardware interface version
1377 * - Firmware version
1378 * - Region code
1379 * - 11n capabilities
1380 * - MCS support fields
1381 * - MP end port
1382 */
1383int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
1384 struct host_cmd_ds_command *resp)
1385{
1386 struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
1387 struct mwifiex_adapter *adapter = priv->adapter;
1388 int i;
1389
1390 adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
1391
1392 if (IS_SUPPORT_MULTI_BANDS(adapter))
1393 adapter->fw_bands = (u8) GET_FW_DEFAULT_BANDS(adapter);
1394 else
1395 adapter->fw_bands = BAND_B;
1396
1397 adapter->config_bands = adapter->fw_bands;
1398
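	/* Choose the widest ad-hoc start band available: A (plus AN
	 * when 11n capable), else B/G/GN, else B/G, else B only; the
	 * default ad-hoc channel follows the chosen band.
	 */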
1399 if (adapter->fw_bands & BAND_A) {
1400 if (adapter->fw_bands & BAND_GN) {
1401 adapter->config_bands |= BAND_AN;
1402 adapter->fw_bands |= BAND_AN;
1403 }
1404 if (adapter->fw_bands & BAND_AN) {
1405 adapter->adhoc_start_band = BAND_A | BAND_AN;
1406 adapter->adhoc_11n_enabled = true;
1407 } else {
1408 adapter->adhoc_start_band = BAND_A;
1409 }
1410 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL_A;
1411 } else if (adapter->fw_bands & BAND_GN) {
1412 adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
1413 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
1414 adapter->adhoc_11n_enabled = true;
1415 } else if (adapter->fw_bands & BAND_G) {
1416 adapter->adhoc_start_band = BAND_G | BAND_B;
1417 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
1418 } else if (adapter->fw_bands & BAND_B) {
1419 adapter->adhoc_start_band = BAND_B;
1420 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
1421 }
1422
1423 adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
1424 adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
1425
1426 dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
1427 adapter->fw_release_number);
1428 dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
1429 hw_spec->permanent_addr);
1430 dev_dbg(adapter->dev, "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
1431 le16_to_cpu(hw_spec->hw_if_version),
1432 le16_to_cpu(hw_spec->version));
1433
1434 if (priv->curr_addr[0] == 0xff)
1435 memmove(priv->curr_addr, hw_spec->permanent_addr, ETH_ALEN);
1436
1437 adapter->region_code = le16_to_cpu(hw_spec->region_code);
1438
1439 for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++)
1440 /* Use the region code to search for the index */
1441 if (adapter->region_code == region_code_index[i])
1442 break;
1443
1444 /* If it's unidentified region code, use the default (USA) */
1445 if (i >= MWIFIEX_MAX_REGION_CODE) {
1446 adapter->region_code = 0x10;
1447 dev_dbg(adapter->dev, "cmd: unknown region code, use default (USA)\n");
1448 }
1449
1450 adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
1451 adapter->usr_dot_11n_dev_cap = adapter->hw_dot_11n_dev_cap &
1452 DEFAULT_11N_CAP_MASK;
1453 adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
1454 adapter->usr_dev_mcs_support = adapter->hw_dev_mcs_support;
1455 mwifiex_show_dot_11n_dev_cap(adapter, adapter->hw_dot_11n_dev_cap);
1456 mwifiex_show_dev_mcs_support(adapter, adapter->hw_dev_mcs_support);
1457
1458 if (adapter->if_ops.update_mp_end_port)
1459 adapter->if_ops.update_mp_end_port(adapter,
1460 le16_to_cpu(hw_spec->mp_end_port));
1461
1462 return 0;
1463}
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
new file mode 100644
index 00000000000..63b09692f27
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -0,0 +1,773 @@
1/*
2 * Marvell Wireless LAN device driver: debugfs
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include <linux/debugfs.h>
21
22#include "main.h"
23#include "11n.h"
24
25
26static struct dentry *mwifiex_dfs_dir;
27
28static char *bss_modes[] = {
29 "Unknown",
30 "Managed",
31 "Ad-hoc",
32 "Auto"
33};
34
35/* size/addr for mwifiex_debug_info */
36#define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n))
37#define item_addr(n) (offsetof(struct mwifiex_debug_info, n))
38
39/* size/addr for struct mwifiex_adapter */
40#define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n))
41#define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n))
42
43struct mwifiex_debug_data {
44 char name[32]; /* variable/array name */
45 u32 size; /* size of the variable/array */
46 size_t addr; /* address of the variable/array */
47 int num; /* number of variables in an array */
48};
49
50static struct mwifiex_debug_data items[] = {
51 {"int_counter", item_size(int_counter),
52 item_addr(int_counter), 1},
53 {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
54 item_addr(packets_out[WMM_AC_VO]), 1},
55 {"wmm_ac_vi", item_size(packets_out[WMM_AC_VI]),
56 item_addr(packets_out[WMM_AC_VI]), 1},
57 {"wmm_ac_be", item_size(packets_out[WMM_AC_BE]),
58 item_addr(packets_out[WMM_AC_BE]), 1},
59 {"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
60 item_addr(packets_out[WMM_AC_BK]), 1},
61 {"max_tx_buf_size", item_size(max_tx_buf_size),
62 item_addr(max_tx_buf_size), 1},
63 {"tx_buf_size", item_size(tx_buf_size),
64 item_addr(tx_buf_size), 1},
65 {"curr_tx_buf_size", item_size(curr_tx_buf_size),
66 item_addr(curr_tx_buf_size), 1},
67 {"ps_mode", item_size(ps_mode),
68 item_addr(ps_mode), 1},
69 {"ps_state", item_size(ps_state),
70 item_addr(ps_state), 1},
71 {"is_deep_sleep", item_size(is_deep_sleep),
72 item_addr(is_deep_sleep), 1},
73 {"wakeup_dev_req", item_size(pm_wakeup_card_req),
74 item_addr(pm_wakeup_card_req), 1},
75 {"wakeup_tries", item_size(pm_wakeup_fw_try),
76 item_addr(pm_wakeup_fw_try), 1},
77 {"hs_configured", item_size(is_hs_configured),
78 item_addr(is_hs_configured), 1},
79 {"hs_activated", item_size(hs_activated),
80 item_addr(hs_activated), 1},
81 {"num_tx_timeout", item_size(num_tx_timeout),
82 item_addr(num_tx_timeout), 1},
83 {"num_cmd_timeout", item_size(num_cmd_timeout),
84 item_addr(num_cmd_timeout), 1},
85 {"timeout_cmd_id", item_size(timeout_cmd_id),
86 item_addr(timeout_cmd_id), 1},
87 {"timeout_cmd_act", item_size(timeout_cmd_act),
88 item_addr(timeout_cmd_act), 1},
89 {"last_cmd_id", item_size(last_cmd_id),
90 item_addr(last_cmd_id), DBG_CMD_NUM},
91 {"last_cmd_act", item_size(last_cmd_act),
92 item_addr(last_cmd_act), DBG_CMD_NUM},
93 {"last_cmd_index", item_size(last_cmd_index),
94 item_addr(last_cmd_index), 1},
95 {"last_cmd_resp_id", item_size(last_cmd_resp_id),
96 item_addr(last_cmd_resp_id), DBG_CMD_NUM},
97 {"last_cmd_resp_index", item_size(last_cmd_resp_index),
98 item_addr(last_cmd_resp_index), 1},
99 {"last_event", item_size(last_event),
100 item_addr(last_event), DBG_CMD_NUM},
101 {"last_event_index", item_size(last_event_index),
102 item_addr(last_event_index), 1},
103 {"num_cmd_h2c_fail", item_size(num_cmd_host_to_card_failure),
104 item_addr(num_cmd_host_to_card_failure), 1},
105 {"num_cmd_sleep_cfm_fail",
106 item_size(num_cmd_sleep_cfm_host_to_card_failure),
107 item_addr(num_cmd_sleep_cfm_host_to_card_failure), 1},
108 {"num_tx_h2c_fail", item_size(num_tx_host_to_card_failure),
109 item_addr(num_tx_host_to_card_failure), 1},
110 {"num_evt_deauth", item_size(num_event_deauth),
111 item_addr(num_event_deauth), 1},
112 {"num_evt_disassoc", item_size(num_event_disassoc),
113 item_addr(num_event_disassoc), 1},
114 {"num_evt_link_lost", item_size(num_event_link_lost),
115 item_addr(num_event_link_lost), 1},
116 {"num_cmd_deauth", item_size(num_cmd_deauth),
117 item_addr(num_cmd_deauth), 1},
118 {"num_cmd_assoc_ok", item_size(num_cmd_assoc_success),
119 item_addr(num_cmd_assoc_success), 1},
120 {"num_cmd_assoc_fail", item_size(num_cmd_assoc_failure),
121 item_addr(num_cmd_assoc_failure), 1},
122 {"cmd_sent", item_size(cmd_sent),
123 item_addr(cmd_sent), 1},
124 {"data_sent", item_size(data_sent),
125 item_addr(data_sent), 1},
126 {"cmd_resp_received", item_size(cmd_resp_received),
127 item_addr(cmd_resp_received), 1},
128 {"event_received", item_size(event_received),
129 item_addr(event_received), 1},
130
131 /* variables defined in struct mwifiex_adapter */
132 {"ioctl_pending", adapter_item_size(ioctl_pending),
133 adapter_item_addr(ioctl_pending), 1},
134 {"tx_pending", adapter_item_size(tx_pending),
135 adapter_item_addr(tx_pending), 1},
136 {"rx_pending", adapter_item_size(rx_pending),
137 adapter_item_addr(rx_pending), 1},
138};
139
140static int num_of_items = ARRAY_SIZE(items);
141
142/*
143 * Generic proc file open handler.
144 *
145 * This function is called every time a file is accessed for read or write.
146 */
147static int
148mwifiex_open_generic(struct inode *inode, struct file *file)
149{
150 file->private_data = inode->i_private;
151 return 0;
152}
153
154/*
155 * Proc info file read handler.
156 *
157 * This function is called when the 'info' file is opened for reading.
158 * It prints the following driver related information -
159 * - Driver name
160 * - Driver version
161 * - Driver extended version
162 * - Interface name
163 * - BSS mode
164 * - Media state (connected or disconnected)
165 * - MAC address
166 * - Total number of Tx bytes
167 * - Total number of Rx bytes
168 * - Total number of Tx packets
169 * - Total number of Rx packets
170 * - Total number of dropped Tx packets
171 * - Total number of dropped Rx packets
172 * - Total number of corrupted Tx packets
173 * - Total number of corrupted Rx packets
174 * - Carrier status (on or off)
175 * - Tx queue status (started or stopped)
176 *
177 * For STA mode drivers, it also prints the following extra -
178 * - ESSID
179 * - BSSID
180 * - Channel
181 * - Region code
182 * - Multicast count
183 * - Multicast addresses
184 */
185static ssize_t
186mwifiex_info_read(struct file *file, char __user *ubuf,
187 size_t count, loff_t *ppos)
188{
189 struct mwifiex_private *priv =
190 (struct mwifiex_private *) file->private_data;
191 struct net_device *netdev = priv->netdev;
192 struct netdev_hw_addr *ha;
193 unsigned long page = get_zeroed_page(GFP_KERNEL);
194 char *p = (char *) page, fmt[64];
195 struct mwifiex_bss_info info;
196 ssize_t ret = 0;
197 int i = 0;
198
199 if (!p)
200 return -ENOMEM;
201
202 memset(&info, 0, sizeof(info));
203 ret = mwifiex_get_bss_info(priv, &info);
204 if (ret)
205 goto free_and_exit;
206
207 mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1);
208
209 if (!priv->version_str[0])
210 mwifiex_get_ver_ext(priv);
211
 212	p += sprintf(p, "driver_name = \"mwifiex\"\n");
213 p += sprintf(p, "driver_version = %s", fmt);
214 p += sprintf(p, "\nverext = %s", priv->version_str);
215 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
216 p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
217 p += sprintf(p, "media_state=\"%s\"\n",
218 (!priv->media_connected ? "Disconnected" : "Connected"));
219 p += sprintf(p, "mac_address=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
220 netdev->dev_addr[0], netdev->dev_addr[1],
221 netdev->dev_addr[2], netdev->dev_addr[3],
222 netdev->dev_addr[4], netdev->dev_addr[5]);
223
224 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
225 p += sprintf(p, "multicast_count=\"%d\"\n",
226 netdev_mc_count(netdev));
227 p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
228 p += sprintf(p, "bssid=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
229 info.bssid[0], info.bssid[1],
230 info.bssid[2], info.bssid[3],
231 info.bssid[4], info.bssid[5]);
232 p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
233 p += sprintf(p, "region_code = \"%02x\"\n", info.region_code);
234
235 netdev_for_each_mc_addr(ha, netdev)
236 p += sprintf(p, "multicast_address[%d]="
237 "\"%02x:%02x:%02x:%02x:%02x:%02x\"\n", i++,
238 ha->addr[0], ha->addr[1],
239 ha->addr[2], ha->addr[3],
240 ha->addr[4], ha->addr[5]);
241 }
242
243 p += sprintf(p, "num_tx_bytes = %lu\n", priv->stats.tx_bytes);
244 p += sprintf(p, "num_rx_bytes = %lu\n", priv->stats.rx_bytes);
245 p += sprintf(p, "num_tx_pkts = %lu\n", priv->stats.tx_packets);
246 p += sprintf(p, "num_rx_pkts = %lu\n", priv->stats.rx_packets);
247 p += sprintf(p, "num_tx_pkts_dropped = %lu\n", priv->stats.tx_dropped);
248 p += sprintf(p, "num_rx_pkts_dropped = %lu\n", priv->stats.rx_dropped);
249 p += sprintf(p, "num_tx_pkts_err = %lu\n", priv->stats.tx_errors);
250 p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors);
251 p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev))
252 ? "on" : "off"));
253 p += sprintf(p, "tx queue %s\n", ((netif_queue_stopped(priv->netdev))
254 ? "stopped" : "started"));
255
256 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
257 (unsigned long) p - page);
258
259free_and_exit:
260 free_page(page);
261 return ret;
262}
263
264/*
265 * Proc getlog file read handler.
266 *
 267 * This function is called when the 'getlog' file is opened for reading.
268 * It prints the following log information -
269 * - Number of multicast Tx frames
270 * - Number of failed packets
271 * - Number of Tx retries
272 * - Number of multicast Tx retries
273 * - Number of duplicate frames
274 * - Number of RTS successes
275 * - Number of RTS failures
276 * - Number of ACK failures
277 * - Number of fragmented Rx frames
278 * - Number of multicast Rx frames
279 * - Number of FCS errors
280 * - Number of Tx frames
281 * - WEP ICV error counts
282 */
283static ssize_t
284mwifiex_getlog_read(struct file *file, char __user *ubuf,
285 size_t count, loff_t *ppos)
286{
287 struct mwifiex_private *priv =
288 (struct mwifiex_private *) file->private_data;
289 unsigned long page = get_zeroed_page(GFP_KERNEL);
290 char *p = (char *) page;
291 ssize_t ret = 0;
292 struct mwifiex_ds_get_stats stats;
293
294 if (!p)
295 return -ENOMEM;
296
297 memset(&stats, 0, sizeof(stats));
298 ret = mwifiex_get_stats_info(priv, &stats);
299 if (ret)
300 goto free_and_exit;
301
302 p += sprintf(p, "\n"
303 "mcasttxframe %u\n"
304 "failed %u\n"
305 "retry %u\n"
306 "multiretry %u\n"
307 "framedup %u\n"
308 "rtssuccess %u\n"
309 "rtsfailure %u\n"
310 "ackfailure %u\n"
311 "rxfrag %u\n"
312 "mcastrxframe %u\n"
313 "fcserror %u\n"
314 "txframe %u\n"
315 "wepicverrcnt-1 %u\n"
316 "wepicverrcnt-2 %u\n"
317 "wepicverrcnt-3 %u\n"
318 "wepicverrcnt-4 %u\n",
319 stats.mcast_tx_frame,
320 stats.failed,
321 stats.retry,
322 stats.multi_retry,
323 stats.frame_dup,
324 stats.rts_success,
325 stats.rts_failure,
326 stats.ack_failure,
327 stats.rx_frag,
328 stats.mcast_rx_frame,
329 stats.fcs_error,
330 stats.tx_frame,
331 stats.wep_icv_error[0],
332 stats.wep_icv_error[1],
333 stats.wep_icv_error[2],
334 stats.wep_icv_error[3]);
335
336
337 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
338 (unsigned long) p - page);
339
340free_and_exit:
341 free_page(page);
342 return ret;
343}
344
345static struct mwifiex_debug_info info;
346
347/*
348 * Proc debug file read handler.
349 *
 350 * This function is called when the 'debug' file is opened for reading.
351 * It prints the following log information -
352 * - Interrupt count
353 * - WMM AC VO packets count
354 * - WMM AC VI packets count
355 * - WMM AC BE packets count
356 * - WMM AC BK packets count
357 * - Maximum Tx buffer size
358 * - Tx buffer size
359 * - Current Tx buffer size
360 * - Power Save mode
361 * - Power Save state
362 * - Deep Sleep status
363 * - Device wakeup required status
364 * - Number of wakeup tries
365 * - Host Sleep configured status
366 * - Host Sleep activated status
367 * - Number of Tx timeouts
368 * - Number of command timeouts
369 * - Last timed out command ID
370 * - Last timed out command action
371 * - Last command ID
372 * - Last command action
373 * - Last command index
374 * - Last command response ID
375 * - Last command response index
376 * - Last event
377 * - Last event index
378 * - Number of host to card command failures
379 * - Number of sleep confirm command failures
380 * - Number of host to card data failure
381 * - Number of deauthentication events
382 * - Number of disassociation events
383 * - Number of link lost events
384 * - Number of deauthentication commands
385 * - Number of association success commands
386 * - Number of association failure commands
387 * - Number of commands sent
388 * - Number of data packets sent
389 * - Number of command responses received
390 * - Number of events received
391 * - Tx BA stream table (TID, RA)
392 * - Rx reorder table (TID, TA, Start window, Window size, Buffer)
393 */
394static ssize_t
395mwifiex_debug_read(struct file *file, char __user *ubuf,
396 size_t count, loff_t *ppos)
397{
398 struct mwifiex_private *priv =
399 (struct mwifiex_private *) file->private_data;
400 struct mwifiex_debug_data *d = &items[0];
401 unsigned long page = get_zeroed_page(GFP_KERNEL);
402 char *p = (char *) page;
403 ssize_t ret = 0;
404 size_t size, addr;
405 long val;
406 int i, j;
407
408 if (!p)
409 return -ENOMEM;
410
411 ret = mwifiex_get_debug_info(priv, &info);
412 if (ret)
413 goto free_and_exit;
414
415 for (i = 0; i < num_of_items; i++) {
416 p += sprintf(p, "%s=", d[i].name);
417
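		/* Per-element size; num is greater than one only for
		 * array items such as last_cmd_id.
		 */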
418 size = d[i].size / d[i].num;
419
420 if (i < (num_of_items - 3))
421 addr = d[i].addr + (size_t) &info;
422 else /* The last 3 items are struct mwifiex_adapter variables */
423 addr = d[i].addr + (size_t) priv->adapter;
424
425 for (j = 0; j < d[i].num; j++) {
426 switch (size) {
427 case 1:
428 val = *((u8 *) addr);
429 break;
430 case 2:
431 val = *((u16 *) addr);
432 break;
433 case 4:
434 val = *((u32 *) addr);
435 break;
436 case 8:
437 val = *((long long *) addr);
438 break;
439 default:
440 val = -1;
441 break;
442 }
443
444 p += sprintf(p, "%#lx ", val);
445 addr += size;
446 }
447
448 p += sprintf(p, "\n");
449 }
450
451 if (info.tx_tbl_num) {
452 p += sprintf(p, "Tx BA stream table:\n");
453 for (i = 0; i < info.tx_tbl_num; i++)
454 p += sprintf(p, "tid = %d, "
455 "ra = %02x:%02x:%02x:%02x:%02x:%02x\n",
456 info.tx_tbl[i].tid, info.tx_tbl[i].ra[0],
457 info.tx_tbl[i].ra[1], info.tx_tbl[i].ra[2],
458 info.tx_tbl[i].ra[3], info.tx_tbl[i].ra[4],
459 info.tx_tbl[i].ra[5]);
460 }
461
462 if (info.rx_tbl_num) {
463 p += sprintf(p, "Rx reorder table:\n");
464 for (i = 0; i < info.rx_tbl_num; i++) {
465
466 p += sprintf(p, "tid = %d, "
467 "ta = %02x:%02x:%02x:%02x:%02x:%02x, "
468 "start_win = %d, "
469 "win_size = %d, buffer: ",
470 info.rx_tbl[i].tid,
471 info.rx_tbl[i].ta[0], info.rx_tbl[i].ta[1],
472 info.rx_tbl[i].ta[2], info.rx_tbl[i].ta[3],
473 info.rx_tbl[i].ta[4], info.rx_tbl[i].ta[5],
474 info.rx_tbl[i].start_win,
475 info.rx_tbl[i].win_size);
476
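			/* One flag per slot in the reorder window:
			 * '1' if a frame is buffered, '0' if empty.
			 */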
477 for (j = 0; j < info.rx_tbl[i].win_size; j++)
478 p += sprintf(p, "%c ",
479 info.rx_tbl[i].buffer[j] ?
480 '1' : '0');
481
482 p += sprintf(p, "\n");
483 }
484 }
485
486 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
487 (unsigned long) p - page);
488
489free_and_exit:
490 free_page(page);
491 return ret;
492}
493
494static u32 saved_reg_type, saved_reg_offset, saved_reg_value;
495
496/*
497 * Proc regrdwr file write handler.
498 *
 499 * This function is called when the 'regrdwr' file is opened for writing.
500 *
501 * This function can be used to write to a register.
502 */
503static ssize_t
504mwifiex_regrdwr_write(struct file *file,
505 const char __user *ubuf, size_t count, loff_t *ppos)
506{
507 unsigned long addr = get_zeroed_page(GFP_KERNEL);
508 char *buf = (char *) addr;
509 size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
510 int ret = 0;
511 u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX;
512
513 if (!buf)
514 return -ENOMEM;
515
516
517 if (copy_from_user(buf, ubuf, buf_size)) {
518 ret = -EFAULT;
519 goto done;
520 }
521
522 sscanf(buf, "%u %x %x", &reg_type, &reg_offset, &reg_value);
523
524 if (reg_type == 0 || reg_offset == 0) {
525 ret = -EINVAL;
526 goto done;
527 } else {
528 saved_reg_type = reg_type;
529 saved_reg_offset = reg_offset;
530 saved_reg_value = reg_value;
531 ret = count;
532 }
533done:
534 free_page(addr);
535 return ret;
536}
537
538/*
539 * Proc regrdwr file read handler.
540 *
 541 * This function is called when the 'regrdwr' file is opened for reading.
542 *
543 * This function can be used to read from a register.
544 */
545static ssize_t
546mwifiex_regrdwr_read(struct file *file, char __user *ubuf,
547 size_t count, loff_t *ppos)
548{
549 struct mwifiex_private *priv =
550 (struct mwifiex_private *) file->private_data;
551 unsigned long addr = get_zeroed_page(GFP_KERNEL);
552 char *buf = (char *) addr;
553 int pos = 0, ret = 0;
554 u32 reg_value;
555
556 if (!buf)
557 return -ENOMEM;
558
559 if (!saved_reg_type) {
560 /* No command has been given */
561 pos += snprintf(buf, PAGE_SIZE, "0");
562 goto done;
563 }
564 /* Set command has been given */
565 if (saved_reg_value != UINT_MAX) {
566 ret = mwifiex_reg_write(priv, saved_reg_type, saved_reg_offset,
567 saved_reg_value);
568
569 pos += snprintf(buf, PAGE_SIZE, "%u 0x%x 0x%x\n",
570 saved_reg_type, saved_reg_offset,
571 saved_reg_value);
572
573 ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
574
575 goto done;
576 }
577 /* Get command has been given */
578 ret = mwifiex_reg_read(priv, saved_reg_type,
579 saved_reg_offset, &reg_value);
580 if (ret) {
581 ret = -EINVAL;
582 goto done;
583 }
584
585 pos += snprintf(buf, PAGE_SIZE, "%u 0x%x 0x%x\n", saved_reg_type,
586 saved_reg_offset, reg_value);
587
588 ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
589
590done:
591 free_page(addr);
592 return ret;
593}
594
595static u32 saved_offset = -1, saved_bytes = -1;
596
597/*
598 * Proc rdeeprom file write handler.
599 *
 600 * This function is called when the 'rdeeprom' file is opened for writing.
601 *
602 * This function can be used to write to a RDEEPROM location.
603 */
604static ssize_t
605mwifiex_rdeeprom_write(struct file *file,
606 const char __user *ubuf, size_t count, loff_t *ppos)
607{
608 unsigned long addr = get_zeroed_page(GFP_KERNEL);
609 char *buf = (char *) addr;
610 size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
611 int ret = 0;
612 int offset = -1, bytes = -1;
613
614 if (!buf)
615 return -ENOMEM;
616
617
618 if (copy_from_user(buf, ubuf, buf_size)) {
619 ret = -EFAULT;
620 goto done;
621 }
622
623 sscanf(buf, "%d %d", &offset, &bytes);
624
625 if (offset == -1 || bytes == -1) {
626 ret = -EINVAL;
627 goto done;
628 } else {
629 saved_offset = offset;
630 saved_bytes = bytes;
631 ret = count;
632 }
633done:
634 free_page(addr);
635 return ret;
636}
637
638/*
 639 * Proc rdeeprom file read handler.
640 *
 641 * This function is called when the 'rdeeprom' file is opened for reading.
642 *
643 * This function can be used to read from a RDEEPROM location.
644 */
645static ssize_t
646mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
647 size_t count, loff_t *ppos)
648{
649 struct mwifiex_private *priv =
650 (struct mwifiex_private *) file->private_data;
651 unsigned long addr = get_zeroed_page(GFP_KERNEL);
652 char *buf = (char *) addr;
653 int pos = 0, ret = 0, i = 0;
654 u8 value[MAX_EEPROM_DATA];
655
656 if (!buf)
657 return -ENOMEM;
658
659 if (saved_offset == -1) {
660 /* No command has been given */
661 pos += snprintf(buf, PAGE_SIZE, "0");
662 goto done;
663 }
664
665 /* Get command has been given */
666 ret = mwifiex_eeprom_read(priv, (u16) saved_offset,
667 (u16) saved_bytes, value);
668 if (ret) {
669 ret = -EINVAL;
670 goto done;
671 }
672
673 pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
674
675 for (i = 0; i < saved_bytes; i++)
 676		pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
677
678 ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
679
680done:
681 free_page(addr);
682 return ret;
683}
684
685
686#define MWIFIEX_DFS_ADD_FILE(name) do { \
687 if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \
688 priv, &mwifiex_dfs_##name##_fops)) \
689 return; \
 690} while (0)
691
692#define MWIFIEX_DFS_FILE_OPS(name) \
693static const struct file_operations mwifiex_dfs_##name##_fops = { \
694 .read = mwifiex_##name##_read, \
695 .write = mwifiex_##name##_write, \
696 .open = mwifiex_open_generic, \
697};
698
699#define MWIFIEX_DFS_FILE_READ_OPS(name) \
700static const struct file_operations mwifiex_dfs_##name##_fops = { \
701 .read = mwifiex_##name##_read, \
702 .open = mwifiex_open_generic, \
703};
704
705#define MWIFIEX_DFS_FILE_WRITE_OPS(name) \
706static const struct file_operations mwifiex_dfs_##name##_fops = { \
707 .write = mwifiex_##name##_write, \
708 .open = mwifiex_open_generic, \
709};
710
711
712MWIFIEX_DFS_FILE_READ_OPS(info);
713MWIFIEX_DFS_FILE_READ_OPS(debug);
714MWIFIEX_DFS_FILE_READ_OPS(getlog);
715MWIFIEX_DFS_FILE_OPS(regrdwr);
716MWIFIEX_DFS_FILE_OPS(rdeeprom);
717
718/*
719 * This function creates the debug FS directory structure and the files.
720 */
721void
722mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
723{
724 if (!mwifiex_dfs_dir || !priv)
725 return;
726
727 priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
728 mwifiex_dfs_dir);
729
730 if (!priv->dfs_dev_dir)
731 return;
732
733 MWIFIEX_DFS_ADD_FILE(info);
734 MWIFIEX_DFS_ADD_FILE(debug);
735 MWIFIEX_DFS_ADD_FILE(getlog);
736 MWIFIEX_DFS_ADD_FILE(regrdwr);
737 MWIFIEX_DFS_ADD_FILE(rdeeprom);
738
739 return;
740}
741
742/*
743 * This function removes the debug FS directory structure and the files.
744 */
745void
746mwifiex_dev_debugfs_remove(struct mwifiex_private *priv)
747{
748 if (!priv)
749 return;
750
751 debugfs_remove_recursive(priv->dfs_dev_dir);
752 return;
753}
754
755/*
 756 * This function creates the top level debugfs directory.
757 */
758void
759mwifiex_debugfs_init(void)
760{
761 if (!mwifiex_dfs_dir)
762 mwifiex_dfs_dir = debugfs_create_dir("mwifiex", NULL);
763}
764
765/*
 766 * This function removes the top level debugfs directory.
767 */
768void
769mwifiex_debugfs_remove(void)
770{
771 if (mwifiex_dfs_dir)
772 debugfs_remove(mwifiex_dfs_dir);
773}
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
new file mode 100644
index 00000000000..4e1f115d3ec
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -0,0 +1,177 @@
1/*
2 * Marvell Wireless LAN device driver: generic data structures and APIs
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_DECL_H_
21#define _MWIFIEX_DECL_H_
22
23#undef pr_fmt
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/wait.h>
27#include <linux/timer.h>
28#include <linux/ieee80211.h>
29
30
31#define MWIFIEX_MAX_BSS_NUM (1)
32
33#define MWIFIEX_MIN_DATA_HEADER_LEN 32 /* (sizeof(mwifiex_txpd)) */
34
35#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
36#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
37
38#define MWIFIEX_AMPDU_DEF_TXWINSIZE 32
39#define MWIFIEX_AMPDU_DEF_RXWINSIZE 16
40#define MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT 0xffff
41
42#define MWIFIEX_RATE_INDEX_HRDSSS0 0
43#define MWIFIEX_RATE_INDEX_HRDSSS3 3
44#define MWIFIEX_RATE_INDEX_OFDM0 4
45#define MWIFIEX_RATE_INDEX_OFDM7 11
46#define MWIFIEX_RATE_INDEX_MCS0 12
47
48#define MWIFIEX_RATE_BITMAP_OFDM0 16
49#define MWIFIEX_RATE_BITMAP_OFDM7 23
50#define MWIFIEX_RATE_BITMAP_MCS0 32
51#define MWIFIEX_RATE_BITMAP_MCS127 159
52
53#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024)
54#define MWIFIEX_RX_CMD_BUF_SIZE (2 * 1024)
55
56#define MWIFIEX_RTS_MIN_VALUE (0)
57#define MWIFIEX_RTS_MAX_VALUE (2347)
58#define MWIFIEX_FRAG_MIN_VALUE (256)
59#define MWIFIEX_FRAG_MAX_VALUE (2346)
60
61#define MWIFIEX_SDIO_BLOCK_SIZE 256
62
63#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
64
65enum mwifiex_error_code {
66 MWIFIEX_ERROR_NO_ERROR = 0,
67 MWIFIEX_ERROR_FW_NOT_READY = 0x00000001,
68 MWIFIEX_ERROR_FW_BUSY,
69 MWIFIEX_ERROR_FW_CMDRESP,
70 MWIFIEX_ERROR_PKT_SIZE_INVALID = 0x80000001,
71 MWIFIEX_ERROR_PKT_TIMEOUT,
72 MWIFIEX_ERROR_CMD_INVALID,
73 MWIFIEX_ERROR_CMD_TIMEOUT,
74 MWIFIEX_ERROR_CMD_DNLD_FAIL,
75 MWIFIEX_ERROR_CMD_CANCEL,
76 MWIFIEX_ERROR_CMD_RESP_FAIL,
77 MWIFIEX_ERROR_ASSOC_FAIL,
78 MWIFIEX_ERROR_EVENT_UNKNOWN,
79 MWIFIEX_ERROR_INVALID_PARAMETER,
80};
81
82enum mwifiex_bss_type {
83 MWIFIEX_BSS_TYPE_STA = 0,
84 MWIFIEX_BSS_TYPE_UAP = 1,
85 MWIFIEX_BSS_TYPE_ANY = 0xff,
86};
87
88enum mwifiex_bss_role {
89 MWIFIEX_BSS_ROLE_STA = 0,
90 MWIFIEX_BSS_ROLE_UAP = 1,
91 MWIFIEX_BSS_ROLE_ANY = 0xff,
92};
93
94#define BSS_ROLE_BIT_MASK BIT(0)
95
96#define GET_BSS_ROLE(priv) ((priv)->bss_role & BSS_ROLE_BIT_MASK)
97
98enum mwifiex_data_frame_type {
99 MWIFIEX_DATA_FRAME_TYPE_ETH_II = 0,
100 MWIFIEX_DATA_FRAME_TYPE_802_11,
101};
102
103struct mwifiex_fw_image {
104 u8 *helper_buf;
105 u32 helper_len;
106 u8 *fw_buf;
107 u32 fw_len;
108};
109
110struct mwifiex_802_11_ssid {
111 u32 ssid_len;
112 u8 ssid[IEEE80211_MAX_SSID_LEN];
113};
114
115struct mwifiex_wait_queue {
116 u32 bss_index;
117 wait_queue_head_t *wait;
118 u16 *condition;
119 u32 start_time;
120 int status;
121 u32 enabled;
122};
123
124struct mwifiex_rxinfo {
125 u8 bss_index;
126 struct sk_buff *parent;
127 u8 use_count;
128};
129
130struct mwifiex_txinfo {
131 u32 status_code;
132 u8 flags;
133 u8 bss_index;
134};
135
136struct mwifiex_bss_attr {
137 u32 bss_type;
138 u32 frame_type;
139 u32 active;
140 u32 bss_priority;
141 u32 bss_num;
142};
143
144enum mwifiex_cmd_result_e {
145 MWIFIEX_CMD_RESULT_SUCCESS = 0,
146 MWIFIEX_CMD_RESULT_FAILURE = 1,
147 MWIFIEX_CMD_RESULT_TIMEOUT = 2,
148 MWIFIEX_CMD_RESULT_INVALID_DATA = 3
149} __packed;
150
151enum mwifiex_wmm_ac_e {
152 WMM_AC_BK,
153 WMM_AC_BE,
154 WMM_AC_VI,
155 WMM_AC_VO
156} __packed;
157
158enum mwifiex_wmm_queue_config_action_e {
159 MWIFIEX_WMM_QUEUE_CONFIG_ACTION_GET = 0,
160 MWIFIEX_WMM_QUEUE_CONFIG_ACTION_SET = 1,
161 MWIFIEX_WMM_QUEUE_CONFIG_ACTION_DEFAULT = 2,
162 MWIFIEX_WMM_QUEUE_CONFIG_ACTION_MAX
163} __packed;
164
165enum mwifiex_wmm_queue_stats_action_e {
166 MWIFIEX_WMM_STATS_ACTION_START = 0,
167 MWIFIEX_WMM_STATS_ACTION_STOP = 1,
168 MWIFIEX_WMM_STATS_ACTION_GET_CLR = 2,
169 MWIFIEX_WMM_STATS_ACTION_SET_CFG = 3, /* Not currently used */
170 MWIFIEX_WMM_STATS_ACTION_GET_CFG = 4, /* Not currently used */
171 MWIFIEX_WMM_STATS_ACTION_MAX
172} __packed;
173
174struct mwifiex_device {
175 struct mwifiex_bss_attr bss_attr[MWIFIEX_MAX_BSS_NUM];
176};
177#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
new file mode 100644
index 00000000000..e5dae45b11d
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -0,0 +1,1376 @@
1/*
2 * Marvell Wireless LAN device driver: Firmware specific macros & structures
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_FW_H_
21#define _MWIFIEX_FW_H_
22
23#include <linux/if_ether.h>
24
25
26#define INTF_HEADER_LEN 4
27
28struct rfc_1042_hdr {
29 u8 llc_dsap;
30 u8 llc_ssap;
31 u8 llc_ctrl;
32 u8 snap_oui[3];
33 u16 snap_type;
34};
35
36struct rx_packet_hdr {
37 struct ethhdr eth803_hdr;
38 struct rfc_1042_hdr rfc1042_hdr;
39};
40
41struct tx_packet_hdr {
42 struct ethhdr eth803_hdr;
43 struct rfc_1042_hdr rfc1042_hdr;
44};
45
46#define B_SUPPORTED_RATES 5
47#define G_SUPPORTED_RATES 9
48#define BG_SUPPORTED_RATES 13
49#define A_SUPPORTED_RATES 9
50#define HOSTCMD_SUPPORTED_RATES 14
51#define N_SUPPORTED_RATES 3
52#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN)
53
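/* Bits 8..11 of fw_cap_info advertise the supported bands; shifting
 * right by 8 yields a BAND_A/B/G/GN bitmask.
 */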
54#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11))
55#define IS_SUPPORT_MULTI_BANDS(adapter) \
56 (adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
57#define GET_FW_DEFAULT_BANDS(adapter) \
58 ((adapter->fw_cap_info >> 8) & ALL_802_11_BANDS)
59
60#define SHORT_SLOT_TIME_DISABLED(CapInfo) (CapInfo &= ~BIT(10))
61#define SHORT_SLOT_TIME_ENABLED(CapInfo) (CapInfo |= BIT(10))
62
63extern u8 supported_rates_b[B_SUPPORTED_RATES];
64extern u8 supported_rates_g[G_SUPPORTED_RATES];
65extern u8 supported_rates_bg[BG_SUPPORTED_RATES];
66extern u8 supported_rates_a[A_SUPPORTED_RATES];
67extern u8 supported_rates_n[N_SUPPORTED_RATES];
68
69#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
70
71#define KEY_INFO_ENABLED 0x01
72enum KEY_TYPE_ID {
73 KEY_TYPE_ID_WEP = 0,
74 KEY_TYPE_ID_TKIP,
75 KEY_TYPE_ID_AES,
76 KEY_TYPE_ID_WAPI,
77};
78
79enum KEY_INFO_WEP {
80 KEY_INFO_WEP_MCAST = 0x01,
81 KEY_INFO_WEP_UNICAST = 0x02,
82 KEY_INFO_WEP_ENABLED = 0x04
83};
84
85enum KEY_INFO_TKIP {
86 KEY_INFO_TKIP_MCAST = 0x01,
87 KEY_INFO_TKIP_UNICAST = 0x02,
88 KEY_INFO_TKIP_ENABLED = 0x04
89};
90
91enum KEY_INFO_AES {
92 KEY_INFO_AES_MCAST = 0x01,
93 KEY_INFO_AES_UNICAST = 0x02,
94 KEY_INFO_AES_ENABLED = 0x04
95};
96
97#define WAPI_KEY_LEN 50
98
99enum KEY_INFO_WAPI {
100 KEY_INFO_WAPI_MCAST = 0x01,
101 KEY_INFO_WAPI_UNICAST = 0x02,
102 KEY_INFO_WAPI_ENABLED = 0x04
103};
104
105#define MAX_POLL_TRIES 100
106
107#define MAX_MULTI_INTERFACE_POLL_TRIES 1000
108
109#define MAX_FIRMWARE_POLL_TRIES 100
110
111#define FIRMWARE_READY 0xfedc
112
113#define FIRMWARE_TRANSFER_NBLOCK 2
114
115enum MWIFIEX_802_11_PRIVACY_FILTER {
116 MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL,
117 MWIFIEX_802_11_PRIV_FILTER_8021X_WEP
118};
119
120enum MWIFIEX_802_11_WEP_STATUS {
121 MWIFIEX_802_11_WEP_ENABLED,
122 MWIFIEX_802_11_WEP_DISABLED,
123};
124
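/* SNR is computed as RSSI minus the noise floor, both in dBm. */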
125#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF)))
126
127#define PROPRIETARY_TLV_BASE_ID 0x0100
128#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
129#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1)
130#define TLV_TYPE_NUMPROBES (PROPRIETARY_TLV_BASE_ID + 2)
131#define TLV_TYPE_RSSI_LOW (PROPRIETARY_TLV_BASE_ID + 4)
132#define TLV_TYPE_SNR_LOW (PROPRIETARY_TLV_BASE_ID + 5)
133#define TLV_TYPE_FAILCOUNT (PROPRIETARY_TLV_BASE_ID + 6)
134#define TLV_TYPE_BCNMISS (PROPRIETARY_TLV_BASE_ID + 7)
135#define TLV_TYPE_LEDBEHAVIOR (PROPRIETARY_TLV_BASE_ID + 9)
136#define TLV_TYPE_PASSTHROUGH (PROPRIETARY_TLV_BASE_ID + 10)
137#define TLV_TYPE_POWER_TBL_2_4GHZ (PROPRIETARY_TLV_BASE_ID + 12)
138#define TLV_TYPE_POWER_TBL_5GHZ (PROPRIETARY_TLV_BASE_ID + 13)
139#define TLV_TYPE_WMMQSTATUS (PROPRIETARY_TLV_BASE_ID + 16)
140#define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18)
141#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
142#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
143#define TLV_TYPE_SNR_HIGH (PROPRIETARY_TLV_BASE_ID + 23)
144
145#define TLV_TYPE_STARTBGSCANLATER (PROPRIETARY_TLV_BASE_ID + 30)
146#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
147#define TLV_TYPE_LINK_QUALITY (PROPRIETARY_TLV_BASE_ID + 36)
148#define TLV_TYPE_RSSI_LOW_DATA (PROPRIETARY_TLV_BASE_ID + 38)
149#define TLV_TYPE_SNR_LOW_DATA (PROPRIETARY_TLV_BASE_ID + 39)
150#define TLV_TYPE_RSSI_HIGH_DATA (PROPRIETARY_TLV_BASE_ID + 40)
151#define TLV_TYPE_SNR_HIGH_DATA (PROPRIETARY_TLV_BASE_ID + 41)
152
153#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
154#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
155#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
156
157#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
158
159#define TLV_TYPE_HT_CAP (PROPRIETARY_TLV_BASE_ID + 74)
160#define TLV_TYPE_HT_INFO (PROPRIETARY_TLV_BASE_ID + 75)
161#define TLV_SECONDARY_CHANNEL_OFFSET (PROPRIETARY_TLV_BASE_ID + 76)
162#define TLV_TYPE_2040BSS_COEXISTENCE (PROPRIETARY_TLV_BASE_ID + 77)
163#define TLV_TYPE_OVERLAP_BSS_SCAN_PARAM (PROPRIETARY_TLV_BASE_ID + 78)
164#define TLV_TYPE_EXTCAP (PROPRIETARY_TLV_BASE_ID + 79)
165#define TLV_TYPE_HT_OPERATIONAL_MCS_SET (PROPRIETARY_TLV_BASE_ID + 80)
166
167#define ADDBA_TID_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5))
168#define DELBA_TID_MASK (BIT(12) | BIT(13) | BIT(14) | BIT(15))
169#define SSN_MASK 0xfff0
170
171#define BA_RESULT_SUCCESS 0x0
172#define BA_RESULT_FAILURE 0x1
173#define BA_RESULT_TIMEOUT 0x2
174#define BA_RESULT_DATA_INVALID 0x3
175
176#define IS_BASTREAM_SETUP(ptr) (ptr->ba_status)
177
178#define BA_STREAM_NOT_ALLOWED 0xff
179
180#define IS_11N_ENABLED(priv) ((priv->adapter->config_bands & BAND_GN || \
181 priv->adapter->config_bands & BAND_AN) \
182 && priv->curr_bss_params.bss_descriptor.bcn_ht_cap)
183#define INITIATOR_BIT(DelBAParamSet) (((DelBAParamSet) &\
184 BIT(DELBA_INITIATOR_POS)) >> DELBA_INITIATOR_POS)
185
186#define MWIFIEX_TX_DATA_BUF_SIZE_4K 4096
187#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
188#define MAX_RX_AMPDU_SIZE_64K 0x03
189#define NON_GREENFIELD_STAS 0x04
190
191#define HWSPEC_GREENFIELD_SUPP BIT(29)
192#define HWSPEC_RXSTBC_SUPP BIT(26)
193#define HWSPEC_SHORTGI40_SUPP BIT(24)
194#define HWSPEC_SHORTGI20_SUPP BIT(23)
195#define HWSPEC_CHANBW40_SUPP BIT(17)
196
197#define DEFAULT_11N_CAP_MASK (HWSPEC_SHORTGI20_SUPP | HWSPEC_RXSTBC_SUPP)
198#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
199#define ISSUPP_MAXAMSDU(Dot11nDevCap) (Dot11nDevCap & BIT(31))
200#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
201#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
202#define ISSUPP_AMPDU(Dot11nDevCap) (Dot11nDevCap & BIT(28))
203#define ISSUPP_MIMOPS(Dot11nDevCap) (Dot11nDevCap & BIT(27))
204#define ISSUPP_RXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(26))
205#define ISSUPP_TXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(25))
206#define ISSUPP_SHORTGI40(Dot11nDevCap) (Dot11nDevCap & BIT(24))
207#define ISSUPP_SHORTGI20(Dot11nDevCap) (Dot11nDevCap & BIT(23))
208#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
209#define GET_DELAYEDBACK(Dot11nDevCap) (((Dot11nDevCap >> 20) & 0x03))
210#define GET_IMMEDIATEBACK(Dot11nDevCap) (((Dot11nDevCap >> 18) & 0x03))
211#define ISSUPP_CHANWIDTH40(Dot11nDevCap) (Dot11nDevCap & BIT(17))
212#define ISSUPP_CHANWIDTH20(Dot11nDevCap) (Dot11nDevCap & BIT(16))
213#define ISSUPP_CHANWIDTH10(Dot11nDevCap) (Dot11nDevCap & BIT(15))
214#define ISENABLED_40MHZ_INTOLARENT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
215#define ISSUPP_RXANTENNAD(Dot11nDevCap) (Dot11nDevCap & BIT(7))
216#define ISSUPP_RXANTENNAC(Dot11nDevCap) (Dot11nDevCap & BIT(6))
217#define ISSUPP_RXANTENNAB(Dot11nDevCap) (Dot11nDevCap & BIT(5))
218#define ISSUPP_RXANTENNAA(Dot11nDevCap) (Dot11nDevCap & BIT(4))
219#define ISSUPP_TXANTENNAD(Dot11nDevCap) (Dot11nDevCap & BIT(3))
220#define ISSUPP_TXANTENNAC(Dot11nDevCap) (Dot11nDevCap & BIT(2))
221#define ISSUPP_TXANTENNAB(Dot11nDevCap) (Dot11nDevCap & BIT(1))
222#define ISSUPP_TXANTENNAA(Dot11nDevCap) (Dot11nDevCap & BIT(0))
223#define SETSUPP_CHANWIDTH40(Dot11nDevCap) (Dot11nDevCap |= BIT(17))
224#define RESETSUPP_CHANWIDTH40(Dot11nDevCap) (Dot11nDevCap &= ~BIT(17))
225#define GET_TXMCSSUPP(DevMCSSupported) (DevMCSSupported >> 4)
226#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
227#define GETHT_SUPPCHANWIDTH(HTCapInfo) (HTCapInfo & BIT(1))
228#define GETHT_GREENFIELD(HTCapInfo) (HTCapInfo & BIT(4))
229#define GETHT_SHORTGI20(HTCapInfo) (HTCapInfo & BIT(5))
230#define GETHT_SHORTGI40(HTCapInfo) (HTCapInfo & BIT(6))
231#define GETHT_TXSTBC(HTCapInfo) (HTCapInfo & BIT(7))
232#define GETHT_RXSTBC(HTCapInfo) ((HTCapInfo >> 8) & 0x03)
233#define GETHT_DELAYEDBACK(HTCapInfo) (HTCapInfo & BIT(10))
234#define GETHT_MAXAMSDU(HTCapInfo) (HTCapInfo & BIT(11))
235#define SETHT_SUPPCHANWIDTH(HTCapInfo) (HTCapInfo |= BIT(1))
236#define SETHT_GREENFIELD(HTCapInfo) (HTCapInfo |= BIT(4))
237#define SETHT_SHORTGI20(HTCapInfo) (HTCapInfo |= BIT(5))
238#define SETHT_SHORTGI40(HTCapInfo) (HTCapInfo |= BIT(6))
239#define SETHT_TXSTBC(HTCapInfo) (HTCapInfo |= BIT(7))
240#define SETHT_RXSTBC(HTCapInfo, value) (HTCapInfo |= (value << 8))
241#define SETHT_DELAYEDBACK(HTCapInfo) (HTCapInfo |= BIT(10))
242#define SETHT_MAXAMSDU(HTCapInfo) (HTCapInfo |= BIT(11))
243#define SETHT_DSSSCCK40(HTCapInfo) (HTCapInfo |= BIT(12))
244#define SETHT_40MHZ_INTOLARANT(HTCapInfo) (HTCapInfo |= BIT(14))
245#define RESETHT_SUPPCHANWIDTH(HTCapInfo) (HTCapInfo &= ~BIT(1))
246#define RESETHT_GREENFIELD(HTCapInfo) (HTCapInfo &= ~BIT(4))
247#define RESETHT_SHORTGI20(HTCapInfo) (HTCapInfo &= ~BIT(5))
248#define RESETHT_SHORTGI40(HTCapInfo) (HTCapInfo &= ~BIT(6))
249#define RESETHT_TXSTBC(HTCapInfo) (HTCapInfo &= ~BIT(7))
250#define RESETHT_RXSTBC(HTCapInfo) (HTCapInfo &= ~(0x03 << 8))
251#define RESETHT_DELAYEDBACK(HTCapInfo) (HTCapInfo &= ~BIT(10))
252#define RESETHT_MAXAMSDU(HTCapInfo) (HTCapInfo &= ~BIT(11))
253#define RESETHT_40MHZ_INTOLARANT(HTCapInfo) (HTCapInfo &= ~BIT(14))
254#define RESETHT_EXTCAP_RDG(HTExtCap) (HTExtCap &= ~BIT(11))
255#define SETHT_MCS32(x) (x[4] |= 1)
256#define SETHT_MCS_SET_DEFINED(x) (x[12] |= 1)
257#define SETHT_RX_HIGHEST_DT_SUPP(x, y) ((*(u16 *) (x + 10)) = y)
258#define AMPDU_FACTOR_64K 0x03
259#define SETAMPDU_SIZE(x, y) do { \
260 x = x & ~0x03; \
261 x |= y & 0x03; \
262} while (0)
263
264#define SETAMPDU_SPACING(x, y) do { \
265 x = x & ~0x1c; \
266 x |= (y & 0x07) << 2; \
267} while (0)
268
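A minimal usage sketch for the two A-MPDU helpers above; per the masks, bits 0-1 of the parameters octet carry the maximum A-MPDU length exponent and bits 2-4 the minimum MPDU start spacing (the function name is hypothetical):

static u8 example_ampdu_param(void)
{
	u8 ampdu_param = 0;

	SETAMPDU_SIZE(ampdu_param, AMPDU_FACTOR_64K);	/* 64K max A-MPDU */
	SETAMPDU_SPACING(ampdu_param, 0);		/* no spacing restriction */
	return ampdu_param;
}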
269#define ISSUPP_BANDA(FwCapInfo) (FwCapInfo & BIT(10))
270#define ISALLOWED_CHANWIDTH40(Field2) (Field2 & BIT(2))
271#define SET_CHANWIDTH40(Field2) (Field2 |= BIT(2))
272#define RESET_CHANWIDTH40(Field2) (Field2 &= ~(BIT(0) | BIT(1) | BIT(2)))
273#define GET_SECONDARYCHAN(Field2) (Field2 & (BIT(0) | BIT(1)))
274#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
275
276#define LLC_SNAP_LEN 8
277
278#define TLV_TYPE_RATE_DROP_PATTERN (PROPRIETARY_TLV_BASE_ID + 81)
279#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
280#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
281
282#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
283
284#define MOD_CLASS_HR_DSSS 0x03
285#define MOD_CLASS_OFDM 0x07
286#define MOD_CLASS_HT 0x08
287#define HT_BW_20 0
288#define HT_BW_40 1
289
290#define HostCmd_CMD_GET_HW_SPEC 0x0003
291#define HostCmd_CMD_802_11_SCAN 0x0006
292#define HostCmd_CMD_802_11_GET_LOG 0x000b
293#define HostCmd_CMD_MAC_MULTICAST_ADR 0x0010
294#define HostCmd_CMD_802_11_EEPROM_ACCESS 0x0059
295#define HostCmd_CMD_802_11_ASSOCIATE 0x0012
296#define HostCmd_CMD_802_11_SNMP_MIB 0x0016
297#define HostCmd_CMD_MAC_REG_ACCESS 0x0019
298#define HostCmd_CMD_BBP_REG_ACCESS 0x001a
299#define HostCmd_CMD_RF_REG_ACCESS 0x001b
300#define HostCmd_CMD_PMIC_REG_ACCESS 0x00ad
301#define HostCmd_CMD_802_11_RF_CHANNEL 0x001d
302#define HostCmd_CMD_802_11_DEAUTHENTICATE 0x0024
303#define HostCmd_CMD_MAC_CONTROL 0x0028
304#define HostCmd_CMD_802_11_AD_HOC_START 0x002b
305#define HostCmd_CMD_802_11_AD_HOC_JOIN 0x002c
306#define HostCmd_CMD_802_11_AD_HOC_STOP 0x0040
307#define HostCmd_CMD_802_11_MAC_ADDRESS 0x004D
308#define HostCmd_CMD_802_11D_DOMAIN_INFO 0x005b
309#define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e
310#define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c
311#define HostCmd_CMD_WMM_GET_STATUS 0x0071
312#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
313#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
314#define HostCmd_CMD_VERSION_EXT 0x0097
315#define HostCmd_CMD_RSSI_INFO 0x00a4
316#define HostCmd_CMD_FUNC_INIT 0x00a9
317#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa
318#define HostCmd_CMD_11N_CFG 0x00cd
319#define HostCmd_CMD_11N_ADDBA_REQ 0x00ce
320#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf
321#define HostCmd_CMD_11N_DELBA 0x00d0
322#define HostCmd_CMD_RECONFIGURE_TX_BUFF 0x00d9
323#define HostCmd_CMD_AMSDU_AGGR_CTRL 0x00df
324#define HostCmd_CMD_TXPWR_CFG 0x00d1
325#define HostCmd_CMD_TX_RATE_CFG 0x00d6
326#define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4
327#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5
328#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
329#define HostCmd_CMD_SET_BSS_MODE 0x00f7
330
331
332enum ENH_PS_MODES {
333 EN_PS = 1,
334 DIS_PS = 2,
335 EN_AUTO_DS = 3,
336 DIS_AUTO_DS = 4,
337 SLEEP_CONFIRM = 5,
338 GET_PS = 0,
339 EN_AUTO_PS = 0xff,
340 DIS_AUTO_PS = 0xfe,
341};
342
343#define HostCmd_RET_BIT 0x8000
344#define HostCmd_ACT_GEN_GET 0x0000
345#define HostCmd_ACT_GEN_SET 0x0001
346#define HostCmd_ACT_GEN_REMOVE 0x0004
347#define HostCmd_ACT_SET_BOTH 0x0003
348#define HostCmd_ACT_GET_BOTH 0x000c
349#define HostCmd_RESULT_OK 0x0000
350#define HostCmd_RESULT_ERROR 0x0001
351#define HostCmd_RESULT_NOT_SUPPORT 0x0002
352#define HostCmd_RESULT_PENDING 0x0003
353#define HostCmd_RESULT_BUSY 0x0004
354#define HostCmd_RESULT_PARTIAL_DATA 0x0005
355
356#define HostCmd_ACT_MAC_RX_ON 0x0001
357#define HostCmd_ACT_MAC_TX_ON 0x0002
358#define HostCmd_ACT_MAC_WEP_ENABLE 0x0008
359#define HostCmd_ACT_MAC_ETHERNETII_ENABLE 0x0010
360#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE 0x0080
361#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
362#define HostCmd_ACT_MAC_RTS_CTS_ENABLE 0x0200
363#define HostCmd_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400
364#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON 0x2000
365
366#define HostCmd_BSS_MODE_BSS 0x0001
367#define HostCmd_BSS_MODE_IBSS 0x0002
368#define HostCmd_BSS_MODE_ANY 0x0003
369
370#define HostCmd_SCAN_RADIO_TYPE_BG 0
371#define HostCmd_SCAN_RADIO_TYPE_A 1
372
373#define HOST_SLEEP_CFG_CANCEL 0xffffffff
374#define HOST_SLEEP_CFG_COND_DEF 0x0000000f
375#define HOST_SLEEP_CFG_GPIO_DEF 0xff
376#define HOST_SLEEP_CFG_GAP_DEF 0
377
378#define CMD_F_HOSTCMD (1 << 0)
379#define CMD_F_CANCELED (1 << 1)
380
381#define HostCmd_CMD_ID_MASK 0x0fff
382
383#define HostCmd_SEQ_NUM_MASK 0x00ff
384
385#define HostCmd_BSS_NUM_MASK 0x0f00
386
387#define HostCmd_BSS_TYPE_MASK 0xf000
388
389#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \
390 ((((seq) & 0x00ff) | \
391 (((num) & 0x000f) << 8)) | \
392 (((type) & 0x000f) << 12))
393
394#define HostCmd_GET_SEQ_NO(seq) \
395 ((seq) & HostCmd_SEQ_NUM_MASK)
396
397#define HostCmd_GET_BSS_NO(seq) \
398 (((seq) & HostCmd_BSS_NUM_MASK) >> 8)
399
400#define HostCmd_GET_BSS_TYPE(seq) \
401 (((seq) & HostCmd_BSS_TYPE_MASK) >> 12)
402
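A round-trip sketch for the sequence-number helpers: bits 0-7 hold the sequence number, bits 8-11 the BSS number and bits 12-15 the BSS type (illustrative values only):

static void example_seq_round_trip(void)
{
	u16 seq = HostCmd_SET_SEQ_NO_BSS_INFO(5, 1, 0);

	/* Prints "seq 5, bss 1, type 0" */
	pr_debug("seq %u, bss %u, type %u\n",
		 HostCmd_GET_SEQ_NO(seq),
		 HostCmd_GET_BSS_NO(seq),
		 HostCmd_GET_BSS_TYPE(seq));
}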
403#define EVENT_DUMMY_HOST_WAKEUP_SIGNAL 0x00000001
404#define EVENT_LINK_LOST 0x00000003
405#define EVENT_LINK_SENSED 0x00000004
406#define EVENT_MIB_CHANGED 0x00000006
407#define EVENT_INIT_DONE 0x00000007
408#define EVENT_DEAUTHENTICATED 0x00000008
409#define EVENT_DISASSOCIATED 0x00000009
410#define EVENT_PS_AWAKE 0x0000000a
411#define EVENT_PS_SLEEP 0x0000000b
412#define EVENT_MIC_ERR_MULTICAST 0x0000000d
413#define EVENT_MIC_ERR_UNICAST 0x0000000e
414#define EVENT_DEEP_SLEEP_AWAKE 0x00000010
415#define EVENT_ADHOC_BCN_LOST 0x00000011
416
417#define EVENT_WMM_STATUS_CHANGE 0x00000017
418#define EVENT_BG_SCAN_REPORT 0x00000018
419#define EVENT_RSSI_LOW 0x00000019
420#define EVENT_SNR_LOW 0x0000001a
421#define EVENT_MAX_FAIL 0x0000001b
422#define EVENT_RSSI_HIGH 0x0000001c
423#define EVENT_SNR_HIGH 0x0000001d
424#define EVENT_IBSS_COALESCED 0x0000001e
425#define EVENT_DATA_RSSI_LOW 0x00000024
426#define EVENT_DATA_SNR_LOW 0x00000025
427#define EVENT_DATA_RSSI_HIGH 0x00000026
428#define EVENT_DATA_SNR_HIGH 0x00000027
429#define EVENT_LINK_QUALITY 0x00000028
430#define EVENT_PORT_RELEASE 0x0000002b
431#define EVENT_PRE_BEACON_LOST 0x00000031
432#define EVENT_ADDBA 0x00000033
433#define EVENT_DELBA 0x00000034
434#define EVENT_BA_STREAM_TIEMOUT 0x00000037
435#define EVENT_AMSDU_AGGR_CTRL 0x00000042
436#define EVENT_WEP_ICV_ERR 0x00000046
437#define EVENT_HS_ACT_REQ 0x00000047
438#define EVENT_BW_CHANGE 0x00000048
439
440#define EVENT_HOSTWAKE_STAIE 0x0000004d
441
442#define EVENT_ID_MASK 0xffff
443#define BSS_NUM_MASK 0xf
444
445#define EVENT_GET_BSS_NUM(event_cause) \
446 (((event_cause) >> 16) & BSS_NUM_MASK)
447
448#define EVENT_GET_BSS_TYPE(event_cause) \
449 (((event_cause) >> 24) & 0x00ff)
450
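A matching sketch for event decoding: the low 16 bits of the event cause carry the event ID, bits 16-19 the BSS number and bits 24-31 the BSS type:

static void example_decode_event(struct mwifiex_adapter *adapter)
{
	u32 cause = adapter->event_cause;

	pr_debug("event 0x%x for bss %u, type %u\n",
		 cause & EVENT_ID_MASK,
		 EVENT_GET_BSS_NUM(cause),
		 EVENT_GET_BSS_TYPE(cause));
}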
451struct mwifiex_event_wep_icv_err {
452 u16 reason_code;
453 u8 src_mac_addr[ETH_ALEN];
454 u8 wep_key_index;
455 u8 wep_key_length;
456 u8 key[WLAN_KEY_LEN_WEP104];
457};
458
459struct mwifiex_802_11_fixed_ies {
460 u8 time_stamp[8];
461 __le16 beacon_interval;
462 __le16 capabilities;
463};
464
465struct mwifiex_ie_types_header {
466 __le16 type;
467 __le16 len;
468} __packed;
469
470struct mwifiex_ie_types_data {
471 struct mwifiex_ie_types_header header;
472 u8 data[1];
473} __packed;
474
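The data[1] member is the usual trailing-array idiom: a TLV buffer is walked by stepping over the header plus its little-endian length. A minimal sketch, leaving full bounds checking of len to the caller:

static void example_walk_tlv_buf(u8 *buf, u32 buf_len)
{
	u32 parsed = 0;

	while (parsed + sizeof(struct mwifiex_ie_types_header) <= buf_len) {
		struct mwifiex_ie_types_data *tlv =
			(struct mwifiex_ie_types_data *) (buf + parsed);
		u16 len = le16_to_cpu(tlv->header.len);

		/* tlv->header.type selects the concrete TLV layout */
		parsed += sizeof(tlv->header) + len;
	}
}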
475#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
476#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
477
478struct txpd {
479 u8 bss_type;
480 u8 bss_num;
481 __le16 tx_pkt_length;
482 __le16 tx_pkt_offset;
483 __le16 tx_pkt_type;
484 __le32 tx_control;
485 u8 priority;
486 u8 flags;
487 u8 pkt_delay_2ms;
488 u8 reserved1;
489} __packed;
490
491struct rxpd {
492 u8 bss_type;
493 u8 bss_num;
494 u16 rx_pkt_length;
495 u16 rx_pkt_offset;
496 u16 rx_pkt_type;
497 u16 seq_num;
498 u8 priority;
499 u8 rx_rate;
500 s8 snr;
501 s8 nf;
502 /* Ht Info [Bit 0] RxRate format: LG=0, HT=1
503 * [Bit 1] HT Bandwidth: BW20 = 0, BW40 = 1
504 * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 */
505 u8 ht_info;
506 u8 reserved;
507} __packed;
508
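A small sketch of testing the ht_info bits documented above (the helper name is hypothetical):

/* True when the frame arrived as HT, 40 MHz, short guard interval. */
static bool example_rx_is_ht40_sgi(const struct rxpd *rx_pd)
{
	u8 mask = BIT(0) | BIT(1) | BIT(2);

	return (rx_pd->ht_info & mask) == mask;
}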
509enum mwifiex_chan_scan_mode_bitmasks {
510 MWIFIEX_PASSIVE_SCAN = BIT(0),
511 MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
512};
513
514#define SECOND_CHANNEL_BELOW 0x30
515#define SECOND_CHANNEL_ABOVE 0x10
516struct mwifiex_chan_scan_param_set {
517 u8 radio_type;
518 u8 chan_number;
519 u8 chan_scan_mode_bitmap;
520 __le16 min_scan_time;
521 __le16 max_scan_time;
522} __packed;
523
524struct mwifiex_ie_types_chan_list_param_set {
525 struct mwifiex_ie_types_header header;
526 struct mwifiex_chan_scan_param_set chan_scan_param[1];
527} __packed;
528
529struct chan_band_param_set {
530 u8 radio_type;
531 u8 chan_number;
532};
533
534struct mwifiex_ie_types_chan_band_list_param_set {
535 struct mwifiex_ie_types_header header;
536 struct chan_band_param_set chan_band_param[1];
537} __packed;
538
539struct mwifiex_ie_types_rates_param_set {
540 struct mwifiex_ie_types_header header;
541 u8 rates[1];
542} __packed;
543
544struct mwifiex_ie_types_ssid_param_set {
545 struct mwifiex_ie_types_header header;
546 u8 ssid[1];
547} __packed;
548
549struct mwifiex_ie_types_num_probes {
550 struct mwifiex_ie_types_header header;
551 __le16 num_probes;
552} __packed;
553
554struct mwifiex_ie_types_wildcard_ssid_params {
555 struct mwifiex_ie_types_header header;
556 u8 max_ssid_length;
557 u8 ssid[1];
558} __packed;
559
560#define TSF_DATA_SIZE 8
561struct mwifiex_ie_types_tsf_timestamp {
562 struct mwifiex_ie_types_header header;
563 u8 tsf_data[1];
564} __packed;
565
566struct mwifiex_cf_param_set {
567 u8 cfp_cnt;
568 u8 cfp_period;
569 u16 cfp_max_duration;
570 u16 cfp_duration_remaining;
571} __packed;
572
573struct mwifiex_ibss_param_set {
574 u16 atim_window;
575} __packed;
576
577struct mwifiex_ie_types_ss_param_set {
578 struct mwifiex_ie_types_header header;
579 union {
580 struct mwifiex_cf_param_set cf_param_set[1];
581 struct mwifiex_ibss_param_set ibss_param_set[1];
582 } cf_ibss;
583} __packed;
584
585struct mwifiex_fh_param_set {
586 u16 dwell_time;
587 u8 hop_set;
588 u8 hop_pattern;
589 u8 hop_index;
590} __packed;
591
592struct mwifiex_ds_param_set {
593 u8 current_chan;
594} __packed;
595
596struct mwifiex_ie_types_phy_param_set {
597 struct mwifiex_ie_types_header header;
598 union {
599 struct mwifiex_fh_param_set fh_param_set[1];
600 struct mwifiex_ds_param_set ds_param_set[1];
601 } fh_ds;
602} __packed;
603
604struct mwifiex_ie_types_auth_type {
605 struct mwifiex_ie_types_header header;
606 __le16 auth_type;
607} __packed;
608
609struct mwifiex_ie_types_vendor_param_set {
610 struct mwifiex_ie_types_header header;
611 u8 ie[MWIFIEX_MAX_VSIE_LEN];
612};
613
614struct mwifiex_ie_types_rsn_param_set {
615 struct mwifiex_ie_types_header header;
616 u8 rsn_ie[1];
617} __packed;
618
619#define KEYPARAMSET_FIXED_LEN 6
620
621struct mwifiex_ie_type_key_param_set {
622 __le16 type;
623 __le16 length;
624 __le16 key_type_id;
625 __le16 key_info;
626 __le16 key_len;
627 u8 key[50];
628} __packed;
629
630struct host_cmd_ds_802_11_key_material {
631 __le16 action;
632 struct mwifiex_ie_type_key_param_set key_param_set;
633} __packed;
634
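A hedged sketch of filling the key parameter set for a WEP key: the TLV length counts KEYPARAMSET_FIXED_LEN (the three __le16 fields after type/length) plus the key bytes themselves (the function name is hypothetical):

static void example_fill_wep_key(struct mwifiex_ie_type_key_param_set *kps,
				 const u8 *key, u16 key_len)
{
	kps->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
	kps->length = cpu_to_le16(KEYPARAMSET_FIXED_LEN + key_len);
	kps->key_type_id = cpu_to_le16(KEY_TYPE_ID_WEP);
	kps->key_info = cpu_to_le16(KEY_INFO_WEP_ENABLED |
				    KEY_INFO_WEP_UNICAST | KEY_INFO_WEP_MCAST);
	kps->key_len = cpu_to_le16(key_len);
	memcpy(kps->key, key, key_len);
}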
635struct host_cmd_ds_gen {
636 u16 command;
637 u16 size;
638 u16 seq_num;
639 u16 result;
640};
641
642#define S_DS_GEN sizeof(struct host_cmd_ds_gen)
643
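Every host command shares this generic header, so a command's size field is S_DS_GEN plus the size of its payload. A minimal sketch using the command union defined further below (the function name is hypothetical):

static void example_size_deauth_cmd(struct host_cmd_ds_command *cmd)
{
	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_DEAUTHENTICATE);
	cmd->size = cpu_to_le16(S_DS_GEN +
			sizeof(struct host_cmd_ds_802_11_deauthenticate));
}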
644enum sleep_resp_ctrl {
645 RESP_NOT_NEEDED = 0,
646 RESP_NEEDED,
647};
648
649struct mwifiex_ps_param {
650 __le16 null_pkt_interval;
651 __le16 multiple_dtims;
652 __le16 bcn_miss_timeout;
653 __le16 local_listen_interval;
654 __le16 adhoc_wake_period;
655 __le16 mode;
656 __le16 delay_to_ps;
657};
658
659struct mwifiex_auto_ds_param {
660 __le16 deep_sleep_timeout;
661};
662
663struct sleep_confirm_param {
664 __le16 resp_ctrl;
665};
666
667#define BITMAP_AUTO_DS 0x01
668#define BITMAP_STA_PS 0x10
669#define BITMAP_UAP_INACT_PS 0x100
670#define BITMAP_UAP_DTIM_PS 0x200
671struct auto_ps_param {
672 __le16 ps_bitmap;
673 /* auto deep sleep parameter,
674 * sta power save parameter
675 * uap inactivity parameter
676 * uap DTIM parameter */
677};
678
679#define AUTO_PS_FIX_SIZE 4
680
681#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
682#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
683
684struct mwifiex_ie_types_auto_ds_param {
685 struct mwifiex_ie_types_header header;
686 struct mwifiex_auto_ds_param param;
687} __packed;
688
689struct mwifiex_ie_types_ps_param {
690 struct mwifiex_ie_types_header header;
691 struct mwifiex_ps_param param;
692} __packed;
693
694struct host_cmd_ds_802_11_ps_mode_enh {
695 __le16 action;
696
697 union {
698 struct mwifiex_ps_param opt_ps;
699 struct mwifiex_auto_ds_param auto_ds;
700 struct sleep_confirm_param sleep_cfm;
701 __le16 ps_bitmap;
702 struct auto_ps_param auto_ps;
703 } params;
704} __packed;
705
706struct host_cmd_ds_get_hw_spec {
707 __le16 hw_if_version;
708 __le16 version;
709 __le16 reserved;
710 __le16 num_of_mcast_adr;
711 u8 permanent_addr[ETH_ALEN];
712 __le16 region_code;
713 __le16 number_of_antenna;
714 __le32 fw_release_number;
715 __le32 reserved_1;
716 __le32 reserved_2;
717 __le32 reserved_3;
718 __le32 fw_cap_info;
719 __le32 dot_11n_dev_cap;
720 u8 dev_mcs_support;
721 __le16 mp_end_port; /* SDIO only, reserved for other interfaces */
722 __le16 reserved_4;
723} __packed;
724
725struct host_cmd_ds_802_11_rssi_info {
726 __le16 action;
727 __le16 ndata;
728 __le16 nbcn;
729 __le16 reserved[9];
730 long long reserved_1;
731};
732
733struct host_cmd_ds_802_11_rssi_info_rsp {
734 __le16 action;
735 __le16 ndata;
736 __le16 nbcn;
737 __le16 data_rssi_last;
738 __le16 data_nf_last;
739 __le16 data_rssi_avg;
740 __le16 data_nf_avg;
741 __le16 bcn_rssi_last;
742 __le16 bcn_nf_last;
743 __le16 bcn_rssi_avg;
744 __le16 bcn_nf_avg;
745 long long tsf_bcn;
746};
747
748struct host_cmd_ds_802_11_mac_address {
749 __le16 action;
750 u8 mac_addr[ETH_ALEN];
751};
752
753struct host_cmd_ds_mac_control {
754 __le16 action;
755 __le16 reserved;
756};
757
758struct host_cmd_ds_mac_multicast_adr {
759 __le16 action;
760 __le16 num_of_adrs;
761 u8 mac_list[MWIFIEX_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
762} __packed;
763
764struct host_cmd_ds_802_11_deauthenticate {
765 u8 mac_addr[ETH_ALEN];
766 __le16 reason_code;
767} __packed;
768
769struct host_cmd_ds_802_11_associate {
770 u8 peer_sta_addr[ETH_ALEN];
771 __le16 cap_info_bitmap;
772 __le16 listen_interval;
773 __le16 beacon_period;
774 u8 dtim_period;
775} __packed;
776
777struct ieee_types_assoc_rsp {
778 __le16 cap_info_bitmap;
779 __le16 status_code;
780 __le16 a_id;
781 u8 ie_buffer[1];
782} __packed;
783
784struct host_cmd_ds_802_11_associate_rsp {
785 struct ieee_types_assoc_rsp assoc_rsp;
786} __packed;
787
788struct ieee_types_cf_param_set {
789 u8 element_id;
790 u8 len;
791 u8 cfp_cnt;
792 u8 cfp_period;
793 u16 cfp_max_duration;
794 u16 cfp_duration_remaining;
795} __packed;
796
797struct ieee_types_ibss_param_set {
798 u8 element_id;
799 u8 len;
800 __le16 atim_window;
801} __packed;
802
803union ieee_types_ss_param_set {
804 struct ieee_types_cf_param_set cf_param_set;
805 struct ieee_types_ibss_param_set ibss_param_set;
806} __packed;
807
808struct ieee_types_fh_param_set {
809 u8 element_id;
810 u8 len;
811 __le16 dwell_time;
812 u8 hop_set;
813 u8 hop_pattern;
814 u8 hop_index;
815} __packed;
816
817struct ieee_types_ds_param_set {
818 u8 element_id;
819 u8 len;
820 u8 current_chan;
821} __packed;
822
823union ieee_types_phy_param_set {
824 struct ieee_types_fh_param_set fh_param_set;
825 struct ieee_types_ds_param_set ds_param_set;
826} __packed;
827
828struct host_cmd_ds_802_11_ad_hoc_start {
829 u8 ssid[IEEE80211_MAX_SSID_LEN];
830 u8 bss_mode;
831 __le16 beacon_period;
832 u8 dtim_period;
833 union ieee_types_ss_param_set ss_param_set;
834 union ieee_types_phy_param_set phy_param_set;
835 u16 reserved1;
836 __le16 cap_info_bitmap;
837 u8 DataRate[HOSTCMD_SUPPORTED_RATES];
838} __packed;
839
840struct host_cmd_ds_802_11_ad_hoc_result {
841 u8 pad[3];
842 u8 bssid[ETH_ALEN];
843} __packed;
844
845struct adhoc_bss_desc {
846 u8 bssid[ETH_ALEN];
847 u8 ssid[IEEE80211_MAX_SSID_LEN];
848 u8 bss_mode;
849 __le16 beacon_period;
850 u8 dtim_period;
851 u8 time_stamp[8];
852 u8 local_time[8];
853 union ieee_types_phy_param_set phy_param_set;
854 union ieee_types_ss_param_set ss_param_set;
855 __le16 cap_info_bitmap;
856 u8 data_rates[HOSTCMD_SUPPORTED_RATES];
857
858 /*
859 * DO NOT ADD ANY FIELDS TO THIS STRUCTURE.
860 * It is used in the ad-hoc join command; adding any field will
861 * cause a binary layout mismatch with the firmware.
862 */
863} __packed;
864
865struct host_cmd_ds_802_11_ad_hoc_join {
866 struct adhoc_bss_desc bss_descriptor;
867 u16 reserved1;
868 u16 reserved2;
869} __packed;
870
871struct host_cmd_ds_802_11_get_log {
872 __le32 mcast_tx_frame;
873 __le32 failed;
874 __le32 retry;
875 __le32 multi_retry;
876 __le32 frame_dup;
877 __le32 rts_success;
878 __le32 rts_failure;
879 __le32 ack_failure;
880 __le32 rx_frag;
881 __le32 mcast_rx_frame;
882 __le32 fcs_error;
883 __le32 tx_frame;
884 __le32 reserved;
885 __le32 wep_icv_err_cnt[4];
886};
887
888struct host_cmd_ds_tx_rate_query {
889 u8 tx_rate;
890 /* Ht Info [Bit 0] RxRate format: LG=0, HT=1
891 * [Bit 1] HT Bandwidth: BW20 = 0, BW40 = 1
892 * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 */
893 u8 ht_info;
894} __packed;
895
896enum Host_Sleep_Action {
897 HS_CONFIGURE = 0x0001,
898 HS_ACTIVATE = 0x0002,
899};
900
901struct mwifiex_hs_config_param {
902 __le32 conditions;
903 u8 gpio;
904 u8 gap;
905} __packed;
906
907struct hs_activate_param {
908 u16 resp_ctrl;
909} __packed;
910
911struct host_cmd_ds_802_11_hs_cfg_enh {
912 __le16 action;
913
914 union {
915 struct mwifiex_hs_config_param hs_config;
916 struct hs_activate_param hs_activate;
917 } params;
918} __packed;
919
920enum SNMP_MIB_INDEX {
921 OP_RATE_SET_I = 1,
922 DTIM_PERIOD_I = 3,
923 RTS_THRESH_I = 5,
924 SHORT_RETRY_LIM_I = 6,
925 LONG_RETRY_LIM_I = 7,
926 FRAG_THRESH_I = 8,
927 DOT11D_I = 9,
928};
929
930#define MAX_SNMP_BUF_SIZE 128
931
932struct host_cmd_ds_802_11_snmp_mib {
933 __le16 query_type;
934 __le16 oid;
935 __le16 buf_size;
936 u8 value[1];
937} __packed;
938
939#define RADIO_ON 0x01
940#define RADIO_OFF 0x00
941
942struct mwifiex_rate_scope {
943 __le16 type;
944 __le16 length;
945 __le16 hr_dsss_rate_bitmap;
946 __le16 ofdm_rate_bitmap;
947 __le16 ht_mcs_rate_bitmap[8];
948} __packed;
949
950struct mwifiex_rate_drop_pattern {
951 __le16 type;
952 __le16 length;
953 __le32 rate_drop_mode;
954} __packed;
955
956struct host_cmd_ds_tx_rate_cfg {
957 __le16 action;
958 __le16 cfg_index;
959} __packed;
960
961struct mwifiex_power_group {
962 u8 modulation_class;
963 u8 first_rate_code;
964 u8 last_rate_code;
965 s8 power_step;
966 s8 power_min;
967 s8 power_max;
968 u8 ht_bandwidth;
969 u8 reserved;
970} __packed;
971
972struct mwifiex_types_power_group {
973 u16 type;
974 u16 length;
975} __packed;
976
977struct host_cmd_ds_txpwr_cfg {
978 __le16 action;
979 __le16 cfg_index;
980 __le32 mode;
981} __packed;
982
983#define MWIFIEX_USER_SCAN_CHAN_MAX 50
984
985#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
986
987struct mwifiex_scan_cmd_config {
988 /*
989 * BSS Type to be sent in the firmware command
990 *
991 * Field can be used to restrict the types of networks returned in the
992 * scan. Valid settings are:
993 *
994 * - MWIFIEX_SCAN_MODE_BSS (infrastructure)
995 * - MWIFIEX_SCAN_MODE_IBSS (adhoc)
996 * - MWIFIEX_SCAN_MODE_ANY (unrestricted, adhoc and infrastructure)
997 */
998 u8 bss_mode;
999
1000 /* Specific BSSID used to filter scan results in the firmware */
1001 u8 specific_bssid[ETH_ALEN];
1002
1003 /* Length of TLVs sent in command starting at tlvBuffer */
1004 u32 tlv_buf_len;
1005
1006 /*
1007 * SSID TLV(s) and ChanList TLVs to be sent in the firmware command
1008 *
1009 * TLV_TYPE_CHANLIST, mwifiex_ie_types_chan_list_param_set
1010 * WLAN_EID_SSID, mwifiex_ie_types_ssid_param_set
1011 */
1012 u8 tlv_buf[1]; /* SSID TLV(s) and ChanList TLVs are stored
1013 here */
1014} __packed;
1015
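A hedged sketch of appending one channel-list TLV at the tail of tlv_buf; the real scan builder also emits SSID, rates and HT capability TLVs (the function name is hypothetical):

static u8 *example_append_chan_tlv(u8 *tlv_pos,
				   const struct mwifiex_chan_scan_param_set *chan)
{
	struct mwifiex_ie_types_chan_list_param_set *chan_tlv =
		(struct mwifiex_ie_types_chan_list_param_set *) tlv_pos;

	chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
	chan_tlv->header.len = cpu_to_le16(sizeof(*chan));
	chan_tlv->chan_scan_param[0] = *chan;

	return tlv_pos + sizeof(chan_tlv->header) + sizeof(*chan);
}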
1016struct mwifiex_user_scan_chan {
1017 u8 chan_number;
1018 u8 radio_type;
1019 u8 scan_type;
1020 u8 reserved;
1021 u32 scan_time;
1022} __packed;
1023
1024struct mwifiex_user_scan_ssid {
1025 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
1026 u8 max_len;
1027} __packed;
1028
1029struct mwifiex_user_scan_cfg {
1030 /*
1031 * Flag set to keep the previous scan table intact
1032 *
1033 * If set, the scan results will accumulate, replacing any previous
1034 * matched entries for a BSS with the new scan data
1035 */
1036 u8 keep_previous_scan;
1037 /*
1038 * BSS mode to be sent in the firmware command
1039 *
1040 * Field can be used to restrict the types of networks returned in the
1041 * scan. Valid settings are:
1042 *
1043 * - MWIFIEX_SCAN_MODE_BSS (infrastructure)
1044 * - MWIFIEX_SCAN_MODE_IBSS (adhoc)
1045 * - MWIFIEX_SCAN_MODE_ANY (unrestricted, adhoc and infrastructure)
1046 */
1047 u8 bss_mode;
1048 /* Configure the number of probe requests for active chan scans */
1049 u8 num_probes;
1050 u8 reserved;
1051 /* BSSID filter sent in the firmware command to limit the results */
1052 u8 specific_bssid[ETH_ALEN];
1053 /* SSID filter list used in the firmware command to limit the scan results */
1054 struct mwifiex_user_scan_ssid ssid_list[MWIFIEX_MAX_SSID_LIST_LENGTH];
1055 /* Variable number (up to a fixed maximum) of channels to scan */
1056 struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
1057} __packed;
1058
1059struct ie_body {
1060 u8 grp_key_oui[4];
1061 u8 ptk_cnt[2];
1062 u8 ptk_body[4];
1063} __packed;
1064
1065struct host_cmd_ds_802_11_scan {
1066 u8 bss_mode;
1067 u8 bssid[ETH_ALEN];
1068 u8 tlv_buffer[1];
1069} __packed;
1070
1071struct host_cmd_ds_802_11_scan_rsp {
1072 __le16 bss_descript_size;
1073 u8 number_of_sets;
1074 u8 bss_desc_and_tlv_buffer[1];
1075} __packed;
1076
1077struct host_cmd_ds_802_11_bg_scan_query {
1078 u8 flush;
1079} __packed;
1080
1081struct host_cmd_ds_802_11_bg_scan_query_rsp {
1082 u32 report_condition;
1083 struct host_cmd_ds_802_11_scan_rsp scan_resp;
1084} __packed;
1085
1086struct mwifiex_ietypes_domain_param_set {
1087 struct mwifiex_ie_types_header header;
1088 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
1089 struct ieee80211_country_ie_triplet triplet[1];
1090} __packed;
1091
1092struct host_cmd_ds_802_11d_domain_info {
1093 __le16 action;
1094 struct mwifiex_ietypes_domain_param_set domain;
1095} __packed;
1096
1097struct host_cmd_ds_802_11d_domain_info_rsp {
1098 __le16 action;
1099 struct mwifiex_ietypes_domain_param_set domain;
1100} __packed;
1101
1102struct host_cmd_ds_11n_addba_req {
1103 u8 add_req_result;
1104 u8 peer_mac_addr[ETH_ALEN];
1105 u8 dialog_token;
1106 __le16 block_ack_param_set;
1107 __le16 block_ack_tmo;
1108 __le16 ssn;
1109} __packed;
1110
1111struct host_cmd_ds_11n_addba_rsp {
1112 u8 add_rsp_result;
1113 u8 peer_mac_addr[ETH_ALEN];
1114 u8 dialog_token;
1115 __le16 status_code;
1116 __le16 block_ack_param_set;
1117 __le16 block_ack_tmo;
1118 __le16 ssn;
1119} __packed;
1120
1121struct host_cmd_ds_11n_delba {
1122 u8 del_result;
1123 u8 peer_mac_addr[ETH_ALEN];
1124 __le16 del_ba_param_set;
1125 __le16 reason_code;
1126 u8 reserved;
1127} __packed;
1128
1129struct host_cmd_ds_11n_batimeout {
1130 u8 tid;
1131 u8 peer_mac_addr[ETH_ALEN];
1132 u8 origninator;
1133} __packed;
1134
1135struct host_cmd_ds_11n_cfg {
1136 __le16 action;
1137 __le16 ht_tx_cap;
1138 __le16 ht_tx_info;
1139} __packed;
1140
1141struct host_cmd_ds_txbuf_cfg {
1142 __le16 action;
1143 __le16 buff_size;
1144 __le16 mp_end_port; /* SDIO only, reserved for other interfaces */
1145 __le16 reserved3;
1146} __packed;
1147
1148struct host_cmd_ds_amsdu_aggr_ctrl {
1149 __le16 action;
1150 __le16 enable;
1151 __le16 curr_buf_size;
1152} __packed;
1153
1154struct mwifiex_ie_types_wmm_param_set {
1155 struct mwifiex_ie_types_header header;
1156 u8 wmm_ie[1];
1157};
1158
1159struct mwifiex_ie_types_wmm_queue_status {
1160 struct mwifiex_ie_types_header header;
1161 u8 queue_index;
1162 u8 disabled;
1163 u16 medium_time;
1164 u8 flow_required;
1165 u8 flow_created;
1166 u32 reserved;
1167};
1168
1169struct ieee_types_vendor_header {
1170 u8 element_id;
1171 u8 len;
1172 u8 oui[3];
1173 u8 oui_type;
1174 u8 oui_subtype;
1175 u8 version;
1176} __packed;
1177
1178struct ieee_types_wmm_ac_parameters {
1179 u8 aci_aifsn_bitmap;
1180 u8 ecw_bitmap;
1181 __le16 tx_op_limit;
1182} __packed;
1183
1184struct ieee_types_wmm_parameter {
1185 /*
1186 * WMM Parameter IE - Vendor Specific Header:
1187 * element_id [221/0xdd]
1188 * Len [24]
1189 * Oui [00:50:f2]
1190 * OuiType [2]
1191 * OuiSubType [1]
1192 * Version [1]
1193 */
1194 struct ieee_types_vendor_header vend_hdr;
1195 u8 qos_info_bitmap;
1196 u8 reserved;
1197 struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_MAX_QUEUES];
1198} __packed;
1199
1200struct ieee_types_wmm_info {
1201
1202 /*
1203 * WMM Info IE - Vendor Specific Header:
1204 * element_id [221/0xdd]
1205 * Len [7]
1206 * Oui [00:50:f2]
1207 * OuiType [2]
1208 * OuiSubType [0]
1209 * Version [1]
1210 */
1211 struct ieee_types_vendor_header vend_hdr;
1212
1213 u8 qos_info_bitmap;
1214} __packed;
1215
1216struct host_cmd_ds_wmm_get_status {
1217 u8 queue_status_tlv[sizeof(struct mwifiex_ie_types_wmm_queue_status) *
1218 IEEE80211_MAX_QUEUES];
1219 u8 wmm_param_tlv[sizeof(struct ieee_types_wmm_parameter) + 2];
1220} __packed;
1221
1222struct mwifiex_wmm_ac_status {
1223 u8 disabled;
1224 u8 flow_required;
1225 u8 flow_created;
1226};
1227
1228struct mwifiex_ie_types_htcap {
1229 struct mwifiex_ie_types_header header;
1230 struct ieee80211_ht_cap ht_cap;
1231} __packed;
1232
1233struct mwifiex_ie_types_htinfo {
1234 struct mwifiex_ie_types_header header;
1235 struct ieee80211_ht_info ht_info;
1236} __packed;
1237
1238struct mwifiex_ie_types_2040bssco {
1239 struct mwifiex_ie_types_header header;
1240 u8 bss_co_2040;
1241} __packed;
1242
1243struct mwifiex_ie_types_extcap {
1244 struct mwifiex_ie_types_header header;
1245 u8 ext_cap;
1246} __packed;
1247
1248struct host_cmd_ds_mac_reg_access {
1249 __le16 action;
1250 __le16 offset;
1251 __le32 value;
1252} __packed;
1253
1254struct host_cmd_ds_bbp_reg_access {
1255 __le16 action;
1256 __le16 offset;
1257 u8 value;
1258 u8 reserved[3];
1259} __packed;
1260
1261struct host_cmd_ds_rf_reg_access {
1262 __le16 action;
1263 __le16 offset;
1264 u8 value;
1265 u8 reserved[3];
1266} __packed;
1267
1268struct host_cmd_ds_pmic_reg_access {
1269 __le16 action;
1270 __le16 offset;
1271 u8 value;
1272 u8 reserved[3];
1273} __packed;
1274
1275struct host_cmd_ds_802_11_eeprom_access {
1276 __le16 action;
1277
1278 __le16 offset;
1279 __le16 byte_count;
1280 u8 value;
1281} __packed;
1282
1283struct host_cmd_ds_802_11_rf_channel {
1284 __le16 action;
1285 __le16 current_channel;
1286 __le16 rf_type;
1287 __le16 reserved;
1288 u8 reserved_1[32];
1289} __packed;
1290
1291struct host_cmd_ds_version_ext {
1292 u8 version_str_sel;
1293 char version_str[128];
1294} __packed;
1295
1296struct host_cmd_ds_802_11_ibss_status {
1297 __le16 action;
1298 __le16 enable;
1299 u8 bssid[ETH_ALEN];
1300 __le16 beacon_interval;
1301 __le16 atim_window;
1302 __le16 use_g_rate_protect;
1303} __packed;
1304
1305#define CONNECTION_TYPE_INFRA 0
1306#define CONNECTION_TYPE_ADHOC 1
1307
1308struct host_cmd_ds_set_bss_mode {
1309 u8 con_type;
1310} __packed;
1311
1312struct host_cmd_ds_command {
1313 __le16 command;
1314 __le16 size;
1315 __le16 seq_num;
1316 __le16 result;
1317 union {
1318 struct host_cmd_ds_get_hw_spec hw_spec;
1319 struct host_cmd_ds_mac_control mac_ctrl;
1320 struct host_cmd_ds_802_11_mac_address mac_addr;
1321 struct host_cmd_ds_mac_multicast_adr mc_addr;
1322 struct host_cmd_ds_802_11_get_log get_log;
1323 struct host_cmd_ds_802_11_rssi_info rssi_info;
1324 struct host_cmd_ds_802_11_rssi_info_rsp rssi_info_rsp;
1325 struct host_cmd_ds_802_11_snmp_mib smib;
1326 struct host_cmd_ds_802_11_rf_channel rf_channel;
1327 struct host_cmd_ds_tx_rate_query tx_rate;
1328 struct host_cmd_ds_tx_rate_cfg tx_rate_cfg;
1329 struct host_cmd_ds_txpwr_cfg txp_cfg;
1330 struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
1331 struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
1332 struct host_cmd_ds_802_11_scan scan;
1333 struct host_cmd_ds_802_11_scan_rsp scan_resp;
1334 struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
1335 struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
1336 struct host_cmd_ds_802_11_associate associate;
1337 struct host_cmd_ds_802_11_associate_rsp associate_rsp;
1338 struct host_cmd_ds_802_11_deauthenticate deauth;
1339 struct host_cmd_ds_802_11_ad_hoc_start adhoc_start;
1340 struct host_cmd_ds_802_11_ad_hoc_result adhoc_result;
1341 struct host_cmd_ds_802_11_ad_hoc_join adhoc_join;
1342 struct host_cmd_ds_802_11d_domain_info domain_info;
1343 struct host_cmd_ds_802_11d_domain_info_rsp domain_info_resp;
1344 struct host_cmd_ds_11n_addba_req add_ba_req;
1345 struct host_cmd_ds_11n_addba_rsp add_ba_rsp;
1346 struct host_cmd_ds_11n_delba del_ba;
1347 struct host_cmd_ds_txbuf_cfg tx_buf;
1348 struct host_cmd_ds_amsdu_aggr_ctrl amsdu_aggr_ctrl;
1349 struct host_cmd_ds_11n_cfg htcfg;
1350 struct host_cmd_ds_wmm_get_status get_wmm_status;
1351 struct host_cmd_ds_802_11_key_material key_material;
1352 struct host_cmd_ds_version_ext verext;
1353 struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
1354 struct host_cmd_ds_mac_reg_access mac_reg;
1355 struct host_cmd_ds_bbp_reg_access bbp_reg;
1356 struct host_cmd_ds_rf_reg_access rf_reg;
1357 struct host_cmd_ds_pmic_reg_access pmic_reg;
1358 struct host_cmd_ds_set_bss_mode bss_mode;
1359 struct host_cmd_ds_802_11_eeprom_access eeprom;
1360 } params;
1361} __packed;
1362
1363struct mwifiex_opt_sleep_confirm {
1364 __le16 command;
1365 __le16 size;
1366 __le16 seq_num;
1367 __le16 result;
1368 __le16 action;
1369 struct sleep_confirm_param sleep_cfm;
1370} __packed;
1371
1372struct mwifiex_opt_sleep_confirm_buffer {
1373 u8 hdr[4];
1374 struct mwifiex_opt_sleep_confirm ps_cfm_sleep;
1375} __packed;
1376#endif /* !_MWIFIEX_FW_H_ */
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
new file mode 100644
index 00000000000..07ebc97e19c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -0,0 +1,665 @@
1/*
2 * Marvell Wireless LAN device driver: HW/FW Initialization
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * This function adds a BSS priority table to the table list.
30 *
31 * The function allocates a new BSS priority table node and adds it to
32 * the end of BSS priority table list, kept in driver memory.
33 */
34static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
35{
36 struct mwifiex_adapter *adapter = priv->adapter;
37 struct mwifiex_bss_prio_node *bss_prio;
38 int status = 0;
39 unsigned long flags;
40
41 bss_prio = kzalloc(sizeof(struct mwifiex_bss_prio_node), GFP_KERNEL);
42 if (!bss_prio) {
43 dev_err(adapter->dev, "%s: failed to alloc bss_prio\n",
44 __func__);
45 return -1;
46 }
47
48 bss_prio->priv = priv;
49 INIT_LIST_HEAD(&bss_prio->list);
50 if (!adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur)
51 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
52 bss_prio;
53
54 spin_lock_irqsave(&adapter->bss_prio_tbl[priv->bss_priority]
55 .bss_prio_lock, flags);
56 list_add_tail(&bss_prio->list,
57 &adapter->bss_prio_tbl[priv->bss_priority]
58 .bss_prio_head);
59 spin_unlock_irqrestore(&adapter->bss_prio_tbl[priv->bss_priority]
60 .bss_prio_lock, flags);
61
62 return status;
63}
64
65/*
66 * This function initializes the private structure and sets default
67 * values to the members.
68 *
69 * Additionally, it also initializes all the locks and sets up all the
70 * lists.
71 */
72static int mwifiex_init_priv(struct mwifiex_private *priv)
73{
74 u32 i;
75 int ret = 0;
76
77 priv->media_connected = false;
78 memset(priv->curr_addr, 0xff, ETH_ALEN);
79
80 priv->pkt_tx_ctrl = 0;
81 priv->bss_mode = MWIFIEX_BSS_MODE_INFRA;
82 priv->data_rate = 0; /* Initially indicate the rate as auto */
83 priv->is_data_rate_auto = true;
84 priv->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR;
85 priv->data_avg_factor = DEFAULT_DATA_AVG_FACTOR;
86
87 priv->sec_info.wep_status = MWIFIEX_802_11_WEP_DISABLED;
88 priv->sec_info.authentication_mode = MWIFIEX_AUTH_MODE_OPEN;
89 priv->sec_info.encryption_mode = MWIFIEX_ENCRYPTION_MODE_NONE;
90 for (i = 0; i < ARRAY_SIZE(priv->wep_key); i++)
91 memset(&priv->wep_key[i], 0, sizeof(struct mwifiex_wep_key));
92 priv->wep_key_curr_index = 0;
93 priv->curr_pkt_filter = HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON |
94 HostCmd_ACT_MAC_ETHERNETII_ENABLE;
95
96 priv->beacon_period = 100; /* beacon interval */
97 priv->attempted_bss_desc = NULL;
98 memset(&priv->curr_bss_params, 0, sizeof(priv->curr_bss_params));
99 priv->listen_interval = MWIFIEX_DEFAULT_LISTEN_INTERVAL;
100
101 memset(&priv->prev_ssid, 0, sizeof(priv->prev_ssid));
102 memset(&priv->prev_bssid, 0, sizeof(priv->prev_bssid));
103 memset(&priv->assoc_rsp_buf, 0, sizeof(priv->assoc_rsp_buf));
104 priv->assoc_rsp_size = 0;
105 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
106 priv->atim_window = 0;
107 priv->adhoc_state = ADHOC_IDLE;
108 priv->tx_power_level = 0;
109 priv->max_tx_power_level = 0;
110 priv->min_tx_power_level = 0;
111 priv->tx_rate = 0;
112 priv->rxpd_htinfo = 0;
113 priv->rxpd_rate = 0;
114 priv->rate_bitmap = 0;
115 priv->data_rssi_last = 0;
116 priv->data_rssi_avg = 0;
117 priv->data_nf_avg = 0;
118 priv->data_nf_last = 0;
119 priv->bcn_rssi_last = 0;
120 priv->bcn_rssi_avg = 0;
121 priv->bcn_nf_avg = 0;
122 priv->bcn_nf_last = 0;
123 memset(&priv->wpa_ie, 0, sizeof(priv->wpa_ie));
124 memset(&priv->aes_key, 0, sizeof(priv->aes_key));
125 priv->wpa_ie_len = 0;
126 priv->wpa_is_gtk_set = false;
127
128 memset(&priv->assoc_tlv_buf, 0, sizeof(priv->assoc_tlv_buf));
129 priv->assoc_tlv_buf_len = 0;
130 memset(&priv->wps, 0, sizeof(priv->wps));
131 memset(&priv->gen_ie_buf, 0, sizeof(priv->gen_ie_buf));
132 priv->gen_ie_buf_len = 0;
133 memset(priv->vs_ie, 0, sizeof(priv->vs_ie));
134
135 priv->wmm_required = true;
136 priv->wmm_enabled = false;
137 priv->wmm_qosinfo = 0;
138 priv->curr_bcn_buf = NULL;
139 priv->curr_bcn_size = 0;
140
141 priv->scan_block = false;
142
143 ret = mwifiex_add_bss_prio_tbl(priv);
144
145 return ret;
146}
147
148/*
149 * This function allocates buffers for members of the adapter
150 * structure.
151 *
152 * The memory allocated includes scan table, command buffers, and
153 * sleep confirm command buffer. In addition, the queues are
154 * also initialized.
155 */
156static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
157{
158 int ret = 0;
159 u32 buf_size;
160 struct mwifiex_bssdescriptor *temp_scan_table;
161
162 /* Allocate buffer to store the BSSID list */
163 buf_size = sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP;
164 temp_scan_table = kzalloc(buf_size, GFP_KERNEL);
165 if (!temp_scan_table) {
166 dev_err(adapter->dev, "%s: failed to alloc temp_scan_table\n",
167 __func__);
168 return -1;
169 }
170
171 adapter->scan_table = temp_scan_table;
172
173 /* Allocate command buffer */
174 ret = mwifiex_alloc_cmd_buffer(adapter);
175 if (ret) {
176 dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n",
177 __func__);
178 return -1;
179 }
180
181 adapter->sleep_cfm =
182 dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm_buffer)
183 + INTF_HEADER_LEN);
184
185 if (!adapter->sleep_cfm) {
186 dev_err(adapter->dev, "%s: failed to alloc sleep cfm"
187 " cmd buffer\n", __func__);
188 return -1;
189 }
190 skb_reserve(adapter->sleep_cfm, INTF_HEADER_LEN);
191
192 return 0;
193}
194
195/*
196 * This function initializes the adapter structure and sets default
197 * values to the members of adapter.
198 *
199 * This also initializes the WMM related parameters in the driver private
200 * structures.
201 */
202static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
203{
204 struct mwifiex_opt_sleep_confirm_buffer *sleep_cfm_buf = NULL;
205
206 skb_put(adapter->sleep_cfm, sizeof(sleep_cfm_buf->ps_cfm_sleep));
207 sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm_buffer *)
208 (adapter->sleep_cfm->data);
209
210 adapter->cmd_sent = false;
211 adapter->data_sent = true;
212 adapter->cmd_resp_received = false;
213 adapter->event_received = false;
214 adapter->data_received = false;
215
216 adapter->surprise_removed = false;
217
218 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
219
220 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
221 adapter->ps_state = PS_STATE_AWAKE;
222 adapter->need_to_wakeup = false;
223
224 adapter->scan_mode = HostCmd_BSS_MODE_ANY;
225 adapter->specific_scan_time = MWIFIEX_SPECIFIC_SCAN_CHAN_TIME;
226 adapter->active_scan_time = MWIFIEX_ACTIVE_SCAN_CHAN_TIME;
227 adapter->passive_scan_time = MWIFIEX_PASSIVE_SCAN_CHAN_TIME;
228
229 adapter->num_in_scan_table = 0;
230 memset(adapter->scan_table, 0,
231 (sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP));
232 adapter->scan_probes = 1;
233
234 memset(adapter->bcn_buf, 0, sizeof(adapter->bcn_buf));
235 adapter->bcn_buf_end = adapter->bcn_buf;
236
237 adapter->radio_on = RADIO_ON;
238 adapter->multiple_dtim = 1;
239
240 adapter->local_listen_interval = 0; /* default value in firmware
241 will be used */
242
243 adapter->is_deep_sleep = false;
244
245 adapter->delay_null_pkt = false;
246 adapter->delay_to_ps = 1000;
247 adapter->enhanced_ps_mode = PS_MODE_AUTO;
248
249 adapter->gen_null_pkt = false; /* Disable NULL Pkt generation by
250 default */
251 adapter->pps_uapsd_mode = false; /* Disable pps/uapsd mode by
252 default */
253 adapter->pm_wakeup_card_req = false;
254
255 adapter->pm_wakeup_fw_try = false;
256
257 adapter->max_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
258 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
259 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
260
261 adapter->is_hs_configured = false;
262 adapter->hs_cfg.conditions = cpu_to_le32(HOST_SLEEP_CFG_COND_DEF);
263 adapter->hs_cfg.gpio = HOST_SLEEP_CFG_GPIO_DEF;
264 adapter->hs_cfg.gap = HOST_SLEEP_CFG_GAP_DEF;
265 adapter->hs_activated = false;
266
267 memset(adapter->event_body, 0, sizeof(adapter->event_body));
268 adapter->hw_dot_11n_dev_cap = 0;
269 adapter->hw_dev_mcs_support = 0;
270 adapter->usr_dot_11n_dev_cap = 0;
271 adapter->usr_dev_mcs_support = 0;
272 adapter->chan_offset = 0;
273 adapter->adhoc_11n_enabled = false;
274
275 mwifiex_wmm_init(adapter);
276
277 if (adapter->sleep_cfm) {
278 memset(&sleep_cfm_buf->ps_cfm_sleep, 0,
279 adapter->sleep_cfm->len);
280 sleep_cfm_buf->ps_cfm_sleep.command =
281 cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
282 sleep_cfm_buf->ps_cfm_sleep.size =
283 cpu_to_le16(adapter->sleep_cfm->len);
284 sleep_cfm_buf->ps_cfm_sleep.result = 0;
285 sleep_cfm_buf->ps_cfm_sleep.action = cpu_to_le16(SLEEP_CONFIRM);
286 sleep_cfm_buf->ps_cfm_sleep.sleep_cfm.resp_ctrl =
287 cpu_to_le16(RESP_NEEDED);
288 }
289 memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params));
290 memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period));
291 adapter->tx_lock_flag = false;
292 adapter->null_pkt_interval = 0;
293 adapter->fw_bands = 0;
294 adapter->config_bands = 0;
295 adapter->adhoc_start_band = 0;
296 adapter->scan_channels = NULL;
297 adapter->fw_release_number = 0;
298 adapter->fw_cap_info = 0;
299 memset(&adapter->upld_buf, 0, sizeof(adapter->upld_buf));
300 adapter->event_cause = 0;
301 adapter->region_code = 0;
302 adapter->bcn_miss_time_out = DEFAULT_BCN_MISS_TIMEOUT;
303 adapter->adhoc_awake_period = 0;
304 memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
305 adapter->arp_filter_size = 0;
306
307 return;
308}
309
310/*
311 * This function frees the adapter structure.
312 *
313 * The freeing is done step by step, by canceling all
314 * pending commands, freeing the member buffers previously
315 * allocated (command buffers, scan table buffer, sleep confirm
316 * command buffer), stopping the timers and calling the cleanup
317 * routines for every interface, before the actual adapter
318 * structure is freed.
319 */
320static void
321mwifiex_free_adapter(struct mwifiex_adapter *adapter)
322{
323 if (!adapter) {
324 pr_err("%s: adapter is NULL\n", __func__);
325 return;
326 }
327
328 mwifiex_cancel_all_pending_cmd(adapter);
329
330 /* Free lock variables */
331 mwifiex_free_lock_list(adapter);
332
333 /* Free command buffer */
334 dev_dbg(adapter->dev, "info: free cmd buffer\n");
335 mwifiex_free_cmd_buffer(adapter);
336
337 del_timer(&adapter->cmd_timer);
338
339 dev_dbg(adapter->dev, "info: free scan table\n");
340 kfree(adapter->scan_table);
341 adapter->scan_table = NULL;
342
343 adapter->if_ops.cleanup_if(adapter);
344
345 dev_kfree_skb_any(adapter->sleep_cfm);
346
347 return;
348}
349
350/*
351 * This function initializes the lock variables and
352 * the list heads.
353 */
354int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
355{
356 struct mwifiex_private *priv = NULL;
357 s32 i = 0;
358 u32 j = 0;
359
360 spin_lock_init(&adapter->mwifiex_lock);
361 spin_lock_init(&adapter->int_lock);
362 spin_lock_init(&adapter->main_proc_lock);
363 spin_lock_init(&adapter->mwifiex_cmd_lock);
364 for (i = 0; i < adapter->priv_num; i++) {
365 if (adapter->priv[i]) {
366 priv = adapter->priv[i];
367 spin_lock_init(&priv->rx_pkt_lock);
368 spin_lock_init(&priv->wmm.ra_list_spinlock);
369 spin_lock_init(&priv->curr_bcn_buf_lock);
370 }
371 }
372
373 /* Initialize cmd_free_q */
374 INIT_LIST_HEAD(&adapter->cmd_free_q);
375 /* Initialize cmd_pending_q */
376 INIT_LIST_HEAD(&adapter->cmd_pending_q);
377 /* Initialize scan_pending_q */
378 INIT_LIST_HEAD(&adapter->scan_pending_q);
379
380 spin_lock_init(&adapter->cmd_free_q_lock);
381 spin_lock_init(&adapter->cmd_pending_q_lock);
382 spin_lock_init(&adapter->scan_pending_q_lock);
383
384 for (i = 0; i < adapter->priv_num; ++i) {
385 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
386 adapter->bss_prio_tbl[i].bss_prio_cur = NULL;
387 spin_lock_init(&adapter->bss_prio_tbl[i].bss_prio_lock);
388 }
389
390 for (i = 0; i < adapter->priv_num; i++) {
391 if (!adapter->priv[i])
392 continue;
393 priv = adapter->priv[i];
394 for (j = 0; j < MAX_NUM_TID; ++j) {
395 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[j].ra_list);
396 spin_lock_init(&priv->wmm.tid_tbl_ptr[j].tid_tbl_lock);
397 }
398 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
399 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
400
401 spin_lock_init(&priv->tx_ba_stream_tbl_lock);
402 spin_lock_init(&priv->rx_reorder_tbl_lock);
403 }
404
405 return 0;
406}
407
408/*
409 * This function releases the lock variables by deleting the
410 * associated list heads.
411 */
412void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
413{
414 struct mwifiex_private *priv = NULL;
415 s32 i = 0;
416 s32 j = 0;
417
418 /* Free lists */
419 list_del(&adapter->cmd_free_q);
420 list_del(&adapter->cmd_pending_q);
421 list_del(&adapter->scan_pending_q);
422
423 for (i = 0; i < adapter->priv_num; i++)
424 list_del(&adapter->bss_prio_tbl[i].bss_prio_head);
425
426 for (i = 0; i < adapter->priv_num; i++) {
427 if (adapter->priv[i]) {
428 priv = adapter->priv[i];
429 for (j = 0; j < MAX_NUM_TID; ++j)
430 list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
431 list_del(&priv->tx_ba_stream_tbl_ptr);
432 list_del(&priv->rx_reorder_tbl_ptr);
433 }
434 }
435
436 return;
437}
438
439/*
440 * This function initializes the firmware.
441 *
442 * The following operations are performed sequentially -
443 * - Allocate adapter structure
444 * - Initialize the adapter structure
445 * - Initialize the private structure
446 * - Add BSS priority tables to the adapter structure
447 * - For each interface, send the init commands to firmware
448 * - Send the first command in command pending queue, if available
449 */
450int mwifiex_init_fw(struct mwifiex_adapter *adapter)
451{
452 int ret = 0;
453 struct mwifiex_private *priv = NULL;
454 u8 i = 0;
455 u8 first_sta = true;
456 int is_cmd_pend_q_empty;
457 unsigned long flags;
458
459 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
460
461 /* Allocate memory for member of adapter structure */
462 ret = mwifiex_allocate_adapter(adapter);
463 if (ret)
464 return -1;
465
466 /* Initialize adapter structure */
467 mwifiex_init_adapter(adapter);
468
469 for (i = 0; i < adapter->priv_num; i++) {
470 if (adapter->priv[i]) {
471 priv = adapter->priv[i];
472
473 /* Initialize private structure */
474 ret = mwifiex_init_priv(priv);
475 if (ret)
476 return -1;
477 }
478 }
479 for (i = 0; i < adapter->priv_num; i++) {
480 if (adapter->priv[i]) {
481 ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta);
482 if (ret == -1)
483 return -1;
484
485 first_sta = false;
486 }
487 }
488
489 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
490 is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
491 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
492 if (!is_cmd_pend_q_empty) {
493 /* Send the first command in queue and return */
494 if (mwifiex_main_process(adapter) != -1)
495 ret = -EINPROGRESS;
496 } else {
497 adapter->hw_status = MWIFIEX_HW_STATUS_READY;
498 }
499
500 return ret;
501}
502
503/*
504 * This function deletes the BSS priority tables.
505 *
506 * The function traverses through all the allocated BSS priority nodes
507 * in every BSS priority table and frees them.
508 */
509static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
510{
511 int i;
512 struct mwifiex_adapter *adapter = priv->adapter;
513 struct mwifiex_bss_prio_node *bssprio_node = NULL, *tmp_node = NULL,
514 **cur = NULL;
515 struct list_head *head;
516 spinlock_t *lock;
517 unsigned long flags;
518
519 for (i = 0; i < adapter->priv_num; ++i) {
520 head = &adapter->bss_prio_tbl[i].bss_prio_head;
521 cur = &adapter->bss_prio_tbl[i].bss_prio_cur;
522 lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
523 dev_dbg(adapter->dev, "info: delete BSS priority table,"
524 " index = %d, i = %d, head = %p, cur = %p\n",
525 priv->bss_index, i, head, *cur);
526 if (*cur) {
527 spin_lock_irqsave(lock, flags);
528 if (list_empty(head)) {
529 spin_unlock_irqrestore(lock, flags);
530 continue;
531 }
532 bssprio_node = list_first_entry(head,
533 struct mwifiex_bss_prio_node, list);
534 spin_unlock_irqrestore(lock, flags);
535
536 list_for_each_entry_safe(bssprio_node, tmp_node, head,
537 list) {
538 if (bssprio_node->priv == priv) {
539 dev_dbg(adapter->dev, "info: Delete "
540 "node %p, next = %p\n",
541 bssprio_node, tmp_node);
542 spin_lock_irqsave(lock, flags);
543 list_del(&bssprio_node->list);
544 spin_unlock_irqrestore(lock, flags);
545 kfree(bssprio_node);
546 }
547 }
548 *cur = (struct mwifiex_bss_prio_node *)head;
549 }
550 }
551}
552
553/*
554 * This function is used to shutdown the driver.
555 *
556 * The following operations are performed sequentially -
557 * - Check if already shut down
558 * - Make sure the main process has stopped
559 * - Clean up the Tx and Rx queues
560 * - Delete BSS priority tables
561 * - Free the adapter
562 * - Notify completion
563 */
564int
565mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
566{
567 int ret = -EINPROGRESS;
568 struct mwifiex_private *priv = NULL;
569 s32 i = 0;
570 unsigned long flags;
571
572 /* mwifiex already shutdown */
573 if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
574 return 0;
575
576 adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
577 /* wait for the main process to complete */
578 if (adapter->mwifiex_processing) {
579 dev_warn(adapter->dev, "main process is still running\n");
580 return ret;
581 }
582
583 /* shut down mwifiex */
584 dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
585
586 /* Clean up Tx/Rx queues and delete BSS priority table */
587 for (i = 0; i < adapter->priv_num; i++) {
588 if (adapter->priv[i]) {
589 priv = adapter->priv[i];
590
591 mwifiex_clean_txrx(priv);
592 mwifiex_delete_bss_prio_tbl(priv);
593 }
594 }
595
596 spin_lock_irqsave(&adapter->mwifiex_lock, flags);
597
598 /* Free adapter structure */
599 mwifiex_free_adapter(adapter);
600
601 spin_unlock_irqrestore(&adapter->mwifiex_lock, flags);
602
603 /* Notify completion */
604 ret = mwifiex_shutdown_fw_complete(adapter);
605
606 return ret;
607}
608
609/*
610 * This function downloads the firmware to the card.
611 *
612 * The actual download is preceded by two sanity checks -
613 * - Check if firmware is already running
614 * - Check if the interface is the winner to download the firmware
615 *
616 * ...and followed by another -
617 * - Check if the firmware is downloaded successfully
618 *
619 * After download is successfully completed, the host interrupts are enabled.
620 */
621int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
622 struct mwifiex_fw_image *pmfw)
623{
624 int ret = 0;
625 u32 poll_num = 1;
626 int winner;
627
628 /* Check if firmware is already running */
629 ret = adapter->if_ops.check_fw_status(adapter, poll_num, &winner);
630 if (!ret) {
631 dev_notice(adapter->dev,
632 "WLAN FW already running! Skip FW download\n");
633 goto done;
634 }
635 poll_num = MAX_FIRMWARE_POLL_TRIES;
636
637 /* Check if we are the winner for downloading FW */
638 if (!winner) {
639 dev_notice(adapter->dev,
640 "Other interface already running!"
641 " Skip FW download\n");
642 poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
643 goto poll_fw;
644 }
645 if (pmfw) {
646 /* Download firmware with helper */
647 ret = adapter->if_ops.prog_fw(adapter, pmfw);
648 if (ret) {
649 dev_err(adapter->dev, "prog_fw failed ret=%#x\n", ret);
650 return ret;
651 }
652 }
653
654poll_fw:
655 /* Check if the firmware is downloaded successfully or not */
656 ret = adapter->if_ops.check_fw_status(adapter, poll_num, NULL);
657 if (ret) {
658 dev_err(adapter->dev, "FW failed to be active in time\n");
659 return -1;
660 }
661done:
662 /* re-enable host interrupt for mwifiex after fw dnld is successful */
663 adapter->if_ops.enable_int(adapter);
664 return ret;
665}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
new file mode 100644
index 00000000000..d6babfb1495
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -0,0 +1,433 @@
1/*
2 * Marvell Wireless LAN device driver: ioctl data structures & APIs
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_IOCTL_H_
21#define _MWIFIEX_IOCTL_H_
22
23#include <net/mac80211.h>
24
25enum {
26 MWIFIEX_SCAN_MODE_UNCHANGED = 0,
27 MWIFIEX_SCAN_MODE_BSS,
28 MWIFIEX_SCAN_MODE_IBSS,
29 MWIFIEX_SCAN_MODE_ANY
30};
31
32enum {
33 MWIFIEX_SCAN_TYPE_UNCHANGED = 0,
34 MWIFIEX_SCAN_TYPE_ACTIVE,
35 MWIFIEX_SCAN_TYPE_PASSIVE
36};
37
38struct mwifiex_get_scan_table_fixed {
39 u8 bssid[ETH_ALEN];
40 u8 channel;
41 u8 rssi;
42 long long network_tsf;
43};
44
45struct mwifiex_scan_time_params {
46 u32 specific_scan_time;
47 u32 active_scan_time;
48 u32 passive_scan_time;
49};
50
51struct mwifiex_user_scan {
52 u32 scan_cfg_len;
53 u8 scan_cfg_buf[1];
54};
55
56struct mwifiex_scan_req {
57 u32 scan_mode;
58 u32 scan_type;
59 struct mwifiex_802_11_ssid scan_ssid;
60 struct mwifiex_scan_time_params scan_time;
61 struct mwifiex_user_scan user_scan;
62};
63
64struct mwifiex_scan_resp {
65 u32 num_in_scan_table;
66 u8 *scan_table;
67};
68
69enum {
70 MWIFIEX_BSS_MODE_INFRA = 1,
71 MWIFIEX_BSS_MODE_IBSS,
72 MWIFIEX_BSS_MODE_AUTO
73};
74
75#define MWIFIEX_PROMISC_MODE 1
76#define MWIFIEX_MULTICAST_MODE 2
77#define MWIFIEX_ALL_MULTI_MODE 4
78#define MWIFIEX_MAX_MULTICAST_LIST_SIZE 32
79
80struct mwifiex_multicast_list {
81 u32 mode;
82 u32 num_multicast_addr;
83 u8 mac_list[MWIFIEX_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
84};
85
86#define MWIFIEX_MAX_CHANNEL_NUM 128
87
88struct mwifiex_chan_freq {
89 u32 channel;
90 u32 freq;
91};
92
93struct mwifiex_chan_list {
94 u32 num_of_chan;
95 struct mwifiex_chan_freq cf[MWIFIEX_MAX_CHANNEL_NUM];
96};
97
98struct mwifiex_ssid_bssid {
99 struct mwifiex_802_11_ssid ssid;
100 u8 bssid[ETH_ALEN];
101};
102
103enum {
104 BAND_B = 1,
105 BAND_G = 2,
106 BAND_A = 4,
107 BAND_GN = 8,
108 BAND_AN = 16,
109};
110
111#define NO_SEC_CHANNEL 0
112#define SEC_CHANNEL_ABOVE 1
113#define SEC_CHANNEL_BELOW 3
114
115struct mwifiex_ds_band_cfg {
116 u32 config_bands;
117 u32 adhoc_start_band;
118 u32 adhoc_channel;
119 u32 sec_chan_offset;
120};
121
122enum {
123 ADHOC_IDLE,
124 ADHOC_STARTED,
125 ADHOC_JOINED,
126 ADHOC_COALESCED
127};
128
129struct mwifiex_ds_get_stats {
130 u32 mcast_tx_frame;
131 u32 failed;
132 u32 retry;
133 u32 multi_retry;
134 u32 frame_dup;
135 u32 rts_success;
136 u32 rts_failure;
137 u32 ack_failure;
138 u32 rx_frag;
139 u32 mcast_rx_frame;
140 u32 fcs_error;
141 u32 tx_frame;
142 u32 wep_icv_error[4];
143};
144
145#define BCN_RSSI_LAST_MASK 0x00000001
146#define BCN_RSSI_AVG_MASK 0x00000002
147#define DATA_RSSI_LAST_MASK 0x00000004
148#define DATA_RSSI_AVG_MASK 0x00000008
149#define BCN_SNR_LAST_MASK 0x00000010
150#define BCN_SNR_AVG_MASK 0x00000020
151#define DATA_SNR_LAST_MASK 0x00000040
152#define DATA_SNR_AVG_MASK 0x00000080
153#define BCN_NF_LAST_MASK 0x00000100
154#define BCN_NF_AVG_MASK 0x00000200
155#define DATA_NF_LAST_MASK 0x00000400
156#define DATA_NF_AVG_MASK 0x00000800
157#define ALL_RSSI_INFO_MASK 0x00000fff
158
159struct mwifiex_ds_get_signal {
160 /*
161 * Bit0: Last Beacon RSSI, Bit1: Average Beacon RSSI,
162 * Bit2: Last Data RSSI, Bit3: Average Data RSSI,
163 * Bit4: Last Beacon SNR, Bit5: Average Beacon SNR,
164 * Bit6: Last Data SNR, Bit7: Average Data SNR,
165 * Bit8: Last Beacon NF, Bit9: Average Beacon NF,
166 * Bit10: Last Data NF, Bit11: Average Data NF
167 */
168 u16 selector;
169 s16 bcn_rssi_last;
170 s16 bcn_rssi_avg;
171 s16 data_rssi_last;
172 s16 data_rssi_avg;
173 s16 bcn_snr_last;
174 s16 bcn_snr_avg;
175 s16 data_snr_last;
176 s16 data_snr_avg;
177 s16 bcn_nf_last;
178 s16 bcn_nf_avg;
179 s16 data_nf_last;
180 s16 data_nf_avg;
181};
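/*
 * Usage sketch for the selector bitmap above: request only the
 * last/average beacon RSSI and the average data SNR. The masks are
 * the ones defined above; the helper name is hypothetical.
 */
static inline void example_signal_selector(struct mwifiex_ds_get_signal *sig)
{
	memset(sig, 0, sizeof(*sig));
	sig->selector = BCN_RSSI_LAST_MASK | BCN_RSSI_AVG_MASK |
			DATA_SNR_AVG_MASK;
}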
182
183struct mwifiex_fw_info {
184 u32 fw_ver;
185 u8 mac_addr[ETH_ALEN];
186};
187
188#define MWIFIEX_MAX_VER_STR_LEN 128
189
190struct mwifiex_ver_ext {
191 u32 version_str_sel;
192 char version_str[MWIFIEX_MAX_VER_STR_LEN];
193};
194
195struct mwifiex_bss_info {
196 u32 bss_mode;
197 struct mwifiex_802_11_ssid ssid;
198 u32 scan_table_idx;
199 u32 bss_chan;
200 u32 region_code;
201 u32 media_connected;
202 u32 radio_on;
203 u32 max_power_level;
204 u32 min_power_level;
205 u32 adhoc_state;
206 signed int bcn_nf_last;
207 u32 wep_status;
208 u32 is_hs_configured;
209 u32 is_deep_sleep;
210 u8 bssid[ETH_ALEN];
211};
212
213#define MAX_NUM_TID 8
214
215#define MAX_RX_WINSIZE 64
216
217struct mwifiex_ds_rx_reorder_tbl {
218 u16 tid;
219 u8 ta[ETH_ALEN];
220 u32 start_win;
221 u32 win_size;
222 u32 buffer[MAX_RX_WINSIZE];
223};
224
225struct mwifiex_ds_tx_ba_stream_tbl {
226 u16 tid;
227 u8 ra[ETH_ALEN];
228};
229
230#define DBG_CMD_NUM 5
231
232struct mwifiex_debug_info {
233 u32 int_counter;
234 u32 packets_out[MAX_NUM_TID];
235 u32 max_tx_buf_size;
236 u32 tx_buf_size;
237 u32 curr_tx_buf_size;
238 u32 tx_tbl_num;
239 struct mwifiex_ds_tx_ba_stream_tbl
240 tx_tbl[MWIFIEX_MAX_TX_BASTREAM_SUPPORTED];
241 u32 rx_tbl_num;
242 struct mwifiex_ds_rx_reorder_tbl rx_tbl
243 [MWIFIEX_MAX_RX_BASTREAM_SUPPORTED];
244 u16 ps_mode;
245 u32 ps_state;
246 u8 is_deep_sleep;
247 u8 pm_wakeup_card_req;
248 u32 pm_wakeup_fw_try;
249 u8 is_hs_configured;
250 u8 hs_activated;
251 u32 num_cmd_host_to_card_failure;
252 u32 num_cmd_sleep_cfm_host_to_card_failure;
253 u32 num_tx_host_to_card_failure;
254 u32 num_event_deauth;
255 u32 num_event_disassoc;
256 u32 num_event_link_lost;
257 u32 num_cmd_deauth;
258 u32 num_cmd_assoc_success;
259 u32 num_cmd_assoc_failure;
260 u32 num_tx_timeout;
261 u32 num_cmd_timeout;
262 u16 timeout_cmd_id;
263 u16 timeout_cmd_act;
264 u16 last_cmd_id[DBG_CMD_NUM];
265 u16 last_cmd_act[DBG_CMD_NUM];
266 u16 last_cmd_index;
267 u16 last_cmd_resp_id[DBG_CMD_NUM];
268 u16 last_cmd_resp_index;
269 u16 last_event[DBG_CMD_NUM];
270 u16 last_event_index;
271 u8 data_sent;
272 u8 cmd_sent;
273 u8 cmd_resp_received;
274 u8 event_received;
275};
276
277enum {
278 MWIFIEX_AUTH_MODE_OPEN = 0x00,
279 MWIFIEX_AUTH_MODE_SHARED = 0x01,
280 MWIFIEX_AUTH_MODE_NETWORKEAP = 0x80,
281 MWIFIEX_AUTH_MODE_AUTO = 0xFF,
282};
283
284enum {
285 MWIFIEX_ENCRYPTION_MODE_NONE = 0,
286 MWIFIEX_ENCRYPTION_MODE_WEP40 = 1,
287 MWIFIEX_ENCRYPTION_MODE_TKIP = 2,
288 MWIFIEX_ENCRYPTION_MODE_CCMP = 3,
289 MWIFIEX_ENCRYPTION_MODE_WEP104 = 4,
290};
291
292#define MWIFIEX_KEY_INDEX_UNICAST 0x40000000
293#define MWIFIEX_MAX_KEY_LENGTH 32
294#define WAPI_RXPN_LEN 16
295
296struct mwifiex_ds_encrypt_key {
297 u32 key_disable;
298 u32 key_index;
299 u32 key_len;
300 u8 key_material[MWIFIEX_MAX_KEY_LENGTH];
301 u8 mac_addr[ETH_ALEN];
302 u32 is_wapi_key;
303 u8 wapi_rxpn[WAPI_RXPN_LEN];
304};
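/*
 * Sketch: describe a unicast WEP104 key with the structure above.
 * MWIFIEX_KEY_INDEX_UNICAST marks the index as a pairwise key; the
 * helper name and the fixed 13-byte length (WEP104 = 104-bit key
 * material) are illustrative.
 */
static inline void example_fill_wep104_key(struct mwifiex_ds_encrypt_key *key,
					   const u8 *material, const u8 *peer)
{
	memset(key, 0, sizeof(*key));
	key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
	key->key_len = 13;	/* 104-bit WEP key material */
	memcpy(key->key_material, material, key->key_len);
	memcpy(key->mac_addr, peer, ETH_ALEN);
}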
305
306struct mwifiex_rate_cfg {
307 u32 action;
308 u32 is_rate_auto;
309 u32 rate;
310};
311
312struct mwifiex_data_rate {
313 u32 tx_data_rate;
314 u32 rx_data_rate;
315};
316
317struct mwifiex_power_cfg {
318 u32 is_power_auto;
319 u32 power_level;
320};
321
322struct mwifiex_ds_hs_cfg {
323 u32 is_invoke_hostcmd;
324 /* Bit0: non-unicast data
325 * Bit1: unicast data
326 * Bit2: mac events
327 * Bit3: magic packet
328 */
329 u32 conditions;
330 u32 gpio;
331 u32 gap;
332};
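/*
 * Sketch: arm host sleep to wake on unicast data or a magic packet,
 * following the bit layout documented in the structure above. The
 * helper name and the gpio/gap values are illustrative assumptions.
 */
static inline void example_fill_hs_cfg(struct mwifiex_ds_hs_cfg *hs)
{
	hs->is_invoke_hostcmd = true;
	hs->conditions = BIT(1) | BIT(3);	/* unicast data, magic packet */
	hs->gpio = 0;				/* assumed: no wakeup GPIO */
	hs->gap = 0;
}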
333
334#define DEEP_SLEEP_ON 1
335#define DEEP_SLEEP_OFF 0
336
337#define DEEP_SLEEP_IDLE_TIME 100
338
339struct mwifiex_ds_auto_ds {
340 u16 auto_ds;
341 u16 idle_time;
342};
343
344#define PS_MODE_UNCHANGED 0
345#define PS_MODE_AUTO 1
346#define PS_MODE_POLL 2
347#define PS_MODE_NULL 3
348
349
350struct mwifiex_ds_pm_cfg {
351 union {
352 u32 ps_mode;
353 struct mwifiex_ds_hs_cfg hs_cfg;
354 struct mwifiex_ds_auto_ds auto_deep_sleep;
355 u32 sleep_period;
356 } param;
357};
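/*
 * Sketch: select auto deep sleep through the PM union above, using
 * the DEEP_SLEEP_* constants defined earlier; the helper name is
 * hypothetical.
 */
static inline void example_pm_auto_ds(struct mwifiex_ds_pm_cfg *pm)
{
	pm->param.auto_deep_sleep.auto_ds = DEEP_SLEEP_ON;
	pm->param.auto_deep_sleep.idle_time = DEEP_SLEEP_IDLE_TIME;
}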
358
359struct mwifiex_ioctl_wmm_queue_status_ac {
360 u8 wmm_acm;
361 u8 flow_required;
362 u8 flow_created;
363 u8 disabled;
364};
365
366struct mwifiex_ds_wmm_queue_status {
367 struct mwifiex_ioctl_wmm_queue_status_ac
368 ac_status[IEEE80211_MAX_QUEUES];
369};
370
371struct mwifiex_ds_11n_tx_cfg {
372 u16 tx_htcap;
373 u16 tx_htinfo;
374};
375
376struct mwifiex_ds_11n_amsdu_aggr_ctrl {
377 u16 enable;
378 u16 curr_buf_size;
379};
380
381#define MWIFIEX_NUM_OF_CMD_BUFFER 20
382#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
383
384enum {
385 MWIFIEX_IE_TYPE_GEN_IE = 0,
386 MWIFIEX_IE_TYPE_ARP_FILTER,
387};
388
389enum {
390 MWIFIEX_REG_MAC = 1,
391 MWIFIEX_REG_BBP,
392 MWIFIEX_REG_RF,
393 MWIFIEX_REG_PMIC,
394 MWIFIEX_REG_CAU,
395};
396
397struct mwifiex_ds_reg_rw {
398 __le32 type;
399 __le32 offset;
400 __le32 value;
401};
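/*
 * Sketch: build a MAC-register read request. The __le32 fields are
 * little-endian on the wire, hence cpu_to_le32(); the helper name is
 * hypothetical.
 */
static inline void example_reg_read_req(struct mwifiex_ds_reg_rw *reg,
					u32 offset)
{
	reg->type = cpu_to_le32(MWIFIEX_REG_MAC);
	reg->offset = cpu_to_le32(offset);
	reg->value = cpu_to_le32(0);
}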
402
403#define MAX_EEPROM_DATA 256
404
405struct mwifiex_ds_read_eeprom {
406 __le16 offset;
407 __le16 byte_count;
408 u8 value[MAX_EEPROM_DATA];
409};
410
411struct mwifiex_ds_misc_gen_ie {
412 u32 type;
413 u32 len;
414 u8 ie_data[IW_CUSTOM_MAX];
415};
416
417struct mwifiex_ds_misc_cmd {
418 u32 len;
419 u8 cmd[MWIFIEX_SIZE_OF_CMD_BUFFER];
420};
421
422#define MWIFIEX_MAX_VSIE_LEN (256)
423#define MWIFIEX_MAX_VSIE_NUM (8)
424#define MWIFIEX_VSIE_MASK_SCAN 0x01
425#define MWIFIEX_VSIE_MASK_ASSOC 0x02
426#define MWIFIEX_VSIE_MASK_ADHOC 0x04
427
428enum {
429 MWIFIEX_FUNC_INIT = 1,
430 MWIFIEX_FUNC_SHUTDOWN,
431};
432
433#endif /* !_MWIFIEX_IOCTL_H_ */
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
new file mode 100644
index 00000000000..d06f4c2d1d3
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -0,0 +1,1464 @@
1/*
2 * Marvell Wireless LAN device driver: association and ad-hoc start/join
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28#define CAPINFO_MASK (~(BIT(15) | BIT(14) | BIT(12) | BIT(11) | BIT(9)))
29
30/*
31 * Append a generic IE as a pass through TLV to a TLV buffer.
32 *
33 * This function is called from the network join command preparation routine.
34 *
35 * If the IE buffer has been set up by the application, this routine appends
36 * the buffer as a pass through TLV type to the request.
37 */
38static int
39mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer)
40{
41 int ret_len = 0;
42 struct mwifiex_ie_types_header ie_header;
43
44 /* Null Checks */
45 if (!buffer)
46 return 0;
47 if (!(*buffer))
48 return 0;
49
50 /*
51 * If there is a generic ie buffer setup, append it to the return
52 * parameter buffer pointer.
53 */
54 if (priv->gen_ie_buf_len) {
55 dev_dbg(priv->adapter->dev, "info: %s: append generic %d to %p\n",
56 __func__, priv->gen_ie_buf_len, *buffer);
57
58 /* Wrap the generic IE buffer with a pass through TLV type */
59 ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
60 ie_header.len = cpu_to_le16(priv->gen_ie_buf_len);
61 memcpy(*buffer, &ie_header, sizeof(ie_header));
62
63 /* Increment the return size and the return buffer pointer
64 param */
65 *buffer += sizeof(ie_header);
66 ret_len += sizeof(ie_header);
67
68 /* Copy the generic IE buffer to the output buffer, advance
69 pointer */
70 memcpy(*buffer, priv->gen_ie_buf, priv->gen_ie_buf_len);
71
72 /* Increment the return size and the return buffer pointer
73 param */
74 *buffer += priv->gen_ie_buf_len;
75 ret_len += priv->gen_ie_buf_len;
76
77 /* Reset the generic IE buffer */
78 priv->gen_ie_buf_len = 0;
79 }
80
81 /* return the length appended to the buffer */
82 return ret_len;
83}
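/*
 * Resulting buffer layout for the pass-through TLV appended above
 * (little-endian header followed by the raw IE bytes):
 *
 *   | type = TLV_TYPE_PASSTHROUGH (__le16)     |
 *   | len  = priv->gen_ie_buf_len (__le16)     |
 *   | gen_ie_buf[0 .. len-1]                   |
 */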
84
85/*
86 * Append TSF tracking info from the scan table for the target AP.
87 *
88 * This function is called from the network join command preparation routine.
89 *
90 * The TSF table sent to the firmware contains two TSF values:
91 * - The TSF of the target AP from its previous beacon/probe response
92 * - The TSF timestamp of our local MAC at the time we observed the
93 * beacon/probe response.
94 *
95 * The firmware uses the timestamp values to set an initial TSF value
96 * in the MAC for the new association after a reassociation attempt.
97 */
98static int
99mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
100 struct mwifiex_bssdescriptor *bss_desc)
101{
102 struct mwifiex_ie_types_tsf_timestamp tsf_tlv;
103 long long tsf_val;
104
105 /* Null Checks */
106 if (buffer == NULL)
107 return 0;
108 if (*buffer == NULL)
109 return 0;
110
111 memset(&tsf_tlv, 0x00, sizeof(struct mwifiex_ie_types_tsf_timestamp));
112
113 tsf_tlv.header.type = cpu_to_le16(TLV_TYPE_TSFTIMESTAMP);
114 tsf_tlv.header.len = cpu_to_le16(2 * sizeof(tsf_val));
115
116 memcpy(*buffer, &tsf_tlv, sizeof(tsf_tlv.header));
117 *buffer += sizeof(tsf_tlv.header);
118
119	/* TSF of the target AP, taken from its beacon/probe response */
	memcpy(&tsf_val, &bss_desc->network_tsf, sizeof(tsf_val));
	memcpy(*buffer, &tsf_val, sizeof(tsf_val));
120	*buffer += sizeof(tsf_val);
121
122 memcpy(&tsf_val, bss_desc->time_stamp, sizeof(tsf_val));
123
124 dev_dbg(priv->adapter->dev, "info: %s: TSF offset calc: %016llx - "
125 "%016llx\n", __func__, tsf_val, bss_desc->network_tsf);
126
127 memcpy(*buffer, &tsf_val, sizeof(tsf_val));
128 *buffer += sizeof(tsf_val);
129
130 return sizeof(tsf_tlv.header) + (2 * sizeof(tsf_val));
131}
132
133/*
134 * This function finds out the common rates between rate1 and rate2.
135 *
136 * The common rates found are written back into rate1 as the output.
137 *
138 * NOTE: Setting the MSB of the basic rates needs to be taken
139 * care of, either before or after calling this function.
140 */
141static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
142 u32 rate1_size, u8 *rate2, u32 rate2_size)
143{
144 int ret = 0;
145 u8 *ptr = rate1;
146 u8 *tmp = NULL;
147 u32 i, j;
148
149 tmp = kmalloc(rate1_size, GFP_KERNEL);
150 if (!tmp) {
151 dev_err(priv->adapter->dev, "failed to alloc tmp buf\n");
152 return -ENOMEM;
153 }
154
155 memcpy(tmp, rate1, rate1_size);
156 memset(rate1, 0, rate1_size);
157
158	for (i = 0; i < rate2_size && rate2[i]; i++) {
159		for (j = 0; j < rate1_size && tmp[j]; j++) {
160 /* Check common rate, excluding the bit for
161 basic rate */
162 if ((rate2[i] & 0x7F) == (tmp[j] & 0x7F)) {
163 *rate1++ = tmp[j];
164 break;
165 }
166 }
167 }
168
169 dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n",
170 priv->data_rate);
171
172 if (!priv->is_data_rate_auto) {
173 while (*ptr) {
174 if ((*ptr & 0x7f) == priv->data_rate) {
175 ret = 0;
176 goto done;
177 }
178 ptr++;
179 }
180 dev_err(priv->adapter->dev, "previously set fixed data rate %#x"
181 " is not compatible with the network\n",
182 priv->data_rate);
183
184 ret = -1;
185 goto done;
186 }
187
188 ret = 0;
189done:
190 kfree(tmp);
191 return ret;
192}
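/*
 * Self-contained sketch of the matching rule used above: rates are
 * compared on their low seven bits, so the basic-rate flag (MSB) set
 * in either list does not prevent a match. The helper is illustrative
 * and not part of the driver API.
 */
static u32 example_intersect_rates(const u8 *a, u32 a_len,
				   const u8 *b, u32 b_len, u8 *out)
{
	u32 i, j, n = 0;

	for (i = 0; i < a_len && a[i]; i++) {
		for (j = 0; j < b_len && b[j]; j++) {
			if ((a[i] & 0x7F) == (b[j] & 0x7F)) {
				out[n++] = a[i];	/* keep list-a flags */
				break;
			}
		}
	}

	return n;
}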
193
194/*
195 * This function creates the intersection of the rates supported by a
196 * target BSS and our adapter settings for use in an assoc/join command.
197 */
198static int
199mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
200 struct mwifiex_bssdescriptor *bss_desc,
201 u8 *out_rates, u32 *out_rates_size)
202{
203 u8 card_rates[MWIFIEX_SUPPORTED_RATES];
204 u32 card_rates_size = 0;
205
206 /* Copy AP supported rates */
207 memcpy(out_rates, bss_desc->supported_rates, MWIFIEX_SUPPORTED_RATES);
208 /* Get the STA supported rates */
209 card_rates_size = mwifiex_get_active_data_rates(priv, card_rates);
210 /* Get the common rates between AP and STA supported rates */
211 if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES,
212 card_rates, card_rates_size)) {
213 *out_rates_size = 0;
214 dev_err(priv->adapter->dev, "%s: cannot get common rates\n",
215 __func__);
216 return -1;
217 }
218
219 *out_rates_size =
220 min_t(size_t, strlen(out_rates), MWIFIEX_SUPPORTED_RATES);
221
222 return 0;
223}
224
225/*
226 * This function updates the scan entry TSF timestamps to reflect
227 * a new association.
228 */
229static void
230mwifiex_update_tsf_timestamps(struct mwifiex_private *priv,
231 struct mwifiex_bssdescriptor *new_bss_desc)
232{
233 struct mwifiex_adapter *adapter = priv->adapter;
234 u32 table_idx;
235 long long new_tsf_base;
236 signed long long tsf_delta;
237
238 memcpy(&new_tsf_base, new_bss_desc->time_stamp, sizeof(new_tsf_base));
239
240 tsf_delta = new_tsf_base - new_bss_desc->network_tsf;
241
242 dev_dbg(adapter->dev, "info: TSF: update TSF timestamps, "
243 "0x%016llx -> 0x%016llx\n",
244 new_bss_desc->network_tsf, new_tsf_base);
245
246 for (table_idx = 0; table_idx < adapter->num_in_scan_table;
247 table_idx++)
248 adapter->scan_table[table_idx].network_tsf += tsf_delta;
249}
250
251/*
252 * This function appends a WAPI IE.
253 *
254 * This function is called from the network join command preparation routine.
255 *
256 * If the IE buffer has been set up by the application, this routine appends
257 * the buffer as a WAPI TLV type to the request.
258 */
259static int
260mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer)
261{
262	int ret_len = 0;
263 struct mwifiex_ie_types_header ie_header;
264
265 /* Null Checks */
266 if (buffer == NULL)
267 return 0;
268 if (*buffer == NULL)
269 return 0;
270
271 /*
272 * If there is a wapi ie buffer setup, append it to the return
273 * parameter buffer pointer.
274 */
275 if (priv->wapi_ie_len) {
276 dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n",
277 priv->wapi_ie_len, *buffer);
278
279 /* Wrap the generic IE buffer with a pass through TLV type */
280 ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE);
281 ie_header.len = cpu_to_le16(priv->wapi_ie_len);
282 memcpy(*buffer, &ie_header, sizeof(ie_header));
283
284 /* Increment the return size and the return buffer pointer
285 param */
286 *buffer += sizeof(ie_header);
287		ret_len += sizeof(ie_header);
288
289 /* Copy the wapi IE buffer to the output buffer, advance
290 pointer */
291 memcpy(*buffer, priv->wapi_ie, priv->wapi_ie_len);
292
293 /* Increment the return size and the return buffer pointer
294 param */
295 *buffer += priv->wapi_ie_len;
296		ret_len += priv->wapi_ie_len;
297
298 }
299 /* return the length appended to the buffer */
300	return ret_len;
301}
302
303/*
304 * This function appends rsn ie tlv for wpa/wpa2 security modes.
305 * It is called from the network join command preparation routine.
306 */
307static int mwifiex_append_rsn_ie_wpa_wpa2(struct mwifiex_private *priv,
308 u8 **buffer)
309{
310 struct mwifiex_ie_types_rsn_param_set *rsn_ie_tlv;
311 int rsn_ie_len;
312
313 if (!buffer || !(*buffer))
314 return 0;
315
316 rsn_ie_tlv = (struct mwifiex_ie_types_rsn_param_set *) (*buffer);
317 rsn_ie_tlv->header.type = cpu_to_le16((u16) priv->wpa_ie[0]);
318 rsn_ie_tlv->header.type = cpu_to_le16(
319 le16_to_cpu(rsn_ie_tlv->header.type) & 0x00FF);
320 rsn_ie_tlv->header.len = cpu_to_le16((u16) priv->wpa_ie[1]);
321 rsn_ie_tlv->header.len = cpu_to_le16(le16_to_cpu(rsn_ie_tlv->header.len)
322 & 0x00FF);
323 if (le16_to_cpu(rsn_ie_tlv->header.len) <= (sizeof(priv->wpa_ie) - 2))
324 memcpy(rsn_ie_tlv->rsn_ie, &priv->wpa_ie[2],
325 le16_to_cpu(rsn_ie_tlv->header.len));
326 else
327 return -1;
328
329 rsn_ie_len = sizeof(rsn_ie_tlv->header) +
330 le16_to_cpu(rsn_ie_tlv->header.len);
331 *buffer += rsn_ie_len;
332
333 return rsn_ie_len;
334}
335
336/*
337 * This function prepares command for association.
338 *
339 * This sets the following parameters -
340 * - Peer MAC address
341 * - Listen interval
342 * - Beacon interval
343 * - Capability information
344 *
345 * ...and the following TLVs, as required -
346 * - SSID TLV
347 * - PHY TLV
348 * - SS TLV
349 * - Rates TLV
350 * - Authentication TLV
351 * - Channel TLV
352 * - WPA/WPA2 IE
353 * - 11n TLV
354 * - Vendor specific TLV
355 * - WMM TLV
356 * - WAPI IE
357 * - Generic IE
358 * - TSF TLV
359 *
360 * Preparation also includes -
361 * - Setting command ID and proper size
362 * - Ensuring correct endian-ness
363 */
364int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
365 struct host_cmd_ds_command *cmd,
366 void *data_buf)
367{
368 struct host_cmd_ds_802_11_associate *assoc = &cmd->params.associate;
369 struct mwifiex_bssdescriptor *bss_desc;
370 struct mwifiex_ie_types_ssid_param_set *ssid_tlv;
371 struct mwifiex_ie_types_phy_param_set *phy_tlv;
372 struct mwifiex_ie_types_ss_param_set *ss_tlv;
373 struct mwifiex_ie_types_rates_param_set *rates_tlv;
374 struct mwifiex_ie_types_auth_type *auth_tlv;
375 struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
376 u8 rates[MWIFIEX_SUPPORTED_RATES];
377 u32 rates_size;
378 u16 tmp_cap;
379 u8 *pos;
380 int rsn_ie_len = 0;
381
382 bss_desc = (struct mwifiex_bssdescriptor *) data_buf;
383 pos = (u8 *) assoc;
384
385 mwifiex_cfg_tx_buf(priv, bss_desc);
386
387 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_ASSOCIATE);
388
389 /* Save so we know which BSS Desc to use in the response handler */
390 priv->attempted_bss_desc = bss_desc;
391
392 memcpy(assoc->peer_sta_addr,
393 bss_desc->mac_address, sizeof(assoc->peer_sta_addr));
394 pos += sizeof(assoc->peer_sta_addr);
395
396 /* Set the listen interval */
397 assoc->listen_interval = cpu_to_le16(priv->listen_interval);
398 /* Set the beacon period */
399 assoc->beacon_period = cpu_to_le16(bss_desc->beacon_period);
400
401 pos += sizeof(assoc->cap_info_bitmap);
402 pos += sizeof(assoc->listen_interval);
403 pos += sizeof(assoc->beacon_period);
404 pos += sizeof(assoc->dtim_period);
405
406 ssid_tlv = (struct mwifiex_ie_types_ssid_param_set *) pos;
407 ssid_tlv->header.type = cpu_to_le16(WLAN_EID_SSID);
408 ssid_tlv->header.len = cpu_to_le16((u16) bss_desc->ssid.ssid_len);
409 memcpy(ssid_tlv->ssid, bss_desc->ssid.ssid,
410 le16_to_cpu(ssid_tlv->header.len));
411 pos += sizeof(ssid_tlv->header) + le16_to_cpu(ssid_tlv->header.len);
412
413 phy_tlv = (struct mwifiex_ie_types_phy_param_set *) pos;
414 phy_tlv->header.type = cpu_to_le16(WLAN_EID_DS_PARAMS);
415 phy_tlv->header.len = cpu_to_le16(sizeof(phy_tlv->fh_ds.ds_param_set));
416 memcpy(&phy_tlv->fh_ds.ds_param_set,
417 &bss_desc->phy_param_set.ds_param_set.current_chan,
418 sizeof(phy_tlv->fh_ds.ds_param_set));
419 pos += sizeof(phy_tlv->header) + le16_to_cpu(phy_tlv->header.len);
420
421 ss_tlv = (struct mwifiex_ie_types_ss_param_set *) pos;
422 ss_tlv->header.type = cpu_to_le16(WLAN_EID_CF_PARAMS);
423 ss_tlv->header.len = cpu_to_le16(sizeof(ss_tlv->cf_ibss.cf_param_set));
424 pos += sizeof(ss_tlv->header) + le16_to_cpu(ss_tlv->header.len);
425
426 /* Get the common rates supported between the driver and the BSS Desc */
427 if (mwifiex_setup_rates_from_bssdesc
428 (priv, bss_desc, rates, &rates_size))
429 return -1;
430
431 /* Save the data rates into Current BSS state structure */
432 priv->curr_bss_params.num_of_rates = rates_size;
433 memcpy(&priv->curr_bss_params.data_rates, rates, rates_size);
434
435 /* Setup the Rates TLV in the association command */
436 rates_tlv = (struct mwifiex_ie_types_rates_param_set *) pos;
437 rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
438 rates_tlv->header.len = cpu_to_le16((u16) rates_size);
439 memcpy(rates_tlv->rates, rates, rates_size);
440 pos += sizeof(rates_tlv->header) + rates_size;
441 dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n",
442 rates_size);
443
444 /* Add the Authentication type to be used for Auth frames if needed */
445 if (priv->sec_info.authentication_mode != MWIFIEX_AUTH_MODE_AUTO) {
446 auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
447 auth_tlv->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
448 auth_tlv->header.len = cpu_to_le16(sizeof(auth_tlv->auth_type));
449 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
450 auth_tlv->auth_type = cpu_to_le16((u16) priv->sec_info.
451 authentication_mode);
452 else
453 auth_tlv->auth_type =
454 cpu_to_le16(MWIFIEX_AUTH_MODE_OPEN);
455 pos += sizeof(auth_tlv->header) +
456 le16_to_cpu(auth_tlv->header.len);
457 }
458
459 if (IS_SUPPORT_MULTI_BANDS(priv->adapter)
460 && !(ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
461 && (!bss_desc->disable_11n)
462 && (priv->adapter->config_bands & BAND_GN
463 || priv->adapter->config_bands & BAND_AN)
464 && (bss_desc->bcn_ht_cap)
465 )
466 ) {
467 /* Append a channel TLV for the channel the attempted AP was
468 found on */
469 chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
470 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
471 chan_tlv->header.len =
472 cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
473
474 memset(chan_tlv->chan_scan_param, 0x00,
475 sizeof(struct mwifiex_chan_scan_param_set));
476 chan_tlv->chan_scan_param[0].chan_number =
477 (bss_desc->phy_param_set.ds_param_set.current_chan);
478 dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n",
479 chan_tlv->chan_scan_param[0].chan_number);
480
481 chan_tlv->chan_scan_param[0].radio_type =
482 mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
483
484 dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n",
485 chan_tlv->chan_scan_param[0].radio_type);
486 pos += sizeof(chan_tlv->header) +
487 sizeof(struct mwifiex_chan_scan_param_set);
488 }
489
490 if (!priv->wps.session_enable) {
491 if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
492 rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
493
494 if (rsn_ie_len == -1)
495 return -1;
496 }
497
498 if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
499 && (!bss_desc->disable_11n)
500 && (priv->adapter->config_bands & BAND_GN
501 || priv->adapter->config_bands & BAND_AN))
502 mwifiex_cmd_append_11n_tlv(priv, bss_desc, &pos);
503
504 /* Append vendor specific IE TLV */
505 mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_ASSOC, &pos);
506
507 mwifiex_wmm_process_association_req(priv, &pos, &bss_desc->wmm_ie,
508 bss_desc->bcn_ht_cap);
509 if (priv->sec_info.wapi_enabled && priv->wapi_ie_len)
510 mwifiex_cmd_append_wapi_ie(priv, &pos);
511
512
513 mwifiex_cmd_append_generic_ie(priv, &pos);
514
515 mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc);
516
517 cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN);
518
519 /* Set the Capability info at last */
520 tmp_cap = bss_desc->cap_info_bitmap;
521
522 if (priv->adapter->config_bands == BAND_B)
523 SHORT_SLOT_TIME_DISABLED(tmp_cap);
524
525 tmp_cap &= CAPINFO_MASK;
526 dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
527 tmp_cap, CAPINFO_MASK);
528 assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);
529
530 return 0;
531}
532
533/*
534 * Association firmware command response handler
535 *
536 * The response buffer for the association command has the following
537 * memory layout.
538 *
539 * For cases where an association response was not received (indicated
540 * by the CapInfo and AId field):
541 *
542 * .------------------------------------------------------------.
543 * | Header(4 * sizeof(t_u16)): Standard command response hdr |
544 * .------------------------------------------------------------.
545 * | cap_info/Error Return(t_u16): |
546 * | 0xFFFF(-1): Internal error |
547 * | 0xFFFE(-2): Authentication unhandled message |
548 * | 0xFFFD(-3): Authentication refused |
549 * | 0xFFFC(-4): Timeout waiting for AP response |
550 * .------------------------------------------------------------.
551 * | status_code(t_u16): |
552 * | If cap_info is -1: |
553 * | An internal firmware failure prevented the |
554 * | command from being processed. The status_code |
555 * | will be set to 1. |
556 * | |
557 * | If cap_info is -2: |
558 * | An authentication frame was received but was |
559 * | not handled by the firmware. IEEE Status |
560 * | code for the failure is returned. |
561 * | |
562 * | If cap_info is -3: |
563 * | An authentication frame was received and the |
564 * | status_code is the IEEE Status reported in the |
565 * | response. |
566 * | |
567 * | If cap_info is -4: |
568 * | (1) Association response timeout |
569 * | (2) Authentication response timeout |
570 * .------------------------------------------------------------.
571 * | a_id(t_u16): 0xFFFF |
572 * .------------------------------------------------------------.
573 *
574 *
575 * For cases where an association response was received, the IEEE
576 * standard association response frame is returned:
577 *
578 * .------------------------------------------------------------.
579 * | Header(4 * sizeof(t_u16)): Standard command response hdr |
580 * .------------------------------------------------------------.
581 * | cap_info(t_u16): IEEE Capability |
582 * .------------------------------------------------------------.
583 * | status_code(t_u16): IEEE Status Code |
584 * .------------------------------------------------------------.
585 * | a_id(t_u16): IEEE Association ID |
586 * .------------------------------------------------------------.
587 * | IEEE IEs(variable): Any received IEs comprising the |
588 * | remaining portion of a received |
589 * | association response frame. |
590 * .------------------------------------------------------------.
591 *
592 * For simplistic handling, the status_code field can be used to determine
593 * an association success (0) or failure (non-zero).
594 */
595int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
596 struct host_cmd_ds_command *resp, void *wq_buf)
597{
598 int ret = 0;
599 struct mwifiex_wait_queue *wait_queue =
600 (struct mwifiex_wait_queue *) wq_buf;
601 struct ieee_types_assoc_rsp *assoc_rsp;
602 struct mwifiex_bssdescriptor *bss_desc;
603 u8 enable_data = true;
604
605 assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
606
607 priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
608 sizeof(priv->assoc_rsp_buf));
609
610 memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
611
612 if (le16_to_cpu(assoc_rsp->status_code)) {
613 priv->adapter->dbg.num_cmd_assoc_failure++;
614 dev_err(priv->adapter->dev, "ASSOC_RESP: association failed, "
615 "status code = %d, error = 0x%x, a_id = 0x%x\n",
616 le16_to_cpu(assoc_rsp->status_code),
617 le16_to_cpu(assoc_rsp->cap_info_bitmap),
618 le16_to_cpu(assoc_rsp->a_id));
619
620 ret = -1;
621 goto done;
622 }
623
624 /* Send a Media Connected event, according to the Spec */
625 priv->media_connected = true;
626
627 priv->adapter->ps_state = PS_STATE_AWAKE;
628 priv->adapter->pps_uapsd_mode = false;
629 priv->adapter->tx_lock_flag = false;
630
631 /* Set the attempted BSSID Index to current */
632 bss_desc = priv->attempted_bss_desc;
633
634 dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n",
635 bss_desc->ssid.ssid);
636
637 /* Make a copy of current BSSID descriptor */
638 memcpy(&priv->curr_bss_params.bss_descriptor,
639 bss_desc, sizeof(struct mwifiex_bssdescriptor));
640
641 /* Update curr_bss_params */
642 priv->curr_bss_params.bss_descriptor.channel
643 = bss_desc->phy_param_set.ds_param_set.current_chan;
644
645 priv->curr_bss_params.band = (u8) bss_desc->bss_band;
646
647 /*
648 * Adjust the timestamps in the scan table to be relative to the newly
649 * associated AP's TSF
650 */
651 mwifiex_update_tsf_timestamps(priv, bss_desc);
652
653 if (bss_desc->wmm_ie.vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC)
654 priv->curr_bss_params.wmm_enabled = true;
655 else
656 priv->curr_bss_params.wmm_enabled = false;
657
658 if ((priv->wmm_required || bss_desc->bcn_ht_cap)
659 && priv->curr_bss_params.wmm_enabled)
660 priv->wmm_enabled = true;
661 else
662 priv->wmm_enabled = false;
663
664 priv->curr_bss_params.wmm_uapsd_enabled = false;
665
666 if (priv->wmm_enabled)
667 priv->curr_bss_params.wmm_uapsd_enabled
668 = ((bss_desc->wmm_ie.qos_info_bitmap &
669 IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
670
671 dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
672 priv->curr_pkt_filter);
673 if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
674 priv->wpa_is_gtk_set = false;
675
676 if (priv->wmm_enabled) {
677 /* Don't re-enable carrier until we get the WMM_GET_STATUS
678 event */
679 enable_data = false;
680 } else {
681 /* Since WMM is not enabled, setup the queues with the
682 defaults */
683 mwifiex_wmm_setup_queue_priorities(priv, NULL);
684 mwifiex_wmm_setup_ac_downgrade(priv);
685 }
686
687 if (enable_data)
688 dev_dbg(priv->adapter->dev,
689 "info: post association, re-enabling data flow\n");
690
691 /* Reset SNR/NF/RSSI values */
692 priv->data_rssi_last = 0;
693 priv->data_nf_last = 0;
694 priv->data_rssi_avg = 0;
695 priv->data_nf_avg = 0;
696 priv->bcn_rssi_last = 0;
697 priv->bcn_nf_last = 0;
698 priv->bcn_rssi_avg = 0;
699 priv->bcn_nf_avg = 0;
700 priv->rxpd_rate = 0;
701 priv->rxpd_htinfo = 0;
702
703 mwifiex_save_curr_bcn(priv);
704
705 priv->adapter->dbg.num_cmd_assoc_success++;
706
707 dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n");
708
709	/* Add the ra_list here for infra mode, as there will always be
710	   only one RA */
711 mwifiex_ralist_add(priv,
712 priv->curr_bss_params.bss_descriptor.mac_address);
713
714 if (!netif_carrier_ok(priv->netdev))
715 netif_carrier_on(priv->netdev);
716 if (netif_queue_stopped(priv->netdev))
717 netif_wake_queue(priv->netdev);
718
719 if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
720 priv->scan_block = true;
721
722done:
723 /* Need to indicate IOCTL complete */
724 if (wait_queue) {
725 if (ret) {
726 if (assoc_rsp->status_code)
727 wait_queue->status =
728 le16_to_cpu(assoc_rsp->status_code);
729 else
730 wait_queue->status = MWIFIEX_ERROR_ASSOC_FAIL;
731 } else {
732 wait_queue->status = MWIFIEX_ERROR_NO_ERROR;
733 }
734 }
735
736 return ret;
737}
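/*
 * Sketch of the decode rule from the response layout documented
 * above: status_code alone decides success (0) or failure (non-zero),
 * regardless of whether cap_info carries an IEEE capability or one of
 * the 0xFFFF..0xFFFC firmware error markers. The helper name is
 * hypothetical.
 */
static inline bool example_assoc_succeeded(struct ieee_types_assoc_rsp *rsp)
{
	return le16_to_cpu(rsp->status_code) == 0;
}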
738
739/*
740 * This function prepares command for ad-hoc start.
741 *
742 * Driver will fill up SSID, BSS mode, IBSS parameters, physical
743 * parameters, probe delay, and capability information. Firmware
744 * will fill up beacon period, basic rates and operational rates.
745 *
746 * In addition, the following TLVs are added -
747 * - Channel TLV
748 * - Vendor specific IE
749 * - WPA/WPA2 IE
750 * - HT Capabilities IE
751 * - HT Information IE
752 *
753 * Preparation also includes -
754 * - Setting command ID and proper size
755 * - Ensuring correct endian-ness
756 */
757int
758mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
759 struct host_cmd_ds_command *cmd, void *data_buf)
760{
761 int ret = 0, rsn_ie_len = 0;
762 struct mwifiex_adapter *adapter = priv->adapter;
763 struct host_cmd_ds_802_11_ad_hoc_start *adhoc_start =
764 &cmd->params.adhoc_start;
765 struct mwifiex_bssdescriptor *bss_desc;
766 u32 cmd_append_size = 0;
767 u32 i;
768 u16 tmp_cap;
769 uint16_t ht_cap_info;
770 struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
771
772 struct mwifiex_ie_types_htcap *ht_cap;
773 struct mwifiex_ie_types_htinfo *ht_info;
774 u8 *pos = (u8 *) adhoc_start +
775 sizeof(struct host_cmd_ds_802_11_ad_hoc_start);
776
777 if (!adapter)
778 return -1;
779
780 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_START);
781
782 bss_desc = &priv->curr_bss_params.bss_descriptor;
783 priv->attempted_bss_desc = bss_desc;
784
785 /*
786 * Fill in the parameters for 2 data structures:
787 * 1. struct host_cmd_ds_802_11_ad_hoc_start command
788 * 2. bss_desc
789 * Driver will fill up SSID, bss_mode,IBSS param, Physical Param,
790 * probe delay, and Cap info.
791 * Firmware will fill up beacon period, Basic rates
792 * and operational rates.
793 */
794
795 memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
796
797 memcpy(adhoc_start->ssid,
798 ((struct mwifiex_802_11_ssid *) data_buf)->ssid,
799 ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len);
800
801 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
802 adhoc_start->ssid);
803
804 memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
805 memcpy(bss_desc->ssid.ssid,
806 ((struct mwifiex_802_11_ssid *) data_buf)->ssid,
807 ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len);
808
809 bss_desc->ssid.ssid_len =
810 ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len;
811
812 /* Set the BSS mode */
813 adhoc_start->bss_mode = HostCmd_BSS_MODE_IBSS;
814 bss_desc->bss_mode = MWIFIEX_BSS_MODE_IBSS;
815 adhoc_start->beacon_period = cpu_to_le16(priv->beacon_period);
816 bss_desc->beacon_period = priv->beacon_period;
817
818 /* Set Physical param set */
819/* Parameter IE Id */
820#define DS_PARA_IE_ID 3
821/* Parameter IE length */
822#define DS_PARA_IE_LEN 1
823
824 adhoc_start->phy_param_set.ds_param_set.element_id = DS_PARA_IE_ID;
825 adhoc_start->phy_param_set.ds_param_set.len = DS_PARA_IE_LEN;
826
827 if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
828 (priv, adapter->adhoc_start_band, (u16)
829 priv->adhoc_channel)) {
830 struct mwifiex_chan_freq_power *cfp;
831 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
832 adapter->adhoc_start_band, FIRST_VALID_CHANNEL);
833 if (cfp)
834 priv->adhoc_channel = (u8) cfp->channel;
835 }
836
837 if (!priv->adhoc_channel) {
838 dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
839 return -1;
840 }
841
842 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
843 priv->adhoc_channel);
844
845 priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
846 priv->curr_bss_params.band = adapter->adhoc_start_band;
847
848 bss_desc->channel = priv->adhoc_channel;
849 adhoc_start->phy_param_set.ds_param_set.current_chan =
850 priv->adhoc_channel;
851
852 memcpy(&bss_desc->phy_param_set, &adhoc_start->phy_param_set,
853 sizeof(union ieee_types_phy_param_set));
854
855 /* Set IBSS param set */
856/* IBSS parameter IE Id */
857#define IBSS_PARA_IE_ID 6
858/* IBSS parameter IE length */
859#define IBSS_PARA_IE_LEN 2
860
861 adhoc_start->ss_param_set.ibss_param_set.element_id = IBSS_PARA_IE_ID;
862 adhoc_start->ss_param_set.ibss_param_set.len = IBSS_PARA_IE_LEN;
863 adhoc_start->ss_param_set.ibss_param_set.atim_window
864 = cpu_to_le16(priv->atim_window);
865 memcpy(&bss_desc->ss_param_set, &adhoc_start->ss_param_set,
866 sizeof(union ieee_types_ss_param_set));
867
868 /* Set Capability info */
869 bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_IBSS;
870 tmp_cap = le16_to_cpu(adhoc_start->cap_info_bitmap);
871 tmp_cap &= ~WLAN_CAPABILITY_ESS;
872 tmp_cap |= WLAN_CAPABILITY_IBSS;
873
874 /* Set up privacy in bss_desc */
875 if (priv->sec_info.encryption_mode != MWIFIEX_ENCRYPTION_MODE_NONE) {
876 /* Ad-Hoc capability privacy on */
877 dev_dbg(adapter->dev,
878 "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
879 bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
880 tmp_cap |= WLAN_CAPABILITY_PRIVACY;
881 } else {
882 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set,"
883 " setting privacy to ACCEPT ALL\n");
884 bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
885 }
886
887 memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
888 mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
889 if ((adapter->adhoc_start_band & BAND_G) &&
890 (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
891 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_MAC_CONTROL,
892 HostCmd_ACT_GEN_SET,
893 0, NULL, &priv->curr_pkt_filter);
894
895 if (ret) {
896 dev_err(adapter->dev,
897 "ADHOC_S_CMD: G Protection config failed\n");
898 return -1;
899 }
900 }
901 /* Find the last non zero */
902 for (i = 0; i < sizeof(adhoc_start->DataRate) &&
903 adhoc_start->DataRate[i];
904 i++)
905 ;
906
907 priv->curr_bss_params.num_of_rates = i;
908
909 /* Copy the ad-hoc creating rates into Current BSS rate structure */
910 memcpy(&priv->curr_bss_params.data_rates,
911 &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);
912
913 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
914 adhoc_start->DataRate[0], adhoc_start->DataRate[1],
915 adhoc_start->DataRate[2], adhoc_start->DataRate[3]);
916
917 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
918
919 if (IS_SUPPORT_MULTI_BANDS(adapter)) {
920 /* Append a channel TLV */
921 chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
922 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
923 chan_tlv->header.len =
924 cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
925
926 memset(chan_tlv->chan_scan_param, 0x00,
927 sizeof(struct mwifiex_chan_scan_param_set));
928 chan_tlv->chan_scan_param[0].chan_number =
929 (u8) priv->curr_bss_params.bss_descriptor.channel;
930
931 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n",
932 chan_tlv->chan_scan_param[0].chan_number);
933
934 chan_tlv->chan_scan_param[0].radio_type
935 = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
936 if (adapter->adhoc_start_band & BAND_GN
937 || adapter->adhoc_start_band & BAND_AN) {
938 if (adapter->chan_offset == SEC_CHANNEL_ABOVE)
939 chan_tlv->chan_scan_param[0].radio_type |=
940 SECOND_CHANNEL_ABOVE;
941 else if (adapter->chan_offset == SEC_CHANNEL_BELOW)
942 chan_tlv->chan_scan_param[0].radio_type |=
943 SECOND_CHANNEL_BELOW;
944 }
945 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
946 chan_tlv->chan_scan_param[0].radio_type);
947 pos += sizeof(chan_tlv->header) +
948 sizeof(struct mwifiex_chan_scan_param_set);
949 cmd_append_size +=
950 sizeof(chan_tlv->header) +
951 sizeof(struct mwifiex_chan_scan_param_set);
952 }
953
954 /* Append vendor specific IE TLV */
955 cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
956 MWIFIEX_VSIE_MASK_ADHOC, &pos);
957
958 if (priv->sec_info.wpa_enabled) {
959 rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
960 if (rsn_ie_len == -1)
961 return -1;
962 cmd_append_size += rsn_ie_len;
963 }
964
965 if (adapter->adhoc_11n_enabled) {
966 {
967 ht_cap = (struct mwifiex_ie_types_htcap *) pos;
968 memset(ht_cap, 0,
969 sizeof(struct mwifiex_ie_types_htcap));
970 ht_cap->header.type =
971 cpu_to_le16(WLAN_EID_HT_CAPABILITY);
972 ht_cap->header.len =
973 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
974 ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
975
976 SETHT_SHORTGI20(ht_cap_info);
977 if (adapter->chan_offset) {
978 SETHT_SHORTGI40(ht_cap_info);
979 SETHT_DSSSCCK40(ht_cap_info);
980 SETHT_SUPPCHANWIDTH(ht_cap_info);
981 SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
982 }
983
984 ht_cap->ht_cap.ampdu_params_info
985 = MAX_RX_AMPDU_SIZE_64K;
986 ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
987 pos += sizeof(struct mwifiex_ie_types_htcap);
988 cmd_append_size +=
989 sizeof(struct mwifiex_ie_types_htcap);
990 }
991 {
992 ht_info = (struct mwifiex_ie_types_htinfo *) pos;
993 memset(ht_info, 0,
994 sizeof(struct mwifiex_ie_types_htinfo));
995 ht_info->header.type =
996 cpu_to_le16(WLAN_EID_HT_INFORMATION);
997 ht_info->header.len =
998 cpu_to_le16(sizeof(struct ieee80211_ht_info));
999 ht_info->ht_info.control_chan =
1000 (u8) priv->curr_bss_params.bss_descriptor.
1001 channel;
1002 if (adapter->chan_offset) {
1003 ht_info->ht_info.ht_param =
1004 adapter->chan_offset;
1005 SET_CHANWIDTH40(ht_info->ht_info.ht_param);
1006 }
1007 ht_info->ht_info.operation_mode =
1008 cpu_to_le16(NON_GREENFIELD_STAS);
1009 ht_info->ht_info.basic_set[0] = 0xff;
1010 pos += sizeof(struct mwifiex_ie_types_htinfo);
1011 cmd_append_size +=
1012 sizeof(struct mwifiex_ie_types_htinfo);
1013 }
1014 }
1015
1016 cmd->size = cpu_to_le16((u16)
1017 (sizeof(struct host_cmd_ds_802_11_ad_hoc_start)
1018 + S_DS_GEN + cmd_append_size));
1019
1020 if (adapter->adhoc_start_band == BAND_B)
1021 SHORT_SLOT_TIME_DISABLED(tmp_cap);
1022 else
1023 SHORT_SLOT_TIME_ENABLED(tmp_cap);
1024
1025 adhoc_start->cap_info_bitmap = cpu_to_le16(tmp_cap);
1026
1027 return 0;
1028}
1029
1030/*
1031 * This function prepares command for ad-hoc join.
1032 *
1033 * Most of the parameters are set up by copying from the target BSS descriptor
1034 * from the scan response.
1035 *
1036 * In addition, the following TLVs are added -
1037 * - Channel TLV
1038 * - Vendor specific IE
1039 * - WPA/WPA2 IE
1040 * - 11n IE
1041 *
1042 * Preparation also includes -
1043 * - Setting command ID and proper size
1044 * - Ensuring correct endian-ness
1045 */
1046int
1047mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
1048 struct host_cmd_ds_command *cmd, void *data_buf)
1049{
1050 int ret = 0, rsn_ie_len = 0;
1051 struct host_cmd_ds_802_11_ad_hoc_join *adhoc_join =
1052 &cmd->params.adhoc_join;
1053 struct mwifiex_bssdescriptor *bss_desc =
1054 (struct mwifiex_bssdescriptor *) data_buf;
1055 struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
1056 u32 cmd_append_size = 0;
1057 u16 tmp_cap;
1058 u32 i, rates_size = 0;
1059 u16 curr_pkt_filter;
1060 u8 *pos =
1061 (u8 *) adhoc_join +
1062 sizeof(struct host_cmd_ds_802_11_ad_hoc_join);
1063
1064/* Use G protection */
1065#define USE_G_PROTECTION 0x02
1066 if (bss_desc->erp_flags & USE_G_PROTECTION) {
1067		curr_pkt_filter = priv->curr_pkt_filter |
1068				  HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;
1070
1071 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_MAC_CONTROL,
1072 HostCmd_ACT_GEN_SET, 0, NULL,
1073 &curr_pkt_filter);
1074 if (ret) {
1075 dev_err(priv->adapter->dev,
1076 "ADHOC_J_CMD: G Protection config failed\n");
1077 return -1;
1078 }
1079 }
1080
1081 priv->attempted_bss_desc = bss_desc;
1082
1083 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_JOIN);
1084
1085 adhoc_join->bss_descriptor.bss_mode = HostCmd_BSS_MODE_IBSS;
1086
1087 adhoc_join->bss_descriptor.beacon_period
1088 = cpu_to_le16(bss_desc->beacon_period);
1089
1090 memcpy(&adhoc_join->bss_descriptor.bssid,
1091 &bss_desc->mac_address, ETH_ALEN);
1092
1093 memcpy(&adhoc_join->bss_descriptor.ssid,
1094 &bss_desc->ssid.ssid, bss_desc->ssid.ssid_len);
1095
1096 memcpy(&adhoc_join->bss_descriptor.phy_param_set,
1097 &bss_desc->phy_param_set,
1098 sizeof(union ieee_types_phy_param_set));
1099
1100 memcpy(&adhoc_join->bss_descriptor.ss_param_set,
1101 &bss_desc->ss_param_set, sizeof(union ieee_types_ss_param_set));
1102
1103 tmp_cap = bss_desc->cap_info_bitmap;
1104
1105 tmp_cap &= CAPINFO_MASK;
1106
1107 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: tmp_cap=%4X"
1108 " CAPINFO_MASK=%4lX\n", tmp_cap, CAPINFO_MASK);
1109
1110 /* Information on BSSID descriptor passed to FW */
1111 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID = %pM, SSID = %s\n",
1112 adhoc_join->bss_descriptor.bssid,
1113 adhoc_join->bss_descriptor.ssid);
1114
1115	for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
1116		    bss_desc->supported_rates[i];
1117 i++)
1118 ;
1119 rates_size = i;
1120
1121 /* Copy Data Rates from the Rates recorded in scan response */
1122 memset(adhoc_join->bss_descriptor.data_rates, 0,
1123 sizeof(adhoc_join->bss_descriptor.data_rates));
1124 memcpy(adhoc_join->bss_descriptor.data_rates,
1125 bss_desc->supported_rates, rates_size);
1126
1127 /* Copy the adhoc join rates into Current BSS state structure */
1128 priv->curr_bss_params.num_of_rates = rates_size;
1129 memcpy(&priv->curr_bss_params.data_rates, bss_desc->supported_rates,
1130 rates_size);
1131
1132 /* Copy the channel information */
1133 priv->curr_bss_params.bss_descriptor.channel = bss_desc->channel;
1134 priv->curr_bss_params.band = (u8) bss_desc->bss_band;
1135
1136 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED
1137 || priv->sec_info.wpa_enabled)
1138 tmp_cap |= WLAN_CAPABILITY_PRIVACY;
1139
1140 if (IS_SUPPORT_MULTI_BANDS(priv->adapter)) {
1141 /* Append a channel TLV */
1142 chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
1143 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
1144 chan_tlv->header.len =
1145 cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
1146
1147 memset(chan_tlv->chan_scan_param, 0x00,
1148 sizeof(struct mwifiex_chan_scan_param_set));
1149 chan_tlv->chan_scan_param[0].chan_number =
1150 (bss_desc->phy_param_set.ds_param_set.current_chan);
1151 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan = %d\n",
1152 chan_tlv->chan_scan_param[0].chan_number);
1153
1154 chan_tlv->chan_scan_param[0].radio_type =
1155 mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
1156
1157 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band = %d\n",
1158 chan_tlv->chan_scan_param[0].radio_type);
1159 pos += sizeof(chan_tlv->header) +
1160 sizeof(struct mwifiex_chan_scan_param_set);
1161 cmd_append_size += sizeof(chan_tlv->header) +
1162 sizeof(struct mwifiex_chan_scan_param_set);
1163 }
1164
1165 if (priv->sec_info.wpa_enabled)
1166 rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
1167 if (rsn_ie_len == -1)
1168 return -1;
1169 cmd_append_size += rsn_ie_len;
1170
1171 if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
1172 cmd_append_size += mwifiex_cmd_append_11n_tlv(priv,
1173 bss_desc, &pos);
1174
1175 /* Append vendor specific IE TLV */
1176 cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
1177 MWIFIEX_VSIE_MASK_ADHOC, &pos);
1178
1179 cmd->size = cpu_to_le16((u16)
1180 (sizeof(struct host_cmd_ds_802_11_ad_hoc_join)
1181 + S_DS_GEN + cmd_append_size));
1182
1183 adhoc_join->bss_descriptor.cap_info_bitmap = cpu_to_le16(tmp_cap);
1184
1185 return ret;
1186}
1187
1188/*
1189 * This function handles the command response of ad-hoc start and
1190 * ad-hoc join.
1191 *
1192 * The function generates a device-connected event to notify
1193 * the applications, in case of successful ad-hoc start/join, and
1194 * saves the beacon buffer.
1195 */
1196int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
1197 struct host_cmd_ds_command *resp, void *wq_buf)
1198{
1199 int ret = 0;
1200 struct mwifiex_wait_queue *wait_queue =
1201 (struct mwifiex_wait_queue *) wq_buf;
1202 struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result;
1203 struct mwifiex_bssdescriptor *bss_desc;
1204 u16 command = le16_to_cpu(resp->command);
1205 u16 result = le16_to_cpu(resp->result);
1206
1207 adhoc_result = &resp->params.adhoc_result;
1208
1209 bss_desc = priv->attempted_bss_desc;
1210
1211 /* Join result code 0 --> SUCCESS */
1212 if (result) {
1213 dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n");
1214 if (priv->media_connected)
1215 mwifiex_reset_connect_state(priv);
1216
1217 memset(&priv->curr_bss_params.bss_descriptor,
1218 0x00, sizeof(struct mwifiex_bssdescriptor));
1219
1220 ret = -1;
1221 goto done;
1222 }
1223
1224 /* Send a Media Connected event, according to the Spec */
1225 priv->media_connected = true;
1226
1227 if (command == HostCmd_CMD_802_11_AD_HOC_START) {
1228 dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n",
1229 bss_desc->ssid.ssid);
1230
1231 /* Update the created network descriptor with the new BSSID */
1232 memcpy(bss_desc->mac_address,
1233 adhoc_result->bssid, ETH_ALEN);
1234
1235 priv->adhoc_state = ADHOC_STARTED;
1236 } else {
1237 /*
1238 * Now the join cmd should be successful.
1239 * If BSSID has changed use SSID to compare instead of BSSID
1240 */
1241 dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n",
1242 bss_desc->ssid.ssid);
1243
1244 /*
1245 * Make a copy of current BSSID descriptor, only needed for
1246 * join since the current descriptor is already being used
1247 * for adhoc start
1248 */
1249 memcpy(&priv->curr_bss_params.bss_descriptor,
1250 bss_desc, sizeof(struct mwifiex_bssdescriptor));
1251
1252 priv->adhoc_state = ADHOC_JOINED;
1253 }
1254
1255 dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n",
1256 priv->adhoc_channel);
1257 dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n",
1258 priv->curr_bss_params.bss_descriptor.mac_address);
1259
1260 if (!netif_carrier_ok(priv->netdev))
1261 netif_carrier_on(priv->netdev);
1262 if (netif_queue_stopped(priv->netdev))
1263 netif_wake_queue(priv->netdev);
1264
1265 mwifiex_save_curr_bcn(priv);
1266
1267done:
1268 /* Need to indicate IOCTL complete */
1269 if (wait_queue) {
1270 if (ret)
1271 wait_queue->status = MWIFIEX_ERROR_ASSOC_FAIL;
1272 else
1273 wait_queue->status = MWIFIEX_ERROR_NO_ERROR;
1274
1275 }
1276
1277 return ret;
1278}
1279
1280/*
1281 * This function associates to a specific BSS discovered in a scan.
1282 *
1283 * It clears any past association response stored for application
1284 * retrieval and calls the command preparation routine to send the
1285 * command to firmware.
1286 */
1287int mwifiex_associate(struct mwifiex_private *priv,
1288 void *wait_queue, struct mwifiex_bssdescriptor *bss_desc)
1289{
1290 int ret = 0;
1291 u8 current_bssid[ETH_ALEN];
1292
1293 /* Return error if the adapter or table entry is not marked as infra */
1294 if ((priv->bss_mode != MWIFIEX_BSS_MODE_INFRA) ||
1295 (bss_desc->bss_mode != MWIFIEX_BSS_MODE_INFRA))
1296 return -1;
1297
1298 memcpy(&current_bssid,
1299 &priv->curr_bss_params.bss_descriptor.mac_address,
1300 sizeof(current_bssid));
1301
1302 /* Clear any past association response stored for application
1303 retrieval */
1304 priv->assoc_rsp_size = 0;
1305
1306 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_ASSOCIATE,
1307 HostCmd_ACT_GEN_SET, 0, wait_queue,
1308 bss_desc);
1309
1310 return ret;
1311}
1312
1313/*
1314 * This function starts an ad-hoc network.
1315 *
1316 * It calls the command preparation routine to send the command to firmware.
1317 */
1318int
1319mwifiex_adhoc_start(struct mwifiex_private *priv,
1320 void *wait_queue, struct mwifiex_802_11_ssid *adhoc_ssid)
1321{
1322 int ret = 0;
1323
1324 dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n",
1325 priv->adhoc_channel);
1326 dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
1327 priv->curr_bss_params.bss_descriptor.channel);
1328 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
1329 priv->curr_bss_params.band);
1330
1331 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_AD_HOC_START,
1332 HostCmd_ACT_GEN_SET, 0, wait_queue,
1333 adhoc_ssid);
1334
1335 return ret;
1336}
1337
1338/*
1339 * This function joins an ad-hoc network found in a previous scan.
1340 *
1341 * It calls the command preparation routine to send the command to firmware,
1342 * if already not connected to the requested SSID.
1343 */
1344int mwifiex_adhoc_join(struct mwifiex_private *priv,
1345 void *wait_queue, struct mwifiex_bssdescriptor *bss_desc)
1346{
1347 int ret = 0;
1348
1349 dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n",
1350 priv->curr_bss_params.bss_descriptor.ssid.ssid);
1351 dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n",
1352 priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
1353 dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n",
1354 bss_desc->ssid.ssid);
1355 dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n",
1356 bss_desc->ssid.ssid_len);
1357
1358 /* Check if the requested SSID is already joined */
1359 if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
1360 !mwifiex_ssid_cmp(&bss_desc->ssid,
1361 &priv->curr_bss_params.bss_descriptor.ssid) &&
1362 (priv->curr_bss_params.bss_descriptor.bss_mode ==
1363 MWIFIEX_BSS_MODE_IBSS)) {
1364 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: new ad-hoc SSID"
1365 " is the same as current; not attempting to re-join\n");
1366 return -1;
1367 }
1368
1369 dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
1370 priv->curr_bss_params.bss_descriptor.channel);
1371	dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
1372 priv->curr_bss_params.band);
1373
1374 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
1375 HostCmd_ACT_GEN_SET, 0, wait_queue,
1376 bss_desc);
1377
1378 return ret;
1379}
1380
1381/*
1382 * This function deauthenticates/disconnects from infra network by sending
1383 * deauthentication request.
1384 */
1385static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv,
1386 struct mwifiex_wait_queue *wait,
1387 u8 *mac)
1388{
1389 u8 mac_address[ETH_ALEN];
1390 int ret = 0;
1391 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
1392
1393 if (mac) {
1394 if (!memcmp(mac, zero_mac, sizeof(zero_mac)))
1395 memcpy((u8 *) &mac_address,
1396 (u8 *) &priv->curr_bss_params.bss_descriptor.
1397 mac_address, ETH_ALEN);
1398 else
1399 memcpy((u8 *) &mac_address, (u8 *) mac, ETH_ALEN);
1400 } else {
1401 memcpy((u8 *) &mac_address, (u8 *) &priv->curr_bss_params.
1402 bss_descriptor.mac_address, ETH_ALEN);
1403 }
1404
1405 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
1406 HostCmd_ACT_GEN_SET, 0, wait, &mac_address);
1407
1408 if (!ret && wait)
1409 ret = -EINPROGRESS;
1410
1411 return ret;
1412}
1413
1414/*
1415 * This function deauthenticates/disconnects from a BSS.
1416 *
1417 * In case of infra mode, it sends a deauthentication request, and
1418 * in case of ad-hoc mode, a stop network request is sent to the firmware.
1419 */
1420int mwifiex_deauthenticate(struct mwifiex_private *priv,
1421 struct mwifiex_wait_queue *wait, u8 *mac)
1422{
1423 int ret = 0;
1424
1425 if (priv->media_connected) {
1426 if (priv->bss_mode == MWIFIEX_BSS_MODE_INFRA) {
1427 ret = mwifiex_deauthenticate_infra(priv, wait, mac);
1428 } else if (priv->bss_mode == MWIFIEX_BSS_MODE_IBSS) {
1429 ret = mwifiex_prepare_cmd(priv,
1430 HostCmd_CMD_802_11_AD_HOC_STOP,
1431 HostCmd_ACT_GEN_SET, 0, wait, NULL);
1432
1433 if (!ret && wait)
1434 ret = -EINPROGRESS;
1435 }
1436 }
1437
1438 return ret;
1439}
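/*
 * Usage sketch (illustration only, not part of this patch): a caller that
 * simply wants to drop the current connection can pass NULL (or an all-zero
 * MAC address); mwifiex_deauthenticate_infra() then substitutes the BSSID
 * from curr_bss_params:
 *
 *	ret = mwifiex_deauthenticate(priv, wait, NULL);
 */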
1440
1441/*
1442 * This function converts band to radio type used in channel TLV.
1443 */
1444u8
1445mwifiex_band_to_radio_type(u8 band)
1446{
1447 u8 ret_radio_type;
1448
1449 switch (band) {
1450 case BAND_A:
1451 case BAND_AN:
1452 case BAND_A | BAND_AN:
1453 ret_radio_type = HostCmd_SCAN_RADIO_TYPE_A;
1454 break;
1455 case BAND_B:
1456 case BAND_G:
1457 case BAND_B | BAND_G:
1458 default:
1459 ret_radio_type = HostCmd_SCAN_RADIO_TYPE_BG;
1460 break;
1461 }
1462
1463 return ret_radio_type;
1464}
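/*
 * Illustrative sketch (not part of this patch): how a caller might use
 * mwifiex_band_to_radio_type() when filling a channel TLV for a scan
 * command. The field names below are assumptions made for illustration.
 */
#if 0	/* example only */
static void example_fill_chan_tlv(struct mwifiex_chan_scan_param_set *chan,
				  u8 band, u8 channel)
{
	/* Select the BG (2.4 GHz) or A (5 GHz) radio type for the channel */
	chan->radio_type = mwifiex_band_to_radio_type(band);	/* assumed field */
	chan->chan_number = channel;				/* assumed field */
}
#endif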
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
new file mode 100644
index 00000000000..ed89ca41a90
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -0,0 +1,1102 @@
1/*
2 * Marvell Wireless LAN device driver: major functions
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "main.h"
21#include "wmm.h"
22#include "cfg80211.h"
23#include "11n.h"
24
25#define VERSION "1.0"
26
27const char driver_version[] = "mwifiex " VERSION " (%s) ";
28
29struct mwifiex_adapter *g_adapter;
30EXPORT_SYMBOL_GPL(g_adapter);
31
32static struct mwifiex_bss_attr mwifiex_bss_sta[] = {
33 {MWIFIEX_BSS_TYPE_STA, MWIFIEX_DATA_FRAME_TYPE_ETH_II, true, 0, 0},
34};
35
36static int drv_mode = DRV_MODE_STA;
37
38static char fw_name[32] = DEFAULT_FW_NAME;
39
40/* Supported drv_mode table */
41static struct mwifiex_drv_mode mwifiex_drv_mode_tbl[] = {
42 {
43 /* drv_mode */
44 .drv_mode = DRV_MODE_STA,
45 /* intf number */
46 .intf_num = ARRAY_SIZE(mwifiex_bss_sta),
47 /* bss_attr */
48 .bss_attr = mwifiex_bss_sta,
	49	},
	50
51};
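/*
 * Sketch (assumption, not part of this patch): supporting another drv_mode
 * would mean defining a bss attribute table and a matching entry in
 * mwifiex_drv_mode_tbl[], e.g. for a hypothetical uAP-only mode:
 */
#if 0	/* example only */
static struct mwifiex_bss_attr mwifiex_bss_uap[] = {
	{MWIFIEX_BSS_TYPE_UAP, MWIFIEX_DATA_FRAME_TYPE_ETH_II, true, 0, 0},
};

	/* ...added to mwifiex_drv_mode_tbl[]: */
	{
		.drv_mode = DRV_MODE_UAP,
		.intf_num = ARRAY_SIZE(mwifiex_bss_uap),
		.bss_attr = mwifiex_bss_uap,
	},
#endif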
52
53/*
54 * This function registers the device and performs all the necessary
55 * initializations.
56 *
57 * The following initialization operations are performed -
58 * - Allocate adapter structure
59 * - Save interface specific operations table in adapter
60 * - Call interface specific initialization routine
61 * - Allocate private structures
62 * - Set default adapter structure parameters
63 * - Initialize locks
64 *
 65 * In case of any errors during initialization, this function also ensures
66 * proper cleanup before exiting.
67 */
68static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
69 struct mwifiex_device *mdevice, void **padapter)
70{
71 int ret = 0;
72 struct mwifiex_adapter *adapter = NULL;
73 u8 i = 0;
74
 75	/* Allocate memory for adapter structure */
 76	adapter = kzalloc(sizeof(struct mwifiex_adapter), GFP_KERNEL);
 77	if (!adapter)
78 return -1;
79
80 g_adapter = adapter;
81 adapter->card = card;
82
83 /* Save interface specific operations in adapter */
84 memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
85
 86	/* Card specific initialization has been deferred until now */
87 ret = adapter->if_ops.init_if(adapter);
88 if (ret)
89 goto error;
90
91 adapter->priv_num = 0;
92 for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) {
93 adapter->priv[i] = NULL;
94
95 if (!mdevice->bss_attr[i].active)
96 continue;
97
98 /* For valid bss_attr,
99 allocate memory for private structure */
100 adapter->priv[i] = kzalloc(sizeof(struct mwifiex_private),
101 GFP_KERNEL);
102 if (!adapter->priv[i]) {
103 dev_err(adapter->dev, "%s: failed to alloc priv[%d]\n",
104 __func__, i);
105 goto error;
106 }
107
108 adapter->priv_num++;
109 memset(adapter->priv[i], 0,
110 sizeof(struct mwifiex_private));
111 adapter->priv[i]->adapter = adapter;
112 /* Save bss_type, frame_type & bss_priority */
113 adapter->priv[i]->bss_type = (u8) mdevice->bss_attr[i].bss_type;
114 adapter->priv[i]->frame_type =
115 (u8) mdevice->bss_attr[i].frame_type;
116 adapter->priv[i]->bss_priority =
117 (u8) mdevice->bss_attr[i].bss_priority;
118 if (mdevice->bss_attr[i].bss_type == MWIFIEX_BSS_TYPE_STA)
119 adapter->priv[i]->bss_role = MWIFIEX_BSS_ROLE_STA;
120 else if (mdevice->bss_attr[i].bss_type == MWIFIEX_BSS_TYPE_UAP)
121 adapter->priv[i]->bss_role = MWIFIEX_BSS_ROLE_UAP;
122
123 /* Save bss_index & bss_num */
124 adapter->priv[i]->bss_index = i;
125 adapter->priv[i]->bss_num = mdevice->bss_attr[i].bss_num;
126 }
127
128 /* Initialize lock variables */
129 if (mwifiex_init_lock_list(adapter))
130 goto error;
131
132 init_timer(&adapter->cmd_timer);
133 adapter->cmd_timer.function = mwifiex_cmd_timeout_func;
134 adapter->cmd_timer.data = (unsigned long) adapter;
135
136 /* Return pointer of struct mwifiex_adapter */
137 *padapter = adapter;
138 return 0;
139
140error:
141 dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n");
142
143 /* Free lock variables */
144 mwifiex_free_lock_list(adapter);
145 for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++)
146 kfree(adapter->priv[i]);
147 kfree(adapter);
148
149 return -1;
150}
151
152/*
153 * This function unregisters the device and performs all the necessary
154 * cleanups.
155 *
156 * The following cleanup operations are performed -
157 * - Free the timers
158 * - Free beacon buffers
159 * - Free private structures
160 * - Free adapter structure
161 */
162static int mwifiex_unregister(struct mwifiex_adapter *adapter)
163{
164 s32 i = 0;
165
166 del_timer(&adapter->cmd_timer);
167
168 /* Free private structures */
169 for (i = 0; i < adapter->priv_num; i++) {
170 if (adapter->priv[i]) {
171 mwifiex_free_curr_bcn(adapter->priv[i]);
172 kfree(adapter->priv[i]);
173 }
174 }
175
176 kfree(adapter);
177 return 0;
178}
179
180/*
181 * The main process.
182 *
183 * This function is the main procedure of the driver and handles various driver
184 * operations. It runs in a loop and provides the core functionalities.
185 *
186 * The main responsibilities of this function are -
187 * - Ensure concurrency control
188 * - Handle pending interrupts and call interrupt handlers
189 * - Wake up the card if required
190 * - Handle command responses and call response handlers
191 * - Handle events and call event handlers
192 * - Execute pending commands
193 * - Transmit pending data packets
194 */
195int mwifiex_main_process(struct mwifiex_adapter *adapter)
196{
197 int ret = 0;
198 unsigned long flags;
199
200 spin_lock_irqsave(&adapter->main_proc_lock, flags);
201
202 /* Check if already processing */
203 if (adapter->mwifiex_processing) {
204 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
205 goto exit_main_proc;
206 } else {
207 adapter->mwifiex_processing = true;
208 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
209 }
210process_start:
211 do {
212 if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
213 (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
214 break;
215
216 /* Handle pending interrupt if any */
217 if (adapter->int_status) {
218 if (adapter->hs_activated)
219 mwifiex_process_hs_config(adapter);
220 adapter->if_ops.process_int_status(adapter);
221 }
222
223		/* Need to wake up the card? */
224 if ((adapter->ps_state == PS_STATE_SLEEP) &&
225 (adapter->pm_wakeup_card_req &&
226 !adapter->pm_wakeup_fw_try) &&
227 (is_command_pending(adapter)
228 || !mwifiex_wmm_lists_empty(adapter))) {
229 adapter->pm_wakeup_fw_try = true;
230 adapter->if_ops.wakeup(adapter);
231 continue;
232 }
233 if (IS_CARD_RX_RCVD(adapter)) {
234 adapter->pm_wakeup_fw_try = false;
235 if (adapter->ps_state == PS_STATE_SLEEP)
236 adapter->ps_state = PS_STATE_AWAKE;
237 } else {
238 /* We have tried to wakeup the card already */
239 if (adapter->pm_wakeup_fw_try)
240 break;
241 if (adapter->ps_state != PS_STATE_AWAKE ||
242 adapter->tx_lock_flag)
243 break;
244
245 if (adapter->scan_processing || adapter->data_sent
246 || mwifiex_wmm_lists_empty(adapter)) {
247 if (adapter->cmd_sent || adapter->curr_cmd
248 || (!is_command_pending(adapter)))
249 break;
250 }
251 }
252
253 /* Check for Cmd Resp */
254 if (adapter->cmd_resp_received) {
255 adapter->cmd_resp_received = false;
256 mwifiex_process_cmdresp(adapter);
257
258 /* call mwifiex back when init_fw is done */
259 if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) {
260 adapter->hw_status = MWIFIEX_HW_STATUS_READY;
261 mwifiex_init_fw_complete(adapter);
262 }
263 }
264
265 /* Check for event */
266 if (adapter->event_received) {
267 adapter->event_received = false;
268 mwifiex_process_event(adapter);
269 }
270
271 /* Check if we need to confirm Sleep Request
272 received previously */
273 if (adapter->ps_state == PS_STATE_PRE_SLEEP) {
274 if (!adapter->cmd_sent && !adapter->curr_cmd)
275 mwifiex_check_ps_cond(adapter);
276 }
277
278		/* The ps_state may have been changed during processing of
279 * Sleep Request event.
280 */
281 if ((adapter->ps_state == PS_STATE_SLEEP)
282 || (adapter->ps_state == PS_STATE_PRE_SLEEP)
283 || (adapter->ps_state == PS_STATE_SLEEP_CFM)
284 || adapter->tx_lock_flag)
285 continue;
286
287 if (!adapter->cmd_sent && !adapter->curr_cmd) {
288 if (mwifiex_exec_next_cmd(adapter) == -1) {
289 ret = -1;
290 break;
291 }
292 }
293
294 if (!adapter->scan_processing && !adapter->data_sent &&
295 !mwifiex_wmm_lists_empty(adapter)) {
296 mwifiex_wmm_process_tx(adapter);
297 if (adapter->hs_activated) {
298 adapter->is_hs_configured = false;
299 mwifiex_hs_activated_event
300 (mwifiex_get_priv
301 (adapter, MWIFIEX_BSS_ROLE_ANY),
302 false);
303 }
304 }
305
306 if (adapter->delay_null_pkt && !adapter->cmd_sent &&
307 !adapter->curr_cmd && !is_command_pending(adapter)
308 && mwifiex_wmm_lists_empty(adapter)) {
309 if (!mwifiex_send_null_packet
310 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
311 MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
312 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET)) {
313 adapter->delay_null_pkt = false;
314 adapter->ps_state = PS_STATE_SLEEP;
315 }
316 break;
317 }
318 } while (true);
319
320 if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
321 goto process_start;
322
323 spin_lock_irqsave(&adapter->main_proc_lock, flags);
324 adapter->mwifiex_processing = false;
325 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
326
327exit_main_proc:
328 if (adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING)
329 mwifiex_shutdown_drv(adapter);
330 return ret;
331}
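/*
 * Usage sketch (illustration only): interface code does not normally call
 * mwifiex_main_process() directly from IRQ context; instead it records the
 * interrupt status (typically under adapter->int_lock) and schedules the
 * main work, which runs this function:
 *
 *	adapter->int_status |= pending_bits;	(* 'pending_bits' is assumed *)
 *	queue_work(adapter->workqueue, &adapter->main_work);
 */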
332
333/*
334 * This function initializes the software.
335 *
336 * The main work includes allocating and initializing the adapter structure
337 * and initializing the private structures.
338 */
339static int
340mwifiex_init_sw(void *card, struct mwifiex_if_ops *if_ops, void **pmwifiex)
341{
342 int i;
343 struct mwifiex_device device;
344 struct mwifiex_drv_mode *drv_mode_ptr;
345
346 /* find mwifiex_drv_mode entry from mwifiex_drv_mode_tbl */
347 drv_mode_ptr = NULL;
348 for (i = 0; i < ARRAY_SIZE(mwifiex_drv_mode_tbl); i++) {
349 if (mwifiex_drv_mode_tbl[i].drv_mode == drv_mode) {
350 drv_mode_ptr = &mwifiex_drv_mode_tbl[i];
351 break;
352 }
353 }
354
355 if (!drv_mode_ptr) {
356 pr_err("invalid drv_mode=%d\n", drv_mode);
357 return -1;
358 }
359
360 memset(&device, 0, sizeof(struct mwifiex_device));
361
362 for (i = 0; i < drv_mode_ptr->intf_num; i++) {
363 device.bss_attr[i].bss_type =
364 drv_mode_ptr->bss_attr[i].bss_type;
365 device.bss_attr[i].frame_type =
366 drv_mode_ptr->bss_attr[i].frame_type;
367 device.bss_attr[i].active = drv_mode_ptr->bss_attr[i].active;
368 device.bss_attr[i].bss_priority =
369 drv_mode_ptr->bss_attr[i].bss_priority;
370 device.bss_attr[i].bss_num = drv_mode_ptr->bss_attr[i].bss_num;
371 }
372
373 if (mwifiex_register(card, if_ops, &device, pmwifiex))
374 return -1;
375
376 return 0;
377}
378
379/*
380 * This function frees the adapter structure.
381 *
382 * Additionally, this closes the netlink socket, frees the timers
383 * and private structures.
384 */
385static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
386{
387 if (!adapter) {
388 pr_err("%s: adapter is NULL\n", __func__);
389 return;
390 }
391
392 mwifiex_unregister(adapter);
393 pr_debug("info: %s: free adapter\n", __func__);
394}
395
396/*
397 * This function initializes the hardware and firmware.
398 *
399 * The main initialization steps followed are -
400 * - Download the correct firmware to card
401 * - Allocate and initialize the adapter structure
402 * - Initialize the private structures
403 * - Issue the init commands to firmware
404 */
405static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
406{
407 int ret = 0;
408 int err;
409 struct mwifiex_fw_image fw;
410
411 memset(&fw, 0, sizeof(struct mwifiex_fw_image));
412
413 switch (adapter->revision_id) {
414 case SD8787_W0:
415 case SD8787_W1:
416 strcpy(fw_name, SD8787_W1_FW_NAME);
417 break;
418 case SD8787_A0:
419 case SD8787_A1:
420 strcpy(fw_name, SD8787_AX_FW_NAME);
421 break;
422 default:
423 break;
424 }
425
426 err = request_firmware(&adapter->firmware, fw_name, adapter->dev);
427 if (err < 0) {
428 dev_err(adapter->dev, "request_firmware() returned"
429 " error code %#x\n", err);
430 ret = -1;
431 goto done;
432 }
433 fw.fw_buf = (u8 *) adapter->firmware->data;
434 fw.fw_len = adapter->firmware->size;
435
436 ret = mwifiex_dnld_fw(adapter, &fw);
437 if (ret == -1)
438 goto done;
439
440 dev_notice(adapter->dev, "WLAN FW is active\n");
441
442 adapter->init_wait_q_woken = false;
443 ret = mwifiex_init_fw(adapter);
444 if (ret == -1) {
445 goto done;
446 } else if (!ret) {
447 adapter->hw_status = MWIFIEX_HW_STATUS_READY;
448 goto done;
449 }
450 /* Wait for mwifiex_init to complete */
451 wait_event_interruptible(adapter->init_wait_q,
452 adapter->init_wait_q_woken);
453 if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) {
454 ret = -1;
455 goto done;
456 }
457 ret = 0;
458
459done:
460 if (adapter->firmware)
461 release_firmware(adapter->firmware);
462 if (ret)
463 ret = -1;
464 return ret;
465}
466
467/*
468 * This function sets up the transmit parameters of an SKB.
469 *
470 * It derives the WMM priority (TID) from the IP header of the given SKB
471 * and records the time at which the packet was queued, updating the
472 * SKB's priority and timestamp fields accordingly.
473 */
474static void
475mwifiex_fill_buffer(struct sk_buff *skb)
476{
477 struct ethhdr *eth = NULL;
478 struct iphdr *iph;
479 struct timeval tv;
480 u8 tid = 0;
481
482 eth = (struct ethhdr *) skb->data;
483 switch (eth->h_proto) {
484 case __constant_htons(ETH_P_IP):
485 iph = ip_hdr(skb);
486 tid = IPTOS_PREC(iph->tos);
487 pr_debug("data: packet type ETH_P_IP: %04x, tid=%#x prio=%#x\n",
488 eth->h_proto, tid, skb->priority);
489 break;
490 case __constant_htons(ETH_P_ARP):
491 pr_debug("data: ARP packet: %04x\n", eth->h_proto);
492 default:
493 break;
494 }
495/* Bit offset of the precedence field within the IP TOS byte */
496#define IPTOS_OFFSET 5
497 tid = (tid >> IPTOS_OFFSET);
498 skb->priority = tid;
499 /* Record the current time the packet was queued; used to
500 determine the amount of time the packet was queued in
501 the driver before it was sent to the firmware.
502 The delay is then sent along with the packet to the
503 firmware for aggregate delay calculation for stats and
504 MSDU lifetime expiry.
505 */
506 do_gettimeofday(&tv);
507 skb->tstamp = timeval_to_ktime(tv);
508 return;
509}
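/*
 * Worked example (illustration only): a packet carrying DSCP EF has
 * iph->tos = 0xB8, so IPTOS_PREC() keeps the precedence bits (0xA0) and
 * the shift by IPTOS_OFFSET (5) yields tid = 5, which is stored in
 * skb->priority and later used for WMM queueing.
 */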
510
511/*
512 * CFG802.11 network device handler for open.
513 *
514 * Starts the data queue.
515 */
516static int
517mwifiex_open(struct net_device *dev)
518{
519 netif_start_queue(dev);
520 return 0;
521}
522
523/*
524 * CFG802.11 network device handler for close.
525 */
526static int
527mwifiex_close(struct net_device *dev)
528{
529 return 0;
530}
531
532/*
533 * CFG802.11 network device handler for data transmission.
534 */
535static int
536mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
537{
538 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
539 struct sk_buff *new_skb = NULL;
540 struct mwifiex_txinfo *tx_info;
541
542 dev_dbg(priv->adapter->dev, "data: %lu BSS(%d): Data <= kernel\n",
543 jiffies, priv->bss_index);
544
545 if (priv->adapter->surprise_removed) {
546		kfree_skb(skb);
547 priv->stats.tx_dropped++;
548 return 0;
549 }
550 if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
551 dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
552		kfree_skb(skb);
553 priv->stats.tx_dropped++;
554 return 0;
555 }
556 if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
557 dev_dbg(priv->adapter->dev,
558 "data: Tx: insufficient skb headroom %d\n",
559 skb_headroom(skb));
560 /* Insufficient skb headroom - allocate a new skb */
561 new_skb =
562 skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
563 if (unlikely(!new_skb)) {
564 dev_err(priv->adapter->dev, "Tx: cannot alloca new_skb\n");
565 kfree(skb);
566 priv->stats.tx_dropped++;
567 return 0;
568 }
569 kfree_skb(skb);
570 skb = new_skb;
571 dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n",
572 skb_headroom(skb));
573 }
574
575 tx_info = MWIFIEX_SKB_TXCB(skb);
576 tx_info->bss_index = priv->bss_index;
577 mwifiex_fill_buffer(skb);
578
579 mwifiex_wmm_add_buf_txqueue(priv->adapter, skb);
580 atomic_inc(&priv->adapter->tx_pending);
581
582 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
583 netif_stop_queue(priv->netdev);
584 dev->trans_start = jiffies;
585 }
586
587 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
588
589 return 0;
590}
591
592/*
593 * CFG802.11 network device handler for setting MAC address.
594 */
595static int
596mwifiex_set_mac_address(struct net_device *dev, void *addr)
597{
598 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
599 struct sockaddr *hw_addr = (struct sockaddr *) addr;
600
601 memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
602
603 if (mwifiex_request_set_mac_address(priv)) {
604 dev_err(priv->adapter->dev, "set MAC address failed\n");
605 return -EFAULT;
606 }
607 memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
608
609 return 0;
610}
611
612/*
613 * CFG802.11 network device handler for setting multicast list.
614 */
615static void mwifiex_set_multicast_list(struct net_device *dev)
616{
617 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
618 mwifiex_request_set_multicast_list(priv, dev);
619}
620
621/*
622 * CFG802.11 network device handler for transmission timeout.
623 */
624static void
625mwifiex_tx_timeout(struct net_device *dev)
626{
627 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
628
629 dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
630 jiffies, priv->bss_index);
631 dev->trans_start = jiffies;
632 priv->num_tx_timeout++;
633}
634
635/*
636 * CFG802.11 network device handler for statistics retrieval.
637 */
638static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
639{
640 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
641
642 return &priv->stats;
643}
644
645/* Network device handlers */
646static const struct net_device_ops mwifiex_netdev_ops = {
647 .ndo_open = mwifiex_open,
648 .ndo_stop = mwifiex_close,
649 .ndo_start_xmit = mwifiex_hard_start_xmit,
650 .ndo_set_mac_address = mwifiex_set_mac_address,
651 .ndo_tx_timeout = mwifiex_tx_timeout,
652 .ndo_get_stats = mwifiex_get_stats,
653 .ndo_set_multicast_list = mwifiex_set_multicast_list,
654};
655
656/*
657 * This function initializes the private structure parameters.
658 *
659 * The following wait queues are initialized -
660 * - IOCTL wait queue
661 * - Command wait queue
662 * - Statistics wait queue
663 *
664 * ...and the following default parameters are set -
665 * - Current key index : Set to 0
666 * - Rate index : Set to auto
667 * - Media connected : Set to disconnected
668 * - Adhoc link sensed : Set to false
669 * - Nick name : Set to null
670 * - Number of Tx timeout : Set to 0
671 * - Device address : Set to current address
672 *
673 * In addition, the CFG80211 work queue is also created.
674 */
675static void
676mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev)
677{
678 dev->netdev_ops = &mwifiex_netdev_ops;
679 /* Initialize private structure */
680 init_waitqueue_head(&priv->ioctl_wait_q);
681 init_waitqueue_head(&priv->cmd_wait_q);
682 init_waitqueue_head(&priv->w_stats_wait_q);
683 priv->current_key_index = 0;
684 priv->media_connected = false;
685 memset(&priv->nick_name, 0, sizeof(priv->nick_name));
686 priv->num_tx_timeout = 0;
687 priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
688 INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
689 memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
690}
691
692/*
693 * This function adds a new logical interface.
694 *
695 * It allocates, initializes and registers the interface by performing
 696 * the following operations -
697 * - Allocate a new net device structure
698 * - Assign device name
699 * - Register the new device with CFG80211 subsystem
700 * - Initialize semaphore and private structure
701 * - Register the new device with kernel
702 * - Create the complete debug FS structure if configured
703 */
704static struct mwifiex_private *mwifiex_add_interface(
705 struct mwifiex_adapter *adapter,
706 u8 bss_index, u8 bss_type)
707{
708 struct net_device *dev = NULL;
709 struct mwifiex_private *priv = NULL;
710 void *mdev_priv = NULL;
711
712 dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), "mlan%d",
713 ether_setup, 1);
714 if (!dev) {
715 dev_err(adapter->dev, "no memory available for netdevice\n");
716 goto error;
717 }
718	if (dev_alloc_name(dev, dev->name) < 0) {
719 dev_err(adapter->dev, "unable to alloc name for netdevice\n");
720 goto error;
721 }
722
723 if (mwifiex_register_cfg80211(dev, adapter->priv[bss_index]->curr_addr,
724 adapter->priv[bss_index]) != 0) {
725 dev_err(adapter->dev, "cannot register netdevice with cfg80211\n");
726 goto error;
727 }
728 /* Save the priv pointer in netdev */
729 priv = adapter->priv[bss_index];
730 mdev_priv = netdev_priv(dev);
731 *((unsigned long *) mdev_priv) = (unsigned long) priv;
732
733 priv->netdev = dev;
734
735 sema_init(&priv->async_sem, 1);
736 priv->scan_pending_on_block = false;
737
738 mwifiex_init_priv_params(priv, dev);
739
740 SET_NETDEV_DEV(dev, adapter->dev);
741
742 /* Register network device */
743 if (register_netdev(dev)) {
744 dev_err(adapter->dev, "cannot register virtual network device\n");
745 goto error;
746 }
747
748 dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
749#ifdef CONFIG_DEBUG_FS
750 mwifiex_dev_debugfs_init(priv);
751#endif
752 return priv;
753error:
754 if (dev)
755 free_netdev(dev);
756 return NULL;
757}
758
759/*
760 * This function removes a logical interface.
761 *
762 * It deregisters, resets and frees the interface by performing
763 * the following operations -
764 * - Disconnect the device if connected, send wireless event to
765 * notify applications.
766 * - Remove the debug FS structure if configured
767 * - Unregister the device from kernel
768 * - Free the net device structure
769 * - Cancel all works and destroy work queue
770 * - Unregister and free the wireless device from CFG80211 subsystem
771 */
772static void
773mwifiex_remove_interface(struct mwifiex_adapter *adapter, u8 bss_index)
774{
775 struct net_device *dev = NULL;
776 struct mwifiex_private *priv = adapter->priv[bss_index];
777
778 if (!priv)
779 return;
780 dev = priv->netdev;
781
782 if (priv->media_connected)
783 priv->media_connected = false;
784
785#ifdef CONFIG_DEBUG_FS
786 mwifiex_dev_debugfs_remove(priv);
787#endif
788	/* Last reference should be ours */
789 dev_dbg(adapter->dev, "info: %s: refcnt = %d\n",
790 dev->name, netdev_refcnt_read(dev));
791
792 if (dev->reg_state == NETREG_REGISTERED)
793 unregister_netdev(dev);
794
795 /* Clear the priv in adapter */
796 priv->netdev = NULL;
797 if (dev)
798 free_netdev(dev);
799
800 cancel_work_sync(&priv->cfg_workqueue);
801 flush_workqueue(priv->workqueue);
802 destroy_workqueue(priv->workqueue);
803 wiphy_unregister(priv->wdev->wiphy);
804 wiphy_free(priv->wdev->wiphy);
805 kfree(priv->wdev);
806
807 return;
808}
809
810/*
811 * Sends IOCTL request to shutdown firmware.
812 *
813 * This function allocates the IOCTL request buffer, fills it
814 * with requisite parameters and calls the IOCTL handler.
815 */
816int mwifiex_shutdown_fw(struct mwifiex_private *priv, u8 wait_option)
817{
818 struct mwifiex_wait_queue *wait = NULL;
819 int status = 0;
820
821 /* Allocate an IOCTL request buffer */
822 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
823 if (!wait)
824 return -ENOMEM;
825
826 status = mwifiex_misc_ioctl_init_shutdown(priv->adapter, wait,
827 MWIFIEX_FUNC_SHUTDOWN);
828
829 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
830
831 kfree(wait);
832 return status;
833}
834EXPORT_SYMBOL_GPL(mwifiex_shutdown_fw);
835
836/*
837 * This function checks if any command is pending.
838 */
839int is_command_pending(struct mwifiex_adapter *adapter)
840{
841 unsigned long flags;
842 int is_cmd_pend_q_empty;
843
844 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
845 is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
846 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
847
848 return !is_cmd_pend_q_empty;
849}
850
851/*
852 * This function returns the correct private structure pointer based
853 * upon the BSS number.
854 */
855struct mwifiex_private *
856mwifiex_bss_index_to_priv(struct mwifiex_adapter *adapter, u8 bss_index)
857{
858 if (!adapter || (bss_index >= adapter->priv_num))
859 return NULL;
860 return adapter->priv[bss_index];
861}
862
863/*
864 * This is the main work queue function.
865 *
866 * It handles the main process, which in turn handles the complete
867 * driver operations.
868 */
869static void mwifiex_main_work_queue(struct work_struct *work)
870{
871 struct mwifiex_adapter *adapter =
872 container_of(work, struct mwifiex_adapter, main_work);
873
874 if (adapter->surprise_removed)
875 return;
876 mwifiex_main_process(adapter);
877}
878
879/*
880 * This function cancels all works in the queue and destroys
881 * the main workqueue.
882 */
883static void
884mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
885{
886 flush_workqueue(adapter->workqueue);
887 destroy_workqueue(adapter->workqueue);
888 adapter->workqueue = NULL;
889}
890
891/*
892 * This function adds the card.
893 *
 894 * This function performs the following major steps to set up the device -
895 * - Initialize software. This includes probing the card, registering
896 * the interface operations table, and allocating/initializing the
897 * adapter structure
898 * - Set up the netlink socket
899 * - Create and start the main work queue
900 * - Register the device
901 * - Initialize firmware and hardware
902 * - Add logical interfaces
903 */
904int
905mwifiex_add_card(void *card, struct semaphore *sem,
906 struct mwifiex_if_ops *if_ops)
907{
908 int status = 0;
909 int i;
910 struct mwifiex_adapter *adapter = NULL;
911 struct mwifiex_drv_mode *drv_mode_info = &mwifiex_drv_mode_tbl[0];
912
913 if (down_interruptible(sem))
914 goto exit_sem_err;
915
916 if (mwifiex_init_sw(card, if_ops, (void **) &adapter)) {
917 pr_err("%s: software init failed\n", __func__);
918 goto err_init_sw;
919 }
920
921 adapter->drv_mode = drv_mode_info;
922
923 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
924 /* PnP and power profile */
925 adapter->surprise_removed = false;
926 init_waitqueue_head(&adapter->init_wait_q);
927 adapter->is_suspended = false;
928 adapter->hs_activated = false;
929 init_waitqueue_head(&adapter->hs_activate_wait_q);
930
931 /* Create workqueue */
932 adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
933 if (!adapter->workqueue)
934 goto err_kmalloc;
935
936 INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
937
938 /* Register the device. Fill up the private data structure with relevant
939	   information from the card and request the required IRQ. */
940 if (adapter->if_ops.register_dev(adapter)) {
941 pr_err("%s: failed to register mwifiex device\n", __func__);
942 goto err_registerdev;
943 }
944
945 /* Init FW and HW */
946 if (mwifiex_init_hw_fw(adapter)) {
947 pr_err("%s: firmware init failed\n", __func__);
948 goto err_init_fw;
949 }
950 /* Add interfaces */
951 for (i = 0; i < drv_mode_info->intf_num; i++) {
952 if (!mwifiex_add_interface(adapter, i,
953 adapter->drv_mode->bss_attr[i].bss_type)) {
954 status = -1;
955 break;
956 }
957 }
958 if (status)
959 goto err_add_intf;
960
961 up(sem);
962
963 return 0;
964
965err_add_intf:
966 for (i = 0; i < adapter->priv_num; i++)
967 mwifiex_remove_interface(adapter, i);
968err_init_fw:
969 /* Unregister device */
970 pr_debug("info: %s: unregister device\n", __func__);
971 adapter->if_ops.unregister_dev(adapter);
972err_registerdev:
973 adapter->surprise_removed = true;
974 mwifiex_terminate_workqueue(adapter);
975err_kmalloc:
976 if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
977 (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
978 pr_debug("info: %s: shutdown mwifiex\n", __func__);
979 adapter->init_wait_q_woken = false;
980 status = mwifiex_shutdown_drv(adapter);
981 if (status == -EINPROGRESS)
982 wait_event_interruptible(adapter->init_wait_q,
983 adapter->init_wait_q_woken);
984 }
985
986 mwifiex_free_adapter(adapter);
987
988err_init_sw:
989 up(sem);
990
991exit_sem_err:
992 return -1;
993}
994EXPORT_SYMBOL_GPL(mwifiex_add_card);
995
996/*
997 * This function removes the card.
998 *
 999 * This function performs the following major steps to remove the device -
1000 * - Stop data traffic
1001 * - Shutdown firmware
1002 * - Remove the logical interfaces
1003 * - Terminate the work queue
1004 * - Unregister the device
1005 * - Free the adapter structure
1006 */
1007int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
1008{
1009 struct mwifiex_private *priv = NULL;
1010 int status;
1011 int i;
1012
1013 if (down_interruptible(sem))
1014 goto exit_sem_err;
1015
1016 if (!adapter)
1017 goto exit_remove;
1018
1019 adapter->surprise_removed = true;
1020
1021 /* Stop data */
1022 for (i = 0; i < adapter->priv_num; i++) {
1023 priv = adapter->priv[i];
1024 if (priv) {
1025 if (!netif_queue_stopped(priv->netdev))
1026 netif_stop_queue(priv->netdev);
1027 if (netif_carrier_ok(priv->netdev))
1028 netif_carrier_off(priv->netdev);
1029 }
1030 }
1031
1032 dev_dbg(adapter->dev, "cmd: calling mwifiex_shutdown_drv...\n");
1033 adapter->init_wait_q_woken = false;
1034 status = mwifiex_shutdown_drv(adapter);
1035 if (status == -EINPROGRESS)
1036 wait_event_interruptible(adapter->init_wait_q,
1037 adapter->init_wait_q_woken);
1038 dev_dbg(adapter->dev, "cmd: mwifiex_shutdown_drv done\n");
1039 if (atomic_read(&adapter->rx_pending) ||
1040 atomic_read(&adapter->tx_pending) ||
1041 atomic_read(&adapter->ioctl_pending)) {
1042 dev_err(adapter->dev, "rx_pending=%d, tx_pending=%d, "
1043 "ioctl_pending=%d\n",
1044 atomic_read(&adapter->rx_pending),
1045 atomic_read(&adapter->tx_pending),
1046 atomic_read(&adapter->ioctl_pending));
1047 }
1048
1049 /* Remove interface */
1050 for (i = 0; i < adapter->priv_num; i++)
1051 mwifiex_remove_interface(adapter, i);
1052
1053 mwifiex_terminate_workqueue(adapter);
1054
1055 /* Unregister device */
1056 dev_dbg(adapter->dev, "info: unregister device\n");
1057 adapter->if_ops.unregister_dev(adapter);
1058 /* Free adapter structure */
1059 dev_dbg(adapter->dev, "info: free adapter\n");
1060 mwifiex_free_adapter(adapter);
1061
1062exit_remove:
1063 up(sem);
1064exit_sem_err:
1065 return 0;
1066}
1067EXPORT_SYMBOL_GPL(mwifiex_remove_card);
1068
1069/*
1070 * This function initializes the module.
1071 *
1072 * The debug FS is also initialized if configured.
1073 */
1074static int
1075mwifiex_init_module(void)
1076{
1077#ifdef CONFIG_DEBUG_FS
1078 mwifiex_debugfs_init();
1079#endif
1080 return 0;
1081}
1082
1083/*
1084 * This function cleans up the module.
1085 *
1086 * The debug FS is removed if available.
1087 */
1088static void
1089mwifiex_cleanup_module(void)
1090{
1091#ifdef CONFIG_DEBUG_FS
1092 mwifiex_debugfs_remove();
1093#endif
1094}
1095
1096module_init(mwifiex_init_module);
1097module_exit(mwifiex_cleanup_module);
1098
1099MODULE_AUTHOR("Marvell International Ltd.");
1100MODULE_DESCRIPTION("Marvell WiFi-Ex Driver version " VERSION);
1101MODULE_VERSION(VERSION);
1102MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
new file mode 100644
index 00000000000..2b0ad8e3d6e
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -0,0 +1,1081 @@
1/*
2 * Marvell Wireless LAN device driver: major data structures and prototypes
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_MAIN_H_
21#define _MWIFIEX_MAIN_H_
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/sched.h>
26#include <linux/semaphore.h>
27#include <linux/ip.h>
28#include <linux/skbuff.h>
29#include <linux/if_arp.h>
30#include <linux/etherdevice.h>
31#include <net/sock.h>
32#include <net/lib80211.h>
33#include <linux/firmware.h>
34#include <linux/ctype.h>
35
36#include "decl.h"
37#include "ioctl.h"
38#include "util.h"
39#include "fw.h"
40
41extern const char driver_version[];
42extern struct mwifiex_adapter *g_adapter;
43
44enum {
45 MWIFIEX_NO_WAIT,
46 MWIFIEX_IOCTL_WAIT,
47 MWIFIEX_CMD_WAIT,
48 MWIFIEX_PROC_WAIT,
49 MWIFIEX_WSTATS_WAIT
50};
51
52#define DRV_MODE_STA 0x1
53#define DRV_MODE_UAP 0x2
54#define DRV_MODE_UAP_STA 0x3
55
56#define SD8787_W0 0x30
57#define SD8787_W1 0x31
58#define SD8787_A0 0x40
59#define SD8787_A1 0x41
60
61#define DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
62#define SD8787_W1_FW_NAME "mrvl/sd8787_uapsta_w1.bin"
63#define SD8787_AX_FW_NAME "mrvl/sd8787_uapsta.bin"
64
65struct mwifiex_drv_mode {
66 u16 drv_mode;
67 u16 intf_num;
68 struct mwifiex_bss_attr *bss_attr;
69};
70
71
72#define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ)
73
74#define MWIFIEX_TIMER_10S 10000
75#define MWIFIEX_TIMER_1S 1000
76
77#define NL_MAX_PAYLOAD 1024
78#define NL_MULTICAST_GROUP 1
79
80#define MAX_TX_PENDING 60
81
82#define HEADER_ALIGNMENT 8
83
84#define MWIFIEX_UPLD_SIZE (2312)
85
86#define MAX_EVENT_SIZE 1024
87
88#define ARP_FILTER_MAX_BUF_SIZE 68
89
90#define MWIFIEX_KEY_BUFFER_SIZE 16
91#define MWIFIEX_DEFAULT_LISTEN_INTERVAL 10
92#define MWIFIEX_MAX_REGION_CODE 7
93
94#define DEFAULT_BCN_AVG_FACTOR 8
95#define DEFAULT_DATA_AVG_FACTOR 8
96
97#define FIRST_VALID_CHANNEL 0xff
98#define DEFAULT_AD_HOC_CHANNEL 6
99#define DEFAULT_AD_HOC_CHANNEL_A 36
100
101#define DEFAULT_BCN_MISS_TIMEOUT 5
102
103#define MAX_SCAN_BEACON_BUFFER 8000
104
105#define SCAN_BEACON_ENTRY_PAD 6
106
107#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 200
108#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 200
109#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 110
110
111#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
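/*
 * Worked example (illustration only): a firmware RSSI byte of 0xA5
 * (-91 as a signed 8-bit value) gives SCAN_RSSI(0xA5) =
 * 0x100 - 0xA5 = 0x5B = 91, i.e. the signal strength magnitude in dBm.
 */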
112
113#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
114
115#define RSN_GTK_OUI_OFFSET 2
116
117#define MWIFIEX_OUI_NOT_PRESENT 0
118#define MWIFIEX_OUI_PRESENT 1
119
120#define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \
121 adapter->event_received || \
122 adapter->data_received)
123
124#define MWIFIEX_TYPE_CMD 1
125#define MWIFIEX_TYPE_DATA 0
126#define MWIFIEX_TYPE_EVENT 3
127
128#define DBG_CMD_NUM 5
129
130#define MAX_BITMAP_RATES_SIZE 10
131
132#define MAX_CHANNEL_BAND_BG 14
133
134#define MAX_FREQUENCY_BAND_BG 2484
135
136struct mwifiex_dbg {
137 u32 num_cmd_host_to_card_failure;
138 u32 num_cmd_sleep_cfm_host_to_card_failure;
139 u32 num_tx_host_to_card_failure;
140 u32 num_event_deauth;
141 u32 num_event_disassoc;
142 u32 num_event_link_lost;
143 u32 num_cmd_deauth;
144 u32 num_cmd_assoc_success;
145 u32 num_cmd_assoc_failure;
146 u32 num_tx_timeout;
147 u32 num_cmd_timeout;
148 u16 timeout_cmd_id;
149 u16 timeout_cmd_act;
150 u16 last_cmd_id[DBG_CMD_NUM];
151 u16 last_cmd_act[DBG_CMD_NUM];
152 u16 last_cmd_index;
153 u16 last_cmd_resp_id[DBG_CMD_NUM];
154 u16 last_cmd_resp_index;
155 u16 last_event[DBG_CMD_NUM];
156 u16 last_event_index;
157};
158
159enum MWIFIEX_HARDWARE_STATUS {
160 MWIFIEX_HW_STATUS_READY,
161 MWIFIEX_HW_STATUS_INITIALIZING,
162 MWIFIEX_HW_STATUS_FW_READY,
163 MWIFIEX_HW_STATUS_INIT_DONE,
164 MWIFIEX_HW_STATUS_RESET,
165 MWIFIEX_HW_STATUS_CLOSING,
166 MWIFIEX_HW_STATUS_NOT_READY
167};
168
169enum MWIFIEX_802_11_POWER_MODE {
170 MWIFIEX_802_11_POWER_MODE_CAM,
171 MWIFIEX_802_11_POWER_MODE_PSP
172};
173
174struct mwifiex_tx_param {
175 u32 next_pkt_len;
176};
177
178enum MWIFIEX_PS_STATE {
179 PS_STATE_AWAKE,
180 PS_STATE_PRE_SLEEP,
181 PS_STATE_SLEEP_CFM,
182 PS_STATE_SLEEP
183};
184
185struct mwifiex_add_ba_param {
186 u32 tx_win_size;
187 u32 rx_win_size;
188 u32 timeout;
189};
190
191struct mwifiex_tx_aggr {
192 u8 ampdu_user;
193 u8 ampdu_ap;
194 u8 amsdu;
195};
196
197struct mwifiex_ra_list_tbl {
198 struct list_head list;
199 struct sk_buff_head skb_head;
200 u8 ra[ETH_ALEN];
201 u32 total_pkts_size;
202 u32 is_11n_enabled;
203};
204
205struct mwifiex_tid_tbl {
206 struct list_head ra_list;
207 /* spin lock for tid table */
208 spinlock_t tid_tbl_lock;
209 struct mwifiex_ra_list_tbl *ra_list_curr;
210};
211
212#define WMM_HIGHEST_PRIORITY 7
213#define HIGH_PRIO_TID 7
214#define LOW_PRIO_TID 0
215
216struct mwifiex_wmm_desc {
217 struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
218 u32 packets_out[MAX_NUM_TID];
219 /* spin lock to protect ra_list */
220 spinlock_t ra_list_spinlock;
221 struct mwifiex_wmm_ac_status ac_status[IEEE80211_MAX_QUEUES];
222 enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_MAX_QUEUES];
223 u32 drv_pkt_delay_max;
224 u8 queue_priority[IEEE80211_MAX_QUEUES];
225 u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */
226
227};
228
229struct mwifiex_802_11_security {
230 u8 wpa_enabled;
231 u8 wpa2_enabled;
232 u8 wapi_enabled;
233 u8 wapi_key_on;
234 enum MWIFIEX_802_11_WEP_STATUS wep_status;
235 u32 authentication_mode;
236 u32 encryption_mode;
237};
238
239struct ieee_types_header {
240 u8 element_id;
241 u8 len;
242} __packed;
243
244struct ieee_obss_scan_param {
245 u16 obss_scan_passive_dwell;
246 u16 obss_scan_active_dwell;
247 u16 bss_chan_width_trigger_scan_int;
248 u16 obss_scan_passive_total;
249 u16 obss_scan_active_total;
250 u16 bss_width_chan_trans_delay;
251 u16 obss_scan_active_threshold;
252} __packed;
253
254struct ieee_types_obss_scan_param {
255 struct ieee_types_header ieee_hdr;
256 struct ieee_obss_scan_param obss_scan;
257} __packed;
258
259#define MWIFIEX_SUPPORTED_RATES 14
260
261#define MWIFIEX_SUPPORTED_RATES_EXT 32
262
263#define IEEE_MAX_IE_SIZE 256
264
265struct ieee_types_vendor_specific {
266 struct ieee_types_vendor_header vend_hdr;
267 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)];
268} __packed;
269
270struct ieee_types_generic {
271 struct ieee_types_header ieee_hdr;
272 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)];
273} __packed;
274
275struct mwifiex_bssdescriptor {
276 u8 mac_address[ETH_ALEN];
277 struct mwifiex_802_11_ssid ssid;
278 u32 privacy;
279 s32 rssi;
280 u32 channel;
281 u32 freq;
282 u16 beacon_period;
283 u8 erp_flags;
284 u32 bss_mode;
285 u8 supported_rates[MWIFIEX_SUPPORTED_RATES];
286 u8 data_rates[MWIFIEX_SUPPORTED_RATES];
287 /* Network band.
288 * BAND_B(0x01): 'b' band
289 * BAND_G(0x02): 'g' band
290 * BAND_A(0X04): 'a' band
291 */
292 u16 bss_band;
293 long long network_tsf;
294 u8 time_stamp[8];
295 union ieee_types_phy_param_set phy_param_set;
296 union ieee_types_ss_param_set ss_param_set;
297 u16 cap_info_bitmap;
298 struct ieee_types_wmm_parameter wmm_ie;
299 u8 disable_11n;
300 struct ieee80211_ht_cap *bcn_ht_cap;
301 u16 ht_cap_offset;
302 struct ieee80211_ht_info *bcn_ht_info;
303 u16 ht_info_offset;
304 u8 *bcn_bss_co_2040;
305 u16 bss_co_2040_offset;
306 u8 *bcn_ext_cap;
307 u16 ext_cap_offset;
308 struct ieee_types_obss_scan_param *bcn_obss_scan;
309 u16 overlap_bss_offset;
310 struct ieee_types_vendor_specific *bcn_wpa_ie;
311 u16 wpa_offset;
312 struct ieee_types_generic *bcn_rsn_ie;
313 u16 rsn_offset;
314 struct ieee_types_generic *bcn_wapi_ie;
315 u16 wapi_offset;
316 u8 *beacon_buf;
317 u32 beacon_buf_size;
318 u32 beacon_buf_size_max;
319
320};
321
322struct mwifiex_current_bss_params {
323 struct mwifiex_bssdescriptor bss_descriptor;
324 u8 wmm_enabled;
325 u8 wmm_uapsd_enabled;
326 u8 band;
327 u32 num_of_rates;
328 u8 data_rates[MWIFIEX_SUPPORTED_RATES];
329};
330
331struct mwifiex_sleep_params {
332 u16 sp_error;
333 u16 sp_offset;
334 u16 sp_stable_time;
335 u8 sp_cal_control;
336 u8 sp_ext_sleep_clk;
337 u16 sp_reserved;
338};
339
340struct mwifiex_sleep_period {
341 u16 period;
342 u16 reserved;
343};
344
345struct mwifiex_wep_key {
346 u32 length;
347 u32 key_index;
348 u32 key_length;
349 u8 key_material[MWIFIEX_KEY_BUFFER_SIZE];
350};
351
352#define MAX_REGION_CHANNEL_NUM 2
353
354struct mwifiex_chan_freq_power {
355 u16 channel;
356 u32 freq;
357 u16 max_tx_power;
358 u8 unsupported;
359};
360
361enum state_11d_t {
362 DISABLE_11D = 0,
363 ENABLE_11D = 1,
364};
365
366#define MWIFIEX_MAX_TRIPLET_802_11D 83
367
368struct mwifiex_802_11d_domain_reg {
369 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
370 u8 no_of_triplet;
371 struct ieee80211_country_ie_triplet
372 triplet[MWIFIEX_MAX_TRIPLET_802_11D];
373};
374
375struct mwifiex_vendor_spec_cfg_ie {
376 u16 mask;
377 u16 flag;
378 u8 ie[MWIFIEX_MAX_VSIE_LEN];
379};
380
381struct wps {
382 u8 session_enable;
383};
384
385struct mwifiex_adapter;
386struct mwifiex_private;
387
388struct mwifiex_private {
389 struct mwifiex_adapter *adapter;
390 u8 bss_index;
391 u8 bss_type;
392 u8 bss_role;
393 u8 bss_priority;
394 u8 bss_num;
395 u8 frame_type;
396 u8 curr_addr[ETH_ALEN];
397 u8 media_connected;
398 u32 num_tx_timeout;
399 struct net_device *netdev;
400 struct net_device_stats stats;
401 u16 curr_pkt_filter;
402 u32 bss_mode;
403 u32 pkt_tx_ctrl;
404 u16 tx_power_level;
405 u8 max_tx_power_level;
406 u8 min_tx_power_level;
407 u8 tx_rate;
408 u8 tx_htinfo;
409 u8 rxpd_htinfo;
410 u8 rxpd_rate;
411 u16 rate_bitmap;
412 u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
413 u32 data_rate;
414 u8 is_data_rate_auto;
415 u16 bcn_avg_factor;
416 u16 data_avg_factor;
417 s16 data_rssi_last;
418 s16 data_nf_last;
419 s16 data_rssi_avg;
420 s16 data_nf_avg;
421 s16 bcn_rssi_last;
422 s16 bcn_nf_last;
423 s16 bcn_rssi_avg;
424 s16 bcn_nf_avg;
425 struct mwifiex_bssdescriptor *attempted_bss_desc;
426 struct mwifiex_802_11_ssid prev_ssid;
427 u8 prev_bssid[ETH_ALEN];
428 struct mwifiex_current_bss_params curr_bss_params;
429 u16 beacon_period;
430 u16 listen_interval;
431 u16 atim_window;
432 u8 adhoc_channel;
433 u8 adhoc_is_link_sensed;
434 u8 adhoc_state;
435 struct mwifiex_802_11_security sec_info;
436 struct mwifiex_wep_key wep_key[NUM_WEP_KEYS];
437 u16 wep_key_curr_index;
438 u8 wpa_ie[256];
439 u8 wpa_ie_len;
440 u8 wpa_is_gtk_set;
441 struct host_cmd_ds_802_11_key_material aes_key;
442 u8 wapi_ie[256];
443 u8 wapi_ie_len;
444 u8 wmm_required;
445 u8 wmm_enabled;
446 u8 wmm_qosinfo;
447 struct mwifiex_wmm_desc wmm;
448 struct list_head tx_ba_stream_tbl_ptr;
449 /* spin lock for tx_ba_stream_tbl_ptr queue */
450 spinlock_t tx_ba_stream_tbl_lock;
451 struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID];
452 struct mwifiex_add_ba_param add_ba_param;
453 u16 rx_seq[MAX_NUM_TID];
454 struct list_head rx_reorder_tbl_ptr;
455 /* spin lock for rx_reorder_tbl_ptr queue */
456 spinlock_t rx_reorder_tbl_lock;
457 /* spin lock for Rx packets */
458 spinlock_t rx_pkt_lock;
459
460#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
461 u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
462 u32 assoc_rsp_size;
463
464#define MWIFIEX_GENIE_BUF_SIZE 256
465 u8 gen_ie_buf[MWIFIEX_GENIE_BUF_SIZE];
466 u8 gen_ie_buf_len;
467
468 struct mwifiex_vendor_spec_cfg_ie vs_ie[MWIFIEX_MAX_VSIE_NUM];
469
470#define MWIFIEX_ASSOC_TLV_BUF_SIZE 256
471 u8 assoc_tlv_buf[MWIFIEX_ASSOC_TLV_BUF_SIZE];
472 u8 assoc_tlv_buf_len;
473
474 u8 *curr_bcn_buf;
475 u32 curr_bcn_size;
476 /* spin lock for beacon buffer */
477 spinlock_t curr_bcn_buf_lock;
478 u16 ioctl_wait_q_woken;
479 wait_queue_head_t ioctl_wait_q;
480 u16 cmd_wait_q_woken;
481 wait_queue_head_t cmd_wait_q;
482 struct wireless_dev *wdev;
483 struct mwifiex_chan_freq_power cfp;
484 char version_str[128];
485#ifdef CONFIG_DEBUG_FS
486 struct dentry *dfs_dev_dir;
487#endif
488 u8 nick_name[16];
489 struct iw_statistics w_stats;
490 u16 w_stats_wait_q_woken;
491 wait_queue_head_t w_stats_wait_q;
492 u16 current_key_index;
493 struct semaphore async_sem;
494 u8 scan_pending_on_block;
495 u8 report_scan_result;
496 struct cfg80211_scan_request *scan_request;
497 int scan_result_status;
498 bool assoc_request;
499 u16 assoc_result;
500 bool ibss_join_request;
501 u16 ibss_join_result;
502 bool disconnect;
503 u8 cfg_bssid[6];
504 struct workqueue_struct *workqueue;
505 struct work_struct cfg_workqueue;
506 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
507 struct wps wps;
508 u8 scan_block;
509};
510
511enum mwifiex_ba_status {
512 BA_STREAM_NOT_SETUP = 0,
513 BA_STREAM_SETUP_INPROGRESS,
514 BA_STREAM_SETUP_COMPLETE
515};
516
517struct mwifiex_tx_ba_stream_tbl {
518 struct list_head list;
519 int tid;
520 u8 ra[ETH_ALEN];
521 enum mwifiex_ba_status ba_status;
522};
523
524struct mwifiex_rx_reorder_tbl;
525
526struct reorder_tmr_cnxt {
527 struct timer_list timer;
528 struct mwifiex_rx_reorder_tbl *ptr;
529 struct mwifiex_private *priv;
530};
531
532struct mwifiex_rx_reorder_tbl {
533 struct list_head list;
534 int tid;
535 u8 ta[ETH_ALEN];
536 int start_win;
537 int win_size;
538 void **rx_reorder_ptr;
539 struct reorder_tmr_cnxt timer_context;
540};
541
542struct mwifiex_bss_prio_node {
543 struct list_head list;
544 struct mwifiex_private *priv;
545};
546
547struct mwifiex_bss_prio_tbl {
548 struct list_head bss_prio_head;
549 /* spin lock for bss priority */
550 spinlock_t bss_prio_lock;
551 struct mwifiex_bss_prio_node *bss_prio_cur;
552};
553
554struct cmd_ctrl_node {
555 struct list_head list;
556 struct mwifiex_private *priv;
557 u32 cmd_oid;
558 u32 cmd_flag;
559 struct sk_buff *cmd_skb;
560 struct sk_buff *resp_skb;
561 void *data_buf;
562 void *wq_buf;
563 struct sk_buff *skb;
564};
565
566struct mwifiex_if_ops {
567 int (*init_if) (struct mwifiex_adapter *);
568 void (*cleanup_if) (struct mwifiex_adapter *);
569 int (*check_fw_status) (struct mwifiex_adapter *, u32, int *);
570 int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
571 int (*register_dev) (struct mwifiex_adapter *);
572 void (*unregister_dev) (struct mwifiex_adapter *);
573 int (*enable_int) (struct mwifiex_adapter *);
574 int (*process_int_status) (struct mwifiex_adapter *);
575 int (*host_to_card) (struct mwifiex_adapter *, u8,
576 u8 *payload, u32 pkt_len,
577 struct mwifiex_tx_param *);
578 int (*wakeup) (struct mwifiex_adapter *);
579 int (*wakeup_complete) (struct mwifiex_adapter *);
580
581 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
582 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
583};
584
585struct mwifiex_adapter {
586 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
587 u8 priv_num;
588 struct mwifiex_drv_mode *drv_mode;
589 const struct firmware *firmware;
590 struct device *dev;
591 bool surprise_removed;
592 u32 fw_release_number;
593 u32 revision_id;
594 u16 init_wait_q_woken;
595 wait_queue_head_t init_wait_q;
596 void *card;
597 struct mwifiex_if_ops if_ops;
598 atomic_t rx_pending;
599 atomic_t tx_pending;
600 atomic_t ioctl_pending;
601 struct workqueue_struct *workqueue;
602 struct work_struct main_work;
603 struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM];
604 /* spin lock for init/shutdown */
605 spinlock_t mwifiex_lock;
606 /* spin lock for main process */
607 spinlock_t main_proc_lock;
608 u32 mwifiex_processing;
609 u16 max_tx_buf_size;
610 u16 tx_buf_size;
611 u16 curr_tx_buf_size;
612 u32 ioport;
613 enum MWIFIEX_HARDWARE_STATUS hw_status;
614 u16 radio_on;
615 u16 number_of_antenna;
616 u32 fw_cap_info;
617 /* spin lock for interrupt handling */
618 spinlock_t int_lock;
619 u8 int_status;
620 u32 event_cause;
621 struct sk_buff *event_skb;
622 u8 upld_buf[MWIFIEX_UPLD_SIZE];
623 u8 data_sent;
624 u8 cmd_sent;
625 u8 cmd_resp_received;
626 u8 event_received;
627 u8 data_received;
628 u16 seq_num;
629 struct cmd_ctrl_node *cmd_pool;
630 struct cmd_ctrl_node *curr_cmd;
631 /* spin lock for command */
632 spinlock_t mwifiex_cmd_lock;
633 u32 num_cmd_timeout;
634 u16 last_init_cmd;
635 struct timer_list cmd_timer;
636 struct list_head cmd_free_q;
637 /* spin lock for cmd_free_q */
638 spinlock_t cmd_free_q_lock;
639 struct list_head cmd_pending_q;
640 /* spin lock for cmd_pending_q */
641 spinlock_t cmd_pending_q_lock;
642 struct list_head scan_pending_q;
643 /* spin lock for scan_pending_q */
644 spinlock_t scan_pending_q_lock;
645 u32 scan_processing;
646 u16 region_code;
647 struct mwifiex_802_11d_domain_reg domain_reg;
648 struct mwifiex_bssdescriptor *scan_table;
649 u32 num_in_scan_table;
650 u16 scan_probes;
651 u32 scan_mode;
652 u16 specific_scan_time;
653 u16 active_scan_time;
654 u16 passive_scan_time;
655 u8 bcn_buf[MAX_SCAN_BEACON_BUFFER];
656 u8 *bcn_buf_end;
657 u8 fw_bands;
658 u8 adhoc_start_band;
659 u8 config_bands;
660 struct mwifiex_chan_scan_param_set *scan_channels;
661 u8 tx_lock_flag;
662 struct mwifiex_sleep_params sleep_params;
663 struct mwifiex_sleep_period sleep_period;
664 u16 ps_mode;
665 u32 ps_state;
666 u8 need_to_wakeup;
667 u16 multiple_dtim;
668 u16 local_listen_interval;
669 u16 null_pkt_interval;
670 struct sk_buff *sleep_cfm;
671 u16 bcn_miss_time_out;
672 u16 adhoc_awake_period;
673 u8 is_deep_sleep;
674 u8 delay_null_pkt;
675 u16 delay_to_ps;
676 u16 enhanced_ps_mode;
677 u8 pm_wakeup_card_req;
678 u16 gen_null_pkt;
679 u16 pps_uapsd_mode;
680 u32 pm_wakeup_fw_try;
681 u8 is_hs_configured;
682 struct mwifiex_hs_config_param hs_cfg;
683 u8 hs_activated;
684 u16 hs_activate_wait_q_woken;
685 wait_queue_head_t hs_activate_wait_q;
686 bool is_suspended;
687 u8 event_body[MAX_EVENT_SIZE];
688 u32 hw_dot_11n_dev_cap;
689 u8 hw_dev_mcs_support;
690 u32 usr_dot_11n_dev_cap;
691 u8 usr_dev_mcs_support;
692 u8 adhoc_11n_enabled;
693 u8 chan_offset;
694 struct mwifiex_dbg dbg;
695 u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
696 u32 arp_filter_size;
697};
698
699int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
700void mwifiex_free_lock_list(struct mwifiex_adapter *adapter);
701
702int mwifiex_init_fw(struct mwifiex_adapter *adapter);
703
704int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
705
706int mwifiex_shutdown_drv(struct mwifiex_adapter *adapter);
707
708int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
709
710int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
711
712int mwifiex_recv_complete(struct mwifiex_adapter *,
713 struct sk_buff *skb,
714 int status);
715
716int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
717
718int mwifiex_process_event(struct mwifiex_adapter *adapter);
719
720int mwifiex_ioctl_complete(struct mwifiex_adapter *adapter,
721 struct mwifiex_wait_queue *ioctl_wq,
722 int status);
723
724int mwifiex_prepare_cmd(struct mwifiex_private *priv,
725 uint16_t cmd_no,
726 u16 cmd_action,
727 u32 cmd_oid,
728 void *wait_queue, void *data_buf);
729
730void mwifiex_cmd_timeout_func(unsigned long function_context);
731
732int mwifiex_misc_ioctl_init_shutdown(struct mwifiex_adapter *adapter,
733 struct mwifiex_wait_queue *wait_queue,
734 u32 func_init_shutdown);
735int mwifiex_get_debug_info(struct mwifiex_private *,
736 struct mwifiex_debug_info *);
737
738int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter);
739int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
740void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter);
741void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter,
742 struct mwifiex_wait_queue *ioctl_wq);
743
744void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
745 struct cmd_ctrl_node *cmd_node);
746
747void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
748 struct cmd_ctrl_node *cmd_node,
749 u32 addtail);
750
751int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter);
752int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter);
753int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
754 struct sk_buff *skb);
755int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
756 struct mwifiex_tx_param *tx_param);
757int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags);
758int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
759 struct sk_buff *skb, int status);
760int mwifiex_recv_packet_complete(struct mwifiex_adapter *,
761 struct sk_buff *skb, int status);
762void mwifiex_clean_txrx(struct mwifiex_private *priv);
763u8 mwifiex_check_last_packet_indication(struct mwifiex_private *priv);
764void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter);
765void mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *, u8 *,
766 u32);
767int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
768 struct host_cmd_ds_command *cmd,
769 u16 cmd_action, uint16_t ps_bitmap,
770 void *data_buf);
771int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
772 struct host_cmd_ds_command *resp,
773 void *data_buf);
774void mwifiex_process_hs_config(struct mwifiex_adapter *adapter);
775void mwifiex_hs_activated_event(struct mwifiex_private *priv,
776 u8 activated);
777int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
778 struct host_cmd_ds_command *resp);
779int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
780 struct sk_buff *skb);
781int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no,
782 u16 cmd_action, u32 cmd_oid,
783 void *data_buf, void *cmd_buf);
784int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
785 void *cmd_buf, void *ioctl);
786int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
787 struct sk_buff *skb);
788int mwifiex_process_sta_event(struct mwifiex_private *);
789void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
790int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
791int mwifiex_scan_networks(struct mwifiex_private *priv, void *wait_queue,
792 u16 action,
793 const struct mwifiex_user_scan_cfg
794 *user_scan_in, struct mwifiex_scan_resp *);
795int mwifiex_cmd_802_11_scan(struct mwifiex_private *priv,
796 struct host_cmd_ds_command *cmd,
797 void *data_buf);
798void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
799 struct cmd_ctrl_node *cmd_node);
800int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
801 struct host_cmd_ds_command *resp,
802 void *wait_queue);
803s32 mwifiex_find_ssid_in_list(struct mwifiex_private *priv,
804 struct mwifiex_802_11_ssid *ssid, u8 *bssid,
805 u32 mode);
806s32 mwifiex_find_bssid_in_list(struct mwifiex_private *priv, u8 *bssid,
807 u32 mode);
808int mwifiex_find_best_network(struct mwifiex_private *priv,
809 struct mwifiex_ssid_bssid *req_ssid_bssid);
810s32 mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
811 struct mwifiex_802_11_ssid *ssid2);
812int mwifiex_associate(struct mwifiex_private *priv, void *wait_queue,
813 struct mwifiex_bssdescriptor *bss_desc);
814int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
815 struct host_cmd_ds_command
816 *cmd, void *data_buf);
817int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
818 struct host_cmd_ds_command *resp,
819 void *wait_queue);
820void mwifiex_reset_connect_state(struct mwifiex_private *priv);
821void mwifiex_2040_coex_event(struct mwifiex_private *priv);
822u8 mwifiex_band_to_radio_type(u8 band);
823int mwifiex_deauthenticate(struct mwifiex_private *priv,
824 struct mwifiex_wait_queue *wait_queue,
825 u8 *mac);
826int mwifiex_adhoc_start(struct mwifiex_private *priv, void *wait_queue,
827 struct mwifiex_802_11_ssid *adhoc_ssid);
828int mwifiex_adhoc_join(struct mwifiex_private *priv, void *wait_queue,
829 struct mwifiex_bssdescriptor *bss_desc);
830int mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
831 struct host_cmd_ds_command *cmd,
832 void *data_buf);
833int mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
834 struct host_cmd_ds_command *cmd,
835 void *data_buf);
836int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
837 struct host_cmd_ds_command *resp,
838 void *wait_queue);
839int mwifiex_cmd_802_11_bg_scan_query(struct mwifiex_private *priv,
840 struct host_cmd_ds_command *cmd,
841 void *data_buf);
842struct mwifiex_chan_freq_power *
843 mwifiex_get_cfp_by_band_and_channel_from_cfg80211(
844 struct mwifiex_private *priv,
845 u8 band, u16 channel);
846struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
847 struct mwifiex_private *priv,
848 u8 band, u32 freq);
849u32 mwifiex_index_to_data_rate(struct mwifiex_adapter *adapter, u8 index,
850 u8 ht_info);
851u32 mwifiex_find_freq_from_band_chan(u8, u8);
852int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
853 u8 **buffer);
856u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv,
857 u8 *rates);
858u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates);
859u8 mwifiex_data_rate_to_index(struct mwifiex_adapter *adapter, u32 rate);
860u8 mwifiex_is_rate_auto(struct mwifiex_private *priv);
861int mwifiex_get_rate_index(struct mwifiex_adapter *adapter,
862 u16 *rate_bitmap, int size);
863extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE];
864void mwifiex_save_curr_bcn(struct mwifiex_private *priv);
865void mwifiex_free_curr_bcn(struct mwifiex_private *priv);
866int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv,
867 struct host_cmd_ds_command *cmd);
868int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
869 struct host_cmd_ds_command *resp);
870int is_command_pending(struct mwifiex_adapter *adapter);
871
872/*
873 * This function checks if the queuing is RA based or not.
874 */
875static inline u8
876mwifiex_queuing_ra_based(struct mwifiex_private *priv)
877{
878 /*
879 * Currently we assume if we are in Infra, then DA=RA. This might not be
880 * true in the future.
881 */
882 if ((priv->bss_mode == MWIFIEX_BSS_MODE_INFRA) &&
883 (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA))
884 return false;
885
886 return true;
887}
888
889/*
890 * This function copies rates, capped at MWIFIEX_SUPPORTED_RATES entries.
891 */
892static inline u32
893mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
894{
895 int i;
896
897 for (i = 0; i < len && src[i]; i++, pos++) {
898 if (pos >= MWIFIEX_SUPPORTED_RATES)
899 break;
900 dest[pos] = src[i];
901 }
902
903 return pos;
904}
905
906/*
907 * This function returns the correct private structure pointer based
908 * upon the BSS type and BSS number.
909 */
910static inline struct mwifiex_private *
911mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
912 u32 bss_num, u32 bss_type)
913{
914 int i;
915
916 for (i = 0; i < adapter->priv_num; i++) {
917 if (adapter->priv[i]) {
918 if ((adapter->priv[i]->bss_num == bss_num)
919 && (adapter->priv[i]->bss_type == bss_type))
920 break;
921 }
922 }
923 return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
924}
925
926/*
927 * This function returns the first available private structure pointer
928 * based upon the BSS role.
929 */
930static inline struct mwifiex_private *
931mwifiex_get_priv(struct mwifiex_adapter *adapter,
932 enum mwifiex_bss_role bss_role)
933{
934 int i;
935
936 for (i = 0; i < adapter->priv_num; i++) {
937 if (adapter->priv[i]) {
938 if (bss_role == MWIFIEX_BSS_ROLE_ANY ||
939 GET_BSS_ROLE(adapter->priv[i]) == bss_role)
940 break;
941 }
942 }
943
944 return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
945}
946
947/*
948 * This function returns the driver private structure of a network device.
949 */
950static inline struct mwifiex_private *
951mwifiex_netdev_get_priv(struct net_device *dev)
952{
953 return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev));
954}
955
956struct mwifiex_wait_queue *mwifiex_alloc_fill_wait_queue(
957 struct mwifiex_private *,
958 u8 wait_option);
959struct mwifiex_private *mwifiex_bss_index_to_priv(struct mwifiex_adapter
960 *adapter, u8 bss_index);
961int mwifiex_shutdown_fw(struct mwifiex_private *, u8);
962
963int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *);
964int mwifiex_remove_card(struct mwifiex_adapter *, struct semaphore *);
965
966void mwifiex_get_version(struct mwifiex_adapter *adapter, char *version,
967 int maxlen);
968int mwifiex_request_set_mac_address(struct mwifiex_private *priv);
969void mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
970 struct net_device *dev);
971int mwifiex_request_ioctl(struct mwifiex_private *priv,
972 struct mwifiex_wait_queue *req,
973 int, u8 wait_option);
974int mwifiex_disconnect(struct mwifiex_private *, u8, u8 *);
975int mwifiex_bss_start(struct mwifiex_private *priv,
976 u8 wait_option,
977 struct mwifiex_ssid_bssid *ssid_bssid);
978int mwifiex_set_hs_params(struct mwifiex_private *priv,
979 u16 action, u8 wait_option,
980 struct mwifiex_ds_hs_cfg *hscfg);
981int mwifiex_cancel_hs(struct mwifiex_private *priv, u8 wait_option);
982int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
983void mwifiex_process_ioctl_resp(struct mwifiex_private *priv,
984 struct mwifiex_wait_queue *req);
985u32 mwifiex_get_mode(struct mwifiex_private *priv, u8 wait_option);
986int mwifiex_get_signal_info(struct mwifiex_private *priv,
987 u8 wait_option,
988 struct mwifiex_ds_get_signal *signal);
989int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
990 struct mwifiex_rate_cfg *rate);
991int mwifiex_get_channel_list(struct mwifiex_private *priv,
992 u8 wait_option,
993 struct mwifiex_chan_list *chanlist);
994int mwifiex_get_scan_table(struct mwifiex_private *priv,
995 u8 wait_option,
996 struct mwifiex_scan_resp *scanresp);
997int mwifiex_get_auth_mode(struct mwifiex_private *priv,
998 u8 wait_option, u32 *auth_mode);
999int mwifiex_get_encrypt_mode(struct mwifiex_private *priv,
1000 u8 wait_option,
1001 u32 *encrypt_mode);
1002int mwifiex_enable_wep_key(struct mwifiex_private *priv, u8 wait_option);
1003int mwifiex_find_best_bss(struct mwifiex_private *priv, u8 wait_option,
1004 struct mwifiex_ssid_bssid *ssid_bssid);
1005int mwifiex_request_scan(struct mwifiex_private *priv,
1006 u8 wait_option,
1007 struct mwifiex_802_11_ssid *req_ssid);
1008int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
1009 struct mwifiex_user_scan_cfg *scan_req);
1010int mwifiex_change_adhoc_chan(struct mwifiex_private *priv, int channel);
1011int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
1012
1013int mwifiex_drv_get_mode(struct mwifiex_private *priv, u8 wait_option);
1014
1015int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel);
1016
1017int mwifiex_set_auth(struct mwifiex_private *priv, int encrypt_mode,
1018 int auth_mode, int wpa_enabled);
1019
1020int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
1021 int key_len, u8 key_index, int disable);
1022
1023int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
1024
1025int mwifiex_get_ver_ext(struct mwifiex_private *priv);
1026
1027int mwifiex_get_stats_info(struct mwifiex_private *priv,
1028 struct mwifiex_ds_get_stats *log);
1029
1030int mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type,
1031 u32 reg_offset, u32 reg_value);
1032
1033int mwifiex_reg_read(struct mwifiex_private *priv, u32 reg_type,
1034 u32 reg_offset, u32 *value);
1035
1036int mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
1037 u8 *value);
1038
1039int mwifiex_set_11n_httx_cfg(struct mwifiex_private *priv, int data);
1040
1041int mwifiex_get_11n_httx_cfg(struct mwifiex_private *priv, int *data);
1042
1043int mwifiex_set_tx_rate_cfg(struct mwifiex_private *priv, int tx_rate_index);
1044
1045int mwifiex_get_tx_rate_cfg(struct mwifiex_private *priv, int *tx_rate_index);
1046
1047int mwifiex_drv_set_power(struct mwifiex_private *priv, bool power_on);
1048
1049int mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter,
1050 char *version, int max_len);
1051
1052int mwifiex_set_tx_power(struct mwifiex_private *priv, int type, int dbm);
1053
1054int mwifiex_main_process(struct mwifiex_adapter *);
1055
1056int mwifiex_bss_ioctl_mode(struct mwifiex_private *,
1057 struct mwifiex_wait_queue *,
1058 u16 action, int *mode);
1059int mwifiex_bss_ioctl_channel(struct mwifiex_private *,
1060 u16 action,
1061 struct mwifiex_chan_freq_power *cfp);
1062int mwifiex_bss_ioctl_find_bss(struct mwifiex_private *,
1063 struct mwifiex_wait_queue *,
1064 struct mwifiex_ssid_bssid *);
1065int mwifiex_radio_ioctl_band_cfg(struct mwifiex_private *,
1066 u16 action,
1067 struct mwifiex_ds_band_cfg *);
1068int mwifiex_snmp_mib_ioctl(struct mwifiex_private *,
1069 struct mwifiex_wait_queue *,
1070 u32 cmd_oid, u16 action, u32 *value);
1071int mwifiex_get_bss_info(struct mwifiex_private *,
1072 struct mwifiex_bss_info *);
1073
1074#ifdef CONFIG_DEBUG_FS
1075void mwifiex_debugfs_init(void);
1076void mwifiex_debugfs_remove(void);
1077
1078void mwifiex_dev_debugfs_init(struct mwifiex_private *priv);
1079void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv);
1080#endif
1081#endif /* !_MWIFIEX_MAIN_H_ */
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
new file mode 100644
index 00000000000..1152beb930a
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -0,0 +1,3098 @@
1/*
2 * Marvell Wireless LAN device driver: scan ioctl and command handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "11n.h"
26#include "cfg80211.h"
27
28/* The maximum number of channels the firmware can scan per command */
29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
30
31#define MWIFIEX_CHANNELS_PER_SCAN_CMD 4
32
33/* Memory needed to store a max sized Channel List TLV for a firmware scan */
34#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
35 + (MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN \
36 * sizeof(struct mwifiex_chan_scan_param_set)))
37
38/* Memory needed to store supported rate */
39#define RATE_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_rates_param_set) \
40 + HOSTCMD_SUPPORTED_RATES)
41
42/* Memory needed to store a max number/size WildCard SSID TLV for a firmware
43 scan */
44#define WILDCARD_SSID_TLV_MAX_SIZE \
45 (MWIFIEX_MAX_SSID_LIST_LENGTH * \
46 (sizeof(struct mwifiex_ie_types_wildcard_ssid_params) \
47 + IEEE80211_MAX_SSID_LEN))
48
49/* Maximum memory needed for a mwifiex_scan_cmd_config with all TLVs at max */
50#define MAX_SCAN_CFG_ALLOC (sizeof(struct mwifiex_scan_cmd_config) \
51 + sizeof(struct mwifiex_ie_types_num_probes) \
52 + sizeof(struct mwifiex_ie_types_htcap) \
53 + CHAN_TLV_MAX_SIZE \
54 + RATE_TLV_MAX_SIZE \
55 + WILDCARD_SSID_TLV_MAX_SIZE)
56
57
58union mwifiex_scan_cmd_config_tlv {
59 /* Scan configuration (variable length) */
60 struct mwifiex_scan_cmd_config config;
61 /* Max allocated block */
62 u8 config_alloc_buf[MAX_SCAN_CFG_ALLOC];
63};
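
The config_alloc_buf member exists only to pad the union out to
MAX_SCAN_CFG_ALLOC bytes, so a single allocation is guaranteed to hold
the fixed scan config header plus the worst-case set of appended TLVs.
A compilable sketch of this sizing trick, with made-up sizes (the
toy_* names are illustrative only):

#include <stdio.h>

struct toy_scan_config { unsigned short len; unsigned char tlv_buf[1]; };

#define TOY_MAX_TLV_SPACE 64

union toy_scan_config_tlv {
        struct toy_scan_config config;          /* typed view */
        unsigned char alloc_buf[sizeof(struct toy_scan_config)
                                + TOY_MAX_TLV_SPACE]; /* forces the size */
};

int main(void)
{
        /* The union is as large as its largest member. */
        printf("allocation size = %zu\n", sizeof(union toy_scan_config_tlv));
        return 0;
}
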
64
65enum cipher_suite {
66 CIPHER_SUITE_TKIP,
67 CIPHER_SUITE_CCMP,
68 CIPHER_SUITE_MAX
69};
70static u8 mwifiex_wpa_oui[CIPHER_SUITE_MAX][4] = {
71 { 0x00, 0x50, 0xf2, 0x02 }, /* TKIP */
72 { 0x00, 0x50, 0xf2, 0x04 }, /* AES */
73};
74static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = {
75 { 0x00, 0x0f, 0xac, 0x02 }, /* TKIP */
76 { 0x00, 0x0f, 0xac, 0x04 }, /* AES */
77};
78
79/*
80 * This function parses a given IE for a given OUI.
81 *
82 * This is used to parse a WPA/RSN IE to find if it has
83 * a given OUI in the PTK cipher list.
84 */
85static u8
86mwifiex_search_oui_in_ie(struct ie_body *iebody, u8 *oui)
87{
88 u8 count;
89
90 count = iebody->ptk_cnt[0];
91
92 /* There can be multiple OUIs in the PTK list, hence:
93 1) Take the count.
94 2) Check each OUI against the requested one.
95 3) If one of them matches, return success. */
96 while (count) {
97 if (!memcmp(iebody->ptk_body, oui, sizeof(iebody->ptk_body)))
98 return MWIFIEX_OUI_PRESENT;
99
100 --count;
101 if (count)
102 iebody = (struct ie_body *) ((u8 *) iebody +
103 sizeof(iebody->ptk_body));
104 }
105
106 pr_debug("info: %s: OUI is not found in PTK\n", __func__);
107 return MWIFIEX_OUI_NOT_PRESENT;
108}
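
The loop above advances through the pairwise-cipher (PTK) list one
4-byte OUI at a time, using the count read at entry. The same walk in a
self-contained form, with the list flattened to a plain array (an
illustrative simplification of the in-driver ie_body layout):

#include <stdio.h>
#include <string.h>

/* Return 1 if the 4-byte OUI appears in a flat list of cipher suites. */
static int oui_present(const unsigned char *list, unsigned char count,
                       const unsigned char *oui)
{
        while (count--) {
                if (!memcmp(list, oui, 4))
                        return 1;
                list += 4;              /* next suite in the list */
        }
        return 0;
}

int main(void)
{
        const unsigned char suites[] = {
                0x00, 0x0f, 0xac, 0x02,         /* RSN TKIP */
                0x00, 0x0f, 0xac, 0x04,         /* RSN AES/CCMP */
        };
        const unsigned char aes[] = { 0x00, 0x0f, 0xac, 0x04 };

        printf("AES present: %d\n", oui_present(suites, 2, aes));
        return 0;
}
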
109
110/*
111 * This function checks if a given OUI is present in a RSN IE.
112 *
113 * The function first checks if a RSN IE is present or not in the
114 * BSS descriptor. It tries to locate the OUI only if such an IE is
115 * present.
116 */
117static u8
118mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
119{
120 u8 *oui = NULL;
121 struct ie_body *iebody = NULL;
122 u8 ret = MWIFIEX_OUI_NOT_PRESENT;
123
124 if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).
125 ieee_hdr.element_id == WLAN_EID_RSN))) {
126 iebody = (struct ie_body *)
127 (((u8 *) bss_desc->bcn_rsn_ie->data) +
128 RSN_GTK_OUI_OFFSET);
129 oui = &mwifiex_rsn_oui[cipher][0];
130 ret = mwifiex_search_oui_in_ie(iebody, oui);
131 if (ret)
132 return ret;
133 }
134 return ret;
135}
136
137/*
138 * This function checks if a given OUI is present in a WPA IE.
139 *
140 * The function first checks if a WPA IE is present or not in the
141 * BSS descriptor. It tries to locate the OUI only if such an IE is
142 * present.
143 */
144static u8
145mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
146{
147 u8 *oui = NULL;
148 struct ie_body *iebody = NULL;
149 u8 ret = MWIFIEX_OUI_NOT_PRESENT;
150
151 if (((bss_desc->bcn_wpa_ie) && ((*(bss_desc->bcn_wpa_ie)).
152 vend_hdr.element_id == WLAN_EID_WPA))) {
153 iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
154 oui = &mwifiex_wpa_oui[cipher][0];
155 ret = mwifiex_search_oui_in_ie(iebody, oui);
156 if (ret)
157 return ret;
158 }
159 return ret;
160}
161
162/*
163 * This function compares two SSIDs and checks if they match.
164 */
165s32
166mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
167 struct mwifiex_802_11_ssid *ssid2)
168{
169 if (!ssid1 || !ssid2 || (ssid1->ssid_len != ssid2->ssid_len))
170 return -1;
171 return memcmp(ssid1->ssid, ssid2->ssid, ssid1->ssid_len);
172}
173
174/*
175 * Sends IOCTL request to get the best BSS.
176 *
177 * This function allocates the IOCTL request buffer, fills it
178 * with requisite parameters and calls the IOCTL handler.
179 */
180int mwifiex_find_best_bss(struct mwifiex_private *priv,
181 u8 wait_option, struct mwifiex_ssid_bssid *ssid_bssid)
182{
183 struct mwifiex_wait_queue *wait = NULL;
184 struct mwifiex_ssid_bssid tmp_ssid_bssid;
185 int ret = 0;
186 u8 *mac = NULL;
187
188 if (!ssid_bssid)
189 return -1;
190
191 /* Allocate wait request buffer */
192 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
193 if (!wait)
194 return -ENOMEM;
195
196 memcpy(&tmp_ssid_bssid, ssid_bssid,
197 sizeof(struct mwifiex_ssid_bssid));
198 ret = mwifiex_bss_ioctl_find_bss(priv, wait, &tmp_ssid_bssid);
199
200 if (!ret) {
201 memcpy(ssid_bssid, &tmp_ssid_bssid,
202 sizeof(struct mwifiex_ssid_bssid));
203 mac = (u8 *) &ssid_bssid->bssid;
204 dev_dbg(priv->adapter->dev, "cmd: found network: ssid=%s,"
205 " %pM\n", ssid_bssid->ssid.ssid, mac);
206 }
207
208 kfree(wait);
209 return ret;
210}
211
212/*
213 * Sends IOCTL request to start a scan with user configurations.
214 *
215 * This function allocates the IOCTL request buffer, fills it
216 * with requisite parameters and calls the IOCTL handler.
217 *
218 * Upon completion, it also generates a wireless event to notify
219 * applications.
220 */
221int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
222 struct mwifiex_user_scan_cfg *scan_req)
223{
224 struct mwifiex_wait_queue *wait = NULL;
225 int status = 0;
226 u8 wait_option = MWIFIEX_IOCTL_WAIT;
227
228 /* Allocate an IOCTL request buffer */
229 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
230 if (!wait)
231 return -ENOMEM;
232
233 status = mwifiex_scan_networks(priv, wait, HostCmd_ACT_GEN_SET,
234 scan_req, NULL);
235
236 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
237
238 if (wait && (status != -EINPROGRESS))
239 kfree(wait);
240 return status;
241}
242
243/*
244 * This function checks if WAPI is enabled in the driver and the scanned
245 * network is compatible with it.
246 */
247static bool
248mwifiex_is_network_compatible_for_wapi(struct mwifiex_private *priv,
249 struct mwifiex_bssdescriptor *bss_desc)
250{
251 if (priv->sec_info.wapi_enabled &&
252 (bss_desc->bcn_wapi_ie &&
253 ((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id ==
254 WLAN_EID_BSS_AC_ACCESS_DELAY))) {
255 return true;
256 }
257 return false;
258}
259
260/*
261 * This function checks if the driver is configured with no security mode
262 * and the scanned network is compatible with it.
263 */
264static bool
265mwifiex_is_network_compatible_for_no_sec(struct mwifiex_private *priv,
266 struct mwifiex_bssdescriptor *bss_desc)
267{
268 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
269 && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
270 && ((!bss_desc->bcn_wpa_ie) ||
271 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
272 WLAN_EID_WPA))
273 && ((!bss_desc->bcn_rsn_ie) ||
274 ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
275 WLAN_EID_RSN))
276 && priv->sec_info.encryption_mode ==
277 MWIFIEX_ENCRYPTION_MODE_NONE && !bss_desc->privacy) {
278 return true;
279 }
280 return false;
281}
282
283/*
284 * This function checks if static WEP is enabled in the driver and the
285 * scanned network is compatible with it.
286 */
287static bool
288mwifiex_is_network_compatible_for_static_wep(struct mwifiex_private *priv,
289 struct mwifiex_bssdescriptor *bss_desc)
290{
291 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED
292 && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
293 && bss_desc->privacy) {
294 return true;
295 }
296 return false;
297}
298
299/*
300 * This function checks if WPA is enabled in the driver and the scanned
301 * network is compatible with it.
302 */
303static bool
304mwifiex_is_network_compatible_for_wpa(struct mwifiex_private *priv,
305 struct mwifiex_bssdescriptor *bss_desc,
306 int index)
307{
308 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
309 && priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
310 && ((bss_desc->bcn_wpa_ie) && ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
311 element_id == WLAN_EID_WPA))
312 /*
313 * Privacy bit may NOT be set in some APs like
314 * LinkSys WRT54G && bss_desc->privacy
315 */
316 ) {
317 dev_dbg(priv->adapter->dev, "info: %s: WPA: index=%d"
318 " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
319 "EncMode=%#x privacy=%#x\n", __func__, index,
320 (bss_desc->bcn_wpa_ie) ?
321 (*(bss_desc->bcn_wpa_ie)).
322 vend_hdr.element_id : 0,
323 (bss_desc->bcn_rsn_ie) ?
324 (*(bss_desc->bcn_rsn_ie)).
325 ieee_hdr.element_id : 0,
326 (priv->sec_info.wep_status ==
327 MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
328 (priv->sec_info.wpa_enabled) ? "e" : "d",
329 (priv->sec_info.wpa2_enabled) ? "e" : "d",
330 priv->sec_info.encryption_mode,
331 bss_desc->privacy);
332 return true;
333 }
334 return false;
335}
336
337/*
338 * This function checks if WPA2 is enabled in the driver and the scanned
339 * network is compatible with it.
340 */
341static bool
342mwifiex_is_network_compatible_for_wpa2(struct mwifiex_private *priv,
343 struct mwifiex_bssdescriptor *bss_desc,
344 int index)
345{
346 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
347 && !priv->sec_info.wpa_enabled && priv->sec_info.wpa2_enabled
348 && ((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
349 element_id == WLAN_EID_RSN))
350 /*
351 * Privacy bit may NOT be set in some APs like
352 * LinkSys WRT54G && bss_desc->privacy
353 */
354 ) {
355 dev_dbg(priv->adapter->dev, "info: %s: WPA2: index=%d"
356 " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
357 "EncMode=%#x privacy=%#x\n", __func__, index,
358 (bss_desc->bcn_wpa_ie) ?
359 (*(bss_desc->bcn_wpa_ie)).
360 vend_hdr.element_id : 0,
361 (bss_desc->bcn_rsn_ie) ?
362 (*(bss_desc->bcn_rsn_ie)).
363 ieee_hdr.element_id : 0,
364 (priv->sec_info.wep_status ==
365 MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
366 (priv->sec_info.wpa_enabled) ? "e" : "d",
367 (priv->sec_info.wpa2_enabled) ? "e" : "d",
368 priv->sec_info.encryption_mode,
369 bss_desc->privacy);
370 return true;
371 }
372 return false;
373}
374
375/*
376 * This function checks if ad-hoc AES is enabled in the driver and the
377 * scanned network is compatible with it.
378 */
379static bool
380mwifiex_is_network_compatible_for_adhoc_aes(struct mwifiex_private *priv,
381 struct mwifiex_bssdescriptor *bss_desc)
382{
383 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
384 && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
385 && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
386 element_id != WLAN_EID_WPA))
387 && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
388 element_id != WLAN_EID_RSN))
389 && priv->sec_info.encryption_mode ==
390 MWIFIEX_ENCRYPTION_MODE_NONE && bss_desc->privacy) {
391 return true;
392 }
393 return false;
394}
395
396/*
397 * This function checks if dynamic WEP is enabled in the driver and the
398 * scanned network is compatible with it.
399 */
400static bool
401mwifiex_is_network_compatible_for_dynamic_wep(struct mwifiex_private *priv,
402 struct mwifiex_bssdescriptor *bss_desc,
403 int index)
404{
405 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
406 && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
407 && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
408 element_id != WLAN_EID_WPA))
409 && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
410 element_id != WLAN_EID_RSN))
411 && priv->sec_info.encryption_mode !=
412 MWIFIEX_ENCRYPTION_MODE_NONE && bss_desc->privacy) {
413 dev_dbg(priv->adapter->dev, "info: %s: dynamic "
414 "WEP: index=%d wpa_ie=%#x wpa2_ie=%#x "
415 "EncMode=%#x privacy=%#x\n",
416 __func__, index,
417 (bss_desc->bcn_wpa_ie) ?
418 (*(bss_desc->bcn_wpa_ie)).
419 vend_hdr.element_id : 0,
420 (bss_desc->bcn_rsn_ie) ?
421 (*(bss_desc->bcn_rsn_ie)).
422 ieee_hdr.element_id : 0,
423 priv->sec_info.encryption_mode,
424 bss_desc->privacy);
425 return true;
426 }
427 return false;
428}
429
430/*
431 * This function checks if a scanned network is compatible with the driver
432 * settings.
433 *
434 *   WEP     WPA    WPA2   ad-hoc encrypt                  Network
435 * enabled enabled enabled  AES    mode  Privacy WPA WPA2 Compatible
436 *    0       0       0      0    NONE      0     0   0   yes No security
437 *    0       1       0      0     x        1x    1   x   yes WPA (disable
438 *                                                            HT if no AES)
439 *    0       0       1      0     x        1x    x   1   yes WPA2 (disable
440 *                                                            HT if no AES)
441 *    0       0       0      1    NONE      1     0   0   yes Ad-hoc AES
442 *    1       0       0      0    NONE      1     0   0   yes Static WEP
443 *                                                            (disable HT)
444 *    0       0       0      0   !=NONE     1     0   0   yes Dynamic WEP
445 *
446 * Compatibility is not checked while roaming, except for the mode.
447 */
448static s32
449mwifiex_is_network_compatible(struct mwifiex_private *priv, u32 index, u32 mode)
450{
451 struct mwifiex_adapter *adapter = priv->adapter;
452 struct mwifiex_bssdescriptor *bss_desc;
453
454 bss_desc = &adapter->scan_table[index];
455 bss_desc->disable_11n = false;
456
457 /* Don't check for compatibility if roaming */
458 if (priv->media_connected && (priv->bss_mode == MWIFIEX_BSS_MODE_INFRA)
459 && (bss_desc->bss_mode == MWIFIEX_BSS_MODE_INFRA))
460 return index;
461
462 if (priv->wps.session_enable) {
463 dev_dbg(adapter->dev,
464 "info: return success directly in WPS period\n");
465 return index;
466 }
467
468 if (mwifiex_is_network_compatible_for_wapi(priv, bss_desc)) {
469 dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
470 return index;
471 }
472
473 if (bss_desc->bss_mode == mode) {
474 if (mwifiex_is_network_compatible_for_no_sec(priv, bss_desc)) {
475 /* No security */
476 return index;
477 } else if (mwifiex_is_network_compatible_for_static_wep(priv,
478 bss_desc)) {
479 /* Static WEP enabled */
480 dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
481 bss_desc->disable_11n = true;
482 return index;
483 } else if (mwifiex_is_network_compatible_for_wpa(priv, bss_desc,
484 index)) {
485 /* WPA enabled */
486 if (((priv->adapter->config_bands & BAND_GN
487 || priv->adapter->config_bands & BAND_AN)
488 && bss_desc->bcn_ht_cap)
489 && !mwifiex_is_wpa_oui_present(bss_desc,
490 CIPHER_SUITE_CCMP)) {
491
492 if (mwifiex_is_wpa_oui_present(bss_desc,
493 CIPHER_SUITE_TKIP)) {
494 dev_dbg(adapter->dev,
495 "info: Disable 11n if AES "
496 "is not supported by AP\n");
497 bss_desc->disable_11n = true;
498 } else {
499 return -1;
500 }
501 }
502 return index;
503 } else if (mwifiex_is_network_compatible_for_wpa2(priv,
504 bss_desc, index)) {
505 /* WPA2 enabled */
506 if (((priv->adapter->config_bands & BAND_GN
507 || priv->adapter->config_bands & BAND_AN)
508 && bss_desc->bcn_ht_cap)
509 && !mwifiex_is_rsn_oui_present(bss_desc,
510 CIPHER_SUITE_CCMP)) {
511
512 if (mwifiex_is_rsn_oui_present(bss_desc,
513 CIPHER_SUITE_TKIP)) {
514 dev_dbg(adapter->dev,
515 "info: Disable 11n if AES "
516 "is not supported by AP\n");
517 bss_desc->disable_11n = true;
518 } else {
519 return -1;
520 }
521 }
522 return index;
523 } else if (mwifiex_is_network_compatible_for_adhoc_aes(priv,
524 bss_desc)) {
525 /* Ad-hoc AES enabled */
526 return index;
527 } else if (mwifiex_is_network_compatible_for_dynamic_wep(priv,
528 bss_desc, index)) {
529 /* Dynamic WEP enabled */
530 return index;
531 }
532
533 /* Security doesn't match */
534 dev_dbg(adapter->dev, "info: %s: failed: index=%d "
535 "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s EncMode"
536 "=%#x privacy=%#x\n",
537 __func__, index,
538 (bss_desc->bcn_wpa_ie) ?
539 (*(bss_desc->bcn_wpa_ie)).vend_hdr.
540 element_id : 0,
541 (bss_desc->bcn_rsn_ie) ?
542 (*(bss_desc->bcn_rsn_ie)).ieee_hdr.
543 element_id : 0,
544 (priv->sec_info.wep_status ==
545 MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
546 (priv->sec_info.wpa_enabled) ? "e" : "d",
547 (priv->sec_info.wpa2_enabled) ? "e" : "d",
548 priv->sec_info.encryption_mode, bss_desc->privacy);
549 return -1;
550 }
551
552 /* Mode doesn't match */
553 return -1;
554}
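
Each branch in the function above evaluates one row of the
compatibility table as an AND of driver settings and advertised IEs.
Here is the first row (no security) reduced to a compilable sketch,
with the driver and BSS state collapsed to booleans (the toy_* types
are illustrative, not driver structures):

#include <stdbool.h>
#include <stdio.h>

struct toy_sec { bool wep, wpa, wpa2, encrypt; };
struct toy_bss { bool has_wpa_ie, has_rsn_ie, privacy; };

/* Row "No security": everything off on both sides -> compatible. */
static bool compatible_no_sec(const struct toy_sec *s,
                              const struct toy_bss *b)
{
        return !s->wep && !s->wpa && !s->wpa2 && !s->encrypt &&
               !b->has_wpa_ie && !b->has_rsn_ie && !b->privacy;
}

int main(void)
{
        struct toy_sec sec = { false, false, false, false };
        struct toy_bss open_bss = { false, false, false };
        struct toy_bss wpa_bss = { true, false, true };

        printf("open AP: %d, WPA AP: %d\n",
               compatible_no_sec(&sec, &open_bss),
               compatible_no_sec(&sec, &wpa_bss));
        return 0;
}
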
555
556/*
557 * This function finds the best SSID in the scan list.
558 *
559 * It searches the scan table for the best SSID that also matches the current
560 * adapter network preference (mode, security etc.).
561 */
562static s32
563mwifiex_find_best_network_in_list(struct mwifiex_private *priv)
564{
565 struct mwifiex_adapter *adapter = priv->adapter;
566 u32 mode = priv->bss_mode;
567 s32 best_net = -1;
568 s32 best_rssi = 0;
569 u32 i;
570
571 dev_dbg(adapter->dev, "info: num of BSSIDs = %d\n",
572 adapter->num_in_scan_table);
573
574 for (i = 0; i < adapter->num_in_scan_table; i++) {
575 switch (mode) {
576 case MWIFIEX_BSS_MODE_INFRA:
577 case MWIFIEX_BSS_MODE_IBSS:
578 if (mwifiex_is_network_compatible(priv, i, mode) >= 0) {
579 if (SCAN_RSSI(adapter->scan_table[i].rssi) >
580 best_rssi) {
581 best_rssi = SCAN_RSSI(adapter->
582 scan_table[i].rssi);
583 best_net = i;
584 }
585 }
586 break;
587 case MWIFIEX_BSS_MODE_AUTO:
588 default:
589 if (SCAN_RSSI(adapter->scan_table[i].rssi) >
590 best_rssi) {
591 best_rssi = SCAN_RSSI(adapter->scan_table[i].
592 rssi);
593 best_net = i;
594 }
595 break;
596 }
597 }
598
599 return best_net;
600}
601
602/*
603 * This function creates a channel list for the driver to scan, based
604 * on region/band information.
605 *
606 * This routine is used for any scan that is not provided with a
607 * specific channel list to scan.
608 */
609static void
610mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
611 const struct mwifiex_user_scan_cfg
612 *user_scan_in,
613 struct mwifiex_chan_scan_param_set
614 *scan_chan_list,
615 u8 filtered_scan)
616{
617 enum ieee80211_band band;
618 struct ieee80211_supported_band *sband;
619 struct ieee80211_channel *ch;
620 struct mwifiex_adapter *adapter = priv->adapter;
621 int chan_idx = 0, i;
622 u8 scan_type;
623
624 for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
625
626 if (!priv->wdev->wiphy->bands[band])
627 continue;
628
629 sband = priv->wdev->wiphy->bands[band];
630
631 for (i = 0; (i < sband->n_channels) ; i++, chan_idx++) {
632 ch = &sband->channels[i];
633 if (ch->flags & IEEE80211_CHAN_DISABLED)
634 continue;
635 scan_chan_list[chan_idx].radio_type = band;
636 scan_type = ch->flags & IEEE80211_CHAN_PASSIVE_SCAN;
637 if (user_scan_in &&
638 user_scan_in->chan_list[0].scan_time)
639 scan_chan_list[chan_idx].max_scan_time =
640 cpu_to_le16((u16) user_scan_in->
641 chan_list[0].scan_time);
642 else if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
643 scan_chan_list[chan_idx].max_scan_time =
644 cpu_to_le16(adapter->passive_scan_time);
645 else
646 scan_chan_list[chan_idx].max_scan_time =
647 cpu_to_le16(adapter->active_scan_time);
648 if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
649 scan_chan_list[chan_idx].chan_scan_mode_bitmap
650 |= MWIFIEX_PASSIVE_SCAN;
651 else
652 scan_chan_list[chan_idx].chan_scan_mode_bitmap
653 &= ~MWIFIEX_PASSIVE_SCAN;
654 scan_chan_list[chan_idx].chan_number =
655 (u32) ch->hw_value;
656 if (filtered_scan) {
657 scan_chan_list[chan_idx].max_scan_time =
658 cpu_to_le16(adapter->specific_scan_time);
659 scan_chan_list[chan_idx].chan_scan_mode_bitmap
660 |= MWIFIEX_DISABLE_CHAN_FILT;
661 }
662 }
663
664 }
665}
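
For each enabled channel, the routine above picks a dwell time in this
order of precedence: the specific-scan time for filtered scans, else a
user-supplied scan time, else the adapter's passive or active default
depending on the channel's passive flag. That selection on its own, as
a compilable sketch (the millisecond values are made-up defaults):

#include <stdio.h>

/* Dwell-time selection mirroring the branch ladder above. */
static int pick_scan_time(int user_time, int passive, int filtered)
{
        if (filtered)
                return 110;             /* specific_scan_time */
        if (user_time)
                return user_time;
        return passive ? 200 : 60;      /* passive vs active default */
}

int main(void)
{
        printf("user=50ms -> %d\n", pick_scan_time(50, 0, 0));
        printf("passive   -> %d\n", pick_scan_time(0, 1, 0));
        printf("filtered  -> %d\n", pick_scan_time(0, 0, 1));
        return 0;
}
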
666
667/*
668 * This function constructs and sends multiple scan config commands to
669 * the firmware.
670 *
671 * Previous routines in the code flow have created a scan command configuration
672 * with any requested TLVs. This function splits the channel list into
673 * portions of at most the maximum channels supported per scan command and
674 * sends each portion, along with the other TLVs, to the firmware.
675 */
676static int
677mwifiex_scan_channel_list(struct mwifiex_private *priv, void *wait_buf,
678 u32 max_chan_per_scan, u8 filtered_scan,
679 struct mwifiex_scan_cmd_config *scan_cfg_out,
680 struct mwifiex_ie_types_chan_list_param_set
681 *chan_tlv_out,
682 struct mwifiex_chan_scan_param_set *scan_chan_list)
683{
684 int ret = 0;
685 struct mwifiex_chan_scan_param_set *tmp_chan_list;
686 struct mwifiex_chan_scan_param_set *start_chan;
687
688 u32 tlv_idx;
689 u32 total_scan_time;
690 u32 done_early;
691
692 if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
693 dev_dbg(priv->adapter->dev,
694 "info: Scan: Null detect: %p, %p, %p\n",
695 scan_cfg_out, chan_tlv_out, scan_chan_list);
696 return -1;
697 }
698
699 chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
700
701 /* Set the temp channel struct pointer to the start of the desired
702 list */
703 tmp_chan_list = scan_chan_list;
704
705 /* Loop through the desired channel list, sending a new firmware scan
706 command for each batch of max_chan_per_scan channels (or for 1,6,11
707 individually if configured accordingly) */
708 while (tmp_chan_list->chan_number) {
709
710 tlv_idx = 0;
711 total_scan_time = 0;
712 chan_tlv_out->header.len = 0;
713 start_chan = tmp_chan_list;
714 done_early = false;
715
716 /*
717 * Construct the Channel TLV for the scan command. Continue to
718 * insert channel TLVs until:
719 * - the tlv_idx hits the maximum configured per scan command
720 * - the next channel to insert is 0 (end of desired channel
721 * list)
722 * - done_early is set (controlling individual scanning of
723 * 1,6,11)
724 */
725 while (tlv_idx < max_chan_per_scan
726 && tmp_chan_list->chan_number && !done_early) {
727
728 dev_dbg(priv->adapter->dev,
729 "info: Scan: Chan(%3d), Radio(%d),"
730 " Mode(%d, %d), Dur(%d)\n",
731 tmp_chan_list->chan_number,
732 tmp_chan_list->radio_type,
733 tmp_chan_list->chan_scan_mode_bitmap
734 & MWIFIEX_PASSIVE_SCAN,
735 (tmp_chan_list->chan_scan_mode_bitmap
736 & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
737 le16_to_cpu(tmp_chan_list->max_scan_time));
738
739 /* Copy the current channel TLV to the command being
740 prepared */
741 memcpy(chan_tlv_out->chan_scan_param + tlv_idx,
742 tmp_chan_list,
743 sizeof(chan_tlv_out->chan_scan_param));
744
745 /* Increment the TLV header length by the size
746 appended */
747 chan_tlv_out->header.len =
748 cpu_to_le16(le16_to_cpu(chan_tlv_out->header.len) +
749 (sizeof(chan_tlv_out->chan_scan_param)));
750
751 /*
752 * The tlv buffer length is set to the number of bytes
753 * between the channel tlv pointer and the start
754 * of the tlv buffer. This compensates for any TLVs
755 * that were appended before the channel list.
756 */
757 scan_cfg_out->tlv_buf_len = (u32) ((u8 *) chan_tlv_out -
758 scan_cfg_out->tlv_buf);
759
760 /* Add the size of the channel tlv header and the data
761 length */
762 scan_cfg_out->tlv_buf_len +=
763 (sizeof(chan_tlv_out->header)
764 + le16_to_cpu(chan_tlv_out->header.len));
765
766 /* Increment the index to the channel tlv we are
767 constructing */
768 tlv_idx++;
769
770 /* Count the total scan time per command */
771 total_scan_time +=
772 le16_to_cpu(tmp_chan_list->max_scan_time);
773
774 done_early = false;
775
776 /* Stop the loop if the *current* channel is in the
777 1,6,11 set and we are not filtering on a BSSID
778 or SSID. */
779 if (!filtered_scan && (tmp_chan_list->chan_number == 1
780 || tmp_chan_list->chan_number == 6
781 || tmp_chan_list->chan_number == 11))
782 done_early = true;
783
784 /* Increment the tmp pointer to the next channel to
785 be scanned */
786 tmp_chan_list++;
787
788 /* Stop the loop if the *next* channel is in the 1,6,11
789 set. This will cause it to be the only channel
790 scanned on the next iteration */
791 if (!filtered_scan && (tmp_chan_list->chan_number == 1
792 || tmp_chan_list->chan_number == 6
793 || tmp_chan_list->chan_number == 11))
794 done_early = true;
795 }
796
797 /* The total scan time should be less than scan command timeout
798 value */
799 if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) {
800 dev_err(priv->adapter->dev, "total scan time %dms"
801 " is over limit (%dms), scan skipped\n",
802 total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME);
803 ret = -1;
804 break;
805 }
806
807 priv->adapter->scan_channels = start_chan;
808
809 /* Send the scan command to the firmware with the specified
810 cfg */
811 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_SCAN,
812 HostCmd_ACT_GEN_SET,
813 0, wait_buf, scan_cfg_out);
814 if (ret)
815 break;
816 }
817
818 if (ret)
819 return -1;
820
821 return 0;
822}
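
The chunking behaviour is easiest to see on a concrete channel list:
channels 1, 6 and 11 end up in commands of their own (when the scan is
unfiltered), while everything else is batched up to the per-command
limit. A standalone sketch of just that split, assuming a limit of 4
channels per command:

#include <stdio.h>

static int is_social(int ch) { return ch == 1 || ch == 6 || ch == 11; }

int main(void)
{
        /* Zero-terminated channel list, like scan_chan_list above. */
        int chans[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0 };
        int i = 0, max_per_cmd = 4;

        while (chans[i]) {
                int n = 0;

                printf("scan cmd:");
                while (chans[i] && n < max_per_cmd) {
                        printf(" %d", chans[i]);
                        n++;
                        /* done_early: stop after a 1/6/11 channel, or
                           just before the next one */
                        if (is_social(chans[i++]) || is_social(chans[i]))
                                break;
                }
                printf("\n");
        }
        return 0;
}

With this list the output is one command each for 1, 6 and 11, and
batches "2 3 4 5" and "7 8 9 10" in between, matching the loop above.
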
823
824/*
825 * This function constructs a scan command configuration structure to use
826 * in scan commands.
827 *
828 * Application layer or other functions can invoke network scanning
829 * with a scan configuration supplied in a user scan configuration structure.
830 * This structure is used as the basis of one or more scan command
831 * configurations that are sent to the command processing module and eventually to the
832 * firmware.
833 *
834 * This function creates a scan command configuration structure based on the
835 * following user supplied parameters (if present):
836 * - SSID filter
837 * - BSSID filter
838 * - Number of Probes to be sent
839 * - Channel list
840 *
841 * If the SSID or BSSID filter is not present, the filter is disabled/cleared.
842 * If the number of probes is not set, adapter default setting is used.
843 */
844static void
845mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
846 const struct mwifiex_user_scan_cfg *user_scan_in,
847 struct mwifiex_scan_cmd_config *scan_cfg_out,
848 struct mwifiex_ie_types_chan_list_param_set
849 **chan_list_out,
850 struct mwifiex_chan_scan_param_set
851 *scan_chan_list,
852 u8 *max_chan_per_scan, u8 *filtered_scan,
853 u8 *scan_current_only)
854{
855 struct mwifiex_adapter *adapter = priv->adapter;
856 struct mwifiex_ie_types_num_probes *num_probes_tlv;
857 struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
858 struct mwifiex_ie_types_rates_param_set *rates_tlv;
859 const u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
860 u8 *tlv_pos;
861 u32 num_probes;
862 u32 ssid_len;
863 u32 chan_idx;
864 u32 scan_type;
865 u16 scan_dur;
866 u8 channel;
867 u8 radio_type;
868 u32 ssid_idx;
869 u8 ssid_filter;
870 u8 rates[MWIFIEX_SUPPORTED_RATES];
871 u32 rates_size;
872 struct mwifiex_ie_types_htcap *ht_cap;
873
874 /* The tlv_buf_len is calculated for each scan command. The TLVs added
875 in this routine will be preserved since the routine that sends the
876 command will append channel TLVs at *chan_list_out. The difference
877 between the *chan_list_out and the tlv_buf start will be used to
878 calculate the size of anything we add in this routine. */
879 scan_cfg_out->tlv_buf_len = 0;
880
881 /* Running tlv pointer. Assigned to chan_list_out at end of function
882 so later routines know where channels can be added to the command
883 buf */
884 tlv_pos = scan_cfg_out->tlv_buf;
885
886 /* Initialize the scan as un-filtered; the flag is later set to TRUE
887 below if an SSID or BSSID filter is sent in the command */
888 *filtered_scan = false;
889
890 /* Initialize the scan as not being only on the current channel. If
891 the channel list is customized, only contains one channel, and is
892 the active channel, this is set true and data flow is not halted. */
893 *scan_current_only = false;
894
895 if (user_scan_in) {
896
897 /* Default the ssid_filter flag to TRUE; it is set to false under
898 certain wildcard conditions, qualified by the existence of an
899 SSID list, before the scan is marked as filtered */
900 ssid_filter = true;
901
902 /* Set the BSS type scan filter, use Adapter setting if
903 unset */
904 scan_cfg_out->bss_mode =
905 (user_scan_in->bss_mode ? (u8) user_scan_in->
906 bss_mode : (u8) adapter->scan_mode);
907
908 /* Set the number of probes to send, use Adapter setting
909 if unset */
910 num_probes =
911 (user_scan_in->num_probes ? user_scan_in->
912 num_probes : adapter->scan_probes);
913
914 /*
915 * Set the BSSID filter to the incoming configuration,
916 * if non-zero. If not set, it will remain disabled
917 * (all zeros).
918 */
919 memcpy(scan_cfg_out->specific_bssid,
920 user_scan_in->specific_bssid,
921 sizeof(scan_cfg_out->specific_bssid));
922
923 for (ssid_idx = 0;
924 ((ssid_idx < ARRAY_SIZE(user_scan_in->ssid_list))
925 && (*user_scan_in->ssid_list[ssid_idx].ssid
926 || user_scan_in->ssid_list[ssid_idx].max_len));
927 ssid_idx++) {
928
929 ssid_len = strlen(user_scan_in->ssid_list[ssid_idx].
930 ssid) + 1;
931
932 wildcard_ssid_tlv =
933 (struct mwifiex_ie_types_wildcard_ssid_params *)
934 tlv_pos;
935 wildcard_ssid_tlv->header.type =
936 cpu_to_le16(TLV_TYPE_WILDCARDSSID);
937 wildcard_ssid_tlv->header.len = cpu_to_le16(
938 (u16) (ssid_len + sizeof(wildcard_ssid_tlv->
939 max_ssid_length)));
940 wildcard_ssid_tlv->max_ssid_length =
941 user_scan_in->ssid_list[ssid_idx].max_len;
942
943 memcpy(wildcard_ssid_tlv->ssid,
944 user_scan_in->ssid_list[ssid_idx].ssid,
945 ssid_len);
946
947 tlv_pos += (sizeof(wildcard_ssid_tlv->header)
948 + le16_to_cpu(wildcard_ssid_tlv->header.len));
949
950 dev_dbg(adapter->dev, "info: scan: ssid_list[%d]: %s, %d\n",
951 ssid_idx, wildcard_ssid_tlv->ssid,
952 wildcard_ssid_tlv->max_ssid_length);
953
954 /* An empty wildcard ssid with a maxlen will match many or
955 potentially all SSIDs (maxlen == 32), therefore do
956 not treat the scan as filtered. */
958 if (!ssid_len && wildcard_ssid_tlv->max_ssid_length)
959 ssid_filter = false;
960
961 }
962
963 /*
964 * The default number of channels sent in the command is low to
965 * ensure the response buffer from the firmware does not
966 * truncate scan results. That is not an issue with an SSID
967 * or BSSID filter applied to the scan results in the firmware.
968 */
969 if ((ssid_idx && ssid_filter)
970 || memcmp(scan_cfg_out->specific_bssid, &zero_mac,
971 sizeof(zero_mac)))
972 *filtered_scan = true;
973 } else {
974 scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
975 num_probes = adapter->scan_probes;
976 }
977
978 /*
979 * If a specific BSSID or SSID is used, the number of channels in the
980 * scan command will be increased to the absolute maximum.
981 */
982 if (*filtered_scan)
983 *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
984 else
985 *max_chan_per_scan = MWIFIEX_CHANNELS_PER_SCAN_CMD;
986
987 /* If the input config or adapter has the number of Probes set,
988 add tlv */
989 if (num_probes) {
990
991 dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
992 num_probes);
993
994 num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
995 num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
996 num_probes_tlv->header.len =
997 cpu_to_le16(sizeof(num_probes_tlv->num_probes));
998 num_probes_tlv->num_probes = cpu_to_le16((u16) num_probes);
999
1000 tlv_pos += sizeof(num_probes_tlv->header) +
1001 le16_to_cpu(num_probes_tlv->header.len);
1002
1003 }
1004
1005 /* Append rates tlv */
1006 memset(rates, 0, sizeof(rates));
1007
1008 rates_size = mwifiex_get_supported_rates(priv, rates);
1009
1010 rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos;
1011 rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
1012 rates_tlv->header.len = cpu_to_le16((u16) rates_size);
1013 memcpy(rates_tlv->rates, rates, rates_size);
1014 tlv_pos += sizeof(rates_tlv->header) + rates_size;
1015
1016 dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);
1017
1018 if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
1019 && (priv->adapter->config_bands & BAND_GN
1020 || priv->adapter->config_bands & BAND_AN)) {
1021 ht_cap = (struct mwifiex_ie_types_htcap *) tlv_pos;
1022 memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
1023 ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
1024 ht_cap->header.len =
1025 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
1026 mwifiex_fill_cap_info(priv, ht_cap);
1027 tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
1028 }
1029
1030 /* Append vendor specific IE TLV */
1031 mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_SCAN, &tlv_pos);
1032
1033 /*
1034 * Set the output for the channel TLV to the address in the tlv buffer
1035 * past any TLVs that were added in this function (SSID, num_probes).
1036 * Channel TLVs will be added past this for each scan command,
1037 * preserving the TLVs that were previously added.
1038 */
1039 *chan_list_out =
1040 (struct mwifiex_ie_types_chan_list_param_set *) tlv_pos;
1041
1042 if (user_scan_in && user_scan_in->chan_list[0].chan_number) {
1043
1044 dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n");
1045
1046 for (chan_idx = 0;
1047 chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX
1048 && user_scan_in->chan_list[chan_idx].chan_number;
1049 chan_idx++) {
1050
1051 channel = user_scan_in->chan_list[chan_idx].chan_number;
1052 (scan_chan_list + chan_idx)->chan_number = channel;
1053
1054 radio_type =
1055 user_scan_in->chan_list[chan_idx].radio_type;
1056 (scan_chan_list + chan_idx)->radio_type = radio_type;
1057
1058 scan_type = user_scan_in->chan_list[chan_idx].scan_type;
1059
1060 if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
1061 (scan_chan_list +
1062 chan_idx)->chan_scan_mode_bitmap
1063 |= MWIFIEX_PASSIVE_SCAN;
1064 else
1065 (scan_chan_list +
1066 chan_idx)->chan_scan_mode_bitmap
1067 &= ~MWIFIEX_PASSIVE_SCAN;
1068
1069 if (user_scan_in->chan_list[chan_idx].scan_time) {
1070 scan_dur = (u16) user_scan_in->
1071 chan_list[chan_idx].scan_time;
1072 } else {
1073 if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
1074 scan_dur = adapter->passive_scan_time;
1075 else if (*filtered_scan)
1076 scan_dur = adapter->specific_scan_time;
1077 else
1078 scan_dur = adapter->active_scan_time;
1079 }
1080
1081 (scan_chan_list + chan_idx)->min_scan_time =
1082 cpu_to_le16(scan_dur);
1083 (scan_chan_list + chan_idx)->max_scan_time =
1084 cpu_to_le16(scan_dur);
1085 }
1086
1087 /* Check if we are only scanning the current channel */
1088 if ((chan_idx == 1)
1089 && (user_scan_in->chan_list[0].chan_number
1090 == priv->curr_bss_params.bss_descriptor.channel)) {
1091 *scan_current_only = true;
1092 dev_dbg(adapter->dev,
1093 "info: Scan: Scanning current channel only\n");
1094 }
1095
1096 } else {
1097 dev_dbg(adapter->dev,
1098 "info: Scan: Creating full region channel list\n");
1099 mwifiex_scan_create_channel_list(priv, user_scan_in,
1100 scan_chan_list,
1101 *filtered_scan);
1102 }
1103}
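
Everything this function adds follows one append pattern: cast the
running tlv_pos pointer to the TLV type, fill in header and payload,
then advance tlv_pos by header-plus-payload so the final buffer length
falls out of a pointer difference. A minimal sketch of that pattern
(generic header, hypothetical type codes, endianness handling omitted):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct toy_tlv {
        uint16_t type;
        uint16_t len;
        uint8_t  value[];
};

/* Append one TLV at *pos and advance the running pointer. */
static void append_tlv(uint8_t **pos, uint16_t type,
                       const void *val, uint16_t len)
{
        struct toy_tlv *tlv = (struct toy_tlv *) *pos;

        tlv->type = type;
        tlv->len = len;
        memcpy(tlv->value, val, len);
        *pos += sizeof(*tlv) + len;
}

int main(void)
{
        uint8_t buf[128];
        uint8_t *pos = buf;
        uint16_t probes = 2;
        uint8_t rates[] = { 0x82, 0x84, 0x8b, 0x96 };

        append_tlv(&pos, 0x0102, &probes, sizeof(probes));
        append_tlv(&pos, 0x0001, rates, sizeof(rates));
        /* As with scan_cfg_out->tlv_buf_len: a pointer difference. */
        printf("tlv_buf_len = %ld\n", (long) (pos - buf));
        return 0;
}
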
1104
1105/*
1106 * This function inspects the scan response buffer for pointers to
1107 * expected TLVs.
1108 *
1109 * TLVs can be included at the end of the scan response BSS information.
1110 *
1111 * Data in the buffer is parsed for pointers to TLVs that can
1112 * potentially be passed back in the response.
1113 */
1114static void
1115mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
1116 struct mwifiex_ie_types_data *tlv,
1117 u32 tlv_buf_size, u32 req_tlv_type,
1118 struct mwifiex_ie_types_data **tlv_data)
1119{
1120 struct mwifiex_ie_types_data *current_tlv;
1121 u32 tlv_buf_left;
1122 u32 tlv_type;
1123 u32 tlv_len;
1124
1125 current_tlv = tlv;
1126 tlv_buf_left = tlv_buf_size;
1127 *tlv_data = NULL;
1128
1129 dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
1130 tlv_buf_size);
1131
1132 while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {
1133
1134 tlv_type = le16_to_cpu(current_tlv->header.type);
1135 tlv_len = le16_to_cpu(current_tlv->header.len);
1136
1137 if (sizeof(tlv->header) + tlv_len > tlv_buf_left) {
1138 dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n");
1139 break;
1140 }
1141
1142 if (req_tlv_type == tlv_type) {
1143 switch (tlv_type) {
1144 case TLV_TYPE_TSFTIMESTAMP:
1145 dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
1146 "timestamp TLV, len = %d\n", tlv_len);
1147 *tlv_data = (struct mwifiex_ie_types_data *)
1148 current_tlv;
1149 break;
1150 case TLV_TYPE_CHANNELBANDLIST:
1151 dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
1152 " band list TLV, len = %d\n", tlv_len);
1153 *tlv_data = (struct mwifiex_ie_types_data *)
1154 current_tlv;
1155 break;
1156 default:
1157 dev_err(adapter->dev,
1158 "SCAN_RESP: unhandled TLV = %d\n",
1159 tlv_type);
1160 /* Give up, this seems corrupted */
1161 return;
1162 }
1163 }
1164
1165 if (*tlv_data)
1166 break;
1167
1168
1169 tlv_buf_left -= (sizeof(tlv->header) + tlv_len);
1170 current_tlv =
1171 (struct mwifiex_ie_types_data *) (current_tlv->data +
1172 tlv_len);
1173
1174 } /* while */
1175}
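
The loop above is the standard defensive TLV walk: read a header,
check that the advertised length still fits in the bytes remaining,
handle the TLV, then advance by header-plus-length. The same walk in a
self-contained form (little-endian test buffer; the toy_* layout is an
assumption for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct toy_tlv_hdr { uint16_t type; uint16_t len; };

static void walk_tlvs(const uint8_t *buf, size_t left)
{
        struct toy_tlv_hdr hdr;

        while (left >= sizeof(hdr)) {
                memcpy(&hdr, buf, sizeof(hdr));   /* no alignment games */
                if (sizeof(hdr) + hdr.len > left) {
                        printf("corrupt TLV buffer\n");
                        return;
                }
                printf("type=%u len=%u\n", hdr.type, hdr.len);
                buf  += sizeof(hdr) + hdr.len;
                left -= sizeof(hdr) + hdr.len;
        }
}

int main(void)
{
        /* Two TLVs: (type 1, 2 bytes) then (type 2, 1 byte). */
        uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb, 2, 0, 1, 0, 0xcc };

        walk_tlvs(buf, sizeof(buf));
        return 0;
}
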
1176
1177/*
1178 * This function interprets a BSS scan response returned from the firmware.
1179 *
1180 * The various fixed fields and IEs are parsed and passed back for a BSS
1181 * probe response or beacon from scan command. Information is recorded as
1182 * needed in the scan table for that entry.
1183 *
1184 * The following IE types are recognized and parsed -
1185 * - SSID
1186 * - Supported rates
1187 * - FH parameters set
1188 * - DS parameters set
1189 * - CF parameters set
1190 * - IBSS parameters set
1191 * - ERP information
1192 * - Extended supported rates
1193 * - Vendor specific (221)
1194 * - RSN IE
1195 * - WAPI IE
1196 * - HT capability
1197 * - HT operation
1198 * - BSS Coexistence 20/40
1199 * - Extended capability
1200 * - Overlapping BSS scan parameters
1201 */
1202static int
1203mwifiex_interpret_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1204 struct mwifiex_bssdescriptor *bss_entry,
1205 u8 **beacon_info, u32 *bytes_left)
1206{
1207 int ret = 0;
1208 u8 element_id;
1209 struct ieee_types_fh_param_set *fh_param_set;
1210 struct ieee_types_ds_param_set *ds_param_set;
1211 struct ieee_types_cf_param_set *cf_param_set;
1212 struct ieee_types_ibss_param_set *ibss_param_set;
1213 struct mwifiex_802_11_fixed_ies fixed_ie;
1214 u8 *current_ptr;
1215 u8 *rate;
1216 u8 element_len;
1217 u16 total_ie_len;
1218 u8 bytes_to_copy;
1219 u8 rate_size;
1220 u16 beacon_size;
1221 u8 found_data_rate_ie;
1222 u32 bytes_left_for_current_beacon;
1223 struct ieee_types_vendor_specific *vendor_ie;
1224 const u8 wpa_oui[4] = { 0x00, 0x50, 0xf2, 0x01 };
1225 const u8 wmm_oui[4] = { 0x00, 0x50, 0xf2, 0x02 };
1226
1227 found_data_rate_ie = false;
1228 rate_size = 0;
1229 beacon_size = 0;
1230
1231 if (*bytes_left >= sizeof(beacon_size)) {
1232 /* Extract & convert beacon size from the command buffer */
1233 memcpy(&beacon_size, *beacon_info, sizeof(beacon_size));
1234 *bytes_left -= sizeof(beacon_size);
1235 *beacon_info += sizeof(beacon_size);
1236 }
1237
1238 if (!beacon_size || beacon_size > *bytes_left) {
1239 *beacon_info += *bytes_left;
1240 *bytes_left = 0;
1241 return -1;
1242 }
1243
1244 /* Initialize the current working beacon pointer for this BSS
1245 iteration */
1246 current_ptr = *beacon_info;
1247
1248 /* Advance the return beacon pointer past the current beacon */
1249 *beacon_info += beacon_size;
1250 *bytes_left -= beacon_size;
1251
1252 bytes_left_for_current_beacon = beacon_size;
1253
1254 memcpy(bss_entry->mac_address, current_ptr, ETH_ALEN);
1255 dev_dbg(adapter->dev, "info: InterpretIE: AP MAC Addr: %pM\n",
1256 bss_entry->mac_address);
1257
1258 current_ptr += ETH_ALEN;
1259 bytes_left_for_current_beacon -= ETH_ALEN;
1260
1261 if (bytes_left_for_current_beacon < 12) {
1262 dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
1263 return -1;
1264 }
1265
1266 /*
1267 * Next 4 fields are RSSI, time stamp, beacon interval,
1268 * and capability information
1269 */
1270
1271 /* RSSI is 1 byte long */
1272 bss_entry->rssi = (s32) (*current_ptr);
1273 dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", *current_ptr);
1274 current_ptr += 1;
1275 bytes_left_for_current_beacon -= 1;
1276
1277 /*
1278 * The RSSI is not part of the beacon/probe response. After we have
1279 * advanced current_ptr past the RSSI field, save the remaining
1280 * data for use at the application layer
1281 */
1282 bss_entry->beacon_buf = current_ptr;
1283 bss_entry->beacon_buf_size = bytes_left_for_current_beacon;
1284
1285 /* Time stamp is 8 bytes long */
1286 memcpy(fixed_ie.time_stamp, current_ptr, 8);
1287 memcpy(bss_entry->time_stamp, current_ptr, 8);
1288 current_ptr += 8;
1289 bytes_left_for_current_beacon -= 8;
1290
1291 /* Beacon interval is 2 bytes long */
1292 memcpy(&fixed_ie.beacon_interval, current_ptr, 2);
1293 bss_entry->beacon_period = le16_to_cpu(fixed_ie.beacon_interval);
1294 current_ptr += 2;
1295 bytes_left_for_current_beacon -= 2;
1296
1297 /* Capability information is 2 bytes long */
1298 memcpy(&fixed_ie.capabilities, current_ptr, 2);
1299 dev_dbg(adapter->dev, "info: InterpretIE: fixed_ie.capabilities=0x%X\n",
1300 fixed_ie.capabilities);
1301 bss_entry->cap_info_bitmap = le16_to_cpu(fixed_ie.capabilities);
1302 current_ptr += 2;
1303 bytes_left_for_current_beacon -= 2;
1304
1305 /* The rest of the current buffer holds IEs */
1306 dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
1307 bytes_left_for_current_beacon);
1308
1309 if (bss_entry->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
1310 dev_dbg(adapter->dev, "info: InterpretIE: AP WEP enabled\n");
1311 bss_entry->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
1312 } else {
1313 bss_entry->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
1314 }
1315
1316 if (bss_entry->cap_info_bitmap & WLAN_CAPABILITY_IBSS)
1317 bss_entry->bss_mode = MWIFIEX_BSS_MODE_IBSS;
1318 else
1319 bss_entry->bss_mode = MWIFIEX_BSS_MODE_INFRA;
1320
1321
1322 /* Process variable IE */
1323 while (bytes_left_for_current_beacon >= 2) {
1324 element_id = *current_ptr;
1325 element_len = *(current_ptr + 1);
1326 total_ie_len = element_len + sizeof(struct ieee_types_header);
1327
1328 if (bytes_left_for_current_beacon < total_ie_len) {
1329 dev_err(adapter->dev, "err: InterpretIE: in processing"
1330 " IE, bytes left < IE length\n");
1331 bytes_left_for_current_beacon = 0;
1332 ret = -1;
1333 continue;
1334 }
1335 switch (element_id) {
1336 case WLAN_EID_SSID:
1337 bss_entry->ssid.ssid_len = element_len;
1338 memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
1339 element_len);
1340 dev_dbg(adapter->dev, "info: InterpretIE: ssid: %-32s\n",
1341 bss_entry->ssid.ssid);
1342 break;
1343
1344 case WLAN_EID_SUPP_RATES:
1345 memcpy(bss_entry->data_rates, current_ptr + 2,
1346 element_len);
1347 memcpy(bss_entry->supported_rates, current_ptr + 2,
1348 element_len);
1349 rate_size = element_len;
1350 found_data_rate_ie = true;
1351 break;
1352
1353 case WLAN_EID_FH_PARAMS:
1354 fh_param_set =
1355 (struct ieee_types_fh_param_set *) current_ptr;
1356 memcpy(&bss_entry->phy_param_set.fh_param_set,
1357 fh_param_set,
1358 sizeof(struct ieee_types_fh_param_set));
1359 break;
1360
1361 case WLAN_EID_DS_PARAMS:
1362 ds_param_set =
1363 (struct ieee_types_ds_param_set *) current_ptr;
1364
1365 bss_entry->channel = ds_param_set->current_chan;
1366
1367 memcpy(&bss_entry->phy_param_set.ds_param_set,
1368 ds_param_set,
1369 sizeof(struct ieee_types_ds_param_set));
1370 break;
1371
1372 case WLAN_EID_CF_PARAMS:
1373 cf_param_set =
1374 (struct ieee_types_cf_param_set *) current_ptr;
1375 memcpy(&bss_entry->ss_param_set.cf_param_set,
1376 cf_param_set,
1377 sizeof(struct ieee_types_cf_param_set));
1378 break;
1379
1380 case WLAN_EID_IBSS_PARAMS:
1381 ibss_param_set =
1382 (struct ieee_types_ibss_param_set *)
1383 current_ptr;
1384 memcpy(&bss_entry->ss_param_set.ibss_param_set,
1385 ibss_param_set,
1386 sizeof(struct ieee_types_ibss_param_set));
1387 break;
1388
1389 case WLAN_EID_ERP_INFO:
1390 bss_entry->erp_flags = *(current_ptr + 2);
1391 break;
1392
1393 case WLAN_EID_EXT_SUPP_RATES:
1394 /*
1395 * Only process extended supported rate
1396 * if data rate is already found.
1397 * Data rate IE should come before
1398 * extended supported rate IE
1399 */
1400 if (found_data_rate_ie) {
1401 if ((element_len + rate_size) >
1402 MWIFIEX_SUPPORTED_RATES)
1403 bytes_to_copy =
1404 (MWIFIEX_SUPPORTED_RATES -
1405 rate_size);
1406 else
1407 bytes_to_copy = element_len;
1408
1409 rate = (u8 *) bss_entry->data_rates;
1410 rate += rate_size;
1411 memcpy(rate, current_ptr + 2, bytes_to_copy);
1412
1413 rate = (u8 *) bss_entry->supported_rates;
1414 rate += rate_size;
1415 memcpy(rate, current_ptr + 2, bytes_to_copy);
1416 }
1417 break;
1418
1419 case WLAN_EID_VENDOR_SPECIFIC:
1420 vendor_ie = (struct ieee_types_vendor_specific *)
1421 current_ptr;
1422
1423 if (!memcmp
1424 (vendor_ie->vend_hdr.oui, wpa_oui,
1425 sizeof(wpa_oui))) {
1426 bss_entry->bcn_wpa_ie =
1427 (struct ieee_types_vendor_specific *)
1428 current_ptr;
1429 bss_entry->wpa_offset = (u16) (current_ptr -
1430 bss_entry->beacon_buf);
1431 } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
1432 sizeof(wmm_oui))) {
1433 if (total_ie_len ==
1434 sizeof(struct ieee_types_wmm_parameter)
1435 || total_ie_len ==
1436 sizeof(struct ieee_types_wmm_info))
1437 /*
1438 * Only accept and copy the WMM IE if
1439 * it matches the size expected for the
1440 * WMM Info IE or the WMM Parameter IE.
1441 */
1442 memcpy((u8 *) &bss_entry->wmm_ie,
1443 current_ptr, total_ie_len);
1444 }
1445 break;
1446 case WLAN_EID_RSN:
1447 bss_entry->bcn_rsn_ie =
1448 (struct ieee_types_generic *) current_ptr;
1449 bss_entry->rsn_offset = (u16) (current_ptr -
1450 bss_entry->beacon_buf);
1451 break;
1452 case WLAN_EID_BSS_AC_ACCESS_DELAY:
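			/* Element ID 68 is also used by the WAPI
			 * information element, which appears to be why
			 * the WAPI IE is captured under this case label.
			 */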
1453 bss_entry->bcn_wapi_ie =
1454 (struct ieee_types_generic *) current_ptr;
1455 bss_entry->wapi_offset = (u16) (current_ptr -
1456 bss_entry->beacon_buf);
1457 break;
1458 case WLAN_EID_HT_CAPABILITY:
1459 bss_entry->bcn_ht_cap = (struct ieee80211_ht_cap *)
1460 (current_ptr +
1461 sizeof(struct ieee_types_header));
1462 bss_entry->ht_cap_offset = (u16) (current_ptr +
1463 sizeof(struct ieee_types_header) -
1464 bss_entry->beacon_buf);
1465 break;
1466 case WLAN_EID_HT_INFORMATION:
1467 bss_entry->bcn_ht_info = (struct ieee80211_ht_info *)
1468 (current_ptr +
1469 sizeof(struct ieee_types_header));
1470 bss_entry->ht_info_offset = (u16) (current_ptr +
1471 sizeof(struct ieee_types_header) -
1472 bss_entry->beacon_buf);
1473 break;
1474 case WLAN_EID_BSS_COEX_2040:
1475 bss_entry->bcn_bss_co_2040 = (u8 *) (current_ptr +
1476 sizeof(struct ieee_types_header));
1477 bss_entry->bss_co_2040_offset = (u16) (current_ptr +
1478 sizeof(struct ieee_types_header) -
1479 bss_entry->beacon_buf);
1480 break;
1481 case WLAN_EID_EXT_CAPABILITY:
1482 bss_entry->bcn_ext_cap = (u8 *) (current_ptr +
1483 sizeof(struct ieee_types_header));
1484 bss_entry->ext_cap_offset = (u16) (current_ptr +
1485 sizeof(struct ieee_types_header) -
1486 bss_entry->beacon_buf);
1487 break;
1488 case WLAN_EID_OVERLAP_BSS_SCAN_PARAM:
1489 bss_entry->bcn_obss_scan =
1490 (struct ieee_types_obss_scan_param *)
1491 current_ptr;
1492 bss_entry->overlap_bss_offset = (u16) (current_ptr -
1493 bss_entry->beacon_buf);
1494 break;
1495 default:
1496 break;
1497 }
1498
1499 current_ptr += element_len + 2;
1500
1501 /* Need to account for IE ID and IE Len */
1502 bytes_left_for_current_beacon -= (element_len + 2);
1503
1504	} /* while (bytes_left_for_current_beacon >= 2) */
1505 return ret;
1506}
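/*
 * Illustrative sketch, not part of the driver: the parser above walks
 * 802.11 information elements laid out as { id, len, payload } byte
 * records, rejecting any element whose declared length overruns the
 * bytes that remain. A minimal stand-alone version of the same
 * bounds-checked walk (standard C only, names are made up here):
 */
#if 0	/* example only, compiled out */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int walk_ies(const uint8_t *buf, size_t bytes_left)
{
	while (bytes_left >= 2) {
		uint8_t id = buf[0];
		uint8_t len = buf[1];

		/* The declared length must fit in what is left after
		 * the two-byte header, else the buffer is malformed. */
		if ((size_t)len + 2 > bytes_left)
			return -1;

		printf("IE id=%u len=%u\n", id, len);

		buf += len + 2;		/* skip header and payload */
		bytes_left -= len + 2;
	}
	return 0;
}
#endif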
1507
1508/*
1509 * This function adjusts the pointers used in beacon buffers to reflect
1510 * shifts.
1511 *
1512 * The memory allocated for beacon buffers is a single fixed-size area in
1513 * which all the saved beacons must be stored. New beacons are added in the
1514 * free portion of this memory, space permitting, while duplicate beacon
1515 * buffers are placed at the same start location. However, since a duplicate
1516 * beacon buffer may not match the size of the old one, all the following
1517 * buffers in the memory must be shifted to either make space or to fill
1518 * freed-up space.
1519 *
1520 * This function is used to update the beacon buffer pointers that are past
1521 * an existing beacon buffer that is replaced with a new one of a different
1522 * size. The pointers are shifted by a fixed amount, either forward or
1523 * backward.
1524 *
1525 * The following pointers in every affected beacon buffer are changed, if
1526 * present -
1527 * - WPA IE pointer
1528 * - RSN IE pointer
1529 * - WAPI IE pointer
1530 * - HT capability IE pointer
1531 * - HT information IE pointer
1532 * - BSS coexistence 20/40 IE pointer
1533 * - Extended capability IE pointer
1534 * - Overlapping BSS scan parameter IE pointer
1535 */
1536static void
1537mwifiex_adjust_beacon_buffer_ptrs(struct mwifiex_private *priv, u8 advance,
1538 u8 *bcn_store, u32 rem_bcn_size,
1539 u32 num_of_ent)
1540{
1541 struct mwifiex_adapter *adapter = priv->adapter;
1542 u32 adj_idx;
1543 for (adj_idx = 0; adj_idx < num_of_ent; adj_idx++) {
1544 if (adapter->scan_table[adj_idx].beacon_buf > bcn_store) {
1545
1546 if (advance)
1547 adapter->scan_table[adj_idx].beacon_buf +=
1548 rem_bcn_size;
1549 else
1550 adapter->scan_table[adj_idx].beacon_buf -=
1551 rem_bcn_size;
1552
1553 if (adapter->scan_table[adj_idx].bcn_wpa_ie)
1554 adapter->scan_table[adj_idx].bcn_wpa_ie =
1555 (struct ieee_types_vendor_specific *)
1556 (adapter->scan_table[adj_idx].beacon_buf +
1557 adapter->scan_table[adj_idx].wpa_offset);
1558 if (adapter->scan_table[adj_idx].bcn_rsn_ie)
1559 adapter->scan_table[adj_idx].bcn_rsn_ie =
1560 (struct ieee_types_generic *)
1561 (adapter->scan_table[adj_idx].beacon_buf +
1562 adapter->scan_table[adj_idx].rsn_offset);
1563 if (adapter->scan_table[adj_idx].bcn_wapi_ie)
1564 adapter->scan_table[adj_idx].bcn_wapi_ie =
1565 (struct ieee_types_generic *)
1566 (adapter->scan_table[adj_idx].beacon_buf +
1567 adapter->scan_table[adj_idx].wapi_offset);
1568 if (adapter->scan_table[adj_idx].bcn_ht_cap)
1569 adapter->scan_table[adj_idx].bcn_ht_cap =
1570 (struct ieee80211_ht_cap *)
1571 (adapter->scan_table[adj_idx].beacon_buf +
1572 adapter->scan_table[adj_idx].ht_cap_offset);
1573
1574 if (adapter->scan_table[adj_idx].bcn_ht_info)
1575 adapter->scan_table[adj_idx].bcn_ht_info =
1576 (struct ieee80211_ht_info *)
1577 (adapter->scan_table[adj_idx].beacon_buf +
1578 adapter->scan_table[adj_idx].ht_info_offset);
1579 if (adapter->scan_table[adj_idx].bcn_bss_co_2040)
1580 adapter->scan_table[adj_idx].bcn_bss_co_2040 =
1581 (u8 *)
1582 (adapter->scan_table[adj_idx].beacon_buf +
1583 adapter->scan_table[adj_idx].bss_co_2040_offset);
1584 if (adapter->scan_table[adj_idx].bcn_ext_cap)
1585 adapter->scan_table[adj_idx].bcn_ext_cap =
1586 (u8 *)
1587 (adapter->scan_table[adj_idx].beacon_buf +
1588 adapter->scan_table[adj_idx].ext_cap_offset);
1589 if (adapter->scan_table[adj_idx].bcn_obss_scan)
1590 adapter->scan_table[adj_idx].bcn_obss_scan =
1591 (struct ieee_types_obss_scan_param *)
1592 (adapter->scan_table[adj_idx].beacon_buf +
1593 adapter->scan_table[adj_idx].overlap_bss_offset);
1594 }
1595 }
1596}
1597
1598/*
1599 * This function updates the pointers used in the beacon buffer for a given
1600 * BSS descriptor to reflect shifts.
1601 *
1602 * The following pointers are updated, if present -
1603 * - WPA IE pointer
1604 * - RSN IE pointer
1605 * - WAPI IE pointer
1606 * - HT capability IE pointer
1607 * - HT information IE pointer
1608 * - BSS coexistence 20/40 IE pointer
1609 * - Extended capability IE pointer
1610 * - Overlapping BSS scan parameter IE pointer
1611 */
1612static void
1613mwifiex_update_beacon_buffer_ptrs(struct mwifiex_bssdescriptor *beacon)
1614{
1615 if (beacon->bcn_wpa_ie)
1616 beacon->bcn_wpa_ie = (struct ieee_types_vendor_specific *)
1617 (beacon->beacon_buf + beacon->wpa_offset);
1618 if (beacon->bcn_rsn_ie)
1619 beacon->bcn_rsn_ie = (struct ieee_types_generic *)
1620 (beacon->beacon_buf + beacon->rsn_offset);
1621 if (beacon->bcn_wapi_ie)
1622 beacon->bcn_wapi_ie = (struct ieee_types_generic *)
1623 (beacon->beacon_buf + beacon->wapi_offset);
1624 if (beacon->bcn_ht_cap)
1625 beacon->bcn_ht_cap = (struct ieee80211_ht_cap *)
1626 (beacon->beacon_buf + beacon->ht_cap_offset);
1627 if (beacon->bcn_ht_info)
1628 beacon->bcn_ht_info = (struct ieee80211_ht_info *)
1629 (beacon->beacon_buf + beacon->ht_info_offset);
1630 if (beacon->bcn_bss_co_2040)
1631 beacon->bcn_bss_co_2040 = (u8 *) (beacon->beacon_buf +
1632 beacon->bss_co_2040_offset);
1633 if (beacon->bcn_ext_cap)
1634 beacon->bcn_ext_cap = (u8 *) (beacon->beacon_buf +
1635 beacon->ext_cap_offset);
1636 if (beacon->bcn_obss_scan)
1637 beacon->bcn_obss_scan = (struct ieee_types_obss_scan_param *)
1638 (beacon->beacon_buf + beacon->overlap_bss_offset);
1639}
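/*
 * Illustrative sketch, not part of the driver: both fix-up helpers
 * above rely on each cached IE pointer being paired with its byte
 * offset from the start of the entry's beacon buffer, so that after
 * the buffer moves the pointer can be rebuilt as base + offset. A
 * minimal stand-alone model of that rebase (names are made up here):
 */
#if 0	/* example only, compiled out */
#include <stdint.h>
#include <stddef.h>

struct entry {
	uint8_t *beacon_buf;	/* base of this entry's raw beacon */
	uint8_t *rsn_ie;	/* cached pointer into beacon_buf */
	uint16_t rsn_offset;	/* rsn_ie - beacon_buf, kept in sync */
};

static void rebase_entry(struct entry *e, ptrdiff_t shift)
{
	e->beacon_buf += shift;	/* the raw bytes moved by 'shift' */
	if (e->rsn_ie)		/* rebuild from the stored offset */
		e->rsn_ie = e->beacon_buf + e->rsn_offset;
}
#endif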
1640
1641/*
1642 * This function stores a beacon or probe response for a BSS returned
1643 * in the scan.
1644 *
1645 * This stores a new scan response or an update for a previous scan response.
1646 * New entries need to verify that they do not exceed the total amount of
1647 * memory allocated for the table.
1648 *
1649 * Replacement entries need to take into consideration the amount of space
1650 * currently allocated for the beacon/probe response and adjust the entry
1651 * as needed.
1652 *
1653 * A small amount of extra pad (SCAN_BEACON_ENTRY_PAD) is generally reserved
1654 * for an entry in case it is a beacon since a probe response for the
1655 * network will be larger per the standard. This helps to reduce the
1656 * amount of memory copying to fit a new probe response into an entry
1657 * already occupied by a network's previously stored beacon.
1658 */
1659static void
1660mwifiex_ret_802_11_scan_store_beacon(struct mwifiex_private *priv,
1661 u32 beacon_idx, u32 num_of_ent,
1662 struct mwifiex_bssdescriptor *new_beacon)
1663{
1664 struct mwifiex_adapter *adapter = priv->adapter;
1665 u8 *bcn_store;
1666 u32 new_bcn_size;
1667 u32 old_bcn_size;
1668 u32 bcn_space;
1669
1670 if (adapter->scan_table[beacon_idx].beacon_buf) {
1671
1672 new_bcn_size = new_beacon->beacon_buf_size;
1673 old_bcn_size = adapter->scan_table[beacon_idx].beacon_buf_size;
1674 bcn_space = adapter->scan_table[beacon_idx].beacon_buf_size_max;
1675 bcn_store = adapter->scan_table[beacon_idx].beacon_buf;
1676
1677 /* Set the max to be the same as current entry unless changed
1678 below */
1679 new_beacon->beacon_buf_size_max = bcn_space;
1680 if (new_bcn_size == old_bcn_size) {
1681 /*
1682 * Beacon is the same size as the previous entry.
1683 * Replace the previous contents with the scan result
1684 */
1685 memcpy(bcn_store, new_beacon->beacon_buf,
1686 new_beacon->beacon_buf_size);
1687
1688 } else if (new_bcn_size <= bcn_space) {
1689 /*
1690 * New beacon size will fit in the amount of space
1691 * we have previously allocated for it
1692 */
1693
1694 /* Copy the new beacon buffer entry over the old one */
1695 memcpy(bcn_store, new_beacon->beacon_buf, new_bcn_size);
1696
1697 /*
1698 * If the old beacon size was less than the maximum
1699			 * we had allotted for the entry, and the new entry
1700			 * is even smaller, reset the max size to the old
1701			 * beacon size and compress the storage space
1702			 * (leaving a new pad space of (old_bcn_size -
1703			 * new_bcn_size)).
1704 */
1705 if (old_bcn_size < bcn_space
1706 && new_bcn_size <= old_bcn_size) {
1707 /*
1708				 * The old beacon size is smaller than the
1709				 * allotted storage size. Shrink the allotted
1710				 * storage space.
1711 */
1712 dev_dbg(adapter->dev, "info: AppControl:"
1713 " smaller duplicate beacon "
1714 "(%d), old = %d, new = %d, space = %d,"
1715					" left = %d\n",
1716 beacon_idx, old_bcn_size, new_bcn_size,
1717 bcn_space,
1718 (int)(sizeof(adapter->bcn_buf) -
1719 (adapter->bcn_buf_end -
1720 adapter->bcn_buf)));
1721
1722 /*
1723 * memmove (since the memory overlaps) the
1724 * data after the beacon we just stored to the
1725 * end of the current beacon. This cleans up
1726 * any unused space the old larger beacon was
1727 * using in the buffer
1728 */
1729 memmove(bcn_store + old_bcn_size,
1730 bcn_store + bcn_space,
1731 adapter->bcn_buf_end - (bcn_store +
1732 bcn_space));
1733
1734 /*
1735 * Decrement the end pointer by the difference
1736 * between the old larger size and the new
1737 * smaller size since we are using less space
1738 * due to the new beacon being smaller
1739 */
1740 adapter->bcn_buf_end -=
1741 (bcn_space - old_bcn_size);
1742
1743 /* Set the maximum storage size to the old
1744 beacon size */
1745 new_beacon->beacon_buf_size_max = old_bcn_size;
1746
1747 /* Adjust beacon buffer pointers that are past
1748 the current */
1749 mwifiex_adjust_beacon_buffer_ptrs(priv, 0,
1750 bcn_store, (bcn_space - old_bcn_size),
1751 num_of_ent);
1752 }
1753 } else if (adapter->bcn_buf_end + (new_bcn_size - bcn_space)
1754 < (adapter->bcn_buf + sizeof(adapter->bcn_buf))) {
1755 /*
1756 * Beacon is larger than space previously allocated
1757 * (bcn_space) and there is enough space left in the
1758 * beaconBuffer to store the additional data
1759 */
1760 dev_dbg(adapter->dev, "info: AppControl:"
1761 " larger duplicate beacon (%d), "
1762 "old = %d, new = %d, space = %d, left = %d\n",
1763 beacon_idx, old_bcn_size, new_bcn_size,
1764 bcn_space,
1765 (int)(sizeof(adapter->bcn_buf) -
1766 (adapter->bcn_buf_end -
1767 adapter->bcn_buf)));
1768
1769 /*
1770 * memmove (since the memory overlaps) the data
1771 * after the beacon we just stored to the end of
1772 * the current beacon. This moves the data for
1773 * the beacons after this further in memory to
1774 * make space for the new larger beacon we are
1775 * about to copy in.
1776 */
1777 memmove(bcn_store + new_bcn_size,
1778 bcn_store + bcn_space,
1779 adapter->bcn_buf_end - (bcn_store + bcn_space));
1780
1781 /* Copy the new beacon buffer entry over the old one */
1782 memcpy(bcn_store, new_beacon->beacon_buf, new_bcn_size);
1783
1784 /* Move the beacon end pointer by the amount of new
1785 beacon data we are adding */
1786 adapter->bcn_buf_end += (new_bcn_size - bcn_space);
1787
1788 /*
1789			 * This entry is bigger than the allotted max space
1790 * previously reserved. Increase the max space to
1791 * be equal to the new beacon size
1792 */
1793 new_beacon->beacon_buf_size_max = new_bcn_size;
1794
1795 /* Adjust beacon buffer pointers that are past the
1796 current */
1797 mwifiex_adjust_beacon_buffer_ptrs(priv, 1, bcn_store,
1798 (new_bcn_size - bcn_space),
1799 num_of_ent);
1800 } else {
1801 /*
1802 * Beacon is larger than the previously allocated space,
1803 * but there is not enough free space to store the
1804 * additional data.
1805 */
1806			dev_err(adapter->dev, "AppControl: larger duplicate"
1807				" beacon (%d), old = %d, new = %d, space = %d,"
1808 " left = %d\n", beacon_idx, old_bcn_size,
1809 new_bcn_size, bcn_space,
1810 (int)(sizeof(adapter->bcn_buf) -
1811 (adapter->bcn_buf_end - adapter->bcn_buf)));
1812
1813 /* Storage failure, keep old beacon intact */
1814 new_beacon->beacon_buf_size = old_bcn_size;
1815 if (new_beacon->bcn_wpa_ie)
1816 new_beacon->wpa_offset =
1817 adapter->scan_table[beacon_idx].
1818 wpa_offset;
1819 if (new_beacon->bcn_rsn_ie)
1820 new_beacon->rsn_offset =
1821 adapter->scan_table[beacon_idx].
1822 rsn_offset;
1823 if (new_beacon->bcn_wapi_ie)
1824 new_beacon->wapi_offset =
1825 adapter->scan_table[beacon_idx].
1826 wapi_offset;
1827 if (new_beacon->bcn_ht_cap)
1828 new_beacon->ht_cap_offset =
1829 adapter->scan_table[beacon_idx].
1830 ht_cap_offset;
1831 if (new_beacon->bcn_ht_info)
1832 new_beacon->ht_info_offset =
1833 adapter->scan_table[beacon_idx].
1834 ht_info_offset;
1835 if (new_beacon->bcn_bss_co_2040)
1836 new_beacon->bss_co_2040_offset =
1837 adapter->scan_table[beacon_idx].
1838 bss_co_2040_offset;
1839 if (new_beacon->bcn_ext_cap)
1840 new_beacon->ext_cap_offset =
1841 adapter->scan_table[beacon_idx].
1842 ext_cap_offset;
1843 if (new_beacon->bcn_obss_scan)
1844 new_beacon->overlap_bss_offset =
1845 adapter->scan_table[beacon_idx].
1846 overlap_bss_offset;
1847 }
1848 /* Point the new entry to its permanent storage space */
1849 new_beacon->beacon_buf = bcn_store;
1850 mwifiex_update_beacon_buffer_ptrs(new_beacon);
1851 } else {
1852 /*
1853 * No existing beacon data exists for this entry, check to see
1854 * if we can fit it in the remaining space
1855 */
1856 if (adapter->bcn_buf_end + new_beacon->beacon_buf_size +
1857 SCAN_BEACON_ENTRY_PAD < (adapter->bcn_buf +
1858 sizeof(adapter->bcn_buf))) {
1859
1860 /*
1861 * Copy the beacon buffer data from the local entry to
1862 * the adapter dev struct buffer space used to store
1863 * the raw beacon data for each entry in the scan table
1864 */
1865 memcpy(adapter->bcn_buf_end, new_beacon->beacon_buf,
1866 new_beacon->beacon_buf_size);
1867
1868 /* Update the beacon ptr to point to the table save
1869 area */
1870 new_beacon->beacon_buf = adapter->bcn_buf_end;
1871 new_beacon->beacon_buf_size_max =
1872 (new_beacon->beacon_buf_size +
1873 SCAN_BEACON_ENTRY_PAD);
1874
1875 mwifiex_update_beacon_buffer_ptrs(new_beacon);
1876
1877 /* Increment the end pointer by the size reserved */
1878 adapter->bcn_buf_end += new_beacon->beacon_buf_size_max;
1879
1880 dev_dbg(adapter->dev, "info: AppControl: beacon[%02d]"
1881 " sz=%03d, used = %04d, left = %04d\n",
1882 beacon_idx,
1883 new_beacon->beacon_buf_size,
1884 (int)(adapter->bcn_buf_end - adapter->bcn_buf),
1885 (int)(sizeof(adapter->bcn_buf) -
1886 (adapter->bcn_buf_end -
1887 adapter->bcn_buf)));
1888 } else {
1889 /* No space for new beacon */
1890 dev_dbg(adapter->dev, "info: AppControl: no space for"
1891 " beacon (%d): %pM sz=%03d, left=%03d\n",
1892 beacon_idx, new_beacon->mac_address,
1893 new_beacon->beacon_buf_size,
1894 (int)(sizeof(adapter->bcn_buf) -
1895 (adapter->bcn_buf_end -
1896 adapter->bcn_buf)));
1897
1898 /* Storage failure; clear storage records for this
1899 bcn */
1900 new_beacon->beacon_buf = NULL;
1901 new_beacon->beacon_buf_size = 0;
1902 new_beacon->beacon_buf_size_max = 0;
1903 new_beacon->bcn_wpa_ie = NULL;
1904 new_beacon->wpa_offset = 0;
1905 new_beacon->bcn_rsn_ie = NULL;
1906 new_beacon->rsn_offset = 0;
1907 new_beacon->bcn_wapi_ie = NULL;
1908 new_beacon->wapi_offset = 0;
1909 new_beacon->bcn_ht_cap = NULL;
1910 new_beacon->ht_cap_offset = 0;
1911 new_beacon->bcn_ht_info = NULL;
1912 new_beacon->ht_info_offset = 0;
1913 new_beacon->bcn_bss_co_2040 = NULL;
1914 new_beacon->bss_co_2040_offset = 0;
1915 new_beacon->bcn_ext_cap = NULL;
1916 new_beacon->ext_cap_offset = 0;
1917 new_beacon->bcn_obss_scan = NULL;
1918 new_beacon->overlap_bss_offset = 0;
1919 }
1920 }
1921}
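/*
 * Illustrative sketch, not part of the driver: the three replacement
 * cases above (same size, smaller, larger-with-room) reduce to editing
 * one record inside a packed byte arena: slide the tail with memmove,
 * copy the new bytes in, and move the end-of-data pointer. A
 * stand-alone model, assuming 'end' tracks the first free byte
 * (names are made up here):
 */
#if 0	/* example only, compiled out */
#include <string.h>
#include <stddef.h>

struct arena {
	unsigned char buf[4096];
	unsigned char *end;		/* first unused byte in buf */
};

/* Replace the 'old_len'-byte record at 'rec' with 'new_len' bytes of
 * 'data'. Returns 0 on success, -1 if the arena cannot grow enough. */
static int arena_replace(struct arena *a, unsigned char *rec,
			 size_t old_len, const unsigned char *data,
			 size_t new_len)
{
	size_t tail = a->end - (rec + old_len);	/* bytes after record */

	if (new_len > old_len &&
	    (size_t)(a->buf + sizeof(a->buf) - a->end) < new_len - old_len)
		return -1;		/* not enough free space left */

	/* Slide the tail to open or close the gap (regions overlap). */
	memmove(rec + new_len, rec + old_len, tail);
	memcpy(rec, data, new_len);
	a->end += new_len;
	a->end -= old_len;		/* net size change */
	return 0;
}
#endif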
1922
1923/*
1924 * This function restores a beacon buffer of the current BSS descriptor.
1925 */
1926static void mwifiex_restore_curr_bcn(struct mwifiex_private *priv)
1927{
1928 struct mwifiex_adapter *adapter = priv->adapter;
1929 struct mwifiex_bssdescriptor *curr_bss =
1930 &priv->curr_bss_params.bss_descriptor;
1931 unsigned long flags;
1932
1933 if (priv->curr_bcn_buf &&
1934 ((adapter->bcn_buf_end + priv->curr_bcn_size) <
1935 (adapter->bcn_buf + sizeof(adapter->bcn_buf)))) {
1936 spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
1937
1938 /* restore the current beacon buffer */
1939 memcpy(adapter->bcn_buf_end, priv->curr_bcn_buf,
1940 priv->curr_bcn_size);
1941 curr_bss->beacon_buf = adapter->bcn_buf_end;
1942 curr_bss->beacon_buf_size = priv->curr_bcn_size;
1943 adapter->bcn_buf_end += priv->curr_bcn_size;
1944
1945 /* adjust the pointers in the current BSS descriptor */
1946 if (curr_bss->bcn_wpa_ie)
1947 curr_bss->bcn_wpa_ie =
1948 (struct ieee_types_vendor_specific *)
1949 (curr_bss->beacon_buf +
1950 curr_bss->wpa_offset);
1951
1952 if (curr_bss->bcn_rsn_ie)
1953 curr_bss->bcn_rsn_ie = (struct ieee_types_generic *)
1954 (curr_bss->beacon_buf +
1955 curr_bss->rsn_offset);
1956
1957 if (curr_bss->bcn_ht_cap)
1958 curr_bss->bcn_ht_cap = (struct ieee80211_ht_cap *)
1959 (curr_bss->beacon_buf +
1960 curr_bss->ht_cap_offset);
1961
1962 if (curr_bss->bcn_ht_info)
1963 curr_bss->bcn_ht_info = (struct ieee80211_ht_info *)
1964 (curr_bss->beacon_buf +
1965 curr_bss->ht_info_offset);
1966
1967 if (curr_bss->bcn_bss_co_2040)
1968 curr_bss->bcn_bss_co_2040 =
1969 (u8 *) (curr_bss->beacon_buf +
1970 curr_bss->bss_co_2040_offset);
1971
1972 if (curr_bss->bcn_ext_cap)
1973 curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf +
1974 curr_bss->ext_cap_offset);
1975
1976 if (curr_bss->bcn_obss_scan)
1977 curr_bss->bcn_obss_scan =
1978 (struct ieee_types_obss_scan_param *)
1979 (curr_bss->beacon_buf +
1980 curr_bss->overlap_bss_offset);
1981
1982 spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
1983
1984 dev_dbg(adapter->dev, "info: current beacon restored %d\n",
1985 priv->curr_bcn_size);
1986 } else {
1987 dev_warn(adapter->dev,
1988 "curr_bcn_buf not saved or bcn_buf has no space\n");
1989 }
1990}
1991
1992/*
1993 * This function post processes the scan table after a new scan command has
1994 * completed.
1995 *
1996 * It inspects each entry of the scan table and tries to find an entry that
1997 * matches with our current associated/joined network from the scan. If
1998 * one is found, the stored copy of the BSS descriptor of our current network
1999 * is updated.
2000 *
2001 * It also dumps the current scan table contents to the debug log once processing is complete.
2002 */
2003static void
2004mwifiex_process_scan_results(struct mwifiex_private *priv)
2005{
2006 struct mwifiex_adapter *adapter = priv->adapter;
2007 s32 j;
2008 u32 i;
2009 unsigned long flags;
2010
2011 if (priv->media_connected) {
2012
2013 j = mwifiex_find_ssid_in_list(priv, &priv->curr_bss_params.
2014 bss_descriptor.ssid,
2015 priv->curr_bss_params.
2016 bss_descriptor.mac_address,
2017 priv->bss_mode);
2018
2019 if (j >= 0) {
2020 spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
2021 priv->curr_bss_params.bss_descriptor.bcn_wpa_ie = NULL;
2022 priv->curr_bss_params.bss_descriptor.wpa_offset = 0;
2023 priv->curr_bss_params.bss_descriptor.bcn_rsn_ie = NULL;
2024 priv->curr_bss_params.bss_descriptor.rsn_offset = 0;
2025 priv->curr_bss_params.bss_descriptor.bcn_wapi_ie = NULL;
2026 priv->curr_bss_params.bss_descriptor.wapi_offset = 0;
2027 priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
2028 priv->curr_bss_params.bss_descriptor.ht_cap_offset =
2029 0;
2030 priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL;
2031 priv->curr_bss_params.bss_descriptor.ht_info_offset =
2032 0;
2033 priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 =
2034 NULL;
2035 priv->curr_bss_params.bss_descriptor.
2036 bss_co_2040_offset = 0;
2037 priv->curr_bss_params.bss_descriptor.bcn_ext_cap = NULL;
2038 priv->curr_bss_params.bss_descriptor.ext_cap_offset = 0;
2039 priv->curr_bss_params.bss_descriptor.
2040 bcn_obss_scan = NULL;
2041 priv->curr_bss_params.bss_descriptor.
2042 overlap_bss_offset = 0;
2043 priv->curr_bss_params.bss_descriptor.beacon_buf = NULL;
2044 priv->curr_bss_params.bss_descriptor.beacon_buf_size =
2045 0;
2046 priv->curr_bss_params.bss_descriptor.
2047 beacon_buf_size_max = 0;
2048
2049 dev_dbg(adapter->dev, "info: Found current ssid/bssid"
2050 " in list @ index #%d\n", j);
2051 /* Make a copy of current BSSID descriptor */
2052 memcpy(&priv->curr_bss_params.bss_descriptor,
2053 &adapter->scan_table[j],
2054 sizeof(priv->curr_bss_params.bss_descriptor));
2055
2056 mwifiex_save_curr_bcn(priv);
2057 spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
2058
2059 } else {
2060 mwifiex_restore_curr_bcn(priv);
2061 }
2062 }
2063
2064 for (i = 0; i < adapter->num_in_scan_table; i++)
2065 dev_dbg(adapter->dev, "info: scan:(%02d) %pM "
2066 "RSSI[%03d], SSID[%s]\n",
2067 i, adapter->scan_table[i].mac_address,
2068 (s32) adapter->scan_table[i].rssi,
2069 adapter->scan_table[i].ssid.ssid);
2070}
2071
2072/*
2073 * This function converts radio type scan parameter to a band configuration
2074 * to be used in join command.
2075 */
2076static u8
2077mwifiex_radio_type_to_band(u8 radio_type)
2078{
2079 u8 ret_band;
2080
2081 switch (radio_type) {
2082 case HostCmd_SCAN_RADIO_TYPE_A:
2083 ret_band = BAND_A;
2084 break;
2085 case HostCmd_SCAN_RADIO_TYPE_BG:
2086 default:
2087 ret_band = BAND_G;
2088 break;
2089 }
2090
2091 return ret_band;
2092}
2093
2094/*
2095 * This function deletes a specific indexed entry from the scan table.
2096 *
2097 * This also compacts the remaining entries and adjusts any buffering
2098 * of beacon/probe response data if needed.
2099 */
2100static void
2101mwifiex_scan_delete_table_entry(struct mwifiex_private *priv, s32 table_idx)
2102{
2103 struct mwifiex_adapter *adapter = priv->adapter;
2104 u32 del_idx;
2105 u32 beacon_buf_adj;
2106 u8 *beacon_buf;
2107
2108 /*
2109 * Shift the saved beacon buffer data for the scan table back over the
2110 * entry being removed. Update the end of buffer pointer. Save the
2111 * deleted buffer allocation size for pointer adjustments for entries
2112 * compacted after the deleted index.
2113 */
2114 beacon_buf_adj = adapter->scan_table[table_idx].beacon_buf_size_max;
2115
2116 dev_dbg(adapter->dev, "info: Scan: Delete Entry %d, beacon buffer "
2117 "removal = %d bytes\n", table_idx, beacon_buf_adj);
2118
2119 /* Check if the table entry had storage allocated for its beacon */
2120 if (beacon_buf_adj) {
2121 beacon_buf = adapter->scan_table[table_idx].beacon_buf;
2122
2123 /*
2124 * Remove the entry's buffer space, decrement the table end
2125 * pointer by the amount we are removing
2126 */
2127 adapter->bcn_buf_end -= beacon_buf_adj;
2128
2129 dev_dbg(adapter->dev, "info: scan: delete entry %d,"
2130 " compact data: %p <- %p (sz = %d)\n",
2131 table_idx, beacon_buf,
2132 beacon_buf + beacon_buf_adj,
2133 (int)(adapter->bcn_buf_end - beacon_buf));
2134
2135 /*
2136 * Compact data storage. Copy all data after the deleted
2137 * entry's end address (beacon_buf + beacon_buf_adj) back
2138 * to the original start address (beacon_buf).
2139 *
2140 * Scan table entries affected by the move will have their
2141 * entry pointer adjusted below.
2142 *
2143 * Use memmove since the dest/src memory regions overlap.
2144 */
2145 memmove(beacon_buf, beacon_buf + beacon_buf_adj,
2146 adapter->bcn_buf_end - beacon_buf);
2147 }
2148
2149 dev_dbg(adapter->dev,
2150 "info: Scan: Delete Entry %d, num_in_scan_table = %d\n",
2151 table_idx, adapter->num_in_scan_table);
2152
2153 /* Shift all of the entries after the table_idx back by one, compacting
2154 the table and removing the requested entry */
2155 for (del_idx = table_idx; (del_idx + 1) < adapter->num_in_scan_table;
2156 del_idx++) {
2157 /* Copy the next entry over this one */
2158 memcpy(adapter->scan_table + del_idx,
2159 adapter->scan_table + del_idx + 1,
2160 sizeof(struct mwifiex_bssdescriptor));
2161
2162 /*
2163 * Adjust this entry's pointer to its beacon buffer based on
2164 * the removed/compacted entry from the deleted index. Don't
2165 * decrement if the buffer pointer is NULL (no data stored for
2166 * this entry).
2167 */
2168 if (adapter->scan_table[del_idx].beacon_buf) {
2169 adapter->scan_table[del_idx].beacon_buf -=
2170 beacon_buf_adj;
2171 if (adapter->scan_table[del_idx].bcn_wpa_ie)
2172 adapter->scan_table[del_idx].bcn_wpa_ie =
2173 (struct ieee_types_vendor_specific *)
2174 (adapter->scan_table[del_idx].
2175 beacon_buf +
2176 adapter->scan_table[del_idx].
2177 wpa_offset);
2178 if (adapter->scan_table[del_idx].bcn_rsn_ie)
2179 adapter->scan_table[del_idx].bcn_rsn_ie =
2180 (struct ieee_types_generic *)
2181 (adapter->scan_table[del_idx].
2182 beacon_buf +
2183 adapter->scan_table[del_idx].
2184 rsn_offset);
2185 if (adapter->scan_table[del_idx].bcn_wapi_ie)
2186 adapter->scan_table[del_idx].bcn_wapi_ie =
2187 (struct ieee_types_generic *)
2188 (adapter->scan_table[del_idx].beacon_buf
2189 + adapter->scan_table[del_idx].
2190 wapi_offset);
2191 if (adapter->scan_table[del_idx].bcn_ht_cap)
2192 adapter->scan_table[del_idx].bcn_ht_cap =
2193 (struct ieee80211_ht_cap *)
2194 (adapter->scan_table[del_idx].beacon_buf
2195 + adapter->scan_table[del_idx].
2196 ht_cap_offset);
2197
2198 if (adapter->scan_table[del_idx].bcn_ht_info)
2199 adapter->scan_table[del_idx].bcn_ht_info =
2200 (struct ieee80211_ht_info *)
2201 (adapter->scan_table[del_idx].beacon_buf
2202 + adapter->scan_table[del_idx].
2203 ht_info_offset);
2204 if (adapter->scan_table[del_idx].bcn_bss_co_2040)
2205 adapter->scan_table[del_idx].bcn_bss_co_2040 =
2206 (u8 *)
2207 (adapter->scan_table[del_idx].beacon_buf
2208 + adapter->scan_table[del_idx].
2209 bss_co_2040_offset);
2210 if (adapter->scan_table[del_idx].bcn_ext_cap)
2211 adapter->scan_table[del_idx].bcn_ext_cap =
2212 (u8 *)
2213 (adapter->scan_table[del_idx].beacon_buf
2214 + adapter->scan_table[del_idx].
2215 ext_cap_offset);
2216 if (adapter->scan_table[del_idx].bcn_obss_scan)
2217 adapter->scan_table[del_idx].
2218 bcn_obss_scan =
2219 (struct ieee_types_obss_scan_param *)
2220 (adapter->scan_table[del_idx].beacon_buf
2221 + adapter->scan_table[del_idx].
2222 overlap_bss_offset);
2223 }
2224 }
2225
2226 /* The last entry is invalid now that it has been deleted or moved
2227 back */
2228 memset(adapter->scan_table + adapter->num_in_scan_table - 1,
2229 0x00, sizeof(struct mwifiex_bssdescriptor));
2230
2231 adapter->num_in_scan_table--;
2232}
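/*
 * Illustrative sketch, not part of the driver: once the deleted
 * entry's beacon bytes have been compacted out of the arena, the scan
 * table itself is closed up the same way. A stand-alone model of that
 * struct-array compaction, with the vacated slot cleared (names are
 * made up here):
 */
#if 0	/* example only, compiled out */
#include <string.h>
#include <stddef.h>

struct rec { int id; };

static void table_delete(struct rec *tbl, size_t *count, size_t idx)
{
	if (idx >= *count)
		return;
	/* Shift every following entry back by one slot. */
	memmove(&tbl[idx], &tbl[idx + 1],
		(*count - idx - 1) * sizeof(*tbl));
	/* The last slot is now stale; zero it and shrink the count. */
	memset(&tbl[*count - 1], 0, sizeof(*tbl));
	(*count)--;
}
#endif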
2233
2234/*
2235 * This function deletes all occurrences of a given SSID from the scan table.
2236 *
2237 * This iterates through the scan table and deletes all entries that match
2238 * the given SSID. It also compacts the remaining scan table entries.
2239 */
2240static int
2241mwifiex_scan_delete_ssid_table_entry(struct mwifiex_private *priv,
2242 struct mwifiex_802_11_ssid *del_ssid)
2243{
2244 int ret = -1;
2245 s32 table_idx;
2246
2247 dev_dbg(priv->adapter->dev, "info: scan: delete ssid entry: %-32s\n",
2248 del_ssid->ssid);
2249
2250 /* If the requested SSID is found in the table, delete it. Then keep
2251	   searching the table for multiple entries for the SSID until no
2252 more are found */
2253 while ((table_idx = mwifiex_find_ssid_in_list(priv, del_ssid, NULL,
2254 MWIFIEX_BSS_MODE_AUTO)) >=
2255 0) {
2256 dev_dbg(priv->adapter->dev,
2257 "info: Scan: Delete SSID Entry: Found Idx = %d\n",
2258 table_idx);
2259 ret = 0;
2260 mwifiex_scan_delete_table_entry(priv, table_idx);
2261 }
2262
2263 return ret;
2264}
2265
2266/*
2267 * This is an internal function used to start a scan based on an input
2268 * configuration.
2269 *
2270 * This uses the input user scan configuration information when provided in
2271 * order to send the appropriate scan commands to firmware to populate or
2272 * update the internal driver scan table.
2273 */
2274int mwifiex_scan_networks(struct mwifiex_private *priv,
2275 void *wait_buf, u16 action,
2276 const struct mwifiex_user_scan_cfg *user_scan_in,
2277 struct mwifiex_scan_resp *scan_resp)
2278{
2279 int ret = 0;
2280 struct mwifiex_adapter *adapter = priv->adapter;
2281 struct cmd_ctrl_node *cmd_node = NULL;
2282 union mwifiex_scan_cmd_config_tlv *scan_cfg_out = NULL;
2283 struct mwifiex_ie_types_chan_list_param_set *chan_list_out;
2284 u32 buf_size;
2285 struct mwifiex_chan_scan_param_set *scan_chan_list;
2286 u8 keep_previous_scan;
2287 u8 filtered_scan;
2288 u8 scan_current_chan_only;
2289 u8 max_chan_per_scan;
2290 unsigned long flags;
2291
2292 if (action == HostCmd_ACT_GEN_GET) {
2293 if (scan_resp) {
2294 scan_resp->scan_table = (u8 *) adapter->scan_table;
2295 scan_resp->num_in_scan_table =
2296 adapter->num_in_scan_table;
2297 } else {
2298 ret = -1;
2299 }
2300 return ret;
2301 }
2302
2303 if (adapter->scan_processing && action == HostCmd_ACT_GEN_SET) {
2304 dev_dbg(adapter->dev, "cmd: Scan already in process...\n");
2305 return ret;
2306 }
2307
2308 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
2309 adapter->scan_processing = true;
2310 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
2311
2312 if (priv->scan_block && action == HostCmd_ACT_GEN_SET) {
2313 dev_dbg(adapter->dev,
2314 "cmd: Scan is blocked during association...\n");
2315 return ret;
2316 }
2317
2318 scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
2319 GFP_KERNEL);
2320 if (!scan_cfg_out) {
2321 dev_err(adapter->dev, "failed to alloc scan_cfg_out\n");
2322 return -1;
2323 }
2324
2325 buf_size = sizeof(struct mwifiex_chan_scan_param_set) *
2326 MWIFIEX_USER_SCAN_CHAN_MAX;
2327 scan_chan_list = kzalloc(buf_size, GFP_KERNEL);
2328 if (!scan_chan_list) {
2329 dev_err(adapter->dev, "failed to alloc scan_chan_list\n");
2330 kfree(scan_cfg_out);
2331 return -1;
2332 }
2333
2334 keep_previous_scan = false;
2335
2336 mwifiex_scan_setup_scan_config(priv, user_scan_in,
2337 &scan_cfg_out->config, &chan_list_out,
2338 scan_chan_list, &max_chan_per_scan,
2339 &filtered_scan, &scan_current_chan_only);
2340
2341 if (user_scan_in)
2342 keep_previous_scan = user_scan_in->keep_previous_scan;
2343
2344
2345 if (!keep_previous_scan) {
2346 memset(adapter->scan_table, 0x00,
2347 sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP);
2348 adapter->num_in_scan_table = 0;
2349 adapter->bcn_buf_end = adapter->bcn_buf;
2350 }
2351
2352 ret = mwifiex_scan_channel_list(priv, wait_buf, max_chan_per_scan,
2353 filtered_scan, &scan_cfg_out->config,
2354 chan_list_out, scan_chan_list);
2355
2356 /* Get scan command from scan_pending_q and put to cmd_pending_q */
2357 if (!ret) {
2358 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
2359 if (!list_empty(&adapter->scan_pending_q)) {
2360 cmd_node = list_first_entry(&adapter->scan_pending_q,
2361 struct cmd_ctrl_node, list);
2362 list_del(&cmd_node->list);
2363 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
2364 flags);
2365 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
2366 true);
2367 } else {
2368 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
2369 flags);
2370 }
2371 ret = -EINPROGRESS;
2372 } else {
2373 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
2374		adapter->scan_processing = false;	/* scan setup failed */
2375 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
2376 }
2377
2378 kfree(scan_cfg_out);
2379 kfree(scan_chan_list);
2380 return ret;
2381}
2382
2383/*
2384 * This function prepares a scan command to be sent to the firmware.
2385 *
2386 * This uses the scan command configuration sent to the command processing
2387 * module in command preparation stage to configure a scan command structure
2388 * to send to firmware.
2389 *
2390 * The fixed fields specifying the BSS type and BSSID filters as well as a
2391 * variable number/length of TLVs are sent in the command to firmware.
2392 *
2393 * Preparation also includes -
2394 * - Setting command ID, and proper size
2395 * - Ensuring correct endian-ness
2396 */
2397int mwifiex_cmd_802_11_scan(struct mwifiex_private *priv,
2398 struct host_cmd_ds_command *cmd, void *data_buf)
2399{
2400 struct host_cmd_ds_802_11_scan *scan_cmd = &cmd->params.scan;
2401 struct mwifiex_scan_cmd_config *scan_cfg;
2402
2403 scan_cfg = (struct mwifiex_scan_cmd_config *) data_buf;
2404
2405 /* Set fixed field variables in scan command */
2406 scan_cmd->bss_mode = scan_cfg->bss_mode;
2407 memcpy(scan_cmd->bssid, scan_cfg->specific_bssid,
2408 sizeof(scan_cmd->bssid));
2409 memcpy(scan_cmd->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
2410
2411 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN);
2412
2413 /* Size is equal to the sizeof(fixed portions) + the TLV len + header */
2414 cmd->size = cpu_to_le16((u16) (sizeof(scan_cmd->bss_mode)
2415 + sizeof(scan_cmd->bssid)
2416 + scan_cfg->tlv_buf_len + S_DS_GEN));
2417
2418 return 0;
2419}
2420
2421/*
2422 * This function handles the command response of scan.
2423 *
2424 * The response buffer for the scan command has the following
2425 * memory layout:
2426 *
2427 * .-------------------------------------------------------------.
2428 * | Header (4 * sizeof(t_u16)): Standard command response hdr |
2429 * .-------------------------------------------------------------.
2430 * | BufSize (t_u16) : sizeof the BSS Description data |
2431 * .-------------------------------------------------------------.
2432 * | NumOfSet (t_u8) : Number of BSS Descs returned |
2433 * .-------------------------------------------------------------.
2434 * | BSSDescription data (variable, size given in BufSize) |
2435 * .-------------------------------------------------------------.
2436 * | TLV data (variable, size calculated using Header->Size, |
2437 * | BufSize and sizeof the fixed fields above) |
2438 * .-------------------------------------------------------------.
2439 */
2440int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
2441 struct host_cmd_ds_command *resp, void *wq_buf)
2442{
2443 int ret = 0;
2444 struct mwifiex_adapter *adapter = priv->adapter;
2445 struct mwifiex_wait_queue *wait_queue = NULL;
2446 struct cmd_ctrl_node *cmd_node = NULL;
2447 struct host_cmd_ds_802_11_scan_rsp *scan_rsp = NULL;
2448 struct mwifiex_bssdescriptor *bss_new_entry = NULL;
2449 struct mwifiex_ie_types_data *tlv_data;
2450 struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
2451 u8 *bss_info;
2452 u32 scan_resp_size;
2453 u32 bytes_left;
2454 u32 num_in_table;
2455 u32 bss_idx;
2456 u32 idx;
2457 u32 tlv_buf_size;
2458 long long tsf_val;
2459 struct mwifiex_chan_freq_power *cfp;
2460 struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
2461 struct chan_band_param_set *chan_band;
2462 u8 band;
2463 u8 is_bgscan_resp;
2464 unsigned long flags;
2465
2466 is_bgscan_resp = (le16_to_cpu(resp->command)
2467 == HostCmd_CMD_802_11_BG_SCAN_QUERY);
2468 if (is_bgscan_resp)
2469 scan_rsp = &resp->params.bg_scan_query_resp.scan_resp;
2470 else
2471 scan_rsp = &resp->params.scan_resp;
2472
2473
2474 if (scan_rsp->number_of_sets > IW_MAX_AP) {
2475		dev_err(adapter->dev, "SCAN_RESP: too many APs returned (%d)\n",
2476 scan_rsp->number_of_sets);
2477 ret = -1;
2478 goto done;
2479 }
2480
2481 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
2482 dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
2483 bytes_left);
2484
2485 scan_resp_size = le16_to_cpu(resp->size);
2486
2487 dev_dbg(adapter->dev,
2488 "info: SCAN_RESP: returned %d APs before parsing\n",
2489 scan_rsp->number_of_sets);
2490
2491 num_in_table = adapter->num_in_scan_table;
2492 bss_info = scan_rsp->bss_desc_and_tlv_buffer;
2493
2494 /*
2495 * The size of the TLV buffer is equal to the entire command response
2496 * size (scan_resp_size) minus the fixed fields (sizeof()'s), the
2497	 * BSS Descriptions (bss_descript_size as bytes_left) and the command
2498 * response header (S_DS_GEN)
2499 */
2500 tlv_buf_size = scan_resp_size - (bytes_left
2501 + sizeof(scan_rsp->bss_descript_size)
2502 + sizeof(scan_rsp->number_of_sets)
2503 + S_DS_GEN);
2504
2505 tlv_data = (struct mwifiex_ie_types_data *) (scan_rsp->
2506 bss_desc_and_tlv_buffer +
2507 bytes_left);
2508
2509 /* Search the TLV buffer space in the scan response for any valid
2510 TLVs */
2511 mwifiex_ret_802_11_scan_get_tlv_ptrs(adapter, tlv_data, tlv_buf_size,
2512 TLV_TYPE_TSFTIMESTAMP,
2513 (struct mwifiex_ie_types_data **)
2514 &tsf_tlv);
2515
2516 /* Search the TLV buffer space in the scan response for any valid
2517 TLVs */
2518 mwifiex_ret_802_11_scan_get_tlv_ptrs(adapter, tlv_data, tlv_buf_size,
2519 TLV_TYPE_CHANNELBANDLIST,
2520 (struct mwifiex_ie_types_data **)
2521 &chan_band_tlv);
2522
2523 /*
2524 * Process each scan response returned (scan_rsp->number_of_sets).
2525 * Save the information in the bss_new_entry and then insert into the
2526 * driver scan table either as an update to an existing entry
2527 * or as an addition at the end of the table
2528 */
2529 bss_new_entry = kzalloc(sizeof(struct mwifiex_bssdescriptor),
2530 GFP_KERNEL);
2531 if (!bss_new_entry) {
2532 dev_err(adapter->dev, " failed to alloc bss_new_entry\n");
2533 return -1;
2534 }
2535
2536 for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
2537 /* Zero out the bss_new_entry we are about to store info in */
2538 memset(bss_new_entry, 0x00,
2539 sizeof(struct mwifiex_bssdescriptor));
2540
2541 if (mwifiex_interpret_bss_desc_with_ie(adapter, bss_new_entry,
2542 &bss_info,
2543 &bytes_left)) {
2544 /* Error parsing/interpreting scan response, skipped */
2545 dev_err(adapter->dev, "SCAN_RESP: "
2546 "mwifiex_interpret_bss_desc_with_ie "
2547 "returned ERROR\n");
2548 continue;
2549 }
2550
2551 /* Process the data fields and IEs returned for this BSS */
2552 dev_dbg(adapter->dev, "info: SCAN_RESP: BSSID = %pM\n",
2553 bss_new_entry->mac_address);
2554
2555 /* Search the scan table for the same bssid */
2556 for (bss_idx = 0; bss_idx < num_in_table; bss_idx++) {
2557 if (memcmp(bss_new_entry->mac_address,
2558 adapter->scan_table[bss_idx].mac_address,
2559 sizeof(bss_new_entry->mac_address))) {
2560 continue;
2561 }
2562 /*
2563 * If the SSID matches as well, it is a
2564 * duplicate of this entry. Keep the bss_idx
2565 * set to this entry so we replace the old
2566 * contents in the table
2567 */
2568 if ((bss_new_entry->ssid.ssid_len
2569			    == adapter->scan_table[bss_idx].ssid.ssid_len)
2570 && (!memcmp(bss_new_entry->ssid.ssid,
2571 adapter->scan_table[bss_idx].ssid.ssid,
2572 bss_new_entry->ssid.ssid_len))) {
2573 dev_dbg(adapter->dev, "info: SCAN_RESP:"
2574 " duplicate of index: %d\n", bss_idx);
2575 break;
2576 }
2577 }
2578 /*
2579 * If the bss_idx is equal to the number of entries in
2580 * the table, the new entry was not a duplicate; append
2581 * it to the scan table
2582 */
2583 if (bss_idx == num_in_table) {
2584 /* Range check the bss_idx, keep it limited to
2585 the last entry */
2586 if (bss_idx == IW_MAX_AP)
2587 bss_idx--;
2588 else
2589 num_in_table++;
2590 }
2591
2592 /*
2593 * Save the beacon/probe response returned for later application
2594 * retrieval. Duplicate beacon/probe responses are updated if
2595 * possible
2596 */
2597 mwifiex_ret_802_11_scan_store_beacon(priv, bss_idx,
2598 num_in_table, bss_new_entry);
2599 /*
2600 * If the TSF TLV was appended to the scan results, save this
2601		 * entry's TSF value in the network_tsf field. The network_tsf is
2602 * the firmware's TSF value at the time the beacon or probe
2603 * response was received.
2604 */
2605 if (tsf_tlv) {
2606			memcpy(&tsf_val, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
2607			       sizeof(tsf_val));
2608 memcpy(&bss_new_entry->network_tsf, &tsf_val,
2609 sizeof(bss_new_entry->network_tsf));
2610 }
2611 band = BAND_G;
2612 if (chan_band_tlv) {
2613 chan_band = &chan_band_tlv->chan_band_param[idx];
2614 band = mwifiex_radio_type_to_band(chan_band->radio_type
2615 & (BIT(0) | BIT(1)));
2616 }
2617
2618 /* Save the band designation for this entry for use in join */
2619 bss_new_entry->bss_band = band;
2620 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
2621 (u8) bss_new_entry->bss_band,
2622 (u16)bss_new_entry->channel);
2623
2624 if (cfp)
2625 bss_new_entry->freq = cfp->freq;
2626 else
2627 bss_new_entry->freq = 0;
2628
2629 /* Copy the locally created bss_new_entry to the scan table */
2630 memcpy(&adapter->scan_table[bss_idx], bss_new_entry,
2631 sizeof(adapter->scan_table[bss_idx]));
2632
2633 }
2634
2635 dev_dbg(adapter->dev,
2636 "info: SCAN_RESP: Scanned %2d APs, %d valid, %d total\n",
2637 scan_rsp->number_of_sets,
2638 num_in_table - adapter->num_in_scan_table, num_in_table);
2639
2640 /* Update the total number of BSSIDs in the scan table */
2641 adapter->num_in_scan_table = num_in_table;
2642
2643 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
2644 if (list_empty(&adapter->scan_pending_q)) {
2645 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
2646 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
2647 adapter->scan_processing = false;
2648 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
2649 /*
2650 * Process the resulting scan table:
2651 * - Remove any bad ssids
2652 * - Update our current BSS information from scan data
2653 */
2654 mwifiex_process_scan_results(priv);
2655
2656 /* Need to indicate IOCTL complete */
2657 wait_queue = (struct mwifiex_wait_queue *) wq_buf;
2658 if (wait_queue) {
2659 wait_queue->status = MWIFIEX_ERROR_NO_ERROR;
2660
2661 /* Indicate ioctl complete */
2662 mwifiex_ioctl_complete(adapter,
2663 (struct mwifiex_wait_queue *) wait_queue, 0);
2664 }
2665 if (priv->report_scan_result)
2666 priv->report_scan_result = false;
2667 if (priv->scan_pending_on_block) {
2668 priv->scan_pending_on_block = false;
2669 up(&priv->async_sem);
2670 }
2671
2672 } else {
2673 /* Get scan command from scan_pending_q and put to
2674 cmd_pending_q */
2675 cmd_node = list_first_entry(&adapter->scan_pending_q,
2676 struct cmd_ctrl_node, list);
2677 list_del(&cmd_node->list);
2678 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
2679
2680 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
2681 }
2682
2683done:
2684 kfree((u8 *) bss_new_entry);
2685 return ret;
2686}
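/*
 * Illustrative sketch, not part of the driver: each parsed response
 * above is matched against the table by BSSID and SSID; a hit
 * overwrites that slot, a miss appends, and a full table recycles the
 * last slot. A minimal stand-alone version of that dedup-or-append
 * step (names are made up here):
 */
#if 0	/* example only, compiled out */
#include <string.h>
#include <stddef.h>

#define MAX_ENT 64

struct bss { unsigned char bssid[6]; char ssid[33]; };

static size_t store_bss(struct bss *tbl, size_t *count,
			const struct bss *fresh)
{
	size_t i;

	for (i = 0; i < *count; i++)
		if (!memcmp(tbl[i].bssid, fresh->bssid, 6) &&
		    !strcmp(tbl[i].ssid, fresh->ssid))
			break;			/* duplicate: reuse slot */

	if (i == *count) {			/* not found: append */
		if (i == MAX_ENT)
			i--;			/* full: recycle last slot */
		else
			(*count)++;
	}
	tbl[i] = *fresh;
	return i;
}
#endif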
2687
2688/*
2689 * This function prepares command for background scan query.
2690 *
2691 * Preparation includes -
2692 * - Setting command ID and proper size
2693 * - Setting background scan flush parameter
2694 * - Ensuring correct endian-ness
2695 */
2696int mwifiex_cmd_802_11_bg_scan_query(struct mwifiex_private *priv,
2697 struct host_cmd_ds_command *cmd,
2698 void *data_buf)
2699{
2700 struct host_cmd_ds_802_11_bg_scan_query *bg_query =
2701 &cmd->params.bg_scan_query;
2702
2703 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_QUERY);
2704 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_bg_scan_query)
2705 + S_DS_GEN);
2706
2707 bg_query->flush = 1;
2708
2709 return 0;
2710}
2711
2712/*
2713 * This function finds a SSID in the scan table.
2714 *
2715 * A BSSID may optionally be provided to qualify the SSID.
2716 * For non-Auto mode, further check is made to make sure the
2717 * BSS found in the scan table is compatible with the current
2718 * settings of the driver.
2719 */
2720s32
2721mwifiex_find_ssid_in_list(struct mwifiex_private *priv,
2722 struct mwifiex_802_11_ssid *ssid, u8 *bssid,
2723 u32 mode)
2724{
2725 struct mwifiex_adapter *adapter = priv->adapter;
2726 s32 net = -1, j;
2727 u8 best_rssi = 0;
2728 u32 i;
2729
2730 dev_dbg(adapter->dev, "info: num of entries in table = %d\n",
2731 adapter->num_in_scan_table);
2732
2733 /*
2734 * Loop through the table until the maximum is reached or until a match
2735 * is found based on the bssid field comparison
2736 */
2737 for (i = 0;
2738 i < adapter->num_in_scan_table && (!bssid || (bssid && net < 0));
2739 i++) {
2740 if (!mwifiex_ssid_cmp(&adapter->scan_table[i].ssid, ssid) &&
2741 (!bssid
2742 || !memcmp(adapter->scan_table[i].mac_address, bssid,
2743 ETH_ALEN))
2744 &&
2745 (mwifiex_get_cfp_by_band_and_channel_from_cfg80211
2746 (priv, (u8) adapter->scan_table[i].bss_band,
2747 (u16) adapter->scan_table[i].channel))) {
2748 switch (mode) {
2749 case MWIFIEX_BSS_MODE_INFRA:
2750 case MWIFIEX_BSS_MODE_IBSS:
2751 j = mwifiex_is_network_compatible(priv, i,
2752 mode);
2753
2754 if (j >= 0) {
2755 if (SCAN_RSSI
2756 (adapter->scan_table[i].rssi) >
2757 best_rssi) {
2758 best_rssi = SCAN_RSSI(adapter->
2759 scan_table
2760 [i].rssi);
2761 net = i;
2762 }
2763 } else {
2764 if (net == -1)
2765 net = j;
2766 }
2767 break;
2768 case MWIFIEX_BSS_MODE_AUTO:
2769 default:
2770 /*
2771 * Do not check compatibility if the mode
2772 * requested is Auto/Unknown. Allows generic
2773 * find to work without verifying against the
2774 * Adapter security settings
2775 */
2776 if (SCAN_RSSI(adapter->scan_table[i].rssi) >
2777 best_rssi) {
2778 best_rssi = SCAN_RSSI(adapter->
2779 scan_table[i].rssi);
2780 net = i;
2781 }
2782 break;
2783 }
2784 }
2785 }
2786
2787 return net;
2788}
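/*
 * Illustrative sketch, not part of the driver: the SSID search above
 * is a linear scan that remembers the index with the strongest signal
 * among the matching, compatible entries. A stand-alone version of
 * that arg-max pass (names are made up here):
 */
#if 0	/* example only, compiled out */
#include <string.h>

/* Return the index of the matching entry with the best rssi, or -1. */
static int find_best(const char (*ssids)[33], const int *rssi,
		     int count, const char *want)
{
	int i, best = -1;

	for (i = 0; i < count; i++) {
		if (strcmp(ssids[i], want))
			continue;		/* SSID does not match */
		if (best < 0 || rssi[i] > rssi[best])
			best = i;		/* new strongest match */
	}
	return best;
}
#endif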
2789
2790/*
2791 * This function finds a specific compatible BSSID in the scan list.
2792 *
2793 * This function loops through the scan table looking for a compatible
2794 * match. If a BSSID matches, but the BSS is found to be not compatible
2795 * the function ignores it and continues to search through the rest of
2796 * the entries in case there is an AP with multiple SSIDs assigned to
2797 * the same BSSID.
2798 */
2799s32
2800mwifiex_find_bssid_in_list(struct mwifiex_private *priv, u8 *bssid,
2801 u32 mode)
2802{
2803 struct mwifiex_adapter *adapter = priv->adapter;
2804 s32 net = -1;
2805 u32 i;
2806
2807 if (!bssid)
2808 return -1;
2809
2810 dev_dbg(adapter->dev, "info: FindBSSID: Num of BSSIDs = %d\n",
2811 adapter->num_in_scan_table);
2812
2813 /*
2814	 * Look through the scan table for a compatible match. The net return
2815	 * variable will be set to the index in the scan table (zero or
2816	 * greater) if the network is compatible. The loop will continue
2817 * past a matched bssid that is not compatible in case there is an
2818 * AP with multiple SSIDs assigned to the same BSSID
2819 */
2820 for (i = 0; net < 0 && i < adapter->num_in_scan_table; i++) {
2821 if (!memcmp
2822 (adapter->scan_table[i].mac_address, bssid, ETH_ALEN)
2823 && mwifiex_get_cfp_by_band_and_channel_from_cfg80211
2824 (priv,
2825 (u8) adapter->
2826 scan_table[i].
2827 bss_band,
2828 (u16) adapter->
2829 scan_table[i].
2830 channel)) {
2831 switch (mode) {
2832 case MWIFIEX_BSS_MODE_INFRA:
2833 case MWIFIEX_BSS_MODE_IBSS:
2834 net = mwifiex_is_network_compatible(priv, i,
2835 mode);
2836 break;
2837 default:
2838 net = i;
2839 break;
2840 }
2841 }
2842 }
2843
2844 return net;
2845}
2846
2847/*
2848 * This function inserts scan command node to the scan pending queue.
2849 */
2850void
2851mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
2852 struct cmd_ctrl_node *cmd_node)
2853{
2854 struct mwifiex_adapter *adapter = priv->adapter;
2855 unsigned long flags;
2856
2857 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
2858 list_add_tail(&cmd_node->list, &adapter->scan_pending_q);
2859 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
2860}
2861
2862/*
2863 * This function finds an AP with specific ssid in the scan list.
2864 */
2865int mwifiex_find_best_network(struct mwifiex_private *priv,
2866 struct mwifiex_ssid_bssid *req_ssid_bssid)
2867{
2868 struct mwifiex_adapter *adapter = priv->adapter;
2869 struct mwifiex_bssdescriptor *req_bss;
2870 s32 i;
2871
2872 memset(req_ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
2873
2874 i = mwifiex_find_best_network_in_list(priv);
2875
2876 if (i >= 0) {
2877 req_bss = &adapter->scan_table[i];
2878 memcpy(&req_ssid_bssid->ssid, &req_bss->ssid,
2879 sizeof(struct mwifiex_802_11_ssid));
2880 memcpy((u8 *) &req_ssid_bssid->bssid,
2881 (u8 *) &req_bss->mac_address, ETH_ALEN);
2882
2883 /* Make sure we are in the right mode */
2884 if (priv->bss_mode == MWIFIEX_BSS_MODE_AUTO)
2885 priv->bss_mode = req_bss->bss_mode;
2886 }
2887
2888 if (!req_ssid_bssid->ssid.ssid_len)
2889 return -1;
2890
2891 dev_dbg(adapter->dev, "info: Best network found = [%s], "
2892 "[%pM]\n", req_ssid_bssid->ssid.ssid,
2893 req_ssid_bssid->bssid);
2894
2895 return 0;
2896}
2897
2898/*
2899 * This function sends a scan command for all available channels to the
2900 * firmware, filtered on a specific SSID.
2901 */
2902static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
2903 void *wait_buf, u16 action,
2904 struct mwifiex_802_11_ssid *req_ssid,
2905 struct mwifiex_scan_resp *scan_resp)
2906{
2907 struct mwifiex_adapter *adapter = priv->adapter;
2908 int ret = 0;
2909 struct mwifiex_user_scan_cfg *scan_cfg;
2910
2911 if (!req_ssid)
2912 return -1;
2913
2914 if (action == HostCmd_ACT_GEN_GET) {
2915 if (scan_resp) {
2916 scan_resp->scan_table =
2917 (u8 *) &priv->curr_bss_params.bss_descriptor;
2918 scan_resp->num_in_scan_table =
2919 adapter->num_in_scan_table;
2920 } else {
2921 ret = -1;
2922 }
2923 return ret;
2924 }
2925
2926 if (adapter->scan_processing && action == HostCmd_ACT_GEN_SET) {
2927 dev_dbg(adapter->dev, "cmd: Scan already in process...\n");
2928 return ret;
2929 }
2930
2931 if (priv->scan_block && action == HostCmd_ACT_GEN_SET) {
2932 dev_dbg(adapter->dev,
2933 "cmd: Scan is blocked during association...\n");
2934 return ret;
2935 }
2936
2937 mwifiex_scan_delete_ssid_table_entry(priv, req_ssid);
2938
2939 scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);
2940 if (!scan_cfg) {
2941 dev_err(adapter->dev, "failed to alloc scan_cfg\n");
2942 return -1;
2943 }
2944
2945 memcpy(scan_cfg->ssid_list[0].ssid, req_ssid->ssid,
2946 req_ssid->ssid_len);
2947 scan_cfg->keep_previous_scan = true;
2948
2949 ret = mwifiex_scan_networks(priv, wait_buf, action, scan_cfg, NULL);
2950
2951 kfree(scan_cfg);
2952 return ret;
2953}
2954
2955/*
2956 * Sends IOCTL request to start a scan.
2957 *
2958 * This function allocates the IOCTL request buffer, fills it
2959 * with requisite parameters and calls the IOCTL handler.
2960 *
2961 * Scan command can be issued for both normal scan and specific SSID
2962 * scan, depending upon whether an SSID is provided or not.
2963 */
2964int mwifiex_request_scan(struct mwifiex_private *priv, u8 wait_option,
2965 struct mwifiex_802_11_ssid *req_ssid)
2966{
2967 int ret = 0;
2968 struct mwifiex_wait_queue *wait = NULL;
2969 int status = 0;
2970
2971 if (down_interruptible(&priv->async_sem)) {
2972 dev_err(priv->adapter->dev, "%s: acquire semaphore\n",
2973 __func__);
2974 return -1;
2975 }
2976 priv->scan_pending_on_block = true;
2977
2978 /* Allocate wait request buffer */
2979 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
2980 if (!wait) {
2981 ret = -1;
2982 goto done;
2983 }
2984
2985 if (req_ssid && req_ssid->ssid_len != 0)
2986 /* Specific SSID scan */
2987 status = mwifiex_scan_specific_ssid(priv, wait,
2988 HostCmd_ACT_GEN_SET,
2989 req_ssid, NULL);
2990 else
2991 /* Normal scan */
2992 status = mwifiex_scan_networks(priv, wait, HostCmd_ACT_GEN_SET,
2993 NULL, NULL);
2994 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
2995 if (status == -1)
2996 ret = -1;
2997done:
2998 if ((wait) && (status != -EINPROGRESS))
2999 kfree(wait);
3000 if (ret == -1) {
3001 priv->scan_pending_on_block = false;
3002 up(&priv->async_sem);
3003 }
3004 return ret;
3005}
3006
3007/*
3008 * This function appends the vendor specific IE TLV to a buffer.
3009 */
3010int
3011mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
3012 u16 vsie_mask, u8 **buffer)
3013{
3014 int id, ret_len = 0;
3015 struct mwifiex_ie_types_vendor_param_set *vs_param_set;
3016
3017 if (!buffer)
3018 return 0;
3019 if (!(*buffer))
3020 return 0;
3021
3022 /*
3023 * Traverse through the saved vendor specific IE array and append
3024	 * the selected (scan/assoc/adhoc) IE as a TLV to the command
3025 */
3026 for (id = 0; id < MWIFIEX_MAX_VSIE_NUM; id++) {
3027 if (priv->vs_ie[id].mask & vsie_mask) {
3028 vs_param_set =
3029 (struct mwifiex_ie_types_vendor_param_set *)
3030 *buffer;
3031 vs_param_set->header.type =
3032 cpu_to_le16(TLV_TYPE_PASSTHROUGH);
3033 vs_param_set->header.len =
3034 cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
3035 & 0x00FF) + 2);
3036 memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
3037 le16_to_cpu(vs_param_set->header.len));
3038 *buffer += le16_to_cpu(vs_param_set->header.len) +
3039 sizeof(struct mwifiex_ie_types_header);
3040 ret_len += le16_to_cpu(vs_param_set->header.len) +
3041 sizeof(struct mwifiex_ie_types_header);
3042 }
3043 }
3044 return ret_len;
3045}
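/*
 * Illustrative sketch, not part of the driver: the double pointer
 * above acts as a write cursor, so each appended { type, len, value }
 * TLV advances the caller's position in the command buffer. A
 * stand-alone model, assuming host byte order for brevity (the driver
 * itself uses cpu_to_le16; names are made up here):
 */
#if 0	/* example only, compiled out */
#include <string.h>
#include <stdint.h>
#include <stddef.h>

struct tlv_hdr { uint16_t type; uint16_t len; };

static size_t append_tlv(uint8_t **cursor, uint16_t type,
			 const void *val, uint16_t len)
{
	struct tlv_hdr hdr = { type, len };

	memcpy(*cursor, &hdr, sizeof(hdr));
	memcpy(*cursor + sizeof(hdr), val, len);
	*cursor += sizeof(hdr) + len;	/* advance caller's cursor */
	return sizeof(hdr) + len;
}
#endif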
3046
3047/*
3048 * This function saves a beacon buffer of the current BSS descriptor.
3049 *
3050 * The current beacon buffer is saved so that it can be restored in the
3051 * following cases, which would otherwise leave the shared beacon buffer
3052 * without the current SSID's beacon:
3053 * - The current SSID was somehow not found in the last scan.
3054 * - The current SSID was the last entry of the scan table and was overwritten.
3055 */
3056void
3057mwifiex_save_curr_bcn(struct mwifiex_private *priv)
3058{
3059 struct mwifiex_bssdescriptor *curr_bss =
3060 &priv->curr_bss_params.bss_descriptor;
3061
3062 /* save the beacon buffer if it is not saved or updated */
3063 if ((priv->curr_bcn_buf == NULL) ||
3064 (priv->curr_bcn_size != curr_bss->beacon_buf_size) ||
3065 (memcmp(priv->curr_bcn_buf, curr_bss->beacon_buf,
3066 curr_bss->beacon_buf_size))) {
3067
3068 kfree(priv->curr_bcn_buf);
3069 priv->curr_bcn_buf = NULL;
3070
3071 priv->curr_bcn_size = curr_bss->beacon_buf_size;
3072 if (!priv->curr_bcn_size)
3073 return;
3074
3075 priv->curr_bcn_buf = kzalloc(curr_bss->beacon_buf_size,
3076 GFP_KERNEL);
3077 if (!priv->curr_bcn_buf) {
3078 dev_err(priv->adapter->dev,
3079 "failed to alloc curr_bcn_buf\n");
3080 } else {
3081 memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
3082 curr_bss->beacon_buf_size);
3083 dev_dbg(priv->adapter->dev,
3084 "info: current beacon saved %d\n",
3085 priv->curr_bcn_size);
3086 }
3087 }
3088}
3089
3090/*
3091 * This function frees the current BSS descriptor beacon buffer.
3092 */
3093void
3094mwifiex_free_curr_bcn(struct mwifiex_private *priv)
3095{
3096 kfree(priv->curr_bcn_buf);
3097 priv->curr_bcn_buf = NULL;
3098}
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
new file mode 100644
index 00000000000..f21e5cd1983
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -0,0 +1,1770 @@
1/*
2 * Marvell Wireless LAN device driver: SDIO specific handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include <linux/firmware.h>
21
22#include "decl.h"
23#include "ioctl.h"
24#include "util.h"
25#include "fw.h"
26#include "main.h"
27#include "wmm.h"
28#include "11n.h"
29#include "sdio.h"
30
31
32#define SDIO_VERSION "1.0"
33
34static struct mwifiex_if_ops sdio_ops;
35
36static struct semaphore add_remove_card_sem;
37
38/*
39 * SDIO probe.
40 *
41 * This function probes an mwifiex device and registers it. It allocates
42 * the card structure, enables the SDIO function and initiates the
43 * device registration and initialization procedure by adding a logical
44 * interface.
45 */
46static int
47mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
48{
49 int ret = 0;
50 struct sdio_mmc_card *card = NULL;
51
52 pr_debug("info: vendor=0x%4.04X device=0x%4.04X class=%d function=%d\n",
53 func->vendor, func->device, func->class, func->num);
54
55 card = kzalloc(sizeof(struct sdio_mmc_card), GFP_KERNEL);
56 if (!card) {
57 pr_err("%s: failed to alloc memory\n", __func__);
58 return -ENOMEM;
59 }
60
61 card->func = func;
62
63 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
64
65 sdio_claim_host(func);
66 ret = sdio_enable_func(func);
67 sdio_release_host(func);
68
69 if (ret) {
70 pr_err("%s: failed to enable function\n", __func__);
71 kfree(card);
72 return -EIO;
72 }
73
74 if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops)) {
75 pr_err("%s: add card failed\n", __func__);
76 kfree(card);
77 sdio_claim_host(func);
78 sdio_disable_func(func);
79 sdio_release_host(func);
80 ret = -1;
81 }
82
83 return ret;
84}
85
86/*
87 * SDIO remove.
88 *
89 * This function removes the interface and frees up the card structure.
90 */
91static void
92mwifiex_sdio_remove(struct sdio_func *func)
93{
94 struct sdio_mmc_card *card;
95
96 if (func) {
97 pr_debug("info: SDIO func num=%d\n", func->num);
98
99 card = sdio_get_drvdata(func);
100 if (card) {
101 mwifiex_remove_card(card->adapter,
102 &add_remove_card_sem);
103 kfree(card);
104 }
105 }
106}
107
108/*
109 * SDIO suspend.
110 *
111 * Kernel needs to suspend all functions separately. Therefore all
112 * registered functions must have drivers with suspend and resume
113 * methods. Failing that the kernel simply removes the whole card.
114 *
115 * If not already suspended, this function allocates and sends a host
116 * sleep activate request to the firmware and turns off the traffic.
117 */
118static int mwifiex_sdio_suspend(struct device *dev)
119{
120 struct sdio_func *func = dev_to_sdio_func(dev);
121 struct sdio_mmc_card *card;
122 struct mwifiex_adapter *adapter = NULL;
123 mmc_pm_flag_t pm_flag = 0;
124 int hs_activated = 0;
125 int i;
126 int ret = 0;
127
128 if (func) {
129 pm_flag = sdio_get_host_pm_caps(func);
130 pr_debug("cmd: %s: suspend: PM flag = 0x%x\n",
131 sdio_func_id(func), pm_flag);
132 if (!(pm_flag & MMC_PM_KEEP_POWER)) {
133 pr_err("%s: cannot remain alive while host is"
134 " suspended\n", sdio_func_id(func));
135 return -ENOSYS;
136 }
137
138 card = sdio_get_drvdata(func);
139 if (!card || !card->adapter) {
140 pr_err("suspend: invalid card or adapter\n");
141 return 0;
142 }
143 } else {
144 pr_err("suspend: sdio_func is not specified\n");
145 return 0;
146 }
147
148 adapter = card->adapter;
149
150 /* Enable the Host Sleep */
151 hs_activated = mwifiex_enable_hs(adapter);
152 if (hs_activated) {
153 pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
154 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
155 }
156
157 /* Indicate device suspended */
158 adapter->is_suspended = true;
159
160 for (i = 0; i < adapter->priv_num; i++)
161 netif_carrier_off(adapter->priv[i]->netdev);
162
163 return ret;
164}
165
166/*
167 * SDIO resume.
168 *
169 * Kernel needs to suspend all functions separately. Therefore all
170 * registered functions must have drivers with suspend and resume
171 * methods. Failing that the kernel simply removes the whole card.
172 *
173 * If not already resumed, this function turns on the traffic and
174 * sends a host sleep cancel request to the firmware.
175 */
176static int mwifiex_sdio_resume(struct device *dev)
177{
178 struct sdio_func *func = dev_to_sdio_func(dev);
179 struct sdio_mmc_card *card;
180 struct mwifiex_adapter *adapter = NULL;
182 int i;
183
184 if (func) {
186 card = sdio_get_drvdata(func);
187 if (!card || !card->adapter) {
188 pr_err("resume: invalid card or adapter\n");
189 return 0;
190 }
191 } else {
192 pr_err("resume: sdio_func is not specified\n");
193 return 0;
194 }
195
196 adapter = card->adapter;
197
198 if (!adapter->is_suspended) {
199 dev_warn(adapter->dev, "device already resumed\n");
200 return 0;
201 }
202
203 adapter->is_suspended = false;
204
205 for (i = 0; i < adapter->priv_num; i++)
206 if (adapter->priv[i]->media_connected)
207 netif_carrier_on(adapter->priv[i]->netdev);
208
209 /* Disable Host Sleep */
210 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
211 MWIFIEX_NO_WAIT);
212
213 return 0;
214}
215
216/* Device ID for SD8787 */
217#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
218
219/* WLAN IDs */
220static const struct sdio_device_id mwifiex_ids[] = {
221 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
222 {},
223};
224
225MODULE_DEVICE_TABLE(sdio, mwifiex_ids);
226
227static const struct dev_pm_ops mwifiex_sdio_pm_ops = {
228 .suspend = mwifiex_sdio_suspend,
229 .resume = mwifiex_sdio_resume,
230};
231
232static struct sdio_driver mwifiex_sdio = {
233 .name = "mwifiex_sdio",
234 .id_table = mwifiex_ids,
235 .probe = mwifiex_sdio_probe,
236 .remove = mwifiex_sdio_remove,
237 .drv = {
238 .owner = THIS_MODULE,
239 .pm = &mwifiex_sdio_pm_ops,
240 }
241};
242
243/*
244 * This function writes data into SDIO card register.
245 */
246static int
247mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data)
248{
249 struct sdio_mmc_card *card = adapter->card;
250 int ret = -1;
251
252 sdio_claim_host(card->func);
253 sdio_writeb(card->func, (u8) data, reg, &ret);
254 sdio_release_host(card->func);
255
256 return ret;
257}
258
259/*
260 * This function reads data from SDIO card register.
261 */
262static int
263mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u32 *data)
264{
265 struct sdio_mmc_card *card = adapter->card;
266 int ret = -1;
267 u8 val;
268
269 sdio_claim_host(card->func);
270 val = sdio_readb(card->func, reg, &ret);
271 sdio_release_host(card->func);
272
273 *data = val;
274
275 return ret;
276}
277
278/*
279 * This function writes a data buffer to SDIO card memory.
280 *
281 * This does not work in suspended mode.
282 */
283static int
284mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
285 u8 *buffer, u32 pkt_len, u32 port, u32 timeout)
286{
287 struct sdio_mmc_card *card = adapter->card;
288 int ret = -1;
289 u8 blk_mode =
290 (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
291 u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
292 u32 blk_cnt = (blk_mode == BLOCK_MODE) ?
293 (pkt_len / MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
296 u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
297
298 if (adapter->is_suspended) {
299 dev_err(adapter->dev,
300 "%s: not allowed while suspended\n", __func__);
301 return -1;
302 }
303
304 sdio_claim_host(card->func);
305
306 if (!sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size))
307 ret = 0;
308
309 sdio_release_host(card->func);
310
311 return ret;
312}
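
/*
 * A stand-alone sketch of the byte- vs. block-mode arithmetic used by the
 * two sync transfer helpers here. MWIFIEX_SDIO_BLOCK_SIZE is defined in
 * another header; 256 is assumed below as a representative value, and the
 * port values are made up. Callers pass block-aligned lengths in block
 * mode, so the plain division does not truncate.
 */
#include <stdio.h>

#define SDIO_BLOCK_SIZE		256		/* assumed block size */
#define BYTE_MODE_MASK		0x80000000u

int main(void)
{
	unsigned int ports[] = { 0x10001u, 0x80010001u };
	unsigned int len = 1024;

	for (int i = 0; i < 2; i++) {
		int byte_mode = !!(ports[i] & BYTE_MODE_MASK);
		unsigned int blk_size = byte_mode ? 1 : SDIO_BLOCK_SIZE;
		unsigned int blk_cnt = byte_mode ? len : len / SDIO_BLOCK_SIZE;

		printf("port=%#x mode=%s wire size=%u bytes\n", ports[i],
		       byte_mode ? "byte" : "block", blk_cnt * blk_size);
	}
	return 0;
}
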
313
314/*
315 * This function reads a data buffer from SDIO card memory.
316 */
317static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter,
318 u8 *buffer, u32 len,
319 u32 port, u32 timeout, u8 claim)
320{
321 struct sdio_mmc_card *card = adapter->card;
322 int ret = -1;
323 u8 blk_mode =
324 (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
325 u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
326 u32 blk_cnt = (blk_mode == BLOCK_MODE) ?
327 (len / MWIFIEX_SDIO_BLOCK_SIZE) : len;
329 u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
330
331 if (claim)
332 sdio_claim_host(card->func);
333
334 if (!sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size))
335 ret = 0;
336
337 if (claim)
338 sdio_release_host(card->func);
339
340 return ret;
341}
342
343/*
344 * This function wakes up the card.
345 *
346 * A host power up command is written to the card configuration
347 * register to wake up the card.
348 */
349static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
350{
351 int ret;
352
353 dev_dbg(adapter->dev, "event: wakeup device...\n");
354 ret = mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP);
355
356 return ret;
357}
358
359/*
360 * This function is called after the card has woken up.
361 *
362 * The card configuration register is reset.
363 */
364static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
365{
366 int ret;
367
368 dev_dbg(adapter->dev, "cmd: wakeup device completed\n");
369 ret = mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
370
371 return ret;
372}
373
374/*
375 * This function initializes the IO ports.
376 *
377 * The following operations are performed -
378 * - Read the IO ports (0, 1 and 2)
379 * - Set host interrupt Reset-To-Read to clear
380 * - Set auto re-enable interrupt
381 */
382static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
383{
384 u32 reg;
385
386 adapter->ioport = 0;
387
388 /* Read the IO port */
389 if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, &reg))
390 adapter->ioport |= (reg & 0xff);
391 else
392 return -1;
393
394 if (!mwifiex_read_reg(adapter, IO_PORT_1_REG, &reg))
395 adapter->ioport |= ((reg & 0xff) << 8);
396 else
397 return -1;
398
399 if (!mwifiex_read_reg(adapter, IO_PORT_2_REG, &reg))
400 adapter->ioport |= ((reg & 0xff) << 16);
401 else
402 return -1;
403
404 pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
405
406 /* Set Host interrupt reset to read to clear */
407 if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, &reg))
408 mwifiex_write_reg(adapter, HOST_INT_RSR_REG,
409 reg | SDIO_INT_MASK);
410 else
411 return -1;
412
413 /* Dnld/Upld ready set to auto reset */
414 if (!mwifiex_read_reg(adapter, CARD_MISC_CFG_REG, &reg))
415 mwifiex_write_reg(adapter, CARD_MISC_CFG_REG,
416 reg | AUTO_RE_ENABLE_INT);
417 else
418 return -1;
419
420 return 0;
421}
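
/*
 * A sketch of how mwifiex_init_sdio_ioport() above assembles the FUNC1
 * I/O port from the three byte-wide port registers; the register values
 * below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int reg0 = 0x00, reg1 = 0x80, reg2 = 0x01;
	unsigned int ioport = 0;

	ioport |= reg0 & 0xff;		/* IO_PORT_0_REG: bits 7..0 */
	ioport |= (reg1 & 0xff) << 8;	/* IO_PORT_1_REG: bits 15..8 */
	ioport |= (reg2 & 0xff) << 16;	/* IO_PORT_2_REG: bits 19..16 */

	printf("SDIO FUNC1 IO port: %#x\n", ioport);	/* 0x18000 */
	return 0;
}
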
422
423/*
424 * This function sends data to the card.
425 */
426static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
427 u8 *payload, u32 pkt_len, u32 port)
428{
429 u32 i = 0;
430 int ret = 0;
431
432 do {
433 ret = mwifiex_write_data_sync(adapter, payload, pkt_len,
434 port, 0);
435 if (ret) {
436 i++;
437 dev_err(adapter->dev, "host_to_card, write iomem"
438 " (%d) failed: %d\n", i, ret);
439 if (mwifiex_write_reg(adapter,
440 CONFIGURATION_REG, 0x04))
441 dev_err(adapter->dev, "write CFG reg failed\n");
442
443 ret = -1;
444 if (i > MAX_WRITE_IOMEM_RETRY)
445 return ret;
446 }
447 } while (ret == -1);
448
449 return ret;
450}
451
452/*
453 * This function gets the read port.
454 *
455 * If the control port bit is set in the MP read bitmap, the control port
456 * is returned; otherwise the current read port is returned and then
457 * advanced, wrapping back to port 1 once it reaches the maximum port
458 * number. A sketch of this selection follows the function.
459 */
460static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
461{
462 struct sdio_mmc_card *card = adapter->card;
463 u16 rd_bitmap = card->mp_rd_bitmap;
464
465 dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%04x\n", rd_bitmap);
466
467 if (!(rd_bitmap & (CTRL_PORT_MASK | DATA_PORT_MASK)))
468 return -1;
469
470 if (card->mp_rd_bitmap & CTRL_PORT_MASK) {
471 card->mp_rd_bitmap &= (u16) (~CTRL_PORT_MASK);
472 *port = CTRL_PORT;
473 dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%04x\n",
474 *port, card->mp_rd_bitmap);
475 } else {
476 if (card->mp_rd_bitmap & (1 << card->curr_rd_port)) {
477 card->mp_rd_bitmap &=
478 (u16) (~(1 << card->curr_rd_port));
479 *port = card->curr_rd_port;
480
481 if (++card->curr_rd_port == MAX_PORT)
482 card->curr_rd_port = 1;
483 } else {
484 return -1;
485 }
486
487 dev_dbg(adapter->dev,
488 "data: port=%d mp_rd_bitmap=0x%04x -> 0x%04x\n",
489 *port, rd_bitmap, card->mp_rd_bitmap);
490 }
491 return 0;
492}
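
/*
 * A simplified stand-alone sketch of the read-port selection above: the
 * control port (bit 0) is drained first; data ports are then consumed
 * round-robin, wrapping back to port 1 past the last port. The bitmap
 * value is illustrative. The write-port selection below follows the same
 * round-robin wrap, but against mp_end_port instead of MAX_PORT.
 */
#include <stdio.h>

#define CTRL_PORT_MASK	0x0001
#define MAX_PORT	16

static unsigned short bitmap = 0x8003;	/* ctrl + ports 1 and 15 pending */
static unsigned char curr_rd_port = 15;

static int get_rd_port(unsigned char *port)
{
	if (bitmap & CTRL_PORT_MASK) {
		bitmap &= ~CTRL_PORT_MASK;
		*port = 0;
		return 0;
	}
	if (bitmap & (1 << curr_rd_port)) {
		bitmap &= ~(1 << curr_rd_port);
		*port = curr_rd_port;
		if (++curr_rd_port == MAX_PORT)
			curr_rd_port = 1;	/* wrap past the last port */
		return 0;
	}
	return -1;
}

int main(void)
{
	unsigned char port;

	while (!get_rd_port(&port))
		printf("read from port %u\n", port);	/* 0, 15, 1 */
	return 0;
}
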
493
494/*
495 * This function gets the write port for data.
496 *
497 * The current write port is returned if available, and then advanced,
498 * wrapping back to port 1 once it reaches the end port. The control
499 * port is never returned for data.
500 */
501static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port)
502{
503 struct sdio_mmc_card *card = adapter->card;
504 u16 wr_bitmap = card->mp_wr_bitmap;
505
506 dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%04x\n", wr_bitmap);
507
508 if (!(wr_bitmap & card->mp_data_port_mask))
509 return -1;
510
511 if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) {
512 card->mp_wr_bitmap &= (u16) (~(1 << card->curr_wr_port));
513 *port = card->curr_wr_port;
514 if (++card->curr_wr_port == card->mp_end_port)
515 card->curr_wr_port = 1;
516 } else {
517 adapter->data_sent = true;
518 return -EBUSY;
519 }
520
521 if (*port == CTRL_PORT) {
522 dev_err(adapter->dev, "invalid data port=%d cur port=%d"
523 " mp_wr_bitmap=0x%04x -> 0x%04x\n",
524 *port, card->curr_wr_port, wr_bitmap,
525 card->mp_wr_bitmap);
526 return -1;
527 }
528
529 dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%04x -> 0x%04x\n",
530 *port, wr_bitmap, card->mp_wr_bitmap);
531
532 return 0;
533}
534
535/*
536 * This function polls the card status.
537 */
538static int
539mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
540{
541 u32 tries;
542 u32 cs = 0;
543
544 for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
545 if (mwifiex_read_reg(adapter, CARD_STATUS_REG, &cs))
546 break;
547 else if ((cs & bits) == bits)
548 return 0;
549
550 udelay(10);
551 }
552
553 dev_err(adapter->dev, "poll card status failed, tries = %d\n",
554 tries);
555 return -1;
556}
557
558/*
559 * This function reads the firmware status.
560 */
561static int
562mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
563{
564 u32 fws0 = 0, fws1 = 0;
565
566 if (mwifiex_read_reg(adapter, CARD_FW_STATUS0_REG, &fws0))
567 return -1;
568
569 if (mwifiex_read_reg(adapter, CARD_FW_STATUS1_REG, &fws1))
570 return -1;
571
572 *dat = (u16) ((fws1 << 8) | fws0);
573
574 return 0;
575}
576
577/*
578 * This function disables the host interrupt.
579 *
580 * The host interrupt mask is read, the disable bit is reset and
581 * written back to the card host interrupt mask register.
582 */
583static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
584{
585 u32 host_int_mask = 0;
586
587 /* Read back the host_int_mask register */
588 if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
589 return -1;
590
591 /* Update with the mask and write back to the register */
592 host_int_mask &= ~HOST_INT_DISABLE;
593
594 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
595 dev_err(adapter->dev, "disable host interrupt failed\n");
596 return -1;
597 }
598
599 return 0;
600}
601
602/*
603 * This function enables the host interrupt.
604 *
605 * The host interrupt enable mask is written to the card
606 * host interrupt mask register.
607 */
608static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
609{
610 /* Simply write the mask to the register */
611 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, HOST_INT_ENABLE)) {
612 dev_err(adapter->dev, "enable host interrupt failed\n");
613 return -1;
614 }
615 return 0;
616}
617
618/*
619 * This function reads a packet from the card into the given buffer.
620 */
621static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
622 u32 *type, u8 *buffer,
623 u32 npayload, u32 ioport)
624{
625 int ret = 0;
626 u32 nb;
627
628 if (!buffer) {
629 dev_err(adapter->dev, "%s: buffer is NULL\n", __func__);
630 return -1;
631 }
632
633 ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 0, 1);
634
635 if (ret) {
636 dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__,
637 ret);
638 return -1;
639 }
640
641 nb = le16_to_cpu(*(__le16 *) (buffer));
642 if (nb > npayload) {
643 dev_err(adapter->dev, "%s: invalid packet, nb=%d, npayload=%d\n",
644 __func__, nb, npayload);
645 return -1;
646 }
647
648 *type = le16_to_cpu(*(__le16 *) (buffer + 2));
649
650 return ret;
651}
652
653/*
654 * This function downloads the firmware to the card.
655 *
656 * Firmware is downloaded to the card in blocks. Every block download
657 * is tested for CRC errors, and retried a number of times before
658 * returning failure.
659 */
660static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
661 struct mwifiex_fw_image *fw)
662{
663 int ret = 0;
664 u8 *firmware = fw->fw_buf;
665 u32 firmware_len = fw->fw_len;
666 u32 offset = 0;
667 u32 base0, base1;
668 u8 *fwbuf;
669 u16 len = 0;
670 u32 txlen = 0, tx_blocks = 0, tries = 0;
671 u32 i = 0;
672
673 if (!firmware_len) {
674 dev_err(adapter->dev, "firmware image not found!"
675 " Terminating download\n");
676 return -1;
677 }
678
679 dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n",
680 firmware_len);
681
682 /* Assume that the allocated buffer is 8-byte aligned */
683 fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
684 if (!fwbuf) {
685 dev_err(adapter->dev, "unable to alloc buffer for firmware."
686 " Terminating download\n");
687 return -1;
688 }
689
690 /* Perform firmware data transfer */
691 do {
692 /* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
693 bits */
694 ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY |
695 DN_LD_CARD_RDY);
696 if (ret) {
697 dev_err(adapter->dev, "FW download with helper:"
698 " poll status timeout @ %d\n", offset);
699 goto done;
700 }
701
702 /* More data? */
703 if (offset >= firmware_len)
704 break;
705
706 for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
707 ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_0,
708 &base0);
709 if (ret) {
710 dev_err(adapter->dev, "dev BASE0 register read"
711 " failed: base0=0x%04X(%d). Terminating "
712 "download\n", base0, base0);
713 goto done;
714 }
715 ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_1,
716 &base1);
717 if (ret) {
718 dev_err(adapter->dev, "dev BASE1 register read"
719 " failed: base1=0x%04X(%d). Terminating "
720 "download\n", base1, base1);
721 goto done;
722 }
723 len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff));
724
725 if (len)
726 break;
727
728 udelay(10);
729 }
730
731 if (!len) {
732 break;
733 } else if (len > MWIFIEX_UPLD_SIZE) {
734 dev_err(adapter->dev, "FW download failed @ %d,"
735 " invalid length %d\n", offset, len);
736 ret = -1;
737 goto done;
738 }
739
740 txlen = len;
741
742 if (len & BIT(0)) {
743 i++;
744 if (i > MAX_WRITE_IOMEM_RETRY) {
745 dev_err(adapter->dev, "FW download failed @"
746 " %d, over max retry count\n", offset);
747 ret = -1;
748 goto done;
749 }
750 dev_err(adapter->dev, "CRC error indicated by the helper:"
751 " len = 0x%04X, txlen = %d\n", len, txlen);
752 len &= ~BIT(0);
753 /* Setting this to 0 to resend from same offset */
754 txlen = 0;
755 } else {
756 i = 0;
757
758 /* Set blocksize to transfer - checking for last
759 block */
760 if (firmware_len - offset < txlen)
761 txlen = firmware_len - offset;
762
763 tx_blocks = (txlen + MWIFIEX_SDIO_BLOCK_SIZE -
764 1) / MWIFIEX_SDIO_BLOCK_SIZE;
765
766 /* Copy payload to buffer */
767 memmove(fwbuf, &firmware[offset], txlen);
768 }
769
770 ret = mwifiex_write_data_sync(adapter, fwbuf, tx_blocks *
771 MWIFIEX_SDIO_BLOCK_SIZE,
772 adapter->ioport, 0);
773 if (ret) {
774 dev_err(adapter->dev, "FW download, write iomem (%d)"
775 " failed @ %d\n", i, offset);
776 if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
777 dev_err(adapter->dev, "write CFG reg failed\n");
778
779 ret = -1;
780 goto done;
781 }
782
783 offset += txlen;
784 } while (true);
785
786 dev_dbg(adapter->dev, "info: FW download over, size %d bytes\n",
787 offset);
788
789 ret = 0;
790done:
791 kfree(fwbuf);
792 return ret;
793}
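
/*
 * A sketch of the per-chunk arithmetic in the download loop above: the
 * chunk length comes from two byte-wide base registers, bit 0 of the
 * length flags a CRC error (resend), and the write is rounded up to whole
 * SDIO blocks. A block size of 256 is assumed; the register values are
 * illustrative.
 */
#include <stdio.h>

#define BLOCK_SIZE 256	/* assumed MWIFIEX_SDIO_BLOCK_SIZE */

int main(void)
{
	unsigned int base0 = 0x21, base1 = 0x03;	/* hypothetical reads */
	unsigned int len = ((base1 & 0xff) << 8) | (base0 & 0xff);

	if (len & 1) {		/* helper flags a CRC error in bit 0 */
		printf("CRC error reported, len=%#x: clear bit and resend\n",
		       len);
		len &= ~1u;
	}
	printf("chunk %u bytes -> %u blocks on the wire\n", len,
	       (len + BLOCK_SIZE - 1) / BLOCK_SIZE);	/* 800 -> 4 blocks */
	return 0;
}
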
794
795/*
796 * This function checks the firmware status in the card.
797 *
798 * The winner interface is also determined by this function.
799 */
800static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
801 u32 poll_num, int *winner)
802{
803 int ret = 0;
804 u16 firmware_stat;
805 u32 tries;
806 u32 winner_status;
807
808 /* Wait for firmware initialization event */
809 for (tries = 0; tries < poll_num; tries++) {
810 ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
811 if (ret)
812 continue;
813 if (firmware_stat == FIRMWARE_READY) {
814 ret = 0;
815 break;
816 } else {
817 mdelay(100);
818 ret = -1;
819 }
820 }
821
822 if (winner && ret) {
823 if (mwifiex_read_reg
824 (adapter, CARD_FW_STATUS0_REG, &winner_status))
825 winner_status = 0;
826
827 if (winner_status)
828 *winner = 0;
829 else
830 *winner = 1;
831 }
832 return ret;
833}
834
835/*
836 * This function reads the interrupt status from card.
837 */
838static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
839{
840 struct sdio_mmc_card *card = adapter->card;
841 u32 sdio_ireg = 0;
842 unsigned long flags;
843
844 if (mwifiex_read_data_sync(adapter, card->mp_regs, MAX_MP_REGS,
845 REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0,
846 0)) {
847 dev_err(adapter->dev, "read mp_regs failed\n");
848 return;
849 }
850
851 sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
852 if (sdio_ireg) {
853 /*
854 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
855 * Clear the interrupt status register
856 */
857 dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
858 spin_lock_irqsave(&adapter->int_lock, flags);
859 adapter->int_status |= sdio_ireg;
860 spin_unlock_irqrestore(&adapter->int_lock, flags);
861 }
864}
865
866/*
867 * SDIO interrupt handler.
868 *
869 * This function reads the interrupt status from the firmware and queues
870 * the main work item, which will handle the interrupt.
871 */
872static void
873mwifiex_sdio_interrupt(struct sdio_func *func)
874{
875 struct mwifiex_adapter *adapter;
876 struct sdio_mmc_card *card;
877
878 card = sdio_get_drvdata(func);
879 if (!card || !card->adapter) {
880 pr_debug("int: func=%p card=%p adapter=%p\n",
881 func, card, card ? card->adapter : NULL);
882 return;
883 }
884 adapter = card->adapter;
885
886 if (adapter->surprise_removed)
887 return;
888
889 if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
890 adapter->ps_state = PS_STATE_AWAKE;
891
892 mwifiex_interrupt_status(adapter);
893 queue_work(adapter->workqueue, &adapter->main_work);
896}
897
898/*
899 * This function decodes a received packet.
900 *
901 * Based on the type, the packet is treated as either a data packet,
902 * a command response, or an event, and the correct handler
903 * function is invoked.
904 */
905static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
906 struct sk_buff *skb, u32 upld_typ)
907{
908 u8 *cmd_buf;
909
910 skb_pull(skb, INTF_HEADER_LEN);
911
912 switch (upld_typ) {
913 case MWIFIEX_TYPE_DATA:
914 dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
915 mwifiex_handle_rx_packet(adapter, skb);
916 break;
917
918 case MWIFIEX_TYPE_CMD:
919 dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n");
920 /* handle the case where no command is pending (curr_cmd is NULL) */
921 if (!adapter->curr_cmd) {
922 cmd_buf = adapter->upld_buf;
923
924 if (adapter->ps_state == PS_STATE_SLEEP_CFM)
925 mwifiex_process_sleep_confirm_resp(adapter,
926 skb->data, skb->len);
927
928 memcpy(cmd_buf, skb->data, min_t(u32,
929 MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
930
931 dev_kfree_skb_any(skb);
932 } else {
933 adapter->cmd_resp_received = true;
934 adapter->curr_cmd->resp_skb = skb;
935 }
936 break;
937
938 case MWIFIEX_TYPE_EVENT:
939 dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
940 adapter->event_cause = *(u32 *) skb->data;
941
942 skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
943
944 if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
945 memcpy(adapter->event_body, skb->data, skb->len);
946
947 /* event cause has been saved to adapter->event_cause */
948 adapter->event_received = true;
949 adapter->event_skb = skb;
950
951 break;
952
953 default:
954 dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ);
955 dev_kfree_skb_any(skb);
956 break;
957 }
958
959 return 0;
960}
961
962/*
963 * This function transfers received packets from card to driver, performing
964 * aggregation if required.
965 *
966 * For data received on control port, or if aggregation is disabled, the
967 * received buffers are uploaded as separate packets. However, if aggregation
968 * is enabled and required, the buffers are copied into an aggregation buffer,
969 * provided there is space left, processed and finally uploaded.
970 */
971static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
972 struct sk_buff *skb, u8 port)
973{
974 struct sdio_mmc_card *card = adapter->card;
975 s32 f_do_rx_aggr = 0;
976 s32 f_do_rx_cur = 0;
977 s32 f_aggr_cur = 0;
978 struct sk_buff *skb_deaggr;
979 u32 pind = 0;
980 u32 pkt_len, pkt_type = 0;
981 u8 *curr_ptr;
982 u32 rx_len = skb->len;
983
984 if (port == CTRL_PORT) {
985 /* Read the command Resp without aggr */
986 dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
987 "response\n", __func__);
988
989 f_do_rx_cur = 1;
990 goto rx_curr_single;
991 }
992
993 if (!card->mpa_rx.enabled) {
994 dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
995 __func__);
996
997 f_do_rx_cur = 1;
998 goto rx_curr_single;
999 }
1000
1001 if (card->mp_rd_bitmap & (~((u16) CTRL_PORT_MASK))) {
1002 /* Some more data RX pending */
1003 dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
1004
1005 if (MP_RX_AGGR_IN_PROGRESS(card)) {
1006 if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) {
1007 f_aggr_cur = 1;
1008 } else {
1009 /* No room in Aggr buf, do rx aggr now */
1010 f_do_rx_aggr = 1;
1011 f_do_rx_cur = 1;
1012 }
1013 } else {
1014 /* Rx aggr not in progress */
1015 f_aggr_cur = 1;
1016 }
1017
1018 } else {
1019 /* No more data RX pending */
1020 dev_dbg(adapter->dev, "info: %s: last packet\n", __func__);
1021
1022 if (MP_RX_AGGR_IN_PROGRESS(card)) {
1023 f_do_rx_aggr = 1;
1024 if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len))
1025 f_aggr_cur = 1;
1026 else
1027 /* No room in Aggr buf, do rx aggr now */
1028 f_do_rx_cur = 1;
1029 } else {
1030 f_do_rx_cur = 1;
1031 }
1032 }
1033
1034 if (f_aggr_cur) {
1035 dev_dbg(adapter->dev, "info: current packet aggregation\n");
1036 /* Curr pkt can be aggregated */
1037 MP_RX_AGGR_SETUP(card, skb, port);
1038
1039 if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
1040 MP_RX_AGGR_PORT_LIMIT_REACHED(card)) {
1041 dev_dbg(adapter->dev, "info: %s: aggregated packet "
1042 "limit reached\n", __func__);
1043 /* No more pkts allowed in Aggr buf, rx it */
1044 f_do_rx_aggr = 1;
1045 }
1046 }
1047
1048 if (f_do_rx_aggr) {
1049 /* do aggr RX now */
1050 dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
1051 card->mpa_rx.pkt_cnt);
1052
1053 if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
1054 card->mpa_rx.buf_len,
1055 (adapter->ioport | 0x1000 |
1056 (card->mpa_rx.ports << 4)) +
1057 card->mpa_rx.start_port, 0, 1))
1058 return -1;
1059
1060 curr_ptr = card->mpa_rx.buf;
1061
1062 for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
1063
1064 /* get curr PKT len & type */
1065 pkt_len = *(u16 *) &curr_ptr[0];
1066 pkt_type = *(u16 *) &curr_ptr[2];
1067
1068 /* copy pkt to deaggr buf */
1069 skb_deaggr = card->mpa_rx.skb_arr[pind];
1070
1071 if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <=
1072 card->mpa_rx.len_arr[pind])) {
1073
1074 memcpy(skb_deaggr->data, curr_ptr, pkt_len);
1075
1076 skb_trim(skb_deaggr, pkt_len);
1077
1078 /* Process de-aggr packet */
1079 mwifiex_decode_rx_packet(adapter, skb_deaggr,
1080 pkt_type);
1081 } else {
1082 dev_err(adapter->dev, "wrong aggr pkt:"
1083 " type=%d len=%d max_len=%d\n",
1084 pkt_type, pkt_len,
1085 card->mpa_rx.len_arr[pind]);
1086 dev_kfree_skb_any(skb_deaggr);
1087 }
1088 curr_ptr += card->mpa_rx.len_arr[pind];
1089 }
1090 MP_RX_AGGR_BUF_RESET(card);
1091 }
1092
1093rx_curr_single:
1094 if (f_do_rx_cur) {
1095 dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
1096 port, rx_len);
1097
1098 if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
1099 skb->data, skb->len,
1100 adapter->ioport + port))
1101 return -1;
1102
1103 mwifiex_decode_rx_packet(adapter, skb, pkt_type);
1104 }
1105
1106 return 0;
1107}
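
/*
 * A sketch of how the aggregated-read address above is built, assuming
 * (as the code implies) that bit 12 of the address tells the firmware
 * this is a multi-port aggregated transfer and that bits 4 and up carry
 * the port bitmap; the ioport/ports/start_port values are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ioport = 0x10000;	/* hypothetical FUNC1 I/O port */
	unsigned int ports = 0x07;	/* three consecutive ports aggregated */
	unsigned int start_port = 2;

	unsigned int addr = (ioport | 0x1000 | (ports << 4)) + start_port;
	printf("aggregated read at %#x\n", addr);	/* 0x11072 */
	return 0;
}
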
1108
1109/*
1110 * This function checks the current interrupt status.
1111 *
1112 * The following interrupts are checked and handled by this function -
1113 * - Data sent
1114 * - Command sent
1115 * - Packets received
1116 *
1117 * Since the firmware does not generate a download-ready interrupt when
1118 * only the command port is updated, the command-sent status must be
1119 * checked manually on every SDIO interrupt.
1120 *
1121 * In case of Rx packets received, the packets are uploaded from card to
1122 * host and processed accordingly.
1123 */
1124static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1125{
1126 struct sdio_mmc_card *card = adapter->card;
1127 int ret = 0;
1128 u8 sdio_ireg;
1129 struct sk_buff *skb = NULL;
1130 u8 port = CTRL_PORT;
1131 u32 len_reg_l, len_reg_u;
1132 u32 rx_blocks;
1133 u16 rx_len;
1134 unsigned long flags;
1135
1136 spin_lock_irqsave(&adapter->int_lock, flags);
1137 sdio_ireg = adapter->int_status;
1138 adapter->int_status = 0;
1139 spin_unlock_irqrestore(&adapter->int_lock, flags);
1140
1141 if (!sdio_ireg)
1142 return ret;
1143
1144 if (sdio_ireg & DN_LD_HOST_INT_STATUS) {
1145 card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8;
1146 card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L];
1147 dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n",
1148 card->mp_wr_bitmap);
1149 if (adapter->data_sent &&
1150 (card->mp_wr_bitmap & card->mp_data_port_mask)) {
1151 dev_dbg(adapter->dev,
1152 "info: <--- Tx DONE Interrupt --->\n");
1153 adapter->data_sent = false;
1154 }
1155 }
1156
1157 /* The firmware does not generate a download-ready interrupt when only
1158 the command port is updated, so cmd_sent must be re-checked on every
1159 SDIO interrupt. */
1160 if (adapter->cmd_sent) {
1161 /* Check if the firmware has attached a buffer at the command
1162 port and update just that bit in the write bitmap. */
1163 card->mp_wr_bitmap |=
1164 (u16) card->mp_regs[WR_BITMAP_L] & CTRL_PORT_MASK;
1165 if (card->mp_wr_bitmap & CTRL_PORT_MASK)
1166 adapter->cmd_sent = false;
1167 }
1168
1169 dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
1170 adapter->cmd_sent, adapter->data_sent);
1171 if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
1172 card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8;
1173 card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L];
1174 dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n",
1175 card->mp_rd_bitmap);
1176
1177 while (true) {
1178 ret = mwifiex_get_rd_port(adapter, &port);
1179 if (ret) {
1180 dev_dbg(adapter->dev,
1181 "info: no more rd_port available\n");
1182 break;
1183 }
1184 len_reg_l = RD_LEN_P0_L + (port << 1);
1185 len_reg_u = RD_LEN_P0_U + (port << 1);
1186 rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
1187 rx_len |= (u16) card->mp_regs[len_reg_l];
1188 dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
1189 port, rx_len);
1190 rx_blocks =
1191 (rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
1192 1) / MWIFIEX_SDIO_BLOCK_SIZE;
1193 if (rx_len <= INTF_HEADER_LEN
1194 || (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
1195 MWIFIEX_RX_DATA_BUF_SIZE) {
1196 dev_err(adapter->dev, "invalid rx_len=%d\n",
1197 rx_len);
1198 return -1;
1199 }
1200 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
1201
1202 skb = dev_alloc_skb(rx_len);
1203
1204 if (!skb) {
1205 dev_err(adapter->dev, "%s: failed to alloc skb",
1206 __func__);
1207 return -1;
1208 }
1209
1210 skb_put(skb, rx_len);
1211
1212 dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
1213 rx_len, skb->len);
1214
1215 if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
1216 port)) {
1217 u32 cr = 0;
1218
1219 dev_err(adapter->dev, "card_to_host_mpa failed:"
1220 " int status=%#x\n", sdio_ireg);
1221 if (mwifiex_read_reg(adapter,
1222 CONFIGURATION_REG, &cr))
1223 dev_err(adapter->dev,
1224 "read CFG reg failed\n");
1225
1226 dev_dbg(adapter->dev,
1227 "info: CFG reg val = %d\n", cr);
1228 if (mwifiex_write_reg(adapter,
1229 CONFIGURATION_REG,
1230 (cr | 0x04)))
1231 dev_err(adapter->dev,
1232 "write CFG reg failed\n");
1233
1234 dev_dbg(adapter->dev, "info: write success\n");
1235 if (mwifiex_read_reg(adapter,
1236 CONFIGURATION_REG, &cr))
1237 dev_err(adapter->dev,
1238 "read CFG reg failed\n");
1239
1240 dev_dbg(adapter->dev,
1241 "info: CFG reg val =%x\n", cr);
1242 dev_kfree_skb_any(skb);
1243 return -1;
1244 }
1245 }
1246 }
1247
1248 return 0;
1249}
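
/*
 * A sketch of how the interrupt path above reconstructs 16-bit bitmaps
 * and per-port lengths from the byte-wide register snapshot in mp_regs.
 * The register offsets mirror sdio.h; the snapshot contents are made up.
 */
#include <stdio.h>

#define RD_BITMAP_L	0x04
#define RD_BITMAP_U	0x05
#define RD_LEN_P0_L	0x08
#define RD_LEN_P0_U	0x09

int main(void)
{
	unsigned char mp_regs[64] = { 0 };
	unsigned int port = 1;

	mp_regs[RD_BITMAP_L] = 0x02;		/* port 1 has data pending */
	mp_regs[RD_LEN_P0_L + (port << 1)] = 0x40;
	mp_regs[RD_LEN_P0_U + (port << 1)] = 0x01;

	unsigned short rd_bitmap = (mp_regs[RD_BITMAP_U] << 8) |
				   mp_regs[RD_BITMAP_L];
	unsigned short rx_len = (mp_regs[RD_LEN_P0_U + (port << 1)] << 8) |
				mp_regs[RD_LEN_P0_L + (port << 1)];

	printf("rd_bitmap=%#06x port %u rx_len=%u\n", rd_bitmap, port, rx_len);
	return 0;
}
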
1250
1251/*
1252 * This function aggregates transmission buffers in driver and downloads
1253 * the aggregated packet to card.
1254 *
1255 * The individual packets are aggregated by copying into an aggregation
1256 * buffer and then downloaded to the card. Previous unsent packets in the
1257 * aggregation buffer are pre-copied first before new packets are added.
1258 * Aggregation continues as long as there is room left in the aggregation
1259 * buffer and new packets are available.
1260 *
1261 * The function will only download the packet to the card when aggregation
1262 * stops, otherwise it will just aggregate the packet in aggregation buffer
1263 * and return.
1264 */
1265static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
1266 u8 *payload, u32 pkt_len, u8 port,
1267 u32 next_pkt_len)
1268{
1269 struct sdio_mmc_card *card = adapter->card;
1270 int ret = 0;
1271 s32 f_send_aggr_buf = 0;
1272 s32 f_send_cur_buf = 0;
1273 s32 f_precopy_cur_buf = 0;
1274 s32 f_postcopy_cur_buf = 0;
1275
1276 if ((!card->mpa_tx.enabled) || (port == CTRL_PORT)) {
1277 dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
1278 __func__);
1279
1280 f_send_cur_buf = 1;
1281 goto tx_curr_single;
1282 }
1283
1284 if (next_pkt_len) {
1285 /* More pkt in TX queue */
1286 dev_dbg(adapter->dev, "info: %s: more packets in queue.\n",
1287 __func__);
1288
1289 if (MP_TX_AGGR_IN_PROGRESS(card)) {
1290 if (!MP_TX_AGGR_PORT_LIMIT_REACHED(card) &&
1291 MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
1292 f_precopy_cur_buf = 1;
1293
1294 if (!(card->mp_wr_bitmap &
1295 (1 << card->curr_wr_port))
1296 || !MP_TX_AGGR_BUF_HAS_ROOM(
1297 card, next_pkt_len))
1298 f_send_aggr_buf = 1;
1299 } else {
1300 /* No room in Aggr buf, send it */
1301 f_send_aggr_buf = 1;
1302
1303 if (MP_TX_AGGR_PORT_LIMIT_REACHED(card) ||
1304 !(card->mp_wr_bitmap &
1305 (1 << card->curr_wr_port)))
1306 f_send_cur_buf = 1;
1307 else
1308 f_postcopy_cur_buf = 1;
1309 }
1310 } else {
1311 if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)
1312 && (card->mp_wr_bitmap & (1 << card->curr_wr_port)))
1313 f_precopy_cur_buf = 1;
1314 else
1315 f_send_cur_buf = 1;
1316 }
1317 } else {
1318 /* Last pkt in TX queue */
1319 dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n",
1320 __func__);
1321
1322 if (MP_TX_AGGR_IN_PROGRESS(card)) {
1323 /* some packets already in Aggr buf */
1324 f_send_aggr_buf = 1;
1325
1326 if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len))
1327 f_precopy_cur_buf = 1;
1328 else
1329 /* No room in Aggr buf, send it */
1330 f_send_cur_buf = 1;
1331 } else {
1332 f_send_cur_buf = 1;
1333 }
1334 }
1335
1336 if (f_precopy_cur_buf) {
1337 dev_dbg(adapter->dev, "data: %s: precopy current buffer\n",
1338 __func__);
1339 MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
1340
1341 if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
1342 MP_TX_AGGR_PORT_LIMIT_REACHED(card))
1343 /* No more pkts allowed in Aggr buf, send it */
1344 f_send_aggr_buf = 1;
1345 }
1346
1347 if (f_send_aggr_buf) {
1348 dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
1349 __func__,
1350 card->mpa_tx.start_port, card->mpa_tx.ports);
1351 ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
1352 card->mpa_tx.buf_len,
1353 (adapter->ioport | 0x1000 |
1354 (card->mpa_tx.ports << 4)) +
1355 card->mpa_tx.start_port);
1356
1357 MP_TX_AGGR_BUF_RESET(card);
1358 }
1359
1360tx_curr_single:
1361 if (f_send_cur_buf) {
1362 dev_dbg(adapter->dev, "data: %s: send current buffer %d\n",
1363 __func__, port);
1364 ret = mwifiex_write_data_to_card(adapter, payload, pkt_len,
1365 adapter->ioport + port);
1366 }
1367
1368 if (f_postcopy_cur_buf) {
1369 dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n",
1370 __func__);
1371 MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
1372 }
1373
1374 return ret;
1375}
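
/*
 * A condensed sketch of the Tx aggregation decision above, under a
 * deliberately simplified model (only: aggregation in progress, room left
 * in the buffer, current write port ready, more packets queued); it
 * mirrors the flag outcomes, not the real data path, and omits the
 * port-limit checks.
 */
#include <stdio.h>

static void decide(int aggr_in_progress, int has_room, int port_ready,
		   int more_queued)
{
	int precopy = 0, postcopy = 0, send_aggr = 0, send_cur = 0;

	if (!more_queued) {		/* last packet in the queue */
		if (aggr_in_progress) {
			send_aggr = 1;
			if (has_room)
				precopy = 1;
			else
				send_cur = 1;
		} else {
			send_cur = 1;
		}
	} else if (aggr_in_progress) {
		if (has_room) {
			precopy = 1;
			if (!port_ready)
				send_aggr = 1;
		} else {
			send_aggr = 1;
			if (!port_ready)
				send_cur = 1;
			else
				postcopy = 1;
		}
	} else {
		if (has_room && port_ready)
			precopy = 1;
		else
			send_cur = 1;
	}
	printf("precopy=%d postcopy=%d send_aggr=%d send_cur=%d\n",
	       precopy, postcopy, send_aggr, send_cur);
}

int main(void)
{
	decide(1, 1, 1, 1);	/* keep aggregating */
	decide(1, 0, 1, 1);	/* flush buffer, postcopy current packet */
	decide(1, 1, 1, 0);	/* last packet: append, then flush */
	return 0;
}
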
1376
1377/*
1378 * This function downloads data from driver to card.
1379 *
1380 * Both commands and data packets are transferred to the card by this
1381 * function.
1382 *
1383 * This function adds the SDIO specific header to the front of the buffer
1384 * before transferring. The header contains the length of the packet and
1385 * the type. The firmware handles the packets based upon this set type.
1386 */
1387static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
1388 u8 type, u8 *payload, u32 pkt_len,
1389 struct mwifiex_tx_param *tx_param)
1390{
1391 struct sdio_mmc_card *card = adapter->card;
1392 int ret = 0;
1393 u32 buf_block_len;
1394 u32 blk_size;
1395 u8 port = CTRL_PORT;
1396
1397 /* Compute block count and fill the SDIO interface header */
1398 blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
1399 buf_block_len = (pkt_len + blk_size - 1) / blk_size;
1400 *(u16 *) &payload[0] = (u16) pkt_len;
1401 *(u16 *) &payload[2] = type;
1402
1403 /*
1404 * This is SDIO specific header
1405 * u16 length,
1406 * u16 type (MWIFIEX_TYPE_DATA = 0, MWIFIEX_TYPE_CMD = 1,
1407 * MWIFIEX_TYPE_EVENT = 3)
1408 */
1409 if (type == MWIFIEX_TYPE_DATA) {
1410 ret = mwifiex_get_wr_port_data(adapter, &port);
1411 if (ret) {
1412 dev_err(adapter->dev, "%s: no wr_port available\n",
1413 __func__);
1414 return ret;
1415 }
1416 } else {
1417 adapter->cmd_sent = true;
1418 /* Type must be MWIFIEX_TYPE_CMD */
1419
1420 if (pkt_len <= INTF_HEADER_LEN ||
1421 pkt_len > MWIFIEX_UPLD_SIZE)
1422 dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
1423 __func__, payload, pkt_len);
1424 }
1425
1426 /* Transfer data to card */
1427 pkt_len = buf_block_len * blk_size;
1428
1429 if (tx_param)
1430 ret = mwifiex_host_to_card_mp_aggr(adapter, payload, pkt_len,
1431 port, tx_param->next_pkt_len);
1432 else
1433 ret = mwifiex_host_to_card_mp_aggr(adapter, payload, pkt_len,
1434 port, 0);
1435
1436 if (ret) {
1437 if (type == MWIFIEX_TYPE_CMD)
1438 adapter->cmd_sent = false;
1439 if (type == MWIFIEX_TYPE_DATA)
1440 adapter->data_sent = false;
1441 } else {
1442 if (type == MWIFIEX_TYPE_DATA) {
1443 if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
1444 adapter->data_sent = true;
1445 else
1446 adapter->data_sent = false;
1447 }
1448 }
1449
1450 return ret;
1451}
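
/*
 * A sketch of the 4-byte interface header written above: a little-endian
 * u16 total length followed by a little-endian u16 type. Packing is done
 * bytewise here so the sketch is endian-safe; the driver stores the
 * fields directly. put_le16() and the sample length are illustrative.
 */
#include <stdio.h>

enum { TYPE_DATA = 0, TYPE_CMD = 1, TYPE_EVENT = 3 };

static void put_le16(unsigned char *p, unsigned short v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	unsigned char hdr[4];
	unsigned short pkt_len = 1540;	/* header + payload, hypothetical */

	put_le16(&hdr[0], pkt_len);
	put_le16(&hdr[2], TYPE_DATA);

	printf("header: %02x %02x %02x %02x\n",
	       hdr[0], hdr[1], hdr[2], hdr[3]);	/* 04 06 00 00 */
	return 0;
}
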
1452
1453/*
1454 * This function allocates the MPA Tx and Rx buffers.
1455 */
1456static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
1457 u32 mpa_tx_buf_size, u32 mpa_rx_buf_size)
1458{
1459 struct sdio_mmc_card *card = adapter->card;
1460 int ret = 0;
1461
1462 card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL);
1463 if (!card->mpa_tx.buf) {
1464 dev_err(adapter->dev, "could not alloc buffer for MP-A TX\n");
1465 ret = -1;
1466 goto error;
1467 }
1468
1469 card->mpa_tx.buf_size = mpa_tx_buf_size;
1470
1471 card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL);
1472 if (!card->mpa_rx.buf) {
1473 dev_err(adapter->dev, "could not alloc buffer for MP-A RX\n");
1474 ret = -1;
1475 goto error;
1476 }
1477
1478 card->mpa_rx.buf_size = mpa_rx_buf_size;
1479
1480error:
1481 if (ret) {
1482 kfree(card->mpa_tx.buf);
1483 kfree(card->mpa_rx.buf);
1484 }
1485
1486 return ret;
1487}
1488
1489/*
1490 * This function unregisters the SDIO device.
1491 *
1492 * The SDIO IRQ is released, the function is disabled and driver
1493 * data is set to null.
1494 */
1495static void
1496mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
1497{
1498 struct sdio_mmc_card *card = adapter->card;
1499
1500 if (adapter->card) {
1501 /* Release the SDIO IRQ */
1502 sdio_claim_host(card->func);
1503 sdio_release_irq(card->func);
1504 sdio_disable_func(card->func);
1505 sdio_release_host(card->func);
1506 sdio_set_drvdata(card->func, NULL);
1507 }
1508}
1509
1510/*
1511 * This function registers the SDIO device.
1512 *
1513 * SDIO IRQ is claimed, block size is set and driver data is initialized.
1514 */
1515static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1516{
1517 int ret = 0;
1518 struct sdio_mmc_card *card = adapter->card;
1519 struct sdio_func *func = card->func;
1520
1521 /* save adapter pointer in card */
1522 card->adapter = adapter;
1523
1524 sdio_claim_host(func);
1525
1526 /* Request the SDIO IRQ */
1527 ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
1528 if (ret) {
1529 pr_err("claim irq failed: ret=%d\n", ret);
1530 goto disable_func;
1531 }
1532
1533 /* Set block size */
1534 ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
1535 if (ret) {
1536 pr_err("cannot set SDIO block size\n");
1537 ret = -1;
1538 goto release_irq;
1539 }
1540
1541 sdio_release_host(func);
1542 sdio_set_drvdata(func, card);
1543
1544 adapter->dev = &func->dev;
1545
1546 return 0;
1547
1548release_irq:
1549 sdio_release_irq(func);
1550disable_func:
1551 sdio_disable_func(func);
1552 sdio_release_host(func);
1553 adapter->card = NULL;
1554
1555 return -1;
1556}
1557
1558/*
1559 * This function initializes the SDIO driver.
1560 *
1561 * The following initialization steps are performed -
1562 * - Read the Host interrupt status register to acknowledge
1563 * the first interrupt got from bootloader
1564 * - Disable host interrupt mask register
1565 * - Get SDIO port
1566 * - Get revision ID
1567 * - Initialize SDIO variables in card
1568 * - Allocate MP registers
1569 * - Allocate MPA Tx and Rx buffers
1570 */
1571static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1572{
1573 struct sdio_mmc_card *card = adapter->card;
1574 int ret;
1575 u32 sdio_ireg = 0;
1576
1577 /*
1578 * Read the HOST_INT_STATUS_REG to ACK the first interrupt from
1579 * the bootloader. If we don't do this we get an interrupt as
1580 * soon as we register the IRQ.
1581 */
1582 mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg);
1583
1584 /* Disable host interrupt mask register for SDIO */
1585 mwifiex_sdio_disable_host_int(adapter);
1586
1587 /* Get SDIO ioport */
1588 mwifiex_init_sdio_ioport(adapter);
1589
1590 /* Get revision ID */
1591#define REV_ID_REG 0x5c
1592 mwifiex_read_reg(adapter, REV_ID_REG, &adapter->revision_id);
1593
1594 /* Initialize SDIO variables in card */
1595 card->mp_rd_bitmap = 0;
1596 card->mp_wr_bitmap = 0;
1597 card->curr_rd_port = 1;
1598 card->curr_wr_port = 1;
1599
1600 card->mp_data_port_mask = DATA_PORT_MASK;
1601
1602 card->mpa_tx.buf_len = 0;
1603 card->mpa_tx.pkt_cnt = 0;
1604 card->mpa_tx.start_port = 0;
1605
1606 card->mpa_tx.enabled = 0;
1607 card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
1608
1609 card->mpa_rx.buf_len = 0;
1610 card->mpa_rx.pkt_cnt = 0;
1611 card->mpa_rx.start_port = 0;
1612
1613 card->mpa_rx.enabled = 0;
1614 card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
1615
1616 /* Allocate buffer for the SDIO status register snapshot */
1617 card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL);
1618 if (!card->mp_regs) {
1619 dev_err(adapter->dev, "failed to alloc mp_regs\n");
1620 return -1;
1621 }
1622
1623 ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
1624 SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
1625 SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
1626 if (ret) {
1627 dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
1628 kfree(card->mp_regs);
1629 return -1;
1630 }
1631
1632 return ret;
1633}
1634
1635/*
1636 * This function resets the MPA Tx and Rx buffers.
1637 */
1638static void mwifiex_cleanup_mpa_buf(struct mwifiex_adapter *adapter)
1639{
1640 struct sdio_mmc_card *card = adapter->card;
1641
1642 MP_TX_AGGR_BUF_RESET(card);
1643 MP_RX_AGGR_BUF_RESET(card);
1644}
1645
1646/*
1647 * This function cleans up the allocated card buffers.
1648 *
1649 * The following are freed by this function -
1650 * - MP registers
1651 * - MPA Tx buffer
1652 * - MPA Rx buffer
1653 */
1654static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
1655{
1656 struct sdio_mmc_card *card = adapter->card;
1657
1658 kfree(card->mp_regs);
1659 kfree(card->mpa_tx.buf);
1660 kfree(card->mpa_rx.buf);
1661}
1662
1663/*
1664 * This function updates the MP end port in card.
1665 */
1666static void
1667mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
1668{
1669 struct sdio_mmc_card *card = adapter->card;
1670 int i;
1671
1672 card->mp_end_port = port;
1673
1674 card->mp_data_port_mask = DATA_PORT_MASK;
1675
1676 for (i = 1; i <= MAX_PORT - card->mp_end_port; i++)
1677 card->mp_data_port_mask &= ~(1 << (MAX_PORT - i));
1678
1679 card->curr_wr_port = 1;
1680
1681 dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
1682 port, card->mp_data_port_mask);
1683}
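
/*
 * A sketch of the data-port mask computation above: starting from
 * DATA_PORT_MASK (ports 1..15, constants as in sdio.h), every port from
 * mp_end_port upwards is masked out. The end port of 8 is a hypothetical
 * firmware-supplied value; with it, ports 1..7 remain usable.
 */
#include <stdio.h>

#define MAX_PORT	16
#define DATA_PORT_MASK	0xfffe

int main(void)
{
	unsigned short end_port = 8;	/* hypothetical firmware value */
	unsigned short mask = DATA_PORT_MASK;

	for (int i = 1; i <= MAX_PORT - end_port; i++)
		mask &= ~(1 << (MAX_PORT - i));

	printf("mp_end_port=%u data port mask=%#06x\n", end_port, mask);
	return 0;	/* prints mask 0x00fe: ports 1..7 */
}
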
1684
1685static struct mwifiex_if_ops sdio_ops = {
1686 .init_if = mwifiex_init_sdio,
1687 .cleanup_if = mwifiex_cleanup_sdio,
1688 .check_fw_status = mwifiex_check_fw_status,
1689 .prog_fw = mwifiex_prog_fw_w_helper,
1690 .register_dev = mwifiex_register_dev,
1691 .unregister_dev = mwifiex_unregister_dev,
1692 .enable_int = mwifiex_sdio_enable_host_int,
1693 .process_int_status = mwifiex_process_int_status,
1694 .host_to_card = mwifiex_sdio_host_to_card,
1695 .wakeup = mwifiex_pm_wakeup_card,
1696 .wakeup_complete = mwifiex_pm_wakeup_card_complete,
1697
1698 /* SDIO specific */
1699 .update_mp_end_port = mwifiex_update_mp_end_port,
1700 .cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
1701};
1702
1703/*
1704 * This function initializes the SDIO driver.
1705 *
1706 * This initiates the semaphore and registers the device with
1707 * SDIO bus.
1708 */
1709static int
1710mwifiex_sdio_init_module(void)
1711{
1712 int ret;
1713
1714 sema_init(&add_remove_card_sem, 1);
1715
1716 ret = sdio_register_driver(&mwifiex_sdio);
1717
1718 return ret;
1719}
1720
1721/*
1722 * This function cleans up the SDIO driver.
1723 *
1724 * The following major steps are followed for cleanup -
1725 * - Resume the device if it is suspended
1726 * - Disconnect the device if connected
1727 * - Shutdown the firmware
1728 * - Unregister the device from SDIO bus.
1729 */
1730static void
1731mwifiex_sdio_cleanup_module(void)
1732{
1733 struct mwifiex_adapter *adapter = g_adapter;
1734 int i;
1735
1736 if (down_interruptible(&add_remove_card_sem))
1737 goto exit_sem_err;
1738
1739 if (!adapter || !adapter->priv_num)
1740 goto exit;
1741
1742 if (adapter->is_suspended)
1743 mwifiex_sdio_resume(adapter->dev);
1744
1745 for (i = 0; i < adapter->priv_num; i++)
1746 if ((GET_BSS_ROLE(adapter->priv[i]) == MWIFIEX_BSS_ROLE_STA) &&
1747 adapter->priv[i]->media_connected)
1748 mwifiex_disconnect(adapter->priv[i], MWIFIEX_CMD_WAIT,
1749 NULL);
1750
1751 if (!adapter->surprise_removed)
1752 mwifiex_shutdown_fw(mwifiex_get_priv
1753 (adapter, MWIFIEX_BSS_ROLE_ANY),
1754 MWIFIEX_CMD_WAIT);
1755
1756exit:
1757 up(&add_remove_card_sem);
1758
1759exit_sem_err:
1760 sdio_unregister_driver(&mwifiex_sdio);
1761}
1762
1763module_init(mwifiex_sdio_init_module);
1764module_exit(mwifiex_sdio_cleanup_module);
1765
1766MODULE_AUTHOR("Marvell International Ltd.");
1767MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
1768MODULE_VERSION(SDIO_VERSION);
1769MODULE_LICENSE("GPL v2");
1770MODULE_FIRMWARE("sd8787.bin");
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
new file mode 100644
index 00000000000..a0e9bc5253e
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -0,0 +1,305 @@
1/*
2 * Marvell Wireless LAN device driver: SDIO specific definitions
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_SDIO_H
21#define _MWIFIEX_SDIO_H
22
23
24#include <linux/mmc/sdio.h>
25#include <linux/mmc/sdio_ids.h>
26#include <linux/mmc/sdio_func.h>
27#include <linux/mmc/card.h>
28
29#include "main.h"
30
31#define BLOCK_MODE 1
32#define BYTE_MODE 0
33
34#define REG_PORT 0
35#define RD_BITMAP_L 0x04
36#define RD_BITMAP_U 0x05
37#define WR_BITMAP_L 0x06
38#define WR_BITMAP_U 0x07
39#define RD_LEN_P0_L 0x08
40#define RD_LEN_P0_U 0x09
41
42#define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff
43
44#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000
45
46#define CTRL_PORT 0
47#define CTRL_PORT_MASK 0x0001
48#define DATA_PORT_MASK 0xfffe
49
50#define MAX_MP_REGS 64
51#define MAX_PORT 16
52
53#define SDIO_MP_AGGR_DEF_PKT_LIMIT 8
54
55#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (4096) /* 4K */
56
57/* Multi port RX aggregation buffer size */
58#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (4096) /* 4K */
59
60/* Misc. Config Register : Auto Re-enable interrupts */
61#define AUTO_RE_ENABLE_INT BIT(4)
62
63/* Host Control Registers */
64/* Host Control Registers : I/O port 0 */
65#define IO_PORT_0_REG 0x78
66/* Host Control Registers : I/O port 1 */
67#define IO_PORT_1_REG 0x79
68/* Host Control Registers : I/O port 2 */
69#define IO_PORT_2_REG 0x7A
70
71/* Host Control Registers : Configuration */
72#define CONFIGURATION_REG 0x00
73/* Host Control Registers : Host to card event */
74#define HOST_TO_CARD_EVENT (0x1U << 3)
75/* Host Control Registers : Host without Command 53 finish host */
76#define HOST_WO_CMD53_FINISH_HOST (0x1U << 2)
77/* Host Control Registers : Host power up */
78#define HOST_POWER_UP (0x1U << 1)
79/* Host Control Registers : Host power down */
80#define HOST_POWER_DOWN (0x1U << 0)
81
82/* Host Control Registers : Host interrupt mask */
83#define HOST_INT_MASK_REG 0x02
84/* Host Control Registers : Upload host interrupt mask */
85#define UP_LD_HOST_INT_MASK (0x1U)
86/* Host Control Registers : Download host interrupt mask */
87#define DN_LD_HOST_INT_MASK (0x2U)
88/* Enable Host interrupt mask */
89#define HOST_INT_ENABLE (UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK)
90/* Disable Host interrupt mask */
91#define HOST_INT_DISABLE 0xff
92
93/* Host Control Registers : Host interrupt status */
94#define HOST_INTSTATUS_REG 0x03
95/* Host Control Registers : Upload host interrupt status */
96#define UP_LD_HOST_INT_STATUS (0x1U)
97/* Host Control Registers : Download host interrupt status */
98#define DN_LD_HOST_INT_STATUS (0x2U)
99
100/* Host Control Registers : Host interrupt RSR */
101#define HOST_INT_RSR_REG 0x01
102/* Host Control Registers : Upload host interrupt RSR */
103#define UP_LD_HOST_INT_RSR (0x1U)
104#define SDIO_INT_MASK 0x3F
105
106/* Host Control Registers : Host interrupt status */
107#define HOST_INT_STATUS_REG 0x28
108/* Host Control Registers : Upload CRC error */
109#define UP_LD_CRC_ERR (0x1U << 2)
110/* Host Control Registers : Upload restart */
111#define UP_LD_RESTART (0x1U << 1)
112/* Host Control Registers : Download restart */
113#define DN_LD_RESTART (0x1U << 0)
114
115/* Card Control Registers : Card status register */
116#define CARD_STATUS_REG 0x30
117/* Card Control Registers : Card I/O ready */
118#define CARD_IO_READY (0x1U << 3)
119/* Card Control Registers : CIS card ready */
120#define CIS_CARD_RDY (0x1U << 2)
121/* Card Control Registers : Upload card ready */
122#define UP_LD_CARD_RDY (0x1U << 1)
123/* Card Control Registers : Download card ready */
124#define DN_LD_CARD_RDY (0x1U << 0)
125
126/* Card Control Registers : Host interrupt mask register */
127#define HOST_INTERRUPT_MASK_REG 0x34
128/* Card Control Registers : Host power interrupt mask */
129#define HOST_POWER_INT_MASK (0x1U << 3)
130/* Card Control Registers : Abort card interrupt mask */
131#define ABORT_CARD_INT_MASK (0x1U << 2)
132/* Card Control Registers : Upload card interrupt mask */
133#define UP_LD_CARD_INT_MASK (0x1U << 1)
134/* Card Control Registers : Download card interrupt mask */
135#define DN_LD_CARD_INT_MASK (0x1U << 0)
136
137/* Card Control Registers : Card interrupt status register */
138#define CARD_INTERRUPT_STATUS_REG 0x38
139/* Card Control Registers : Power up interrupt */
140#define POWER_UP_INT (0x1U << 4)
141/* Card Control Registers : Power down interrupt */
142#define POWER_DOWN_INT (0x1U << 3)
143
144/* Card Control Registers : Card interrupt RSR register */
145#define CARD_INTERRUPT_RSR_REG 0x3c
146/* Card Control Registers : Power up RSR */
147#define POWER_UP_RSR (0x1U << 4)
148/* Card Control Registers : Power down RSR */
149#define POWER_DOWN_RSR (0x1U << 3)
150
151/* Card Control Registers : Miscellaneous Configuration Register */
152#define CARD_MISC_CFG_REG 0x6C
153
154/* Host F1 read base 0 */
155#define HOST_F1_RD_BASE_0 0x0040
156/* Host F1 read base 1 */
157#define HOST_F1_RD_BASE_1 0x0041
158/* Host F1 card ready */
159#define HOST_F1_CARD_RDY 0x0020
160
161/* Firmware status 0 register */
162#define CARD_FW_STATUS0_REG 0x60
163/* Firmware status 1 register */
164#define CARD_FW_STATUS1_REG 0x61
165/* Rx length register */
166#define CARD_RX_LEN_REG 0x62
167/* Rx unit register */
168#define CARD_RX_UNIT_REG 0x63
169
170/* Event header Len*/
171#define MWIFIEX_EVENT_HEADER_LEN 8
172
173/* Max retry number of CMD53 write */
174#define MAX_WRITE_IOMEM_RETRY 2
175
176/* SDIO Tx aggregation in progress ? */
177#define MP_TX_AGGR_IN_PROGRESS(a) (a->mpa_tx.pkt_cnt > 0)
178
179/* SDIO Tx aggregation buffer room for next packet ? */
180#define MP_TX_AGGR_BUF_HAS_ROOM(a, len) ((a->mpa_tx.buf_len+len) \
181 <= a->mpa_tx.buf_size)
182
183/* Copy current packet (SDIO Tx aggregation buffer) to SDIO buffer */
184#define MP_TX_AGGR_BUF_PUT(a, payload, pkt_len, port) do { \
185 memmove(&a->mpa_tx.buf[a->mpa_tx.buf_len], \
186 payload, pkt_len); \
187 a->mpa_tx.buf_len += pkt_len; \
188 if (!a->mpa_tx.pkt_cnt) \
189 a->mpa_tx.start_port = port; \
190 if (a->mpa_tx.start_port <= port) \
191 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt)); \
192 else \
193 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \
194 a->mp_end_port))); \
195 a->mpa_tx.pkt_cnt++; \
196} while (0)
197
198/* SDIO Tx aggregation limit ? */
199#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \
200 (a->mpa_tx.pkt_cnt == a->mpa_tx.pkt_aggr_limit)
201
202/* SDIO Tx aggregation port limit ? */
203#define MP_TX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_wr_port < \
204 a->mpa_tx.start_port) && (((MAX_PORT - \
205 a->mpa_tx.start_port) + a->curr_wr_port) >= \
206 SDIO_MP_AGGR_DEF_PKT_LIMIT))
207
208/* Reset SDIO Tx aggregation buffer parameters */
209#define MP_TX_AGGR_BUF_RESET(a) do { \
210 a->mpa_tx.pkt_cnt = 0; \
211 a->mpa_tx.buf_len = 0; \
212 a->mpa_tx.ports = 0; \
213 a->mpa_tx.start_port = 0; \
214} while (0)
215
216/* SDIO Rx aggregation limit ? */
217#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \
218 (a->mpa_rx.pkt_cnt == a->mpa_rx.pkt_aggr_limit)
219
220/* SDIO Rx aggregation port limit ? */
221#define MP_RX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_rd_port < \
222 a->mpa_rx.start_port) && (((MAX_PORT - \
223 a->mpa_rx.start_port) + a->curr_rd_port) >= \
224 SDIO_MP_AGGR_DEF_PKT_LIMIT))
225
226/* SDIO Rx aggregation in progress ? */
227#define MP_RX_AGGR_IN_PROGRESS(a) (a->mpa_rx.pkt_cnt > 0)
228
229/* SDIO Rx aggregation buffer room for next packet ? */
230#define MP_RX_AGGR_BUF_HAS_ROOM(a, rx_len) \
231 ((a->mpa_rx.buf_len+rx_len) <= a->mpa_rx.buf_size)
232
233/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
234#define MP_RX_AGGR_SETUP(a, skb, port) do { \
235 a->mpa_rx.buf_len += skb->len; \
236 if (!a->mpa_rx.pkt_cnt) \
237 a->mpa_rx.start_port = port; \
238 if (a->mpa_rx.start_port <= port) \
239 a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt)); \
240 else \
241 a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt+1)); \
242 a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \
243 a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \
244 a->mpa_rx.pkt_cnt++; \
245} while (0)
246
247/* Reset SDIO Rx aggregation buffer parameters */
248#define MP_RX_AGGR_BUF_RESET(a) do { \
249 a->mpa_rx.pkt_cnt = 0; \
250 a->mpa_rx.buf_len = 0; \
251 a->mpa_rx.ports = 0; \
252 a->mpa_rx.start_port = 0; \
253} while (0)
254
255
256/* data structure for SDIO MPA TX */
257struct mwifiex_sdio_mpa_tx {
258 /* multiport tx aggregation buffer pointer */
259 u8 *buf;
260 u32 buf_len;
261 u32 pkt_cnt;
262 u16 ports;
263 u16 start_port;
264 u8 enabled;
265 u32 buf_size;
266 u32 pkt_aggr_limit;
267};
268
269struct mwifiex_sdio_mpa_rx {
270 u8 *buf;
271 u32 buf_len;
272 u32 pkt_cnt;
273 u16 ports;
274 u16 start_port;
275
276 struct sk_buff *skb_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT];
277 u32 len_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT];
278
279 u8 enabled;
280 u32 buf_size;
281 u32 pkt_aggr_limit;
282};
283
284int mwifiex_bus_register(void);
285void mwifiex_bus_unregister(void);
286
287struct sdio_mmc_card {
288 struct sdio_func *func;
289 struct mwifiex_adapter *adapter;
290
291 u16 mp_rd_bitmap;
292 u16 mp_wr_bitmap;
293
294 u16 mp_end_port;
295 u16 mp_data_port_mask;
296
297 u8 curr_rd_port;
298 u8 curr_wr_port;
299
300 u8 *mp_regs;
301
302 struct mwifiex_sdio_mpa_tx mpa_tx;
303 struct mwifiex_sdio_mpa_rx mpa_rx;
304};
305#endif /* _MWIFIEX_SDIO_H */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
new file mode 100644
index 00000000000..795b1eae768
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -0,0 +1,1226 @@
1/*
2 * Marvell Wireless LAN device driver: station command handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * This function prepares command to set/get RSSI information.
30 *
31 * Preparation includes -
32 * - Setting command ID, action and proper size
33 * - Setting data/beacon average factors
34 * - Resetting SNR/NF/RSSI values in private structure
35 * - Ensuring correct endian-ness
36 */
37static int
38mwifiex_cmd_802_11_rssi_info(struct mwifiex_private *priv,
39 struct host_cmd_ds_command *cmd, u16 cmd_action)
40{
41 cmd->command = cpu_to_le16(HostCmd_CMD_RSSI_INFO);
42 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_rssi_info) +
43 S_DS_GEN);
44 cmd->params.rssi_info.action = cpu_to_le16(cmd_action);
45 cmd->params.rssi_info.ndata = cpu_to_le16(priv->data_avg_factor);
46 cmd->params.rssi_info.nbcn = cpu_to_le16(priv->bcn_avg_factor);
47
48 /* Reset SNR/NF/RSSI values in private structure */
49 priv->data_rssi_last = 0;
50 priv->data_nf_last = 0;
51 priv->data_rssi_avg = 0;
52 priv->data_nf_avg = 0;
53 priv->bcn_rssi_last = 0;
54 priv->bcn_nf_last = 0;
55 priv->bcn_rssi_avg = 0;
56 priv->bcn_nf_avg = 0;
57
58 return 0;
59}
60
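Every preparation routine in this file follows the same shape: fill the shared command header (ID and total size, little-endian on the wire), then the command-specific body. The standalone sketch below shows that shape; the structures, the command ID and the 8-byte S_DS_GEN value are assumptions made for the sketch, and htole16()/le16toh() stand in for the kernel's cpu_to_le16()/le16_to_cpu().

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define S_DS_GEN 8		/* assumed generic-header size */
#define CMD_RSSI_INFO 0x00a4	/* hypothetical command ID */

struct rssi_info_body {
	uint16_t action;	/* little-endian */
	uint16_t ndata;		/* data averaging factor */
	uint16_t nbcn;		/* beacon averaging factor */
} __attribute__((packed));

struct cmd_frame {		/* 8-byte header matches S_DS_GEN above */
	uint16_t command;
	uint16_t size;		/* total size including the header */
	uint16_t seq_num;
	uint16_t result;
	struct rssi_info_body rssi;
} __attribute__((packed));

static void prepare_rssi_info(struct cmd_frame *cmd, uint16_t action)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->command = htole16(CMD_RSSI_INFO);
	cmd->size = htole16(sizeof(struct rssi_info_body) + S_DS_GEN);
	cmd->rssi.action = htole16(action);
}

int main(void)
{
	struct cmd_frame cmd;

	prepare_rssi_info(&cmd, 1);			/* a SET-like action */
	printf("wire size = %u\n", le16toh(cmd.size));	/* prints 14 */
	return 0;
}
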
61/*
62 * This function prepares command to set MAC control.
63 *
64 * Preparation includes -
65 * - Setting command ID, action and proper size
66 * - Ensuring correct endian-ness
67 */
68static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
69 struct host_cmd_ds_command *cmd,
70 u16 cmd_action, void *data_buf)
71{
72 struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
73 u16 action = *((u16 *) data_buf);
74
75 if (cmd_action != HostCmd_ACT_GEN_SET) {
76 dev_err(priv->adapter->dev,
77 "mac_control: only support set cmd\n");
78 return -1;
79 }
80
81 cmd->command = cpu_to_le16(HostCmd_CMD_MAC_CONTROL);
82 cmd->size =
83 cpu_to_le16(sizeof(struct host_cmd_ds_mac_control) + S_DS_GEN);
84 mac_ctrl->action = cpu_to_le16(action);
85
86 return 0;
87}
88
89/*
90 * This function prepares command to set/get SNMP MIB.
91 *
92 * Preparation includes -
93 * - Setting command ID, action and proper size
94 * - Setting SNMP MIB OID number and value
95 * (as required)
96 * - Ensuring correct endian-ness
97 *
98 * The following SNMP MIB OIDs are supported -
99 * - FRAG_THRESH_I : Fragmentation threshold
100 * - RTS_THRESH_I : RTS threshold
101 * - SHORT_RETRY_LIM_I : Short retry limit
102 * - DOT11D_I : 11d support
103 */
104static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
105 struct host_cmd_ds_command *cmd,
106 u16 cmd_action, u32 cmd_oid,
107 void *data_buf)
108{
109 struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
110 u32 ul_temp;
111
112 dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
113 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
114 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
115 - 1 + S_DS_GEN);
116
117 if (cmd_action == HostCmd_ACT_GEN_GET) {
118 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_GET);
119 snmp_mib->buf_size = cpu_to_le16(MAX_SNMP_BUF_SIZE);
120 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
121 + MAX_SNMP_BUF_SIZE);
122 }
123
124 switch (cmd_oid) {
125 case FRAG_THRESH_I:
126 snmp_mib->oid = cpu_to_le16((u16) FRAG_THRESH_I);
127 if (cmd_action == HostCmd_ACT_GEN_SET) {
128 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
129 snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
130 ul_temp = *((u32 *) data_buf);
131 *((__le16 *) (snmp_mib->value)) =
132 cpu_to_le16((u16) ul_temp);
133 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
134 + sizeof(u16));
135 }
136 break;
137 case RTS_THRESH_I:
138 snmp_mib->oid = cpu_to_le16((u16) RTS_THRESH_I);
139 if (cmd_action == HostCmd_ACT_GEN_SET) {
140 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
141 snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
142 ul_temp = *((u32 *) data_buf);
143 *(__le16 *) (snmp_mib->value) =
144 cpu_to_le16((u16) ul_temp);
145 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
146 + sizeof(u16));
147 }
148 break;
149
150 case SHORT_RETRY_LIM_I:
151 snmp_mib->oid = cpu_to_le16((u16) SHORT_RETRY_LIM_I);
152 if (cmd_action == HostCmd_ACT_GEN_SET) {
153 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
154 snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
155 ul_temp = (*(u32 *) data_buf);
156 *((__le16 *) (snmp_mib->value)) =
157 cpu_to_le16((u16) ul_temp);
158 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
159 + sizeof(u16));
160 }
161 break;
162 case DOT11D_I:
163 snmp_mib->oid = cpu_to_le16((u16) DOT11D_I);
164 if (cmd_action == HostCmd_ACT_GEN_SET) {
165 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
166 snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
167 ul_temp = *(u32 *) data_buf;
168 *((__le16 *) (snmp_mib->value)) =
169 cpu_to_le16((u16) ul_temp);
170 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
171 + sizeof(u16));
172 }
173 break;
174 default:
175 break;
176 }
177 dev_dbg(priv->adapter->dev,
178 "cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
179 " Value=0x%x\n",
180 cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
181 le16_to_cpu(*(__le16 *) snmp_mib->value));
182 return 0;
183}
184
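Because cmd->size stays little-endian in memory, each append above is a read-modify-write: le16_to_cpu(), add, cpu_to_le16(). The tiny sketch below isolates that idiom, with userspace byte-order helpers standing in for the kernel ones.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Grow a little-endian length field in place by 'extra' bytes, as the
 * SNMP MIB handler does whenever it appends an OID value. */
static void grow_le16_size(uint16_t *size_le, uint16_t extra)
{
	*size_le = htole16((uint16_t) (le16toh(*size_le) + extra));
}

int main(void)
{
	uint16_t size_le = htole16(13);			/* header + fixed body */

	grow_le16_size(&size_le, sizeof(uint16_t));	/* 2-byte OID value */
	printf("size = %u\n", le16toh(size_le));	/* prints 15 */
	return 0;
}
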
185/*
186 * This function prepares command to get log.
187 *
188 * Preparation includes -
189 * - Setting command ID and proper size
190 * - Ensuring correct endian-ness
191 */
192static int
193mwifiex_cmd_802_11_get_log(struct mwifiex_private *priv,
194 struct host_cmd_ds_command *cmd)
195{
196 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_GET_LOG);
197 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_get_log) +
198 S_DS_GEN);
199 return 0;
200}
201
202/*
203 * This function prepares command to set/get Tx data rate configuration.
204 *
205 * Preparation includes -
206 * - Setting command ID, action and proper size
207 * - Setting configuration index, rate scope and rate drop pattern
208 * parameters (as required)
209 * - Ensuring correct endian-ness
210 */
211static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
212 struct host_cmd_ds_command *cmd,
213 u16 cmd_action, void *data_buf)
214{
215 struct host_cmd_ds_tx_rate_cfg *rate_cfg = &cmd->params.tx_rate_cfg;
216 struct mwifiex_rate_scope *rate_scope;
217 struct mwifiex_rate_drop_pattern *rate_drop;
218 u16 *pbitmap_rates = (u16 *) data_buf;
219
220 u32 i;
221
222 cmd->command = cpu_to_le16(HostCmd_CMD_TX_RATE_CFG);
223
224 rate_cfg->action = cpu_to_le16(cmd_action);
225 rate_cfg->cfg_index = 0;
226
227 rate_scope = (struct mwifiex_rate_scope *) ((u8 *) rate_cfg +
228 sizeof(struct host_cmd_ds_tx_rate_cfg));
229 rate_scope->type = cpu_to_le16(TLV_TYPE_RATE_SCOPE);
230 rate_scope->length = cpu_to_le16(sizeof(struct mwifiex_rate_scope) -
231 sizeof(struct mwifiex_ie_types_header));
232 if (pbitmap_rates != NULL) {
233 rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]);
234 rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]);
235 for (i = 0;
236 i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16);
237 i++)
238 rate_scope->ht_mcs_rate_bitmap[i] =
239 cpu_to_le16(pbitmap_rates[2 + i]);
240 } else {
241 rate_scope->hr_dsss_rate_bitmap =
242 cpu_to_le16(priv->bitmap_rates[0]);
243 rate_scope->ofdm_rate_bitmap =
244 cpu_to_le16(priv->bitmap_rates[1]);
245 for (i = 0;
246 i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16);
247 i++)
248 rate_scope->ht_mcs_rate_bitmap[i] =
249 cpu_to_le16(priv->bitmap_rates[2 + i]);
250 }
251
252 rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
253 sizeof(struct mwifiex_rate_scope));
254 rate_drop->type = cpu_to_le16(TLV_TYPE_RATE_DROP_CONTROL);
255 rate_drop->length = cpu_to_le16(sizeof(rate_drop->rate_drop_mode));
256 rate_drop->rate_drop_mode = 0;
257
258 cmd->size =
259 cpu_to_le16(S_DS_GEN + sizeof(struct host_cmd_ds_tx_rate_cfg) +
260 sizeof(struct mwifiex_rate_scope) +
261 sizeof(struct mwifiex_rate_drop_pattern));
262
263 return 0;
264}
265
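The rate configuration command lays two TLVs, a rate scope and a rate drop pattern, directly behind the fixed body, each located by pointer arithmetic from the end of the previous record. The standalone sketch below shows the same construction with a generic TLV layout; the type IDs and payloads are invented for illustration.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;	/* little-endian */
	uint16_t len;	/* payload bytes, excluding this header */
} __attribute__((packed));

/* Append one TLV at 'cursor' and return the position just after it. */
static uint8_t *put_tlv(uint8_t *cursor, uint16_t type,
			const void *payload, uint16_t len)
{
	struct tlv_hdr hdr = { htole16(type), htole16(len) };

	memcpy(cursor, &hdr, sizeof(hdr));
	memcpy(cursor + sizeof(hdr), payload, len);
	return cursor + sizeof(hdr) + len;
}

int main(void)
{
	uint8_t buf[64];
	uint16_t bitmap = htole16(0x1fff);	/* pretend rate bitmap */
	uint8_t drop_mode = 0;
	uint8_t *end = buf;

	end = put_tlv(end, 0x01aa, &bitmap, sizeof(bitmap));	   /* scope */
	end = put_tlv(end, 0x01ab, &drop_mode, sizeof(drop_mode)); /* drop */
	printf("chained TLV bytes = %td\n", end - buf);	/* prints 11 */
	return 0;
}
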
266/*
267 * This function prepares command to set/get Tx power configuration.
268 *
269 * Preparation includes -
270 * - Setting command ID, action and proper size
271 * - Setting Tx power mode, power group TLV
272 * (as required)
273 * - Ensuring correct endian-ness
274 */
275static int mwifiex_cmd_tx_power_cfg(struct mwifiex_private *priv,
276 struct host_cmd_ds_command *cmd,
277 u16 cmd_action, void *data_buf)
278{
279 struct mwifiex_types_power_group *pg_tlv = NULL;
280 struct host_cmd_ds_txpwr_cfg *txp = NULL;
281 struct host_cmd_ds_txpwr_cfg *cmd_txp_cfg = &cmd->params.txp_cfg;
282
283 cmd->command = cpu_to_le16(HostCmd_CMD_TXPWR_CFG);
284 cmd->size =
285 cpu_to_le16(S_DS_GEN + sizeof(struct host_cmd_ds_txpwr_cfg));
286 switch (cmd_action) {
287 case HostCmd_ACT_GEN_SET:
288 txp = (struct host_cmd_ds_txpwr_cfg *) data_buf;
289 if (txp->mode) {
290 pg_tlv = (struct mwifiex_types_power_group
291 *) ((unsigned long) data_buf +
292 sizeof(struct host_cmd_ds_txpwr_cfg));
293 memmove(cmd_txp_cfg, data_buf,
294 sizeof(struct host_cmd_ds_txpwr_cfg) +
295 sizeof(struct mwifiex_types_power_group) +
296 pg_tlv->length);
297
298 pg_tlv = (struct mwifiex_types_power_group *) ((u8 *)
299 cmd_txp_cfg +
300 sizeof(struct host_cmd_ds_txpwr_cfg));
301 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) +
302 sizeof(struct mwifiex_types_power_group) +
303 pg_tlv->length);
304 } else {
305 memmove(cmd_txp_cfg, data_buf,
306 sizeof(struct host_cmd_ds_txpwr_cfg));
307 }
308 cmd_txp_cfg->action = cpu_to_le16(cmd_action);
309 break;
310 case HostCmd_ACT_GEN_GET:
311 cmd_txp_cfg->action = cpu_to_le16(cmd_action);
312 break;
313 }
314
315 return 0;
316}
317
318/*
319 * This function prepares command to set Host Sleep configuration.
320 *
321 * Preparation includes -
322 * - Setting command ID and proper size
323 * - Setting Host Sleep action, conditions, ARP filters
324 * (as required)
325 * - Ensuring correct endian-ness
326 */
327static int mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
328 struct host_cmd_ds_command *cmd,
329 u16 cmd_action,
330 struct mwifiex_hs_config_param *data_buf)
331{
332 struct mwifiex_adapter *adapter = priv->adapter;
333 struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
334 u16 hs_activate = false;
335
336 if (data_buf == NULL)
337		/* No config data given: this is an HS activate request */
338 hs_activate = true;
339 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH);
340
341 if (!hs_activate &&
342 (data_buf->conditions
343 != cpu_to_le32(HOST_SLEEP_CFG_CANCEL))
344 && ((adapter->arp_filter_size > 0)
345 && (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
346 dev_dbg(adapter->dev,
347 "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
348 adapter->arp_filter_size);
349 memcpy(((u8 *) hs_cfg) +
350 sizeof(struct host_cmd_ds_802_11_hs_cfg_enh),
351 adapter->arp_filter, adapter->arp_filter_size);
352 cmd->size = cpu_to_le16(adapter->arp_filter_size +
353 sizeof(struct host_cmd_ds_802_11_hs_cfg_enh)
354 + S_DS_GEN);
355 } else {
356 cmd->size = cpu_to_le16(S_DS_GEN + sizeof(struct
357 host_cmd_ds_802_11_hs_cfg_enh));
358 }
359 if (hs_activate) {
360 hs_cfg->action = cpu_to_le16(HS_ACTIVATE);
361 hs_cfg->params.hs_activate.resp_ctrl = RESP_NEEDED;
362 } else {
363 hs_cfg->action = cpu_to_le16(HS_CONFIGURE);
364 hs_cfg->params.hs_config.conditions = data_buf->conditions;
365 hs_cfg->params.hs_config.gpio = data_buf->gpio;
366 hs_cfg->params.hs_config.gap = data_buf->gap;
367 dev_dbg(adapter->dev,
368 "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
369 hs_cfg->params.hs_config.conditions,
370 hs_cfg->params.hs_config.gpio,
371 hs_cfg->params.hs_config.gap);
372 }
373
374 return 0;
375}
376
377/*
378 * This function prepares command to set/get MAC address.
379 *
380 * Preparation includes -
381 * - Setting command ID, action and proper size
382 * - Setting MAC address (for SET only)
383 * - Ensuring correct endian-ness
384 */
385static int mwifiex_cmd_802_11_mac_address(struct mwifiex_private *priv,
386 struct host_cmd_ds_command *cmd,
387 u16 cmd_action)
388{
389 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_MAC_ADDRESS);
390 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_mac_address) +
391 S_DS_GEN);
392 cmd->result = 0;
393
394 cmd->params.mac_addr.action = cpu_to_le16(cmd_action);
395
396 if (cmd_action == HostCmd_ACT_GEN_SET)
397 memcpy(cmd->params.mac_addr.mac_addr, priv->curr_addr,
398 ETH_ALEN);
399 return 0;
400}
401
402/*
403 * This function prepares command to set MAC multicast address.
404 *
405 * Preparation includes -
406 * - Setting command ID, action and proper size
407 * - Setting MAC multicast address
408 * - Ensuring correct endian-ness
409 */
410static int mwifiex_cmd_mac_multicast_adr(struct mwifiex_private *priv,
411 struct host_cmd_ds_command *cmd,
412 u16 cmd_action, void *data_buf)
413{
414 struct mwifiex_multicast_list *mcast_list =
415 (struct mwifiex_multicast_list *) data_buf;
416 struct host_cmd_ds_mac_multicast_adr *mcast_addr = &cmd->params.mc_addr;
417
418 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mac_multicast_adr) +
419 S_DS_GEN);
420 cmd->command = cpu_to_le16(HostCmd_CMD_MAC_MULTICAST_ADR);
421
422 mcast_addr->action = cpu_to_le16(cmd_action);
423 mcast_addr->num_of_adrs =
424 cpu_to_le16((u16) mcast_list->num_multicast_addr);
425 memcpy(mcast_addr->mac_list, mcast_list->mac_list,
426 mcast_list->num_multicast_addr * ETH_ALEN);
427
428 return 0;
429}
430
431/*
432 * This function prepares command to deauthenticate.
433 *
434 * Preparation includes -
435 * - Setting command ID and proper size
436 * - Setting AP MAC address and reason code
437 * - Ensuring correct endian-ness
438 */
439static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv,
440 struct host_cmd_ds_command *cmd,
441 void *data_buf)
442{
443 struct host_cmd_ds_802_11_deauthenticate *deauth = &cmd->params.deauth;
444
445 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_DEAUTHENTICATE);
446 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_deauthenticate)
447 + S_DS_GEN);
448
449 /* Set AP MAC address */
450 memcpy(deauth->mac_addr, (u8 *) data_buf, ETH_ALEN);
451
452 dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr);
453
454 deauth->reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
455
456 return 0;
457}
458
459/*
460 * This function prepares command to stop Ad-Hoc network.
461 *
462 * Preparation includes -
463 * - Setting command ID and proper size
464 * - Ensuring correct endian-ness
465 */
466static int mwifiex_cmd_802_11_ad_hoc_stop(struct mwifiex_private *priv,
467 struct host_cmd_ds_command *cmd)
468{
469 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_STOP);
470 cmd->size = cpu_to_le16(S_DS_GEN);
471 return 0;
472}
473
474/*
475 * This function sets WEP key(s) to key parameter TLV(s).
476 *
477 * Multi-key parameter TLVs are supported, so we can send multiple
478 * WEP keys in a single buffer.
479 */
480static int
481mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
482 struct mwifiex_ie_type_key_param_set *key_param_set,
483 u16 *key_param_len)
484{
485 int cur_key_param_len = 0;
486 u8 i;
487
488 /* Multi-key_param_set TLV is supported */
489 for (i = 0; i < NUM_WEP_KEYS; i++) {
490 if ((priv->wep_key[i].key_length == WLAN_KEY_LEN_WEP40) ||
491 (priv->wep_key[i].key_length == WLAN_KEY_LEN_WEP104)) {
492 key_param_set->type =
493 cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
494/* Key_param_set WEP fixed length */
495#define KEYPARAMSET_WEP_FIXED_LEN 8
496 key_param_set->length = cpu_to_le16((u16)
497 (priv->wep_key[i].
498 key_length +
499 KEYPARAMSET_WEP_FIXED_LEN));
500 key_param_set->key_type_id =
501 cpu_to_le16(KEY_TYPE_ID_WEP);
502 key_param_set->key_info =
503 cpu_to_le16(KEY_INFO_WEP_ENABLED |
504 KEY_INFO_WEP_UNICAST |
505 KEY_INFO_WEP_MCAST);
506 key_param_set->key_len =
507 cpu_to_le16(priv->wep_key[i].key_length);
508 /* Set WEP key index */
509 key_param_set->key[0] = i;
510 /* Set default Tx key flag */
511 if (i ==
512 (priv->
513 wep_key_curr_index & HostCmd_WEP_KEY_INDEX_MASK))
514 key_param_set->key[1] = 1;
515 else
516 key_param_set->key[1] = 0;
517 memmove(&key_param_set->key[2],
518 priv->wep_key[i].key_material,
519 priv->wep_key[i].key_length);
520
521 cur_key_param_len = priv->wep_key[i].key_length +
522 KEYPARAMSET_WEP_FIXED_LEN +
523 sizeof(struct mwifiex_ie_types_header);
524 *key_param_len += (u16) cur_key_param_len;
525 key_param_set =
526 (struct mwifiex_ie_type_key_param_set *)
527 ((u8 *)key_param_set +
528 cur_key_param_len);
529 } else if (!priv->wep_key[i].key_length) {
530 continue;
531 } else {
532 dev_err(priv->adapter->dev,
533 "key%d Length = %d is incorrect\n",
534 (i + 1), priv->wep_key[i].key_length);
535 return -1;
536 }
537 }
538
539 return 0;
540}
541
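mwifiex_set_keyparamset_wep packs up to four variable-size key records back to back, re-casting a walking pointer after each one. The sketch below shows that cursor pattern with a simplified record layout; the field names, TLV type and byte-order handling are simplifications for illustration, not the driver's exact format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct key_rec {
	uint16_t type;	/* TLV type (endianness ignored in this sketch) */
	uint16_t len;	/* bytes that follow this header */
	uint8_t body[];	/* key index, default-Tx flag, key material */
} __attribute__((packed));

/* Pack 'nkeys' records into 'buf'; returns the total bytes written. */
static size_t pack_keys(uint8_t *buf, const uint8_t keys[][13],
			const uint8_t key_lens[], int nkeys)
{
	struct key_rec *rec = (struct key_rec *) buf;
	size_t total = 0;

	for (int i = 0; i < nkeys; i++) {
		size_t body_len = 2 + key_lens[i];  /* index + flag + key */

		rec->type = 0x0100;		/* made-up TLV type */
		rec->len = (uint16_t) body_len;
		rec->body[0] = (uint8_t) i;	/* key index */
		rec->body[1] = (i == 0);	/* default-Tx flag */
		memcpy(&rec->body[2], keys[i], key_lens[i]);
		total += sizeof(*rec) + body_len;
		/* advance the cursor past this record, as the driver does */
		rec = (struct key_rec *)
			((uint8_t *) rec + sizeof(*rec) + body_len);
	}
	return total;
}

int main(void)
{
	uint8_t buf[128];
	uint8_t keys[2][13] = { { 0xaa }, { 0xbb } };
	uint8_t lens[2] = { 5, 13 };	/* WEP40 and WEP104 key lengths */

	printf("packed %zu bytes\n", pack_keys(buf, keys, lens, 2));
	return 0;
}
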
542/*
543 * This function prepares command to set/get/reset network key(s).
544 *
545 * Preparation includes -
546 * - Setting command ID, action and proper size
547 * - Setting WEP keys, WAPI keys or WPA keys along with required
548 * encryption (TKIP, AES) (as required)
549 * - Ensuring correct endian-ness
550 */
551static int mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
552 struct host_cmd_ds_command *cmd,
553 u16 cmd_action,
554 u32 cmd_oid, void *data_buf)
555{
556 struct host_cmd_ds_802_11_key_material *key_material =
557 &cmd->params.key_material;
558 struct mwifiex_ds_encrypt_key *enc_key =
559 (struct mwifiex_ds_encrypt_key *) data_buf;
560 u16 key_param_len = 0;
561 int ret = 0;
562 const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
563
564 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
565 key_material->action = cpu_to_le16(cmd_action);
566
567 if (cmd_action == HostCmd_ACT_GEN_GET) {
568 cmd->size =
569 cpu_to_le16(sizeof(key_material->action) + S_DS_GEN);
570 return ret;
571 }
572
573 if (!enc_key) {
574 memset(&key_material->key_param_set, 0,
575 (NUM_WEP_KEYS *
576 sizeof(struct mwifiex_ie_type_key_param_set)));
577 ret = mwifiex_set_keyparamset_wep(priv,
578 &key_material->key_param_set,
579 &key_param_len);
580 cmd->size = cpu_to_le16(key_param_len +
581 sizeof(key_material->action) + S_DS_GEN);
582 return ret;
583 } else
584 memset(&key_material->key_param_set, 0,
585 sizeof(struct mwifiex_ie_type_key_param_set));
586 if (enc_key->is_wapi_key) {
587 dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n");
588 key_material->key_param_set.key_type_id =
589 cpu_to_le16(KEY_TYPE_ID_WAPI);
590 if (cmd_oid == KEY_INFO_ENABLED)
591 key_material->key_param_set.key_info =
592 cpu_to_le16(KEY_INFO_WAPI_ENABLED);
593 else
594 key_material->key_param_set.key_info =
595 cpu_to_le16(!KEY_INFO_WAPI_ENABLED);
596
597 key_material->key_param_set.key[0] = enc_key->key_index;
598 if (!priv->sec_info.wapi_key_on)
599 key_material->key_param_set.key[1] = 1;
600 else
601			/* set to 0 when re-keying */
602 key_material->key_param_set.key[1] = 0;
603
604 if (0 != memcmp(enc_key->mac_addr, bc_mac, sizeof(bc_mac))) {
605 /* WAPI pairwise key: unicast */
606 key_material->key_param_set.key_info |=
607 cpu_to_le16(KEY_INFO_WAPI_UNICAST);
608 } else { /* WAPI group key: multicast */
609 key_material->key_param_set.key_info |=
610 cpu_to_le16(KEY_INFO_WAPI_MCAST);
611 priv->sec_info.wapi_key_on = true;
612 }
613
614 key_material->key_param_set.type =
615 cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
616 key_material->key_param_set.key_len =
617 cpu_to_le16(WAPI_KEY_LEN);
618 memcpy(&key_material->key_param_set.key[2],
619 enc_key->key_material, enc_key->key_len);
620 memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
621 enc_key->wapi_rxpn, WAPI_RXPN_LEN);
622 key_material->key_param_set.length =
623 cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
624
625 key_param_len = (WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN) +
626 sizeof(struct mwifiex_ie_types_header);
627 cmd->size = cpu_to_le16(key_param_len +
628 sizeof(key_material->action) + S_DS_GEN);
629 return ret;
630 }
631 if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
632 dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
633 key_material->key_param_set.key_type_id =
634 cpu_to_le16(KEY_TYPE_ID_AES);
635 if (cmd_oid == KEY_INFO_ENABLED)
636 key_material->key_param_set.key_info =
637 cpu_to_le16(KEY_INFO_AES_ENABLED);
638 else
639 key_material->key_param_set.key_info =
640 cpu_to_le16(!KEY_INFO_AES_ENABLED);
641
642 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
643 /* AES pairwise key: unicast */
644 key_material->key_param_set.key_info |=
645 cpu_to_le16(KEY_INFO_AES_UNICAST);
646 else /* AES group key: multicast */
647 key_material->key_param_set.key_info |=
648 cpu_to_le16(KEY_INFO_AES_MCAST);
649 } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
650 dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
651 key_material->key_param_set.key_type_id =
652 cpu_to_le16(KEY_TYPE_ID_TKIP);
653 key_material->key_param_set.key_info =
654 cpu_to_le16(KEY_INFO_TKIP_ENABLED);
655
656 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
657 /* TKIP pairwise key: unicast */
658 key_material->key_param_set.key_info |=
659 cpu_to_le16(KEY_INFO_TKIP_UNICAST);
660 else /* TKIP group key: multicast */
661 key_material->key_param_set.key_info |=
662 cpu_to_le16(KEY_INFO_TKIP_MCAST);
663 }
664
665 if (key_material->key_param_set.key_type_id) {
666 key_material->key_param_set.type =
667 cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
668 key_material->key_param_set.key_len =
669 cpu_to_le16((u16) enc_key->key_len);
670 memcpy(key_material->key_param_set.key, enc_key->key_material,
671 enc_key->key_len);
672 key_material->key_param_set.length =
673 cpu_to_le16((u16) enc_key->key_len +
674 KEYPARAMSET_FIXED_LEN);
675
676 key_param_len = (u16) (enc_key->key_len + KEYPARAMSET_FIXED_LEN)
677 + sizeof(struct mwifiex_ie_types_header);
678
679 cmd->size = cpu_to_le16(key_param_len +
680 sizeof(key_material->action) + S_DS_GEN);
681 }
682
683 return ret;
684}
685
686/*
687 * This function prepares command to set/get 11d domain information.
688 *
689 * Preparation includes -
690 * - Setting command ID, action and proper size
691 * - Setting domain information fields (for SET only)
692 * - Ensuring correct endian-ness
693 */
694static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
695 struct host_cmd_ds_command *cmd,
696 u16 cmd_action)
697{
698 struct mwifiex_adapter *adapter = priv->adapter;
699 struct host_cmd_ds_802_11d_domain_info *domain_info =
700 &cmd->params.domain_info;
701 struct mwifiex_ietypes_domain_param_set *domain =
702 &domain_info->domain;
703 u8 no_of_triplet = adapter->domain_reg.no_of_triplet;
704
705 dev_dbg(adapter->dev, "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
706
707 cmd->command = cpu_to_le16(HostCmd_CMD_802_11D_DOMAIN_INFO);
708 domain_info->action = cpu_to_le16(cmd_action);
709 if (cmd_action == HostCmd_ACT_GEN_GET) {
710 cmd->size = cpu_to_le16(sizeof(domain_info->action) + S_DS_GEN);
711 return 0;
712 }
713
714 /* Set domain info fields */
715 domain->header.type = cpu_to_le16(WLAN_EID_COUNTRY);
716 memcpy(domain->country_code, adapter->domain_reg.country_code,
717 sizeof(domain->country_code));
718
719 domain->header.len = cpu_to_le16((no_of_triplet *
720 sizeof(struct ieee80211_country_ie_triplet)) +
721 sizeof(domain->country_code));
722
723 if (no_of_triplet) {
724 memcpy(domain->triplet, adapter->domain_reg.triplet,
725 no_of_triplet *
726 sizeof(struct ieee80211_country_ie_triplet));
727
728 cmd->size = cpu_to_le16(sizeof(domain_info->action) +
729 le16_to_cpu(domain->header.len) +
730 sizeof(struct mwifiex_ie_types_header)
731 + S_DS_GEN);
732 } else {
733 cmd->size = cpu_to_le16(sizeof(domain_info->action) + S_DS_GEN);
734 }
735
736 return 0;
737}
738
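The payload built above is a standard 802.11 country information element: a three-byte country code followed by (first channel, number of channels, max Tx power) triplets, which is exactly the arithmetic behind domain->header.len. A minimal numeric check, assuming the usual 3-byte triplet layout:

#include <stdint.h>
#include <stdio.h>

struct country_triplet {	/* ieee80211_country_ie_triplet-like */
	uint8_t first_chan;
	uint8_t num_chans;
	uint8_t max_power;
} __attribute__((packed));

int main(void)
{
	uint8_t no_of_triplet = 2;
	/* 3-byte country code + 3 bytes per triplet */
	uint16_t ie_len = 3 + no_of_triplet * sizeof(struct country_triplet);

	printf("country IE length = %u\n", ie_len);	/* prints 9 */
	return 0;
}
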
739/*
740 * This function prepares command to set/get RF channel.
741 *
742 * Preparation includes -
743 * - Setting command ID, action and proper size
744 * - Setting RF type and current RF channel (for SET only)
745 * - Ensuring correct endian-ness
746 */
747static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
748 struct host_cmd_ds_command *cmd,
749 u16 cmd_action, void *data_buf)
750{
751 struct host_cmd_ds_802_11_rf_channel *rf_chan =
752 &cmd->params.rf_channel;
753 uint16_t rf_type = le16_to_cpu(rf_chan->rf_type);
754
755 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_RF_CHANNEL);
756 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_rf_channel)
757 + S_DS_GEN);
758
759 if (cmd_action == HostCmd_ACT_GEN_SET) {
760 if ((priv->adapter->adhoc_start_band & BAND_A)
761 || (priv->adapter->adhoc_start_band & BAND_AN))
762 rf_chan->rf_type =
763 cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
764
765 rf_type = le16_to_cpu(rf_chan->rf_type);
766 SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset);
767 rf_chan->current_channel = cpu_to_le16(*((u16 *) data_buf));
768 }
769 rf_chan->action = cpu_to_le16(cmd_action);
770 return 0;
771}
772
773/*
774 * This function prepares command to set/get IBSS coalescing status.
775 *
776 * Preparation includes -
777 * - Setting command ID, action and proper size
778 * - Setting status to enable or disable (for SET only)
779 * - Ensuring correct endian-ness
780 */
781static int mwifiex_cmd_ibss_coalescing_status(struct mwifiex_private *priv,
782 struct host_cmd_ds_command *cmd,
783 u16 cmd_action, void *data_buf)
784{
785 struct host_cmd_ds_802_11_ibss_status *ibss_coal =
786 &(cmd->params.ibss_coalescing);
787 u16 enable = 0;
788
789 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_IBSS_COALESCING_STATUS);
790 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_ibss_status) +
791 S_DS_GEN);
792 cmd->result = 0;
793 ibss_coal->action = cpu_to_le16(cmd_action);
794
795 switch (cmd_action) {
796 case HostCmd_ACT_GEN_SET:
797 if (data_buf != NULL)
798 enable = *(u16 *) data_buf;
799 ibss_coal->enable = cpu_to_le16(enable);
800 break;
801
802	/* In other cases there is nothing to do */
803 case HostCmd_ACT_GEN_GET:
804 default:
805 break;
806 }
807
808 return 0;
809}
810
811/*
812 * This function prepares command to set/get register value.
813 *
814 * Preparation includes -
815 * - Setting command ID, action and proper size
816 * - Setting register offset (for both GET and SET) and
817 * register value (for SET only)
818 * - Ensuring correct endian-ness
819 *
820 * The following type of registers can be accessed with this function -
821 * - MAC register
822 * - BBP register
823 * - RF register
824 * - PMIC register
825 * - CAU register
826 * - EEPROM
827 */
828static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
829 u16 cmd_action, void *data_buf)
830{
831 struct mwifiex_ds_reg_rw *reg_rw;
832
833 reg_rw = (struct mwifiex_ds_reg_rw *) data_buf;
834 switch (le16_to_cpu(cmd->command)) {
835 case HostCmd_CMD_MAC_REG_ACCESS:
836 {
837 struct host_cmd_ds_mac_reg_access *mac_reg;
838
839 cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
840 mac_reg = (struct host_cmd_ds_mac_reg_access *) &cmd->
841 params.mac_reg;
842 mac_reg->action = cpu_to_le16(cmd_action);
843 mac_reg->offset =
844 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
845 mac_reg->value = reg_rw->value;
846 break;
847 }
848 case HostCmd_CMD_BBP_REG_ACCESS:
849 {
850 struct host_cmd_ds_bbp_reg_access *bbp_reg;
851
852 cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
853 bbp_reg = (struct host_cmd_ds_bbp_reg_access *) &cmd->
854 params.bbp_reg;
855 bbp_reg->action = cpu_to_le16(cmd_action);
856 bbp_reg->offset =
857 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
858 bbp_reg->value = (u8) le32_to_cpu(reg_rw->value);
859 break;
860 }
861 case HostCmd_CMD_RF_REG_ACCESS:
862 {
863 struct host_cmd_ds_rf_reg_access *rf_reg;
864
865 cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
866 rf_reg = (struct host_cmd_ds_rf_reg_access *) &cmd->
867 params.rf_reg;
868 rf_reg->action = cpu_to_le16(cmd_action);
869 rf_reg->offset =
870 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
871 rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
872 break;
873 }
874 case HostCmd_CMD_PMIC_REG_ACCESS:
875 {
876 struct host_cmd_ds_pmic_reg_access *pmic_reg;
877
878 cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN);
879 pmic_reg = (struct host_cmd_ds_pmic_reg_access *) &cmd->
880 params.pmic_reg;
881 pmic_reg->action = cpu_to_le16(cmd_action);
882 pmic_reg->offset =
883 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
884 pmic_reg->value = (u8) le32_to_cpu(reg_rw->value);
885 break;
886 }
887 case HostCmd_CMD_CAU_REG_ACCESS:
888 {
889 struct host_cmd_ds_rf_reg_access *cau_reg;
890
891 cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
892 cau_reg = (struct host_cmd_ds_rf_reg_access *) &cmd->
893 params.rf_reg;
894 cau_reg->action = cpu_to_le16(cmd_action);
895 cau_reg->offset =
896 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
897 cau_reg->value = (u8) le32_to_cpu(reg_rw->value);
898 break;
899 }
900 case HostCmd_CMD_802_11_EEPROM_ACCESS:
901 {
902 struct mwifiex_ds_read_eeprom *rd_eeprom =
903 (struct mwifiex_ds_read_eeprom *) data_buf;
904 struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
905 (struct host_cmd_ds_802_11_eeprom_access *)
906 &cmd->params.eeprom;
907
908 cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN);
909 cmd_eeprom->action = cpu_to_le16(cmd_action);
910 cmd_eeprom->offset = rd_eeprom->offset;
911 cmd_eeprom->byte_count = rd_eeprom->byte_count;
912 cmd_eeprom->value = 0;
913 break;
914 }
915 default:
916 return -1;
917 }
918
919 return 0;
920}
921
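mwifiex_cmd_reg_access (and mwifiex_sta_prepare_cmd below) dispatch on the command ID with a switch. An equivalent table-driven layout is sketched here purely as a design comparison, with hypothetical handler names and invented command IDs: a table forces one uniform handler signature, while the switch keeps each case's unusual argument handling visible inline.

#include <stdint.h>
#include <stdio.h>

struct cmd_frame;	/* stand-in for host_cmd_ds_command */

typedef int (*cmd_prep_fn)(struct cmd_frame *cmd, uint16_t action,
			   void *data_buf);

static int prep_mac_reg(struct cmd_frame *c, uint16_t a, void *d)
{ (void) c; (void) a; (void) d; return 0; }	/* hypothetical handler */
static int prep_bbp_reg(struct cmd_frame *c, uint16_t a, void *d)
{ (void) c; (void) a; (void) d; return 0; }	/* hypothetical handler */

static const struct { uint16_t cmd_no; cmd_prep_fn fn; } prep_table[] = {
	{ 0x0019, prep_mac_reg },	/* invented command IDs */
	{ 0x001a, prep_bbp_reg },
};

static int dispatch(uint16_t cmd_no, struct cmd_frame *cmd,
		    uint16_t action, void *data_buf)
{
	for (size_t i = 0; i < sizeof(prep_table) / sizeof(prep_table[0]); i++)
		if (prep_table[i].cmd_no == cmd_no)
			return prep_table[i].fn(cmd, action, data_buf);
	return -1;	/* unknown command, like the switch default */
}

int main(void)
{
	printf("dispatch -> %d\n", dispatch(0x0019, NULL, 0, NULL));
	return 0;
}
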
922/*
923 * This function prepares the commands before sending them to the firmware.
924 *
925 * This is a generic function which calls specific command preparation
926 * routines based upon the command number.
927 */
928int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
929 u16 cmd_action, u32 cmd_oid,
930 void *data_buf, void *cmd_buf)
931{
932 struct host_cmd_ds_command *cmd_ptr =
933 (struct host_cmd_ds_command *) cmd_buf;
934 int ret = 0;
935
936 /* Prepare command */
937 switch (cmd_no) {
938 case HostCmd_CMD_GET_HW_SPEC:
939 ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr);
940 break;
941 case HostCmd_CMD_MAC_CONTROL:
942 ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action,
943 data_buf);
944 break;
945 case HostCmd_CMD_802_11_MAC_ADDRESS:
946 ret = mwifiex_cmd_802_11_mac_address(priv, cmd_ptr,
947 cmd_action);
948 break;
949 case HostCmd_CMD_MAC_MULTICAST_ADR:
950 ret = mwifiex_cmd_mac_multicast_adr(priv, cmd_ptr, cmd_action,
951 data_buf);
952 break;
953 case HostCmd_CMD_TX_RATE_CFG:
954 ret = mwifiex_cmd_tx_rate_cfg(priv, cmd_ptr, cmd_action,
955 data_buf);
956 break;
957 case HostCmd_CMD_TXPWR_CFG:
958 ret = mwifiex_cmd_tx_power_cfg(priv, cmd_ptr, cmd_action,
959 data_buf);
960 break;
961 case HostCmd_CMD_802_11_PS_MODE_ENH:
962 ret = mwifiex_cmd_enh_power_mode(priv, cmd_ptr, cmd_action,
963 (uint16_t)cmd_oid, data_buf);
964 break;
965 case HostCmd_CMD_802_11_HS_CFG_ENH:
966 ret = mwifiex_cmd_802_11_hs_cfg(priv, cmd_ptr, cmd_action,
967 (struct mwifiex_hs_config_param *) data_buf);
968 break;
969 case HostCmd_CMD_802_11_SCAN:
970 ret = mwifiex_cmd_802_11_scan(priv, cmd_ptr, data_buf);
971 break;
972 case HostCmd_CMD_802_11_BG_SCAN_QUERY:
973 ret = mwifiex_cmd_802_11_bg_scan_query(priv, cmd_ptr,
974 data_buf);
975 break;
976 case HostCmd_CMD_802_11_ASSOCIATE:
977 ret = mwifiex_cmd_802_11_associate(priv, cmd_ptr, data_buf);
978 break;
979 case HostCmd_CMD_802_11_DEAUTHENTICATE:
980 ret = mwifiex_cmd_802_11_deauthenticate(priv, cmd_ptr,
981 data_buf);
982 break;
983 case HostCmd_CMD_802_11_AD_HOC_START:
984 ret = mwifiex_cmd_802_11_ad_hoc_start(priv, cmd_ptr,
985 data_buf);
986 break;
987 case HostCmd_CMD_802_11_GET_LOG:
988 ret = mwifiex_cmd_802_11_get_log(priv, cmd_ptr);
989 break;
990 case HostCmd_CMD_802_11_AD_HOC_JOIN:
991 ret = mwifiex_cmd_802_11_ad_hoc_join(priv, cmd_ptr,
992 data_buf);
993 break;
994 case HostCmd_CMD_802_11_AD_HOC_STOP:
995 ret = mwifiex_cmd_802_11_ad_hoc_stop(priv, cmd_ptr);
996 break;
997 case HostCmd_CMD_RSSI_INFO:
998 ret = mwifiex_cmd_802_11_rssi_info(priv, cmd_ptr, cmd_action);
999 break;
1000 case HostCmd_CMD_802_11_SNMP_MIB:
1001 ret = mwifiex_cmd_802_11_snmp_mib(priv, cmd_ptr, cmd_action,
1002 cmd_oid, data_buf);
1003 break;
1004 case HostCmd_CMD_802_11_TX_RATE_QUERY:
1005 cmd_ptr->command =
1006 cpu_to_le16(HostCmd_CMD_802_11_TX_RATE_QUERY);
1007 cmd_ptr->size =
1008 cpu_to_le16(sizeof(struct host_cmd_ds_tx_rate_query) +
1009 S_DS_GEN);
1010 priv->tx_rate = 0;
1011 ret = 0;
1012 break;
1013 case HostCmd_CMD_VERSION_EXT:
1014 cmd_ptr->command = cpu_to_le16(cmd_no);
1015 cmd_ptr->params.verext.version_str_sel =
1016 (u8) (*((u32 *) data_buf));
1017 memcpy(&cmd_ptr->params, data_buf,
1018 sizeof(struct host_cmd_ds_version_ext));
1019 cmd_ptr->size =
1020 cpu_to_le16(sizeof(struct host_cmd_ds_version_ext) +
1021 S_DS_GEN);
1022 ret = 0;
1023 break;
1024 case HostCmd_CMD_802_11_RF_CHANNEL:
1025 ret = mwifiex_cmd_802_11_rf_channel(priv, cmd_ptr, cmd_action,
1026 data_buf);
1027 break;
1028 case HostCmd_CMD_FUNC_INIT:
1029 if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
1030 priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
1031 cmd_ptr->command = cpu_to_le16(cmd_no);
1032 cmd_ptr->size = cpu_to_le16(S_DS_GEN);
1033 break;
1034 case HostCmd_CMD_FUNC_SHUTDOWN:
1035 priv->adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
1036 cmd_ptr->command = cpu_to_le16(cmd_no);
1037 cmd_ptr->size = cpu_to_le16(S_DS_GEN);
1038 break;
1039 case HostCmd_CMD_11N_ADDBA_REQ:
1040 ret = mwifiex_cmd_11n_addba_req(priv, cmd_ptr, data_buf);
1041 break;
1042 case HostCmd_CMD_11N_DELBA:
1043 ret = mwifiex_cmd_11n_delba(priv, cmd_ptr, data_buf);
1044 break;
1045 case HostCmd_CMD_11N_ADDBA_RSP:
1046 ret = mwifiex_cmd_11n_addba_rsp_gen(priv, cmd_ptr, data_buf);
1047 break;
1048 case HostCmd_CMD_802_11_KEY_MATERIAL:
1049 ret = mwifiex_cmd_802_11_key_material(priv, cmd_ptr,
1050 cmd_action, cmd_oid,
1051 data_buf);
1052 break;
1053 case HostCmd_CMD_802_11D_DOMAIN_INFO:
1054 ret = mwifiex_cmd_802_11d_domain_info(priv, cmd_ptr,
1055 cmd_action);
1056 break;
1057 case HostCmd_CMD_RECONFIGURE_TX_BUFF:
1058 ret = mwifiex_cmd_recfg_tx_buf(priv, cmd_ptr, cmd_action,
1059 data_buf);
1060 break;
1061 case HostCmd_CMD_AMSDU_AGGR_CTRL:
1062 ret = mwifiex_cmd_amsdu_aggr_ctrl(priv, cmd_ptr, cmd_action,
1063 data_buf);
1064 break;
1065 case HostCmd_CMD_11N_CFG:
1066 ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action,
1067 data_buf);
1068 break;
1069 case HostCmd_CMD_WMM_GET_STATUS:
1070 dev_dbg(priv->adapter->dev,
1071 "cmd: WMM: WMM_GET_STATUS cmd sent\n");
1072 cmd_ptr->command = cpu_to_le16(HostCmd_CMD_WMM_GET_STATUS);
1073 cmd_ptr->size =
1074 cpu_to_le16(sizeof(struct host_cmd_ds_wmm_get_status) +
1075 S_DS_GEN);
1076 ret = 0;
1077 break;
1078 case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
1079 ret = mwifiex_cmd_ibss_coalescing_status(priv, cmd_ptr,
1080 cmd_action, data_buf);
1081 break;
1082 case HostCmd_CMD_MAC_REG_ACCESS:
1083 case HostCmd_CMD_BBP_REG_ACCESS:
1084 case HostCmd_CMD_RF_REG_ACCESS:
1085 case HostCmd_CMD_PMIC_REG_ACCESS:
1086 case HostCmd_CMD_CAU_REG_ACCESS:
1087 case HostCmd_CMD_802_11_EEPROM_ACCESS:
1088 ret = mwifiex_cmd_reg_access(cmd_ptr, cmd_action, data_buf);
1089 break;
1090 case HostCmd_CMD_SET_BSS_MODE:
1091 cmd_ptr->command = cpu_to_le16(cmd_no);
1092 if (priv->bss_mode == MWIFIEX_BSS_MODE_IBSS)
1093 cmd_ptr->params.bss_mode.con_type =
1094 CONNECTION_TYPE_ADHOC;
1095 else if (priv->bss_mode == MWIFIEX_BSS_MODE_INFRA)
1096 cmd_ptr->params.bss_mode.con_type =
1097 CONNECTION_TYPE_INFRA;
1098 cmd_ptr->size = cpu_to_le16(sizeof(struct
1099 host_cmd_ds_set_bss_mode) + S_DS_GEN);
1100 ret = 0;
1101 break;
1102 default:
1103 dev_err(priv->adapter->dev,
1104 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
1105 ret = -1;
1106 break;
1107 }
1108 return ret;
1109}
1110
1111/*
1112 * This function issues commands to initialize firmware.
1113 *
1114 * This is called after firmware download to bring the card to
1115 * working state.
1116 *
1117 * The following commands are issued sequentially -
1118 * - Function init (for first interface only)
1119 * - Read MAC address (for first interface only)
1120 * - Reconfigure Tx buffer size (for first interface only)
1121 * - Enable IEEE power save (for first interface only)
1122 * - Get Tx rate and Tx power
1123 * - Set IBSS coalescing status
1124 * - Set AMSDU aggregation control
1125 * - Set MAC control
1126 * - Enable auto deep sleep (for first interface only)
1127 * - Set 11d control
1128 */
1129int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1130{
1131 int ret = 0;
1132 u16 enable = true;
1133 struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl;
1134 struct mwifiex_ds_auto_ds auto_ds;
1135 enum state_11d_t state_11d;
1136
1137 if (first_sta) {
1138
1139 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_FUNC_INIT,
1140 HostCmd_ACT_GEN_SET, 0, NULL, NULL);
1141 if (ret)
1142 return -1;
1143 /* Read MAC address from HW */
1144 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_GET_HW_SPEC,
1145 HostCmd_ACT_GEN_GET, 0, NULL, NULL);
1146 if (ret)
1147 return -1;
1148
1149 /* Reconfigure tx buf size */
1150 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
1151 HostCmd_ACT_GEN_SET, 0, NULL,
1152 &priv->adapter->tx_buf_size);
1153 if (ret)
1154 return -1;
1155
1156 /* Enable IEEE PS by default */
1157 priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
1158 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
1159 EN_AUTO_PS, BITMAP_STA_PS, NULL,
1160 NULL);
1161 if (ret)
1162 return -1;
1163 }
1164
1165 /* get tx rate */
1166 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
1167 HostCmd_ACT_GEN_GET, 0, NULL, NULL);
1168 if (ret)
1169 return -1;
1170 priv->data_rate = 0;
1171
1172 /* get tx power */
1173 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_TXPWR_CFG,
1174 HostCmd_ACT_GEN_GET, 0, NULL, NULL);
1175 if (ret)
1176 return -1;
1177
1178 /* set ibss coalescing_status */
1179 ret = mwifiex_prepare_cmd(priv,
1180 HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
1181 HostCmd_ACT_GEN_SET, 0, NULL, &enable);
1182 if (ret)
1183 return -1;
1184
1185 memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
1186 amsdu_aggr_ctrl.enable = true;
1187 /* Send request to firmware */
1188 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
1189 HostCmd_ACT_GEN_SET, 0, NULL,
1190 (void *) &amsdu_aggr_ctrl);
1191 if (ret)
1192 return -1;
1193 /* MAC Control must be the last command in init_fw */
1194 /* set MAC Control */
1195 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_MAC_CONTROL,
1196 HostCmd_ACT_GEN_SET, 0, NULL,
1197 &priv->curr_pkt_filter);
1198 if (ret)
1199 return -1;
1200
1201 if (first_sta) {
1202 /* Enable auto deep sleep */
1203 auto_ds.auto_ds = DEEP_SLEEP_ON;
1204 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
1205 ret = mwifiex_prepare_cmd(priv,
1206 HostCmd_CMD_802_11_PS_MODE_ENH,
1207 EN_AUTO_PS, BITMAP_AUTO_DS, NULL,
1208 &auto_ds);
1209 if (ret)
1210 return -1;
1211 }
1212
1213 /* Send cmd to FW to enable/disable 11D function */
1214 state_11d = ENABLE_11D;
1215 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
1216 HostCmd_ACT_GEN_SET, DOT11D_I,
1217 NULL, &state_11d);
1218 if (ret)
1219 dev_err(priv->adapter->dev, "11D: failed to enable 11D\n");
1220
1221 /* set last_init_cmd */
1222 priv->adapter->last_init_cmd = HostCmd_CMD_802_11_SNMP_MIB;
1223 ret = -EINPROGRESS;
1224
1225 return ret;
1226}
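
mwifiex_sta_init_cmd is a straight-line sequence of prepare-command calls that each bail out on the first failure, ending with -EINPROGRESS to signal that the last queued command is still in flight. The same control flow can be written as a loop over a step table, sketched below with invented step names; the driver keeps the explicit sequence because several steps carry per-step arguments and first_sta guards.

#include <errno.h>
#include <stdio.h>

static int step_func_init(void)   { return 0; }	/* hypothetical steps */
static int step_get_hw_spec(void) { return 0; }
static int step_mac_control(void) { return 0; }

static int run_init_sequence(void)
{
	int (*const steps[])(void) = {
		step_func_init, step_get_hw_spec, step_mac_control,
	};

	for (size_t i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		if (steps[i]())
			return -1;	/* stop at the first failure */

	/* last command queued; completion arrives asynchronously */
	return -EINPROGRESS;
}

int main(void)
{
	printf("init -> %d\n", run_init_sequence());
	return 0;
}
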
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
new file mode 100644
index 00000000000..ae960ddf2bd
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -0,0 +1,986 @@
1/*
2 * Marvell Wireless LAN device driver: station command response handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28
29/*
30 * This function handles the command response error case.
31 *
32 * For scan response error, the function cancels all the pending
33 * scan commands and generates an event to inform the applications
34 * of the scan completion.
35 *
36 * For a Power Save command failure, the enter-PS command is not
37 * retried in ad-hoc mode.
38 *
39 * For all other response errors, the current command buffer is freed
40 * and returned to the free command queue.
41 */
42static void
43mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
44 struct host_cmd_ds_command *resp,
45 struct mwifiex_wait_queue *wq_buf)
46{
47 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
48 struct mwifiex_adapter *adapter = priv->adapter;
49 unsigned long flags;
50
51 dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n",
52		le16_to_cpu(resp->command), le16_to_cpu(resp->result));
53 if (wq_buf)
54 wq_buf->status = MWIFIEX_ERROR_FW_CMDRESP;
55
56 switch (le16_to_cpu(resp->command)) {
57 case HostCmd_CMD_802_11_PS_MODE_ENH:
58 {
59 struct host_cmd_ds_802_11_ps_mode_enh *pm =
60 &resp->params.psmode_enh;
61 dev_err(adapter->dev, "PS_MODE_ENH cmd failed: "
62 "result=0x%x action=0x%X\n",
63			le16_to_cpu(resp->result), le16_to_cpu(pm->action));
64 /* We do not re-try enter-ps command in ad-hoc mode. */
65 if (le16_to_cpu(pm->action) == EN_AUTO_PS &&
66 (le16_to_cpu(pm->params.auto_ps.ps_bitmap) &
67 BITMAP_STA_PS)
68 && priv->bss_mode == MWIFIEX_BSS_MODE_IBSS)
69 adapter->ps_mode =
70 MWIFIEX_802_11_POWER_MODE_CAM;
71 }
72 break;
73 case HostCmd_CMD_802_11_SCAN:
74 /* Cancel all pending scan command */
75 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
76 list_for_each_entry_safe(cmd_node, tmp_node,
77 &adapter->scan_pending_q, list) {
78 list_del(&cmd_node->list);
79 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
80 flags);
81 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
82 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
83 }
84 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
85
86 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
87 adapter->scan_processing = false;
88 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
89 if (priv->report_scan_result)
90 priv->report_scan_result = false;
91 if (priv->scan_pending_on_block) {
92 priv->scan_pending_on_block = false;
93 up(&priv->async_sem);
94 }
95 break;
96
97 case HostCmd_CMD_MAC_CONTROL:
98 break;
99
100 default:
101 break;
102 }
103	/* Return the failed command buffer to the free command queue */
104 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
105
106 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
107 adapter->curr_cmd = NULL;
108 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
109
110 return;
111}
112
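The scan-error path above drains scan_pending_q with list_for_each_entry_safe, dropping the spinlock around each mwifiex_insert_cmd_to_free_q() call and retaking it before touching the list again. The userspace sketch below mirrors that drop-and-retake shape with a pthread mutex and a singly linked list; it is an analogy for the locking pattern only, not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;

/* Stands in for mwifiex_insert_cmd_to_free_q(), which must not be
 * called with the queue lock held. */
static void release_elsewhere(struct node *n)
{
	printf("released node %d\n", n->id);
	free(n);
}

static void drain_pending(void)
{
	pthread_mutex_lock(&q_lock);
	while (pending) {
		struct node *n = pending;

		pending = n->next;		/* unlink while locked */
		pthread_mutex_unlock(&q_lock);
		release_elsewhere(n);		/* call out, lock dropped */
		pthread_mutex_lock(&q_lock);
	}
	pthread_mutex_unlock(&q_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = pending;
		pending = n;
	}
	drain_pending();
	return 0;
}
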
113/*
114 * This function handles the command response of get RSSI info.
115 *
116 * Handling includes changing the header fields into CPU format
117 * and saving the following parameters in driver -
118 * - Last data and beacon RSSI value
119 * - Average data and beacon RSSI value
120 * - Last data and beacon NF value
121 * - Average data and beacon NF value
122 *
123 * The parameters are sent to the application as well, along with
124 * calculated SNR values.
125 */
126static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
127 struct host_cmd_ds_command *resp,
128 void *data_buf)
129{
130 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
131 &resp->params.rssi_info_rsp;
132 struct mwifiex_ds_get_signal *signal = NULL;
133
134 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
135 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
136
137 priv->data_rssi_avg = le16_to_cpu(rssi_info_rsp->data_rssi_avg);
138 priv->data_nf_avg = le16_to_cpu(rssi_info_rsp->data_nf_avg);
139
140 priv->bcn_rssi_last = le16_to_cpu(rssi_info_rsp->bcn_rssi_last);
141 priv->bcn_nf_last = le16_to_cpu(rssi_info_rsp->bcn_nf_last);
142
143 priv->bcn_rssi_avg = le16_to_cpu(rssi_info_rsp->bcn_rssi_avg);
144 priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg);
145
146 /* Need to indicate IOCTL complete */
147 if (data_buf) {
148 signal = (struct mwifiex_ds_get_signal *) data_buf;
149 memset(signal, 0, sizeof(struct mwifiex_ds_get_signal));
150
151 signal->selector = ALL_RSSI_INFO_MASK;
152
153 /* RSSI */
154 signal->bcn_rssi_last = priv->bcn_rssi_last;
155 signal->bcn_rssi_avg = priv->bcn_rssi_avg;
156 signal->data_rssi_last = priv->data_rssi_last;
157 signal->data_rssi_avg = priv->data_rssi_avg;
158
159 /* SNR */
160 signal->bcn_snr_last =
161 CAL_SNR(priv->bcn_rssi_last, priv->bcn_nf_last);
162 signal->bcn_snr_avg =
163 CAL_SNR(priv->bcn_rssi_avg, priv->bcn_nf_avg);
164 signal->data_snr_last =
165 CAL_SNR(priv->data_rssi_last, priv->data_nf_last);
166 signal->data_snr_avg =
167 CAL_SNR(priv->data_rssi_avg, priv->data_nf_avg);
168
169 /* NF */
170 signal->bcn_nf_last = priv->bcn_nf_last;
171 signal->bcn_nf_avg = priv->bcn_nf_avg;
172 signal->data_nf_last = priv->data_nf_last;
173 signal->data_nf_avg = priv->data_nf_avg;
174 }
175
176 return 0;
177}
178
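CAL_SNR here is the usual decibel subtraction, SNR = RSSI - NF, applied to values the firmware already reports in dBm/dB. A two-line check, assuming that definition of the macro:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the driver's definition: SNR(dB) = RSSI - NF. */
#define CAL_SNR(rssi, nf)	((int16_t) (rssi) - (int16_t) (nf))

int main(void)
{
	int16_t bcn_rssi_last = -55, bcn_nf_last = -92;

	printf("beacon SNR = %d dB\n",
	       CAL_SNR(bcn_rssi_last, bcn_nf_last));	/* prints 37 */
	return 0;
}
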
179/*
180 * This function handles the command response of set/get SNMP
181 * MIB parameters.
182 *
183 * Handling includes changing the header fields into CPU format
184 * and saving the parameter in driver.
185 *
186 * The following parameters are supported -
187 * - Fragmentation threshold
188 * - RTS threshold
189 * - Short retry limit
190 */
191static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
192 struct host_cmd_ds_command *resp,
193 void *data_buf)
194{
195 struct host_cmd_ds_802_11_snmp_mib *smib = &resp->params.smib;
196 u16 oid = le16_to_cpu(smib->oid);
197 u16 query_type = le16_to_cpu(smib->query_type);
198 u32 ul_temp;
199
200 dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x,"
201 " query_type = %#x, buf size = %#x\n",
202 oid, query_type, le16_to_cpu(smib->buf_size));
203 if (query_type == HostCmd_ACT_GEN_GET) {
204 ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
205 if (data_buf)
206 *(u32 *)data_buf = ul_temp;
207 switch (oid) {
208 case FRAG_THRESH_I:
209 dev_dbg(priv->adapter->dev,
210 "info: SNMP_RESP: FragThsd =%u\n", ul_temp);
211 break;
212 case RTS_THRESH_I:
213 dev_dbg(priv->adapter->dev,
214 "info: SNMP_RESP: RTSThsd =%u\n", ul_temp);
215 break;
216 case SHORT_RETRY_LIM_I:
217 dev_dbg(priv->adapter->dev,
218 "info: SNMP_RESP: TxRetryCount=%u\n", ul_temp);
219 break;
220 default:
221 break;
222 }
223 }
224
225 return 0;
226}
227
228/*
230 * This function handles the command response of a get log request.
230 *
231 * Handling includes changing the header fields into CPU format
232 * and sending the received parameters to application.
233 */
234static int mwifiex_ret_get_log(struct mwifiex_private *priv,
235 struct host_cmd_ds_command *resp,
236 void *data_buf)
237{
238 struct host_cmd_ds_802_11_get_log *get_log =
239 (struct host_cmd_ds_802_11_get_log *) &resp->params.get_log;
240 struct mwifiex_ds_get_stats *stats = NULL;
241
242 if (data_buf) {
243 stats = (struct mwifiex_ds_get_stats *) data_buf;
244 stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame);
245 stats->failed = le32_to_cpu(get_log->failed);
246 stats->retry = le32_to_cpu(get_log->retry);
247 stats->multi_retry = le32_to_cpu(get_log->multi_retry);
248 stats->frame_dup = le32_to_cpu(get_log->frame_dup);
249 stats->rts_success = le32_to_cpu(get_log->rts_success);
250 stats->rts_failure = le32_to_cpu(get_log->rts_failure);
251 stats->ack_failure = le32_to_cpu(get_log->ack_failure);
252 stats->rx_frag = le32_to_cpu(get_log->rx_frag);
253 stats->mcast_rx_frame = le32_to_cpu(get_log->mcast_rx_frame);
254 stats->fcs_error = le32_to_cpu(get_log->fcs_error);
255 stats->tx_frame = le32_to_cpu(get_log->tx_frame);
256 stats->wep_icv_error[0] =
257 le32_to_cpu(get_log->wep_icv_err_cnt[0]);
258 stats->wep_icv_error[1] =
259 le32_to_cpu(get_log->wep_icv_err_cnt[1]);
260 stats->wep_icv_error[2] =
261 le32_to_cpu(get_log->wep_icv_err_cnt[2]);
262 stats->wep_icv_error[3] =
263 le32_to_cpu(get_log->wep_icv_err_cnt[3]);
264 }
265
266 return 0;
267}
268
269/*
270 * This function handles the command response of set/get Tx rate
271 * configurations.
272 *
273 * Handling includes changing the header fields into CPU format
274 * and saving the following parameters in driver -
275 * - DSSS rate bitmap
276 * - OFDM rate bitmap
277 * - HT MCS rate bitmaps
278 *
279 * Based on the new rate bitmaps, the function re-evaluates if
280 * auto data rate has been activated. If not, it sends another
281 * query to the firmware to get the current Tx data rate and updates
282 * the driver value.
283 */
284static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
285 struct host_cmd_ds_command *resp,
286 void *data_buf)
287{
288 struct mwifiex_adapter *adapter = priv->adapter;
289 struct mwifiex_rate_cfg *ds_rate = NULL;
290 struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg;
291 struct mwifiex_rate_scope *rate_scope;
292 struct mwifiex_ie_types_header *head = NULL;
293 u16 tlv, tlv_buf_len;
294 u8 *tlv_buf;
295 u32 i;
296 int ret = 0;
297
298 tlv_buf = (u8 *) ((u8 *) rate_cfg) +
299 sizeof(struct host_cmd_ds_tx_rate_cfg);
300 tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
301
302 while (tlv_buf && tlv_buf_len > 0) {
303 tlv = (*tlv_buf);
304 tlv = tlv | (*(tlv_buf + 1) << 8);
305
306 switch (tlv) {
307 case TLV_TYPE_RATE_SCOPE:
308 rate_scope = (struct mwifiex_rate_scope *) tlv_buf;
309 priv->bitmap_rates[0] =
310 le16_to_cpu(rate_scope->hr_dsss_rate_bitmap);
311 priv->bitmap_rates[1] =
312 le16_to_cpu(rate_scope->ofdm_rate_bitmap);
313 for (i = 0;
314 i <
315 sizeof(rate_scope->ht_mcs_rate_bitmap) /
316 sizeof(u16); i++)
317 priv->bitmap_rates[2 + i] =
318 le16_to_cpu(rate_scope->
319 ht_mcs_rate_bitmap[i]);
320 break;
321 /* Add RATE_DROP tlv here */
322 }
323
324 head = (struct mwifiex_ie_types_header *) tlv_buf;
325 tlv_buf += le16_to_cpu(head->len) + sizeof(*head);
326 tlv_buf_len -= le16_to_cpu(head->len);
327 }
328
329 priv->is_data_rate_auto = mwifiex_is_rate_auto(priv);
330
331 if (priv->is_data_rate_auto)
332 priv->data_rate = 0;
333 else
334 ret = mwifiex_prepare_cmd(priv,
335 HostCmd_CMD_802_11_TX_RATE_QUERY,
336 HostCmd_ACT_GEN_GET, 0, NULL, NULL);
337
338 if (data_buf) {
339 ds_rate = (struct mwifiex_rate_cfg *) data_buf;
340 if (le16_to_cpu(rate_cfg->action) == HostCmd_ACT_GEN_GET) {
341 if (priv->is_data_rate_auto) {
342 ds_rate->is_rate_auto = 1;
343 } else {
344 ds_rate->rate =
345 mwifiex_get_rate_index(adapter,
346 priv->
347 bitmap_rates,
348 sizeof(priv->
349 bitmap_rates));
350 if (ds_rate->rate >=
351 MWIFIEX_RATE_BITMAP_OFDM0
352 && ds_rate->rate <=
353 MWIFIEX_RATE_BITMAP_OFDM7)
354 ds_rate->rate -=
355 (MWIFIEX_RATE_BITMAP_OFDM0 -
356 MWIFIEX_RATE_INDEX_OFDM0);
357 if (ds_rate->rate >=
358 MWIFIEX_RATE_BITMAP_MCS0
359 && ds_rate->rate <=
360 MWIFIEX_RATE_BITMAP_MCS127)
361 ds_rate->rate -=
362 (MWIFIEX_RATE_BITMAP_MCS0 -
363 MWIFIEX_RATE_INDEX_MCS0);
364 }
365 }
366 }
367
368 return ret;
369}
370
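Parsing is the mirror image of the TLV construction shown earlier. The standalone sketch below walks a buffer of the same generic TLV layout used in that example, stepping the cursor past header plus payload and shrinking the remaining byte budget by the same amount so a malformed length cannot run past the buffer; layout and values are illustrative only.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;	/* little-endian */
	uint16_t len;	/* payload bytes, excluding this header */
} __attribute__((packed));

static void parse_tlvs(const uint8_t *buf, size_t buf_len)
{
	while (buf_len >= sizeof(struct tlv_hdr)) {
		struct tlv_hdr hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		uint16_t len = le16toh(hdr.len);
		size_t rec = sizeof(hdr) + len;

		if (rec > buf_len)
			break;	/* truncated record: stop */
		printf("TLV type 0x%04x, %u payload bytes\n",
		       le16toh(hdr.type), len);
		buf += rec;	/* advance past header and payload */
		buf_len -= rec;	/* keep the remaining budget in step */
	}
}

int main(void)
{
	/* two records: type 0x01aa len 2, then type 0x01ab len 1 */
	const uint8_t buf[] = { 0xaa, 0x01, 0x02, 0x00, 0xff, 0x1f,
				0xab, 0x01, 0x01, 0x00, 0x00 };

	parse_tlvs(buf, sizeof(buf));
	return 0;
}
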
371/*
372 * This function handles the command response of get Tx power level.
373 *
374 * Handling includes saving the maximum and minimum Tx power levels
375 * in driver, as well as sending the values to user.
376 */
377static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
378{
379 int length = -1, max_power = -1, min_power = -1;
380 struct mwifiex_types_power_group *pg_tlv_hdr = NULL;
381 struct mwifiex_power_group *pg = NULL;
382
383 if (data_buf) {
384 pg_tlv_hdr =
385 (struct mwifiex_types_power_group *) ((u8 *) data_buf
386 + sizeof(struct host_cmd_ds_txpwr_cfg));
387 pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr +
388 sizeof(struct mwifiex_types_power_group));
389 length = pg_tlv_hdr->length;
390 if (length > 0) {
391 max_power = pg->power_max;
392 min_power = pg->power_min;
393 length -= sizeof(struct mwifiex_power_group);
394 }
395 while (length) {
396 pg++;
397 if (max_power < pg->power_max)
398 max_power = pg->power_max;
399
400 if (min_power > pg->power_min)
401 min_power = pg->power_min;
402
403 length -= sizeof(struct mwifiex_power_group);
404 }
405 if (pg_tlv_hdr->length > 0) {
406 priv->min_tx_power_level = (u8) min_power;
407 priv->max_tx_power_level = (u8) max_power;
408 }
409 } else {
410 return -1;
411 }
412
413 return 0;
414}
415
416/*
417 * This function handles the command response of set/get Tx power
418 * configurations.
419 *
420 * Handling includes changing the header fields into CPU format
421 * and saving the current Tx power level in driver.
422 */
423static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
424 struct host_cmd_ds_command *resp,
425 void *data_buf)
426{
427 struct mwifiex_adapter *adapter = priv->adapter;
428 struct host_cmd_ds_txpwr_cfg *txp_cfg = &resp->params.txp_cfg;
429 struct mwifiex_types_power_group *pg_tlv_hdr = NULL;
430 struct mwifiex_power_group *pg = NULL;
431 u16 action = le16_to_cpu(txp_cfg->action);
432
433 switch (action) {
434 case HostCmd_ACT_GEN_GET:
435 {
436 pg_tlv_hdr =
437 (struct mwifiex_types_power_group *) ((u8 *)
438 txp_cfg +
439 sizeof
440 (struct
441 host_cmd_ds_txpwr_cfg));
442 pg = (struct mwifiex_power_group *) ((u8 *)
443 pg_tlv_hdr +
444 sizeof(struct
445 mwifiex_types_power_group));
446 if (adapter->hw_status ==
447 MWIFIEX_HW_STATUS_INITIALIZING)
448 mwifiex_get_power_level(priv, txp_cfg);
449 priv->tx_power_level = (u16) pg->power_min;
450 break;
451 }
452 case HostCmd_ACT_GEN_SET:
453 if (le32_to_cpu(txp_cfg->mode)) {
454 pg_tlv_hdr =
455 (struct mwifiex_types_power_group *) ((u8 *)
456 txp_cfg +
457 sizeof
458 (struct
459 host_cmd_ds_txpwr_cfg));
460 pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr
461 +
462 sizeof(struct
463 mwifiex_types_power_group));
464 if (pg->power_max == pg->power_min)
465 priv->tx_power_level = (u16) pg->power_min;
466 }
467 break;
468 default:
469 dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n",
470 action);
471 return 0;
472 }
473 dev_dbg(adapter->dev,
474 "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
475 priv->tx_power_level, priv->max_tx_power_level,
476 priv->min_tx_power_level);
477
478 return 0;
479}
480
481/*
482 * This function handles the command response of set/get MAC address.
483 *
484 * Handling includes saving the MAC address in driver.
485 */
486static int mwifiex_ret_802_11_mac_address(struct mwifiex_private *priv,
487 struct host_cmd_ds_command *resp)
488{
489 struct host_cmd_ds_802_11_mac_address *cmd_mac_addr =
490 &resp->params.mac_addr;
491
492 memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN);
493
494 dev_dbg(priv->adapter->dev,
495 "info: set mac address: %pM\n", priv->curr_addr);
496
497 return 0;
498}
499
500/*
501 * This function handles the command response of set/get MAC multicast
502 * address. No driver state needs updating here, so it simply succeeds.
503 */
504static int mwifiex_ret_mac_multicast_adr(struct mwifiex_private *priv,
505 struct host_cmd_ds_command *resp)
506{
507 return 0;
508}
509
510/*
511 * This function handles the command response of get Tx rate query.
512 *
513 * Handling includes changing the header fields into CPU format
514 * and saving the Tx rate and HT information parameters in driver.
515 *
516 * Both rate configuration and current data rate can be retrieved
517 * with this request.
518 */
519static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv,
520 struct host_cmd_ds_command *resp)
521{
522 struct mwifiex_adapter *adapter = priv->adapter;
523
524 priv->tx_rate = resp->params.tx_rate.tx_rate;
525 priv->tx_htinfo = resp->params.tx_rate.ht_info;
526 if (!priv->is_data_rate_auto)
527 priv->data_rate =
528 mwifiex_index_to_data_rate(adapter, priv->tx_rate,
529 priv->tx_htinfo);
530
531 return 0;
532}
533
534/*
535 * This function handles the command response of a deauthenticate
536 * command.
537 *
538 * If the deauthenticated MAC matches the current BSS MAC, the connection
539 * state is reset.
540 */
541static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
542 struct host_cmd_ds_command *resp)
543{
544 struct mwifiex_adapter *adapter = priv->adapter;
545
546 adapter->dbg.num_cmd_deauth++;
547 if (!memcmp(resp->params.deauth.mac_addr,
548 &priv->curr_bss_params.bss_descriptor.mac_address,
549 sizeof(resp->params.deauth.mac_addr)))
550 mwifiex_reset_connect_state(priv);
551
552 return 0;
553}
554
555/*
556 * This function handles the command response of ad-hoc stop.
557 *
558 * The function resets the connection state in driver.
559 */
560static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
561 struct host_cmd_ds_command *resp)
562{
563 mwifiex_reset_connect_state(priv);
564 return 0;
565}
566
567/*
568 * This function handles the command response of set/get key material.
569 *
570 * Handling includes updating the driver parameters to reflect the
571 * changes.
572 */
573static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
574 struct host_cmd_ds_command *resp)
575{
576 struct host_cmd_ds_802_11_key_material *key =
577 &resp->params.key_material;
578
579 if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
580 if ((le16_to_cpu(key->key_param_set.key_info) &
581 KEY_INFO_TKIP_MCAST)) {
582 dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
583 priv->wpa_is_gtk_set = true;
584 priv->scan_block = false;
585 }
586 }
587
588 memset(priv->aes_key.key_param_set.key, 0,
589 sizeof(key->key_param_set.key));
590 priv->aes_key.key_param_set.key_len = key->key_param_set.key_len;
591 memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key,
592 le16_to_cpu(priv->aes_key.key_param_set.key_len));
593
594 return 0;
595}
596
597/*
598 * This function handles the command response of get 11d domain information.
599 */
600static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
601 struct host_cmd_ds_command *resp)
602{
603 struct host_cmd_ds_802_11d_domain_info_rsp *domain_info =
604 &resp->params.domain_info_resp;
605 struct mwifiex_ietypes_domain_param_set *domain = &domain_info->domain;
606 u16 action = le16_to_cpu(domain_info->action);
607 u8 no_of_triplet = 0;
608
609 no_of_triplet = (u8) ((le16_to_cpu(domain->header.len) -
610 IEEE80211_COUNTRY_STRING_LEN) /
611 sizeof(struct ieee80211_country_ie_triplet));
612
613 dev_dbg(priv->adapter->dev, "info: 11D Domain Info Resp:"
614 " no_of_triplet=%d\n", no_of_triplet);
615
616 if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) {
617 dev_warn(priv->adapter->dev,
618 "11D: invalid number of triplets %d "
619 "returned!!\n", no_of_triplet);
620 return -1;
621 }
622
623 switch (action) {
624 case HostCmd_ACT_GEN_SET: /* Proc Set Action */
625 break;
626 case HostCmd_ACT_GEN_GET:
627 break;
628 default:
629 dev_err(priv->adapter->dev,
630 "11D: invalid action:%d\n", action);
631 return -1;
632 }
633
634 return 0;
635}
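/*
 * A worked example of the triplet arithmetic above, assuming the usual
 * sizes (IEEE80211_COUNTRY_STRING_LEN == 3 and a 3-byte
 * ieee80211_country_ie_triplet): a domain IE with header.len == 9
 * yields (9 - 3) / 3 == 2 channel/power triplets.
 */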
636
637/*
638 * This function handles the command response of get RF channel.
639 *
640 * Handling includes changing the header fields into CPU format
641 * and saving the new channel in driver.
642 */
643static int mwifiex_ret_802_11_rf_channel(struct mwifiex_private *priv,
644 struct host_cmd_ds_command *resp,
645 void *data_buf)
646{
647 struct host_cmd_ds_802_11_rf_channel *rf_channel =
648 &resp->params.rf_channel;
649 u16 new_channel = le16_to_cpu(rf_channel->current_channel);
650
651 if (priv->curr_bss_params.bss_descriptor.channel != new_channel) {
652 dev_dbg(priv->adapter->dev, "cmd: Channel Switch: %d to %d\n",
653 priv->curr_bss_params.bss_descriptor.channel,
654 new_channel);
655 /* Update the channel again */
656 priv->curr_bss_params.bss_descriptor.channel = new_channel;
657 }
658 if (data_buf)
659 *((u16 *)data_buf) = new_channel;
660
661 return 0;
662}
663
664/*
665 * This function handles the command response of get extended version.
666 *
667 * Handling includes forming the extended version string and sending it
668 * to application.
669 */
670static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
671 struct host_cmd_ds_command *resp,
672 void *data_buf)
673{
674 struct host_cmd_ds_version_ext *ver_ext = &resp->params.verext;
675 struct host_cmd_ds_version_ext *version_ext = NULL;
676
677 if (data_buf) {
678 version_ext = (struct host_cmd_ds_version_ext *)data_buf;
679 version_ext->version_str_sel = ver_ext->version_str_sel;
680 memcpy(version_ext->version_str, ver_ext->version_str, 128);
682 memcpy(priv->version_str, ver_ext->version_str, 128);
683 }
684 return 0;
685}
686
687/*
688 * This function handles the command response of register access.
689 *
690 * The register value and offset are returned to the user. For EEPROM
691 * access, the byte count is also returned.
692 */
693static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
694 void *data_buf)
695{
696 struct mwifiex_ds_reg_rw *reg_rw = NULL;
697 struct mwifiex_ds_read_eeprom *eeprom = NULL;
698
699 if (data_buf) {
700 reg_rw = (struct mwifiex_ds_reg_rw *) data_buf;
701 eeprom = (struct mwifiex_ds_read_eeprom *) data_buf;
702 switch (type) {
703 case HostCmd_CMD_MAC_REG_ACCESS:
704 {
705 struct host_cmd_ds_mac_reg_access *reg;
706 reg = (struct host_cmd_ds_mac_reg_access *)
707 &resp->params.mac_reg;
708 reg_rw->offset = cpu_to_le32(
709 (u32) le16_to_cpu(reg->offset));
710 reg_rw->value = reg->value;
711 break;
712 }
713 case HostCmd_CMD_BBP_REG_ACCESS:
714 {
715 struct host_cmd_ds_bbp_reg_access *reg;
716 reg = (struct host_cmd_ds_bbp_reg_access *)
717 &resp->params.bbp_reg;
718 reg_rw->offset = cpu_to_le32(
719 (u32) le16_to_cpu(reg->offset));
720 reg_rw->value = cpu_to_le32((u32) reg->value);
721 break;
722 }
723
724 case HostCmd_CMD_RF_REG_ACCESS:
725 {
726 struct host_cmd_ds_rf_reg_access *reg;
727 reg = (struct host_cmd_ds_rf_reg_access *)
728 &resp->params.rf_reg;
729 reg_rw->offset = cpu_to_le32(
730 (u32) le16_to_cpu(reg->offset));
731 reg_rw->value = cpu_to_le32((u32) reg->value);
732 break;
733 }
734 case HostCmd_CMD_PMIC_REG_ACCESS:
735 {
736 struct host_cmd_ds_pmic_reg_access *reg;
737 reg = (struct host_cmd_ds_pmic_reg_access *)
738 &resp->params.pmic_reg;
739 reg_rw->offset = cpu_to_le32(
740 (u32) le16_to_cpu(reg->offset));
741 reg_rw->value = cpu_to_le32((u32) reg->value);
742 break;
743 }
744 case HostCmd_CMD_CAU_REG_ACCESS:
745 {
746 struct host_cmd_ds_rf_reg_access *reg;
747 reg = (struct host_cmd_ds_rf_reg_access *)
748 &resp->params.rf_reg;
749 reg_rw->offset = cpu_to_le32(
750 (u32) le16_to_cpu(reg->offset));
751 reg_rw->value = cpu_to_le32((u32) reg->value);
752 break;
753 }
754 case HostCmd_CMD_802_11_EEPROM_ACCESS:
755 {
756 struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
757 (struct host_cmd_ds_802_11_eeprom_access *)
758 &resp->params.eeprom;
759 pr_debug("info: EEPROM read len=%x\n",
760 le16_to_cpu(cmd_eeprom->byte_count));
761 if (le16_to_cpu(eeprom->byte_count) <
762 le16_to_cpu(cmd_eeprom->byte_count)) {
763 eeprom->byte_count = cpu_to_le16(0);
764 pr_debug("info: EEPROM read length exceeds "
765 "the user buffer\n");
766 return -1;
767 }
768 eeprom->offset = cmd_eeprom->offset;
769 eeprom->byte_count = cmd_eeprom->byte_count;
770 if (le16_to_cpu(eeprom->byte_count) > 0)
771 memcpy(&eeprom->value, &cmd_eeprom->value,
772 le16_to_cpu(eeprom->byte_count));
773
774 break;
775 }
779 default:
780 return -1;
781 }
782 }
783 return 0;
784}
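/*
 * Usage sketch (illustrative only): a caller that issued a MAC
 * register read can unpack the response through the handler above;
 * resp and the surrounding context are assumed.
 *
 *	struct mwifiex_ds_reg_rw reg_rw;
 *
 *	if (!mwifiex_ret_reg_access(HostCmd_CMD_MAC_REG_ACCESS,
 *				    resp, &reg_rw))
 *		pr_debug("reg 0x%x = 0x%x\n",
 *			 le32_to_cpu(reg_rw.offset),
 *			 le32_to_cpu(reg_rw.value));
 */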
785
786/*
787 * This function handles the command response of get IBSS coalescing status.
788 *
789 * If the received BSSID is different from the current one, the current
790 * BSSID, beacon interval and ERP information are updated, and the
791 * ad-hoc state is changed accordingly.
792 */
793static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
794 struct host_cmd_ds_command *resp)
795{
796 struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp =
797 &(resp->params.ibss_coalescing);
798 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
799
800 if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
801 return 0;
802
803 dev_dbg(priv->adapter->dev,
804 "info: new BSSID %pM\n", ibss_coal_resp->bssid);
805
806 /* If the response carries an all-zero BSSID, no action is needed */
807 if (!memcmp(ibss_coal_resp->bssid, zero_mac, ETH_ALEN)) {
808 dev_warn(priv->adapter->dev, "new BSSID is all-zero\n");
809 return 0;
810 }
811
812 /* If the BSSID differs, update the current BSS parameters */
813 if (memcmp(priv->curr_bss_params.bss_descriptor.mac_address,
814 ibss_coal_resp->bssid, ETH_ALEN)) {
815 /* BSSID */
816 memcpy(priv->curr_bss_params.bss_descriptor.mac_address,
817 ibss_coal_resp->bssid, ETH_ALEN);
818
819 /* Beacon Interval */
820 priv->curr_bss_params.bss_descriptor.beacon_period
821 = le16_to_cpu(ibss_coal_resp->beacon_interval);
822
823 /* ERP Information */
824 priv->curr_bss_params.bss_descriptor.erp_flags =
825 (u8) le16_to_cpu(ibss_coal_resp->use_g_rate_protect);
826
827 priv->adhoc_state = ADHOC_COALESCED;
828 }
829
830 return 0;
831}
832
833/*
834 * This function handles the command responses.
835 *
836 * This is a generic function, which calls command specific
837 * response handlers based on the command ID.
838 */
839int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv,
840 u16 cmdresp_no, void *cmd_buf, void *wq_buf)
841{
842 int ret = 0;
843 struct mwifiex_adapter *adapter = priv->adapter;
844 struct host_cmd_ds_command *resp =
845 (struct host_cmd_ds_command *) cmd_buf;
846 struct mwifiex_wait_queue *wait_queue =
847 (struct mwifiex_wait_queue *) wq_buf;
848 void *data_buf = adapter->curr_cmd->data_buf;
849
850 /* If the command is not successful, cleanup and return failure */
851 if (resp->result != HostCmd_RESULT_OK) {
852 mwifiex_process_cmdresp_error(priv, resp, wait_queue);
853 return -1;
854 }
855 /* Command successful, handle response */
856 switch (cmdresp_no) {
857 case HostCmd_CMD_GET_HW_SPEC:
858 ret = mwifiex_ret_get_hw_spec(priv, resp);
859 break;
860 case HostCmd_CMD_MAC_CONTROL:
861 break;
862 case HostCmd_CMD_802_11_MAC_ADDRESS:
863 ret = mwifiex_ret_802_11_mac_address(priv, resp);
864 break;
865 case HostCmd_CMD_MAC_MULTICAST_ADR:
866 ret = mwifiex_ret_mac_multicast_adr(priv, resp);
867 break;
868 case HostCmd_CMD_TX_RATE_CFG:
869 ret = mwifiex_ret_tx_rate_cfg(priv, resp, data_buf);
870 break;
871 case HostCmd_CMD_802_11_SCAN:
872 ret = mwifiex_ret_802_11_scan(priv, resp, wait_queue);
873 wait_queue = NULL;
874 adapter->curr_cmd->wq_buf = NULL;
875 break;
876 case HostCmd_CMD_802_11_BG_SCAN_QUERY:
877 ret = mwifiex_ret_802_11_scan(priv, resp, wait_queue);
878 dev_dbg(adapter->dev,
879 "info: CMD_RESP: BG_SCAN result is ready!\n");
880 break;
881 case HostCmd_CMD_TXPWR_CFG:
882 ret = mwifiex_ret_tx_power_cfg(priv, resp, data_buf);
883 break;
884 case HostCmd_CMD_802_11_PS_MODE_ENH:
885 ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf);
886 break;
887 case HostCmd_CMD_802_11_HS_CFG_ENH:
888 ret = mwifiex_ret_802_11_hs_cfg(priv, resp);
889 break;
890 case HostCmd_CMD_802_11_ASSOCIATE:
891 ret = mwifiex_ret_802_11_associate(priv, resp, wait_queue);
892 break;
893 case HostCmd_CMD_802_11_DEAUTHENTICATE:
894 ret = mwifiex_ret_802_11_deauthenticate(priv, resp);
895 break;
896 case HostCmd_CMD_802_11_AD_HOC_START:
897 case HostCmd_CMD_802_11_AD_HOC_JOIN:
898 ret = mwifiex_ret_802_11_ad_hoc(priv, resp, wait_queue);
899 break;
900 case HostCmd_CMD_802_11_AD_HOC_STOP:
901 ret = mwifiex_ret_802_11_ad_hoc_stop(priv, resp);
902 break;
903 case HostCmd_CMD_802_11_GET_LOG:
904 ret = mwifiex_ret_get_log(priv, resp, data_buf);
905 break;
906 case HostCmd_CMD_RSSI_INFO:
907 ret = mwifiex_ret_802_11_rssi_info(priv, resp, data_buf);
908 break;
909 case HostCmd_CMD_802_11_SNMP_MIB:
910 ret = mwifiex_ret_802_11_snmp_mib(priv, resp, data_buf);
911 break;
912 case HostCmd_CMD_802_11_TX_RATE_QUERY:
913 ret = mwifiex_ret_802_11_tx_rate_query(priv, resp);
914 break;
915 case HostCmd_CMD_802_11_RF_CHANNEL:
916 ret = mwifiex_ret_802_11_rf_channel(priv, resp, data_buf);
917 break;
918 case HostCmd_CMD_VERSION_EXT:
919 ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
920 break;
921 case HostCmd_CMD_FUNC_INIT:
922 case HostCmd_CMD_FUNC_SHUTDOWN:
923 break;
924 case HostCmd_CMD_802_11_KEY_MATERIAL:
925 ret = mwifiex_ret_802_11_key_material(priv, resp);
926 break;
927 case HostCmd_CMD_802_11D_DOMAIN_INFO:
928 ret = mwifiex_ret_802_11d_domain_info(priv, resp);
929 break;
930 case HostCmd_CMD_11N_ADDBA_REQ:
931 ret = mwifiex_ret_11n_addba_req(priv, resp);
932 break;
933 case HostCmd_CMD_11N_DELBA:
934 ret = mwifiex_ret_11n_delba(priv, resp);
935 break;
936 case HostCmd_CMD_11N_ADDBA_RSP:
937 ret = mwifiex_ret_11n_addba_resp(priv, resp);
938 break;
939 case HostCmd_CMD_RECONFIGURE_TX_BUFF:
940 adapter->tx_buf_size =
941 (u16) le16_to_cpu(resp->params.tx_buf.buff_size);
942 adapter->tx_buf_size = (adapter->tx_buf_size /
943 MWIFIEX_SDIO_BLOCK_SIZE) *
944 MWIFIEX_SDIO_BLOCK_SIZE;
945 adapter->curr_tx_buf_size = adapter->tx_buf_size;
946 dev_dbg(adapter->dev,
947 "cmd: max_tx_buf_size=%d, tx_buf_size=%d\n",
948 adapter->max_tx_buf_size, adapter->tx_buf_size);
949
950 if (adapter->if_ops.update_mp_end_port)
951 adapter->if_ops.update_mp_end_port(adapter,
952 le16_to_cpu(resp->params.tx_buf.mp_end_port));
956 break;
957 case HostCmd_CMD_AMSDU_AGGR_CTRL:
958 ret = mwifiex_ret_amsdu_aggr_ctrl(priv, resp, data_buf);
959 break;
960 case HostCmd_CMD_WMM_GET_STATUS:
961 ret = mwifiex_ret_wmm_get_status(priv, resp);
962 break;
963 case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
964 ret = mwifiex_ret_ibss_coalescing_status(priv, resp);
965 break;
966 case HostCmd_CMD_MAC_REG_ACCESS:
967 case HostCmd_CMD_BBP_REG_ACCESS:
968 case HostCmd_CMD_RF_REG_ACCESS:
969 case HostCmd_CMD_PMIC_REG_ACCESS:
970 case HostCmd_CMD_CAU_REG_ACCESS:
971 case HostCmd_CMD_802_11_EEPROM_ACCESS:
972 ret = mwifiex_ret_reg_access(cmdresp_no, resp, data_buf);
973 break;
974 case HostCmd_CMD_SET_BSS_MODE:
975 break;
976 case HostCmd_CMD_11N_CFG:
977 ret = mwifiex_ret_11n_cfg(priv, resp, data_buf);
978 break;
979 default:
980 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
981 resp->command);
982 break;
983 }
984
985 return ret;
986}
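/*
 * Pattern sketch for wiring up a new response (command ID and handler
 * name below are hypothetical): add a case to the dispatcher above
 * that forwards resp, plus data_buf when the caller expects results.
 *
 *	case HostCmd_CMD_EXAMPLE_NEW:
 *		ret = mwifiex_ret_example_new(priv, resp, data_buf);
 *		break;
 */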
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
new file mode 100644
index 00000000000..d4a5c1fcefc
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -0,0 +1,405 @@
1/*
2 * Marvell Wireless LAN device driver: station event handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * This function resets the connection state.
30 *
31 * The function is invoked after receiving a disconnect event from firmware,
32 * and performs the following actions -
33 * - Sets media status to disconnected
34 * - Cleans up Tx and Rx packets
35 * - Resets SNR/NF/RSSI values in driver
36 * - Resets security configurations in driver
37 * - Enables auto data rate
38 * - Saves the previous SSID and BSSID so that they can
39 * be used for re-association, if required
40 * - Erases current SSID and BSSID information
41 * - Sends a disconnect event to upper layers/applications.
42 */
43void
44mwifiex_reset_connect_state(struct mwifiex_private *priv)
45{
46 struct mwifiex_adapter *adapter = priv->adapter;
47
48 if (!priv->media_connected)
49 return;
50
51 dev_dbg(adapter->dev, "info: handling disconnect event\n");
52
53 priv->media_connected = false;
54
55 priv->scan_block = false;
56
57 /* Free Tx and Rx packets, report disconnect to upper layer */
58 mwifiex_clean_txrx(priv);
59
60 /* Reset SNR/NF/RSSI values */
61 priv->data_rssi_last = 0;
62 priv->data_nf_last = 0;
63 priv->data_rssi_avg = 0;
64 priv->data_nf_avg = 0;
65 priv->bcn_rssi_last = 0;
66 priv->bcn_nf_last = 0;
67 priv->bcn_rssi_avg = 0;
68 priv->bcn_nf_avg = 0;
69 priv->rxpd_rate = 0;
70 priv->rxpd_htinfo = 0;
71 priv->sec_info.wpa_enabled = false;
72 priv->sec_info.wpa2_enabled = false;
73 priv->wpa_ie_len = 0;
74
75 priv->sec_info.wapi_enabled = false;
76 priv->wapi_ie_len = 0;
77 priv->sec_info.wapi_key_on = false;
78
79 priv->sec_info.encryption_mode = MWIFIEX_ENCRYPTION_MODE_NONE;
80
81 /* Enable auto data rate */
82 priv->is_data_rate_auto = true;
83 priv->data_rate = 0;
84
85 if (priv->bss_mode == MWIFIEX_BSS_MODE_IBSS) {
86 priv->adhoc_state = ADHOC_IDLE;
87 priv->adhoc_is_link_sensed = false;
88 }
89
90 /*
91 * Memorize the previous SSID and BSSID so
92 * they can be used for re-association
93 */
94
95 dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n",
96 priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
97
98 dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n",
99 priv->curr_bss_params.bss_descriptor.ssid.ssid,
100 priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
101
102 memcpy(&priv->prev_ssid,
103 &priv->curr_bss_params.bss_descriptor.ssid,
104 sizeof(struct mwifiex_802_11_ssid));
105
106 memcpy(priv->prev_bssid,
107 priv->curr_bss_params.bss_descriptor.mac_address, ETH_ALEN);
108
109 /* Need to erase the current SSID and BSSID info */
110 memset(&priv->curr_bss_params, 0x00, sizeof(priv->curr_bss_params));
111
112 adapter->tx_lock_flag = false;
113 adapter->pps_uapsd_mode = false;
114
115 if (adapter->num_cmd_timeout && adapter->curr_cmd)
116 return;
117 priv->media_connected = false;
118 if (!priv->disconnect) {
119 priv->disconnect = 1;
120 dev_dbg(adapter->dev, "info: successfully disconnected from"
121 " %pM: reason code %d\n", priv->cfg_bssid,
122 WLAN_REASON_DEAUTH_LEAVING);
123 cfg80211_disconnected(priv->netdev,
124 WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
125 GFP_KERNEL);
126 queue_work(priv->workqueue, &priv->cfg_workqueue);
127 }
128 if (!netif_queue_stopped(priv->netdev))
129 netif_stop_queue(priv->netdev);
130 if (netif_carrier_ok(priv->netdev))
131 netif_carrier_off(priv->netdev);
132 /* Reset wireless stats signal info */
133 priv->w_stats.qual.level = 0;
134 priv->w_stats.qual.noise = 0;
135}
136
137/*
138 * This function handles events generated by firmware.
139 *
140 * This is a generic function and handles all events.
141 *
142 * Event specific routines are called by this function based
143 * upon the generated event cause.
144 *
145 * For the following events, the function just forwards them to upper
146 * layers, optionally recording the change -
147 * - EVENT_LINK_SENSED
148 * - EVENT_MIC_ERR_UNICAST
149 * - EVENT_MIC_ERR_MULTICAST
150 * - EVENT_PORT_RELEASE
151 * - EVENT_RSSI_LOW
152 * - EVENT_SNR_LOW
153 * - EVENT_MAX_FAIL
154 * - EVENT_RSSI_HIGH
155 * - EVENT_SNR_HIGH
156 * - EVENT_DATA_RSSI_LOW
157 * - EVENT_DATA_SNR_LOW
158 * - EVENT_DATA_RSSI_HIGH
159 * - EVENT_DATA_SNR_HIGH
160 * - EVENT_LINK_QUALITY
161 * - EVENT_PRE_BEACON_LOST
162 * - EVENT_IBSS_COALESCED
163 * - EVENT_WEP_ICV_ERR
164 * - EVENT_BW_CHANGE
165 * - EVENT_HOSTWAKE_STAIE
166 *
167 * For the following events, no action is taken -
168 * - EVENT_MIB_CHANGED
169 * - EVENT_INIT_DONE
170 * - EVENT_DUMMY_HOST_WAKEUP_SIGNAL
171 *
172 * Rest of the supported events requires driver handling -
173 * - EVENT_DEAUTHENTICATED
174 * - EVENT_DISASSOCIATED
175 * - EVENT_LINK_LOST
176 * - EVENT_PS_SLEEP
177 * - EVENT_PS_AWAKE
178 * - EVENT_DEEP_SLEEP_AWAKE
179 * - EVENT_HS_ACT_REQ
180 * - EVENT_ADHOC_BCN_LOST
181 * - EVENT_BG_SCAN_REPORT
182 * - EVENT_WMM_STATUS_CHANGE
183 * - EVENT_ADDBA
184 * - EVENT_DELBA
185 * - EVENT_BA_STREAM_TIEMOUT
186 * - EVENT_AMSDU_AGGR_CTRL
187 */
188int mwifiex_process_sta_event(struct mwifiex_private *priv)
189{
190 struct mwifiex_adapter *adapter = priv->adapter;
191 int ret = 0;
192 u32 eventcause = adapter->event_cause;
193
194 switch (eventcause) {
195 case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
196 dev_err(adapter->dev, "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL,"
197 " ignoring it\n");
198 break;
199 case EVENT_LINK_SENSED:
200 dev_dbg(adapter->dev, "event: LINK_SENSED\n");
201 if (!netif_carrier_ok(priv->netdev))
202 netif_carrier_on(priv->netdev);
203 if (netif_queue_stopped(priv->netdev))
204 netif_wake_queue(priv->netdev);
205 break;
206
207 case EVENT_DEAUTHENTICATED:
208 dev_dbg(adapter->dev, "event: Deauthenticated\n");
209 adapter->dbg.num_event_deauth++;
210 if (priv->media_connected)
211 mwifiex_reset_connect_state(priv);
212 break;
213
214 case EVENT_DISASSOCIATED:
215 dev_dbg(adapter->dev, "event: Disassociated\n");
216 adapter->dbg.num_event_disassoc++;
217 if (priv->media_connected)
218 mwifiex_reset_connect_state(priv);
219 break;
220
221 case EVENT_LINK_LOST:
222 dev_dbg(adapter->dev, "event: Link lost\n");
223 adapter->dbg.num_event_link_lost++;
224 if (priv->media_connected)
225 mwifiex_reset_connect_state(priv);
226 break;
227
228 case EVENT_PS_SLEEP:
229 dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
230
231 adapter->ps_state = PS_STATE_PRE_SLEEP;
232
233 mwifiex_check_ps_cond(adapter);
234 break;
235
236 case EVENT_PS_AWAKE:
237 dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
238 if (!adapter->pps_uapsd_mode &&
239 priv->media_connected &&
240 adapter->sleep_period.period) {
241 adapter->pps_uapsd_mode = true;
242 dev_dbg(adapter->dev,
243 "event: PPS/UAPSD mode activated\n");
244 }
245 adapter->tx_lock_flag = false;
246 if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
247 if (mwifiex_check_last_packet_indication(priv)) {
248 if (!adapter->data_sent) {
249 if (!mwifiex_send_null_packet(priv,
250 MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
251 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET))
252 adapter->ps_state = PS_STATE_SLEEP;
253 return 0;
256 }
257 }
258 }
259 adapter->ps_state = PS_STATE_AWAKE;
260 adapter->pm_wakeup_card_req = false;
261 adapter->pm_wakeup_fw_try = false;
262
263 break;
264
265 case EVENT_DEEP_SLEEP_AWAKE:
266 adapter->if_ops.wakeup_complete(adapter);
267 dev_dbg(adapter->dev, "event: DS_AWAKE\n");
268 if (adapter->is_deep_sleep)
269 adapter->is_deep_sleep = false;
270 break;
271
272 case EVENT_HS_ACT_REQ:
273 dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
274 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
275 0, 0, NULL, NULL);
276 break;
277
278 case EVENT_MIC_ERR_UNICAST:
279 dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
280 break;
281
282 case EVENT_MIC_ERR_MULTICAST:
283 dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
284 break;
285 case EVENT_MIB_CHANGED:
286 case EVENT_INIT_DONE:
287 break;
288
289 case EVENT_ADHOC_BCN_LOST:
290 dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n");
291 priv->adhoc_is_link_sensed = false;
292 mwifiex_clean_txrx(priv);
293 if (!netif_queue_stopped(priv->netdev))
294 netif_stop_queue(priv->netdev);
295 if (netif_carrier_ok(priv->netdev))
296 netif_carrier_off(priv->netdev);
297 break;
298
299 case EVENT_BG_SCAN_REPORT:
300 dev_dbg(adapter->dev, "event: BGS_REPORT\n");
301 /* Clear the previous scan result */
302 memset(adapter->scan_table, 0x00,
303 sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP);
304 adapter->num_in_scan_table = 0;
305 adapter->bcn_buf_end = adapter->bcn_buf;
306 ret = mwifiex_prepare_cmd(priv,
307 HostCmd_CMD_802_11_BG_SCAN_QUERY,
308 HostCmd_ACT_GEN_GET, 0, NULL, NULL);
309 break;
310
311 case EVENT_PORT_RELEASE:
312 dev_dbg(adapter->dev, "event: PORT RELEASE\n");
313 break;
314
315 case EVENT_WMM_STATUS_CHANGE:
316 dev_dbg(adapter->dev, "event: WMM status changed\n");
317 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
318 0, 0, NULL, NULL);
319 break;
320
321 case EVENT_RSSI_LOW:
322 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
323 break;
324 case EVENT_SNR_LOW:
325 dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n");
326 break;
327 case EVENT_MAX_FAIL:
328 dev_dbg(adapter->dev, "event: MAX_FAIL\n");
329 break;
330 case EVENT_RSSI_HIGH:
331 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
332 break;
333 case EVENT_SNR_HIGH:
334 dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n");
335 break;
336 case EVENT_DATA_RSSI_LOW:
337 dev_dbg(adapter->dev, "event: Data RSSI_LOW\n");
338 break;
339 case EVENT_DATA_SNR_LOW:
340 dev_dbg(adapter->dev, "event: Data SNR_LOW\n");
341 break;
342 case EVENT_DATA_RSSI_HIGH:
343 dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n");
344 break;
345 case EVENT_DATA_SNR_HIGH:
346 dev_dbg(adapter->dev, "event: Data SNR_HIGH\n");
347 break;
348 case EVENT_LINK_QUALITY:
349 dev_dbg(adapter->dev, "event: Link Quality\n");
350 break;
351 case EVENT_PRE_BEACON_LOST:
352 dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n");
353 break;
354 case EVENT_IBSS_COALESCED:
355 dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
356 ret = mwifiex_prepare_cmd(priv,
357 HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
358 HostCmd_ACT_GEN_GET, 0, NULL, NULL);
359 break;
360 case EVENT_ADDBA:
361 dev_dbg(adapter->dev, "event: ADDBA Request\n");
362 mwifiex_prepare_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
363 HostCmd_ACT_GEN_SET, 0, NULL,
364 adapter->event_body);
365 break;
366 case EVENT_DELBA:
367 dev_dbg(adapter->dev, "event: DELBA Request\n");
368 mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
369 break;
370 case EVENT_BA_STREAM_TIEMOUT:
371 dev_dbg(adapter->dev, "event: BA Stream timeout\n");
372 mwifiex_11n_ba_stream_timeout(priv,
373 (struct host_cmd_ds_11n_batimeout *)
374 adapter->event_body);
376 break;
377 case EVENT_AMSDU_AGGR_CTRL:
378 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n",
379 *(u16 *) adapter->event_body);
380 adapter->tx_buf_size =
381 min(adapter->curr_tx_buf_size,
382 le16_to_cpu(*(__le16 *) adapter->event_body));
383 dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
384 adapter->tx_buf_size);
385 break;
386
387 case EVENT_WEP_ICV_ERR:
388 dev_dbg(adapter->dev, "event: WEP ICV error\n");
389 break;
390
391 case EVENT_BW_CHANGE:
392 dev_dbg(adapter->dev, "event: BW Change\n");
393 break;
394
395 case EVENT_HOSTWAKE_STAIE:
396 dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
397 break;
398 default:
399 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
400 eventcause);
401 break;
402 }
403
404 return ret;
405}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
new file mode 100644
index 00000000000..665a519b140
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -0,0 +1,2478 @@
1/*
2 * Marvell Wireless LAN device driver: functions for station ioctl
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27#include "cfg80211.h"
28
29/*
30 * Copies the multicast address list from device to driver.
31 *
32 * This function does not validate the destination memory for
33 * size, and the calling function must ensure enough memory is
34 * available.
35 */
36static int
37mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
38 struct net_device *dev)
39{
40 int i = 0;
41 struct netdev_hw_addr *ha;
42
43 netdev_for_each_mc_addr(ha, dev)
44 memcpy(&mlist->mac_list[i++], ha->addr, ETH_ALEN);
45
46 return i;
47}
48
49/*
50 * Allocates and fills a wait queue with the proper parameters.
51 *
52 * This function needs to be called before an IOCTL request can be made.
53 * It can handle the following wait options:
54 * MWIFIEX_NO_WAIT - Waiting is disabled
55 * MWIFIEX_IOCTL_WAIT - Waiting is done on IOCTL wait queue
56 * MWIFIEX_CMD_WAIT - Waiting is done on command wait queue
57 * MWIFIEX_WSTATS_WAIT - Waiting is done on stats wait queue
58 */
59struct mwifiex_wait_queue *
60mwifiex_alloc_fill_wait_queue(struct mwifiex_private *priv,
61 u8 wait_option)
62{
63 struct mwifiex_wait_queue *wait = NULL;
64
65 wait = kzalloc(sizeof(struct mwifiex_wait_queue), GFP_ATOMIC);
67 if (!wait) {
68 dev_err(priv->adapter->dev, "%s: failed to allocate buffer\n",
69 __func__);
70 return wait;
71 }
72
73 wait->bss_index = priv->bss_index;
74
75 switch (wait_option) {
76 case MWIFIEX_NO_WAIT:
77 wait->enabled = 0;
78 break;
79 case MWIFIEX_IOCTL_WAIT:
80 priv->ioctl_wait_q_woken = false;
81 wait->start_time = jiffies;
82 wait->wait = &priv->ioctl_wait_q;
83 wait->condition = &priv->ioctl_wait_q_woken;
84 wait->enabled = 1;
85 break;
86 case MWIFIEX_CMD_WAIT:
87 priv->cmd_wait_q_woken = false;
88 wait->start_time = jiffies;
89 wait->wait = &priv->cmd_wait_q;
90 wait->condition = &priv->cmd_wait_q_woken;
91 wait->enabled = 1;
92 break;
93 case MWIFIEX_WSTATS_WAIT:
94 priv->w_stats_wait_q_woken = false;
95 wait->start_time = jiffies;
96 wait->wait = &priv->w_stats_wait_q;
97 wait->condition = &priv->w_stats_wait_q_woken;
98 wait->enabled = 1;
99 break;
100 }
101
102 return wait;
103}
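/*
 * A minimal round-trip sketch using the helper above together with
 * mwifiex_request_ioctl() (defined later in this file); error handling
 * trimmed, and "signal" is a caller-provided mwifiex_ds_get_signal:
 *
 *	struct mwifiex_wait_queue *wait;
 *	int status;
 *
 *	wait = mwifiex_alloc_fill_wait_queue(priv, MWIFIEX_IOCTL_WAIT);
 *	if (!wait)
 *		return -ENOMEM;
 *	status = mwifiex_get_info_signal(priv, wait, &signal);
 *	status = mwifiex_request_ioctl(priv, wait, status,
 *				       MWIFIEX_IOCTL_WAIT);
 *	kfree(wait);
 */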
104
105/*
106 * Wait queue completion handler.
107 *
108 * This function waits on a particular wait queue.
109 * For the NO_WAIT option it returns immediately. If the wait is
110 * interrupted before completion, the pending IOCTL request is cancelled.
111 */
112static void
113mwifiex_wait_ioctl_complete(struct mwifiex_private *priv,
114 struct mwifiex_wait_queue *wait,
115 u8 wait_option)
116{
117 bool cancel_flag = false;
118
119 switch (wait_option) {
120 case MWIFIEX_NO_WAIT:
121 break;
122 case MWIFIEX_IOCTL_WAIT:
123 wait_event_interruptible(priv->ioctl_wait_q,
124 priv->ioctl_wait_q_woken);
125 if (!priv->ioctl_wait_q_woken)
126 cancel_flag = true;
127 break;
128 case MWIFIEX_CMD_WAIT:
129 wait_event_interruptible(priv->cmd_wait_q,
130 priv->cmd_wait_q_woken);
131 if (!priv->cmd_wait_q_woken)
132 cancel_flag = true;
133 break;
134 case MWIFIEX_WSTATS_WAIT:
135 wait_event_interruptible(priv->w_stats_wait_q,
136 priv->w_stats_wait_q_woken);
137 if (!priv->w_stats_wait_q_woken)
138 cancel_flag = true;
139 break;
140 }
141 if (cancel_flag) {
142 mwifiex_cancel_pending_ioctl(priv->adapter, wait);
143 dev_dbg(priv->adapter->dev, "cmd: IOCTL cancel: wait=%p, wait_option=%d\n",
144 wait, wait_option);
145 }
146
147 return;
148}
149
150/*
151 * The function waits for the request to complete and issues the
152 * completion handler, if required.
153 */
154int mwifiex_request_ioctl(struct mwifiex_private *priv,
155 struct mwifiex_wait_queue *wait,
156 int status, u8 wait_option)
157{
158 switch (status) {
159 case -EINPROGRESS:
160 dev_dbg(priv->adapter->dev, "cmd: IOCTL pending: wait=%p, wait_option=%d\n",
161 wait, wait_option);
162 atomic_inc(&priv->adapter->ioctl_pending);
163 /* Status pending, wake up main process */
164 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
165
166 /* Wait for completion */
167 if (wait_option) {
168 mwifiex_wait_ioctl_complete(priv, wait, wait_option);
169 status = wait->status;
170 }
171 break;
172 case 0:
173 case -1:
174 case -EBUSY:
175 default:
176 break;
177 }
178 return status;
179}
180EXPORT_SYMBOL_GPL(mwifiex_request_ioctl);
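/*
 * Status contract sketch for the helper above: an IOCTL handler
 * returns -EINPROGRESS once its command is queued to firmware; this
 * function then blocks per wait_option and folds the completion status
 * back in, so callers chain the two calls:
 *
 *	status = mwifiex_bss_ioctl_mac_address(priv, wait,
 *					       HostCmd_ACT_GEN_GET, mac);
 *	status = mwifiex_request_ioctl(priv, wait, status, wait_option);
 */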
181
182/*
183 * IOCTL request handler to set/get MAC address.
184 *
185 * This function prepares the correct firmware command and
186 * issues it to set or get the MAC address.
187 */
188static int mwifiex_bss_ioctl_mac_address(struct mwifiex_private *priv,
189 struct mwifiex_wait_queue *wait,
190 u8 action, u8 *mac)
191{
192 int ret = 0;
193
194 if ((action == HostCmd_ACT_GEN_GET) && mac) {
195 memcpy(mac, priv->curr_addr, ETH_ALEN);
196 return 0;
197 }
198
199 /* Send request to firmware */
200 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
201 action, 0, wait, mac);
202 if (!ret)
203 ret = -EINPROGRESS;
204
205 return ret;
206}
207
208/*
209 * Sends IOCTL request to set MAC address.
210 *
211 * This function allocates the IOCTL request buffer, fills it
212 * with requisite parameters and calls the IOCTL handler.
213 */
214int mwifiex_request_set_mac_address(struct mwifiex_private *priv)
215{
216 struct mwifiex_wait_queue *wait = NULL;
217 int status = 0;
218 u8 wait_option = MWIFIEX_CMD_WAIT;
219
220 /* Allocate wait buffer */
221 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
222 if (!wait)
223 return -ENOMEM;
224
225 status = mwifiex_bss_ioctl_mac_address(priv, wait, HostCmd_ACT_GEN_SET,
226 NULL);
227
228 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
229 if (!status)
230 memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
231 else
232 dev_err(priv->adapter->dev, "set mac address failed: status=%d"
233 " error_code=%#x\n", status, wait->status);
234
235 kfree(wait);
236 return status;
237}
238
239/*
240 * IOCTL request handler to set multicast list.
241 *
242 * This function prepares the correct firmware command and
243 * issues it to set the multicast list.
244 *
245 * This function can be used to enable promiscuous mode, or enable all
246 * multicast packets, or to enable selective multicast.
247 */
248static int
249mwifiex_bss_ioctl_multicast_list(struct mwifiex_private *priv,
250 struct mwifiex_wait_queue *wait,
251 u16 action,
252 struct mwifiex_multicast_list *mcast_list)
253{
254 int ret = 0;
255 u16 old_pkt_filter;
256
257 old_pkt_filter = priv->curr_pkt_filter;
258 if (action == HostCmd_ACT_GEN_GET)
259 return -1;
260
261 if (mcast_list->mode == MWIFIEX_PROMISC_MODE) {
262 dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n");
263 priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
264 priv->curr_pkt_filter &=
265 ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
266 } else {
267 /* Multicast */
268 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
269 if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
270 dev_dbg(priv->adapter->dev,
271 "info: Enabling All Multicast!\n");
272 priv->curr_pkt_filter |=
273 HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
274 } else {
275 priv->curr_pkt_filter &=
276 ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
277 if (mcast_list->num_multicast_addr) {
278 dev_dbg(priv->adapter->dev,
279 "info: Set multicast list=%d\n",
280 mcast_list->num_multicast_addr);
281 /* Set multicast addresses to firmware */
282 if (old_pkt_filter == priv->curr_pkt_filter) {
283 /* Send request to firmware */
284 ret = mwifiex_prepare_cmd(priv,
285 HostCmd_CMD_MAC_MULTICAST_ADR,
286 action, 0, wait, mcast_list);
287 if (!ret)
288 ret = -EINPROGRESS;
289 } else {
290 /* Send request to firmware */
291 ret = mwifiex_prepare_cmd(priv,
292 HostCmd_CMD_MAC_MULTICAST_ADR,
293 action, 0, NULL,
294 mcast_list);
295 }
296 }
297 }
298 }
299 dev_dbg(priv->adapter->dev,
300 "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
301 old_pkt_filter, priv->curr_pkt_filter);
302 if (old_pkt_filter != priv->curr_pkt_filter) {
303 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_MAC_CONTROL, action,
304 0, wait, &priv->curr_pkt_filter);
305 if (!ret)
306 ret = -EINPROGRESS;
307 }
308
309 return ret;
310}
311
312/*
313 * Sends IOCTL request to set multicast list.
314 *
315 * This function allocates the IOCTL request buffer, fills it
316 * with requisite parameters and calls the IOCTL handler.
317 */
318void
319mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
320 struct net_device *dev)
321{
322 struct mwifiex_wait_queue *wait = NULL;
323 struct mwifiex_multicast_list mcast_list;
324 u8 wait_option = MWIFIEX_NO_WAIT;
325 int status = 0;
326
327 /* Allocate wait buffer */
328 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
329 if (!wait)
330 return;
331
332 if (dev->flags & IFF_PROMISC) {
333 mcast_list.mode = MWIFIEX_PROMISC_MODE;
334 } else if (dev->flags & IFF_ALLMULTI ||
335 netdev_mc_count(dev) > MWIFIEX_MAX_MULTICAST_LIST_SIZE) {
336 mcast_list.mode = MWIFIEX_ALL_MULTI_MODE;
337 } else {
338 mcast_list.mode = MWIFIEX_MULTICAST_MODE;
339 if (netdev_mc_count(dev))
340 mcast_list.num_multicast_addr =
341 mwifiex_copy_mcast_addr(&mcast_list, dev);
342 }
343 status = mwifiex_bss_ioctl_multicast_list(priv, wait,
344 HostCmd_ACT_GEN_SET,
345 &mcast_list);
346
347 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
348 if (wait && status != -EINPROGRESS)
349 kfree(wait);
350
351 return;
352}
353
354/*
355 * IOCTL request handler to disconnect from a BSS/IBSS.
356 */
357static int mwifiex_bss_ioctl_stop(struct mwifiex_private *priv,
358 struct mwifiex_wait_queue *wait, u8 *mac)
359{
360 return mwifiex_deauthenticate(priv, wait, mac);
361}
362
363/*
364 * Sends IOCTL request to disconnect from a BSS.
365 *
366 * This function allocates the IOCTL request buffer, fills it
367 * with requisite parameters and calls the IOCTL handler.
368 */
369int mwifiex_disconnect(struct mwifiex_private *priv, u8 wait_option, u8 *mac)
370{
371 struct mwifiex_wait_queue *wait = NULL;
372 int status = 0;
373
374 /* Allocate wait buffer */
375 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
376 if (!wait)
377 return -ENOMEM;
378
379 status = mwifiex_bss_ioctl_stop(priv, wait, mac);
380
381 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
382
383 kfree(wait);
384 return status;
385}
386EXPORT_SYMBOL_GPL(mwifiex_disconnect);
387
388/*
389 * IOCTL request handler to join a BSS/IBSS.
390 *
391 * In Ad-Hoc mode, the IBSS is created if not found in scan list.
392 * In both Ad-Hoc and infra mode, a deauthentication is performed
393 * first.
394 */
395static int mwifiex_bss_ioctl_start(struct mwifiex_private *priv,
396 struct mwifiex_wait_queue *wait,
397 struct mwifiex_ssid_bssid *ssid_bssid)
398{
399 int ret = 0;
400 struct mwifiex_adapter *adapter = priv->adapter;
401 s32 i = -1;
402
403 priv->scan_block = false;
404 if (!ssid_bssid)
405 return -1;
406
407 if (priv->bss_mode == MWIFIEX_BSS_MODE_INFRA) {
408 /* Infra mode */
409 ret = mwifiex_deauthenticate(priv, NULL, NULL);
410 if (ret)
411 return ret;
412
413 /* Search for the requested SSID in the scan table */
414 if (ssid_bssid->ssid.ssid_len)
415 i = mwifiex_find_ssid_in_list(priv, &ssid_bssid->ssid,
416 NULL, MWIFIEX_BSS_MODE_INFRA);
417 else
418 i = mwifiex_find_bssid_in_list(priv,
419 (u8 *) &ssid_bssid->bssid,
420 MWIFIEX_BSS_MODE_INFRA);
421 if (i < 0)
422 return -1;
423
424 dev_dbg(adapter->dev,
425 "info: SSID found in scan list ... associating...\n");
426
427 /* Clear any past association response stored for
428 * application retrieval */
429 priv->assoc_rsp_size = 0;
430 ret = mwifiex_associate(priv, wait, &adapter->scan_table[i]);
431 if (ret)
432 return ret;
433 } else {
434 /* Adhoc mode */
435 /* If the requested SSID matches current SSID, return */
436 if (ssid_bssid->ssid.ssid_len &&
437 (!mwifiex_ssid_cmp
438 (&priv->curr_bss_params.bss_descriptor.ssid,
439 &ssid_bssid->ssid)))
440 return 0;
441
442 /* Exit Adhoc mode first */
443 dev_dbg(adapter->dev, "info: Sending Adhoc Stop\n");
444 ret = mwifiex_deauthenticate(priv, NULL, NULL);
445 if (ret)
446 return ret;
447
448 priv->adhoc_is_link_sensed = false;
449
450 /* Search for the requested network in the scan table */
451 if (ssid_bssid->ssid.ssid_len)
452 i = mwifiex_find_ssid_in_list(priv,
453 &ssid_bssid->ssid, NULL,
454 MWIFIEX_BSS_MODE_IBSS);
455 else
456 i = mwifiex_find_bssid_in_list(priv,
457 (u8 *)&ssid_bssid->bssid,
458 MWIFIEX_BSS_MODE_IBSS);
459
460 if (i >= 0) {
461 dev_dbg(adapter->dev, "info: network found in scan"
462 " list. Joining...\n");
463 ret = mwifiex_adhoc_join(priv, wait,
464 &adapter->scan_table[i]);
465 if (ret)
466 return ret;
467 } else { /* i < 0 */
468 dev_dbg(adapter->dev, "info: Network not found in "
469 "the list, creating adhoc with ssid = %s\n",
470 ssid_bssid->ssid.ssid);
471 ret = mwifiex_adhoc_start(priv, wait,
472 &ssid_bssid->ssid);
473 if (ret)
474 return ret;
475 }
476 }
477
478 if (!ret)
479 ret = -EINPROGRESS;
480
481 return ret;
482}
483
484/*
485 * Sends IOCTL request to connect with a BSS.
486 *
487 * This function allocates the IOCTL request buffer, fills it
488 * with requisite parameters and calls the IOCTL handler.
489 */
490int mwifiex_bss_start(struct mwifiex_private *priv, u8 wait_option,
491 struct mwifiex_ssid_bssid *ssid_bssid)
492{
493 struct mwifiex_wait_queue *wait = NULL;
494 struct mwifiex_ssid_bssid tmp_ssid_bssid;
495 int status = 0;
496
497 /* Stop the O.S. TX queue if needed */
498 if (!netif_queue_stopped(priv->netdev))
499 netif_stop_queue(priv->netdev);
500
501 /* Allocate wait buffer */
502 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
503 if (!wait)
504 return -ENOMEM;
505
506 /* Avoid passing stack garbage down if no SSID/BSSID was given */
507 memset(&tmp_ssid_bssid, 0, sizeof(tmp_ssid_bssid));
508 if (ssid_bssid)
509 memcpy(&tmp_ssid_bssid, ssid_bssid,
510 sizeof(struct mwifiex_ssid_bssid));
509 status = mwifiex_bss_ioctl_start(priv, wait, &tmp_ssid_bssid);
510
511 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
512
513 kfree(wait);
514 return status;
515}
516
517/*
518 * IOCTL request handler to set host sleep configuration.
519 *
520 * This function prepares the correct firmware command and
521 * issues it.
522 */
523static int
524mwifiex_pm_ioctl_hs_cfg(struct mwifiex_private *priv,
525 struct mwifiex_wait_queue *wait,
526 u16 action, struct mwifiex_ds_hs_cfg *hs_cfg)
527{
528 struct mwifiex_adapter *adapter = priv->adapter;
529 int status = 0;
530 u32 prev_cond = 0;
531
532 switch (action) {
533 case HostCmd_ACT_GEN_SET:
534 if (adapter->pps_uapsd_mode) {
535 dev_dbg(adapter->dev, "info: Host Sleep IOCTL"
536 " is blocked in UAPSD/PPS mode\n");
537 status = -1;
538 break;
539 }
540 if (hs_cfg->is_invoke_hostcmd) {
541 if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL) {
542 if (!adapter->is_hs_configured)
543 /* Already cancelled */
544 break;
545 /* Save previous condition */
546 prev_cond = le32_to_cpu(adapter->hs_cfg.conditions);
548 adapter->hs_cfg.conditions =
549 cpu_to_le32(hs_cfg->conditions);
550 } else if (hs_cfg->conditions) {
551 adapter->hs_cfg.conditions =
552 cpu_to_le32(hs_cfg->conditions);
553 adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
554 if (hs_cfg->gap)
555 adapter->hs_cfg.gap = (u8)hs_cfg->gap;
556 } else if (adapter->hs_cfg.conditions ==
557 cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) {
559 /* Return failure if no parameters for HS enable */
561 status = -1;
562 break;
563 }
564 status = mwifiex_prepare_cmd(priv,
565 HostCmd_CMD_802_11_HS_CFG_ENH,
566 HostCmd_ACT_GEN_SET,
567 0, wait, &adapter->hs_cfg);
568 if (!status)
569 status = -EINPROGRESS;
570 if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL)
571 /* Restore previous condition */
572 adapter->hs_cfg.conditions =
573 cpu_to_le32(prev_cond);
574 } else {
575 adapter->hs_cfg.conditions =
576 cpu_to_le32(hs_cfg->conditions);
577 adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
578 adapter->hs_cfg.gap = (u8)hs_cfg->gap;
579 }
580 break;
581 case HostCmd_ACT_GEN_GET:
582 hs_cfg->conditions = le32_to_cpu(adapter->hs_cfg.conditions);
583 hs_cfg->gpio = adapter->hs_cfg.gpio;
584 hs_cfg->gap = adapter->hs_cfg.gap;
585 break;
586 default:
587 status = -1;
588 break;
589 }
590
591 return status;
592}
593
594/*
595 * Sends IOCTL request to set Host Sleep parameters.
596 *
597 * This function allocates the IOCTL request buffer, fills it
598 * with requisite parameters and calls the IOCTL handler.
599 */
600int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
601 u8 wait_option,
602 struct mwifiex_ds_hs_cfg *hscfg)
603{
604 int ret = 0;
605 struct mwifiex_wait_queue *wait = NULL;
606
607 if (!hscfg)
608 return -EINVAL;
609
610 /* Allocate wait buffer */
611 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
612 if (!wait)
613 return -ENOMEM;
614
615 ret = mwifiex_pm_ioctl_hs_cfg(priv, wait, action, hscfg);
616
617 ret = mwifiex_request_ioctl(priv, wait, ret, wait_option);
618
619 if (wait && (ret != -EINPROGRESS))
620 kfree(wait);
621 return ret;
622}
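/*
 * Example sketch for enabling host sleep: the conditions bitmap is
 * firmware-defined (the 0x0001 below is a placeholder, not a named
 * constant from this driver), GPIO 3 wakes the host, gap 0xff:
 *
 *	struct mwifiex_ds_hs_cfg hscfg = {
 *		.is_invoke_hostcmd = true,
 *		.conditions = 0x0001,
 *		.gpio = 3,
 *		.gap = 0xff,
 *	};
 *	ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
 *				    MWIFIEX_IOCTL_WAIT, &hscfg);
 */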
623
624/*
625 * Sends IOCTL request to cancel the existing Host Sleep configuration.
626 *
627 * This function allocates the IOCTL request buffer, fills it
628 * with requisite parameters and calls the IOCTL handler.
629 */
630int mwifiex_cancel_hs(struct mwifiex_private *priv, u8 wait_option)
631{
632 int ret = 0;
633 struct mwifiex_ds_hs_cfg hscfg;
634
635 /* Cancel Host Sleep */
636 hscfg.conditions = HOST_SLEEP_CFG_CANCEL;
637 hscfg.is_invoke_hostcmd = true;
638 ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
639 wait_option, &hscfg);
640
641 return ret;
642}
643EXPORT_SYMBOL_GPL(mwifiex_cancel_hs);
644
645/*
646 * Sends IOCTL request to enable the Host Sleep mode.
647 *
648 * This function allocates the IOCTL request buffer, fills it
649 * with requisite parameters and calls the IOCTL handler.
650 */
651int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
652{
653 struct mwifiex_ds_hs_cfg hscfg;
654
655 if (adapter->hs_activated) {
656 dev_dbg(adapter->dev, "cmd: HS already activated\n");
657 return true;
658 }
659
660 /* Enable Host Sleep */
661 adapter->hs_activate_wait_q_woken = false;
662
663 memset(&hscfg, 0, sizeof(hscfg));
664 hscfg.is_invoke_hostcmd = true;
665
666 if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
667 MWIFIEX_BSS_ROLE_STA),
668 HostCmd_ACT_GEN_SET,
669 MWIFIEX_IOCTL_WAIT, &hscfg)) {
670 dev_err(adapter->dev, "IOCTL request HS enable failed\n");
671 return false;
672 }
673
674 wait_event_interruptible(adapter->hs_activate_wait_q,
675 adapter->hs_activate_wait_q_woken);
676
677 return true;
678}
679EXPORT_SYMBOL_GPL(mwifiex_enable_hs);
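/*
 * Typical caller sketch (an interface driver's suspend path; the
 * surrounding handler is hypothetical): host sleep must be fully
 * negotiated with firmware before the bus is powered down.
 *
 *	if (!mwifiex_enable_hs(adapter)) {
 *		dev_err(adapter->dev, "cmd: failed to suspend\n");
 *		return -EFAULT;
 *	}
 */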
680
681/*
682 * IOCTL request handler to get signal information.
683 *
684 * This function prepares the correct firmware command and
685 * issues it to get the signal (RSSI) information.
686 *
687 * This only works in the connected mode.
688 */
689static int mwifiex_get_info_signal(struct mwifiex_private *priv,
690 struct mwifiex_wait_queue *wait,
691 struct mwifiex_ds_get_signal *signal)
692{
693 int ret = 0;
694
695 if (!wait) {
696 dev_err(priv->adapter->dev, "wait queue information is not present\n");
697 return -1;
698 }
699
700 /* Signal info can be obtained only if connected */
701 if (!priv->media_connected) {
702 dev_dbg(priv->adapter->dev,
703 "info: cannot get signal in disconnected state\n");
704 return -1;
705 }
706
707 /* Send request to firmware */
708 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_RSSI_INFO,
709 HostCmd_ACT_GEN_GET, 0, wait, signal);
710
711 if (!ret)
712 ret = -EINPROGRESS;
713
714 return ret;
715}
716
717/*
718 * IOCTL request handler to get statistics.
719 *
720 * This function prepares the correct firmware command and
721 * issues it to get the firmware statistics (log) information.
722 */
723static int mwifiex_get_info_stats(struct mwifiex_private *priv,
724 struct mwifiex_wait_queue *wait,
725 struct mwifiex_ds_get_stats *log)
726{
727 int ret = 0;
728
729 if (!wait) {
730 dev_err(priv->adapter->dev, "wait queue information is not present\n");
731 return -1;
732 }
733
734 /* Send request to firmware */
735 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_GET_LOG,
736 HostCmd_ACT_GEN_GET, 0, wait, log);
737
738 if (!ret)
739 ret = -EINPROGRESS;
740
741 return ret;
742}
743
744/*
745 * IOCTL request handler to get BSS information.
746 *
747 * This function collates the information from different driver structures
748 * to send to the user.
749 */
750int mwifiex_get_bss_info(struct mwifiex_private *priv,
751 struct mwifiex_bss_info *info)
752{
753 struct mwifiex_adapter *adapter = priv->adapter;
754 struct mwifiex_bssdescriptor *bss_desc;
755 s32 tbl_idx = 0;
756
757 if (!info)
758 return -1;
759
760 /* Get current BSS info */
761 bss_desc = &priv->curr_bss_params.bss_descriptor;
762
763 /* BSS mode */
764 info->bss_mode = priv->bss_mode;
765
766 /* SSID */
767 memcpy(&info->ssid, &bss_desc->ssid,
768 sizeof(struct mwifiex_802_11_ssid));
769
770 /* BSSID */
771 memcpy(&info->bssid, &bss_desc->mac_address, ETH_ALEN);
772
773 /* Channel */
774 info->bss_chan = bss_desc->channel;
775
776 /* Region code */
777 info->region_code = adapter->region_code;
778
779 /* Scan table index if connected */
780 info->scan_table_idx = 0;
781 if (priv->media_connected) {
782 tbl_idx =
783 mwifiex_find_ssid_in_list(priv, &bss_desc->ssid,
784 bss_desc->mac_address,
785 priv->bss_mode);
786 if (tbl_idx >= 0)
787 info->scan_table_idx = tbl_idx;
788 }
789
790 /* Connection status */
791 info->media_connected = priv->media_connected;
792
793 /* Radio status */
794 info->radio_on = adapter->radio_on;
795
796 /* Tx power information */
797 info->max_power_level = priv->max_tx_power_level;
798 info->min_power_level = priv->min_tx_power_level;
799
800 /* AdHoc state */
801 info->adhoc_state = priv->adhoc_state;
802
803 /* Last beacon NF */
804 info->bcn_nf_last = priv->bcn_nf_last;
805
806 /* wep status */
807 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
808 info->wep_status = true;
809 else
810 info->wep_status = false;
811
812 info->is_hs_configured = adapter->is_hs_configured;
813 info->is_deep_sleep = adapter->is_deep_sleep;
814
815 return 0;
816}
817
818/*
819 * IOCTL request handler to get extended version information.
820 *
821 * This function prepares the correct firmware command and
822 * issues it to get the extended version information.
823 */
824static int mwifiex_get_info_ver_ext(struct mwifiex_private *priv,
825 struct mwifiex_wait_queue *wait,
826 struct mwifiex_ver_ext *ver_ext)
827{
828 int ret = 0;
829
830 /* Send request to firmware */
831 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_VERSION_EXT,
832 HostCmd_ACT_GEN_GET, 0, wait, ver_ext);
833 if (!ret)
834 ret = -EINPROGRESS;
835
836 return ret;
837}
838
839/*
840 * IOCTL request handler to set/get SNMP MIB parameters.
841 *
842 * This function prepares the correct firmware command and
843 * issues it.
844 *
845 * Currently the following parameters are supported -
846 * Set/get RTS Threshold
847 * Set/get fragmentation threshold
848 * Set/get retry count
849 */
850int mwifiex_snmp_mib_ioctl(struct mwifiex_private *priv,
851 struct mwifiex_wait_queue *wait,
852 u32 cmd_oid, u16 action, u32 *value)
853{
854 int ret = 0;
855
856 if (!value)
857 return -1;
858
859 /* Send request to firmware */
860 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
861 action, cmd_oid, wait, value);
862
863 if (!ret)
864 ret = -EINPROGRESS;
865
866 return ret;
867}
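/*
 * Example sketch (illustrative; RTS_THRESH_I is assumed to be the RTS
 * threshold OID from fw.h): setting the RTS threshold to 2347 via the
 * SNMP MIB path, using the usual wait-queue round trip:
 *
 *	u32 rts_thr = 2347;
 *
 *	status = mwifiex_snmp_mib_ioctl(priv, wait, RTS_THRESH_I,
 *					HostCmd_ACT_GEN_SET, &rts_thr);
 *	status = mwifiex_request_ioctl(priv, wait, status, wait_option);
 */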
868
869/*
870 * IOCTL request handler to set/get band configurations.
871 *
872 * For SET operation, it performs extra checks to make sure the Ad-Hoc
873 * band and channel are compatible. Otherwise it returns an error.
874 *
875 * For GET operation, this function retrieves the following information -
876 * - Infra bands
877 * - Ad-hoc band
878 * - Ad-hoc channel
879 * - Secondary channel offset
880 */
881int mwifiex_radio_ioctl_band_cfg(struct mwifiex_private *priv,
882 u16 action,
883 struct mwifiex_ds_band_cfg *radio_cfg)
884{
885 struct mwifiex_adapter *adapter = priv->adapter;
886 u8 infra_band = 0;
887 u8 adhoc_band = 0;
888 u32 adhoc_channel = 0;
889
890 if (action == HostCmd_ACT_GEN_GET) {
891 /* Infra Bands */
892 radio_cfg->config_bands = adapter->config_bands;
893 /* Adhoc Band */
894 radio_cfg->adhoc_start_band = adapter->adhoc_start_band;
895 /* Adhoc channel */
896 radio_cfg->adhoc_channel = priv->adhoc_channel;
897 /* Secondary channel offset */
898 radio_cfg->sec_chan_offset = adapter->chan_offset;
899 return 0;
900 }
901
902 /* For action = SET */
903 infra_band = (u8) radio_cfg->config_bands;
904 adhoc_band = (u8) radio_cfg->adhoc_start_band;
905 adhoc_channel = radio_cfg->adhoc_channel;
906
907 /* SET Infra band */
908 if ((infra_band | adapter->fw_bands) & ~adapter->fw_bands)
909 return -1;
910
911 adapter->config_bands = infra_band;
912
913 /* SET Ad-hoc Band */
914 if ((adhoc_band | adapter->fw_bands) & ~adapter->fw_bands)
915 return -1;
916
917 if (adhoc_band)
918 adapter->adhoc_start_band = adhoc_band;
919 adapter->chan_offset = (u8) radio_cfg->sec_chan_offset;
920 /*
921 * If no adhoc_channel is supplied, verify that the existing adhoc
922 * channel complies with the new adhoc_band
923 */
924 if (!adhoc_channel) {
925 if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
926 (priv, adapter->adhoc_start_band,
927 priv->adhoc_channel)) {
928 /* Pass back the default channel */
929 radio_cfg->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
930 if ((adapter->adhoc_start_band & BAND_A)
931 || (adapter->adhoc_start_band & BAND_AN))
932 radio_cfg->adhoc_channel =
933 DEFAULT_AD_HOC_CHANNEL_A;
934 }
935 } else { /* Return error if the adhoc_band and
936 adhoc_channel combination is invalid */
937 if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
938 (priv, adapter->adhoc_start_band, (u16) adhoc_channel))
939 return -1;
940 priv->adhoc_channel = (u8) adhoc_channel;
941 }
942 if ((adhoc_band & BAND_GN) || (adhoc_band & BAND_AN))
943 adapter->adhoc_11n_enabled = true;
944 else
945 adapter->adhoc_11n_enabled = false;
946
947 return 0;
948}
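/*
 * A worked example of the band-mask check above: with
 * adapter->fw_bands == (BAND_B | BAND_G), a request for
 * infra_band == (BAND_B | BAND_G | BAND_A) gives
 * (infra_band | fw_bands) & ~fw_bands == BAND_A, which is non-zero,
 * so the unsupported BAND_A bit rejects the request.
 */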
949
950/*
951 * IOCTL request handler to set/get active channel.
952 *
953 * This function performs validity checking on channel/frequency
954 * compatibility and returns failure if not valid.
955 */
956int mwifiex_bss_ioctl_channel(struct mwifiex_private *priv, u16 action,
957 struct mwifiex_chan_freq_power *chan)
958{
959 struct mwifiex_adapter *adapter = priv->adapter;
960 struct mwifiex_chan_freq_power *cfp = NULL;
961
962 if (!chan)
963 return -1;
964
965 if (action == HostCmd_ACT_GEN_GET) {
966 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
967 priv->curr_bss_params.band,
968 (u16) priv->curr_bss_params.bss_descriptor.
969 channel);
970 if (!cfp)
971 return -1;
972 chan->channel = cfp->channel;
973 chan->freq = cfp->freq;
972
973 return 0;
974 }
975 if (!chan->channel && !chan->freq)
976 return -1;
977 if (adapter->adhoc_start_band & BAND_AN)
978 adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
979 else if (adapter->adhoc_start_band & BAND_A)
980 adapter->adhoc_start_band = BAND_G | BAND_B;
981 if (chan->channel) {
982 if (chan->channel <= MAX_CHANNEL_BAND_BG)
983 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211
984 (priv, 0, (u16) chan->channel);
985 if (!cfp) {
986 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211
987 (priv, BAND_A, (u16) chan->channel);
988 if (cfp) {
989 if (adapter->adhoc_11n_enabled)
990 adapter->adhoc_start_band = BAND_A
991 | BAND_AN;
992 else
993 adapter->adhoc_start_band = BAND_A;
994 }
995 }
996 } else {
997 if (chan->freq <= MAX_FREQUENCY_BAND_BG)
998 cfp = mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
999 priv, 0, chan->freq);
1000 if (!cfp) {
1001 cfp = mwifiex_get_cfp_by_band_and_freq_from_cfg80211
1002 (priv, BAND_A, chan->freq);
1003 if (cfp) {
1004 if (adapter->adhoc_11n_enabled)
1005 adapter->adhoc_start_band = BAND_A
1006 | BAND_AN;
1007 else
1008 adapter->adhoc_start_band = BAND_A;
1009 }
1010 }
1011 }
1012 if (!cfp || !cfp->channel) {
1013 dev_err(adapter->dev, "invalid channel/freq\n");
1014 return -1;
1015 }
1016 priv->adhoc_channel = (u8) cfp->channel;
1017 chan->channel = cfp->channel;
1018 chan->freq = cfp->freq;
1019
1020 return 0;
1021}
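/*
 * Illustrative sketch only (not part of the patch): querying the active
 * channel with the GET action. On success both the channel number and
 * the frequency are filled in from the resolved cfp entry.
 *
 *	struct mwifiex_chan_freq_power chan;
 *
 *	memset(&chan, 0, sizeof(chan));
 *	if (!mwifiex_bss_ioctl_channel(priv, HostCmd_ACT_GEN_GET, &chan))
 *		dev_dbg(priv->adapter->dev, "channel %u freq %u\n",
 *			chan.channel, chan.freq);
 */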
1022
1023/*
1024 * IOCTL request handler to set/get BSS mode.
1025 *
1026 * This function prepares the correct firmware command and
1027 * issues it to set or get the BSS mode.
1028 *
 1029 * If the mode is changed, the function automatically performs a
 1030 * deauthentication first.
1031 */
1032int mwifiex_bss_ioctl_mode(struct mwifiex_private *priv,
1033 struct mwifiex_wait_queue *wait,
1034 u16 action, int *mode)
1035{
1036 int ret = 0;
1037
1038 if (!mode)
1039 return -1;
1040
1041 if (action == HostCmd_ACT_GEN_GET) {
1042 *mode = priv->bss_mode;
1043 return 0;
1044 }
1045
1046 if ((priv->bss_mode == *mode) || (*mode == MWIFIEX_BSS_MODE_AUTO)) {
1047 dev_dbg(priv->adapter->dev,
1048 "info: Already set to required mode! No change!\n");
1049 priv->bss_mode = *mode;
1050 return 0;
1051 }
1052
1053 ret = mwifiex_deauthenticate(priv, wait, NULL);
1054
1055 priv->sec_info.authentication_mode = MWIFIEX_AUTH_MODE_OPEN;
1056 priv->bss_mode = *mode;
1057 if (priv->bss_mode != MWIFIEX_BSS_MODE_AUTO) {
1058 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
1059 HostCmd_ACT_GEN_SET, 0, wait, NULL);
1060 if (!ret)
1061 ret = -EINPROGRESS;
1062 }
1063
1064 return ret;
1065}
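/*
 * Illustrative sketch only: switching the BSS mode to IBSS, assuming a
 * wait queue has already been allocated by the caller. A return value
 * of -EINPROGRESS means the firmware command was queued and completion
 * must be awaited via mwifiex_request_ioctl().
 *
 *	int mode = MWIFIEX_BSS_MODE_IBSS;
 *
 *	status = mwifiex_bss_ioctl_mode(priv, wait, HostCmd_ACT_GEN_SET,
 *					&mode);
 */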
1066
1067/*
1068 * IOCTL request handler to set/get Ad-Hoc channel.
1069 *
1070 * This function prepares the correct firmware command and
1071 * issues it to set or get the ad-hoc channel.
1072 */
1073static int mwifiex_bss_ioctl_ibss_channel(struct mwifiex_private *priv,
1074 struct mwifiex_wait_queue *wait,
1075 u16 action, u16 *channel)
1076{
1077 int ret = 0;
1078
1079 if (action == HostCmd_ACT_GEN_GET) {
1080 if (!priv->media_connected) {
1081 *channel = priv->adhoc_channel;
1082 return ret;
1083 }
1084 } else {
1085 priv->adhoc_channel = (u8) *channel;
1086 }
1087
1088 /* Send request to firmware */
1089 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_RF_CHANNEL,
1090 action, 0, wait, channel);
1091 if (!ret)
1092 ret = -EINPROGRESS;
1093
1094 return ret;
1095}
1096
1097/*
1098 * IOCTL request handler to find a particular BSS.
1099 *
 1100 * The BSS can be searched with either a BSSID or an SSID. If neither
 1101 * is provided, just the best BSS (highest RSSI) is returned.
1102 */
1103int mwifiex_bss_ioctl_find_bss(struct mwifiex_private *priv,
1104 struct mwifiex_wait_queue *wait,
1105 struct mwifiex_ssid_bssid *ssid_bssid)
1106{
1107 struct mwifiex_adapter *adapter = priv->adapter;
1108 int ret = 0;
1109 struct mwifiex_bssdescriptor *bss_desc;
1110 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
1111 u8 mac[ETH_ALEN];
1112 int i = 0;
1113
1114 if (memcmp(ssid_bssid->bssid, zero_mac, sizeof(zero_mac))) {
1115 i = mwifiex_find_bssid_in_list(priv,
1116 (u8 *) ssid_bssid->bssid,
1117 priv->bss_mode);
1118 if (i < 0) {
1119 memcpy(mac, ssid_bssid->bssid, sizeof(mac));
1120 dev_err(adapter->dev, "cannot find bssid %pM\n", mac);
1121 return -1;
1122 }
1123 bss_desc = &adapter->scan_table[i];
1124 memcpy(&ssid_bssid->ssid, &bss_desc->ssid,
1125 sizeof(struct mwifiex_802_11_ssid));
1126 } else if (ssid_bssid->ssid.ssid_len) {
1127 i = mwifiex_find_ssid_in_list(priv, &ssid_bssid->ssid, NULL,
1128 priv->bss_mode);
1129 if (i < 0) {
1130 dev_err(adapter->dev, "cannot find ssid %s\n",
1131 ssid_bssid->ssid.ssid);
1132 return -1;
1133 }
1134 bss_desc = &adapter->scan_table[i];
1135 memcpy(ssid_bssid->bssid, bss_desc->mac_address, ETH_ALEN);
1136 } else {
1137 ret = mwifiex_find_best_network(priv, ssid_bssid);
1138 }
1139
1140 return ret;
1141}
1142
1143/*
1144 * IOCTL request handler to change Ad-Hoc channel.
1145 *
1146 * This function allocates the IOCTL request buffer, fills it
1147 * with requisite parameters and calls the IOCTL handler.
1148 *
 1149 * The function performs the change in the following steps -
1150 * - Get current IBSS information
1151 * - Get current channel
1152 * - If no change is required, return
1153 * - If not connected, change channel and return
1154 * - If connected,
1155 * - Disconnect
1156 * - Change channel
1157 * - Perform specific SSID scan with same SSID
1158 * - Start/Join the IBSS
1159 */
1160int
1161mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
1162{
1163 int ret = 0;
1164 int status = 0;
1165 struct mwifiex_bss_info bss_info;
1166 struct mwifiex_wait_queue *wait = NULL;
1167 u8 wait_option = MWIFIEX_IOCTL_WAIT;
1168 struct mwifiex_ssid_bssid ssid_bssid;
1169 u16 curr_chan = 0;
1170
1171 memset(&bss_info, 0, sizeof(bss_info));
1172
1173 /* Get BSS information */
1174 if (mwifiex_get_bss_info(priv, &bss_info))
1175 return -1;
1176
1177 /* Allocate wait buffer */
1178 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
1179 if (!wait)
1180 return -ENOMEM;
1181
1182 /* Get current channel */
1183 status = mwifiex_bss_ioctl_ibss_channel(priv, wait, HostCmd_ACT_GEN_GET,
1184 &curr_chan);
1185
1186 if (mwifiex_request_ioctl(priv, wait, status, wait_option)) {
1187 ret = -1;
1188 goto done;
1189 }
1190 if (curr_chan == channel) {
1191 ret = 0;
1192 goto done;
1193 }
1194 dev_dbg(priv->adapter->dev, "cmd: updating channel from %d to %d\n",
1195 curr_chan, channel);
1196
1197 if (!bss_info.media_connected) {
1198 ret = 0;
1199 goto done;
1200 }
1201
 1202 	/* Disconnect */
 1203 	memset(&ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
1204 status = mwifiex_bss_ioctl_stop(priv, wait, ssid_bssid.bssid);
1205
1206 if (mwifiex_request_ioctl(priv, wait, status, wait_option)) {
1207 ret = -1;
1208 goto done;
1209 }
1210
1211 status = mwifiex_bss_ioctl_ibss_channel(priv, wait, HostCmd_ACT_GEN_SET,
1212 (u16 *) &channel);
1213
1214 if (mwifiex_request_ioctl(priv, wait, status, wait_option)) {
1215 ret = -1;
1216 goto done;
1217 }
1218
1219 /* Do specific SSID scanning */
1220 if (mwifiex_request_scan(priv, wait_option, &bss_info.ssid)) {
1221 ret = -1;
1222 goto done;
1223 }
1224 /* Start/Join Adhoc network */
1225 memset(&ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
1226 memcpy(&ssid_bssid.ssid, &bss_info.ssid,
1227 sizeof(struct mwifiex_802_11_ssid));
1228
1229 status = mwifiex_bss_ioctl_start(priv, wait, &ssid_bssid);
1230
1231 if (mwifiex_request_ioctl(priv, wait, status, wait_option))
1232 ret = -1;
1233
1234done:
1235 kfree(wait);
1236 return ret;
1237}
1238
1239/*
1240 * IOCTL request handler to get current driver mode.
1241 *
1242 * This function allocates the IOCTL request buffer, fills it
1243 * with requisite parameters and calls the IOCTL handler.
1244 */
1245int
1246mwifiex_drv_get_mode(struct mwifiex_private *priv, u8 wait_option)
1247{
1248 struct mwifiex_wait_queue *wait = NULL;
1249 int status = 0;
1250 int mode = -1;
1251
1252 /* Allocate wait buffer */
1253 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
1254 if (!wait)
1255 return -1;
1256
1257 status = mwifiex_bss_ioctl_mode(priv, wait, HostCmd_ACT_GEN_GET, &mode);
1258
1259 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
1260
1261 if (wait && (status != -EINPROGRESS))
1262 kfree(wait);
1263 return mode;
1264}
1265
1266/*
1267 * IOCTL request handler to get rate.
1268 *
1269 * This function prepares the correct firmware command and
1270 * issues it to get the current rate if it is connected,
1271 * otherwise, the function returns the lowest supported rate
1272 * for the band.
1273 */
1274static int mwifiex_rate_ioctl_get_rate_value(struct mwifiex_private *priv,
1275 struct mwifiex_wait_queue *wait,
1276 struct mwifiex_rate_cfg *rate_cfg)
1277{
1278 struct mwifiex_adapter *adapter = priv->adapter;
1279 int ret = 0;
1280
1281 rate_cfg->is_rate_auto = priv->is_data_rate_auto;
1282 if (!priv->media_connected) {
1283 switch (adapter->config_bands) {
1284 case BAND_B:
1285 /* Return the lowest supported rate for B band */
1286 rate_cfg->rate = supported_rates_b[0] & 0x7f;
1287 break;
1288 case BAND_G:
1289 case BAND_G | BAND_GN:
1290 /* Return the lowest supported rate for G band */
1291 rate_cfg->rate = supported_rates_g[0] & 0x7f;
1292 break;
1293 case BAND_B | BAND_G:
1294 case BAND_A | BAND_B | BAND_G:
1295 case BAND_A | BAND_B:
1296 case BAND_A | BAND_B | BAND_G | BAND_AN | BAND_GN:
1297 case BAND_B | BAND_G | BAND_GN:
1298 /* Return the lowest supported rate for BG band */
1299 rate_cfg->rate = supported_rates_bg[0] & 0x7f;
1300 break;
1301 case BAND_A:
1302 case BAND_A | BAND_G:
1303 case BAND_A | BAND_G | BAND_AN | BAND_GN:
1304 case BAND_A | BAND_AN:
1305 /* Return the lowest supported rate for A band */
1306 rate_cfg->rate = supported_rates_a[0] & 0x7f;
1307 break;
1308 case BAND_GN:
1309 /* Return the lowest supported rate for N band */
1310 rate_cfg->rate = supported_rates_n[0] & 0x7f;
1311 break;
1312 default:
1313 dev_warn(adapter->dev, "invalid band %#x\n",
1314 adapter->config_bands);
1315 break;
1316 }
1317 } else {
1318 /* Send request to firmware */
1319 ret = mwifiex_prepare_cmd(priv,
1320 HostCmd_CMD_802_11_TX_RATE_QUERY,
1321 HostCmd_ACT_GEN_GET, 0, wait, NULL);
1322 if (!ret)
1323 ret = -EINPROGRESS;
1324 }
1325
1326 return ret;
1327}
1328
1329/*
1330 * IOCTL request handler to set rate.
1331 *
1332 * This function prepares the correct firmware command and
1333 * issues it to set the current rate.
1334 *
1335 * The function also performs validation checking on the supplied value.
1336 */
1337static int mwifiex_rate_ioctl_set_rate_value(struct mwifiex_private *priv,
1338 struct mwifiex_wait_queue *wait,
1339 struct mwifiex_rate_cfg *rate_cfg)
1340{
1341 u8 rates[MWIFIEX_SUPPORTED_RATES];
1342 u8 *rate = NULL;
1343 int rate_index = 0;
1344 u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
1345 u32 i = 0;
1346 int ret = 0;
1347 struct mwifiex_adapter *adapter = priv->adapter;
1348
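	/*
	 * Layout of bitmap_rates as used below: word 0 carries the four
	 * HR/DSSS rates, word 1 the eight OFDM rates, and the remaining
	 * words the HT MCS bitmaps (the last word masked to 0x3FFF).
	 */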
1349 if (rate_cfg->is_rate_auto) {
1350 memset(bitmap_rates, 0, sizeof(bitmap_rates));
1351 /* Support all HR/DSSS rates */
1352 bitmap_rates[0] = 0x000F;
1353 /* Support all OFDM rates */
1354 bitmap_rates[1] = 0x00FF;
1355 /* Support all HT-MCSs rate */
1356 for (i = 0; i < ARRAY_SIZE(priv->bitmap_rates) - 3; i++)
1357 bitmap_rates[i + 2] = 0xFFFF;
1358 bitmap_rates[9] = 0x3FFF;
1359 } else {
1360 memset(rates, 0, sizeof(rates));
1361 mwifiex_get_active_data_rates(priv, rates);
1362 rate = rates;
 1363 		for (i = 0; (i < MWIFIEX_SUPPORTED_RATES) && rate[i]; i++) {
1364 dev_dbg(adapter->dev, "info: rate=%#x wanted=%#x\n",
1365 rate[i], rate_cfg->rate);
1366 if ((rate[i] & 0x7f) == (rate_cfg->rate & 0x7f))
1367 break;
1368 }
 1369 		if ((i == MWIFIEX_SUPPORTED_RATES) || !rate[i]) {
1370 dev_err(adapter->dev, "fixed data rate %#x is out "
1371 "of range\n", rate_cfg->rate);
1372 return -1;
1373 }
1374 memset(bitmap_rates, 0, sizeof(bitmap_rates));
1375
1376 rate_index =
1377 mwifiex_data_rate_to_index(adapter, rate_cfg->rate);
1378
1379 /* Only allow b/g rates to be set */
1380 if (rate_index >= MWIFIEX_RATE_INDEX_HRDSSS0 &&
1381 rate_index <= MWIFIEX_RATE_INDEX_HRDSSS3) {
1382 bitmap_rates[0] = 1 << rate_index;
1383 } else {
1384 rate_index -= 1; /* There is a 0x00 in the table */
1385 if (rate_index >= MWIFIEX_RATE_INDEX_OFDM0 &&
1386 rate_index <= MWIFIEX_RATE_INDEX_OFDM7)
1387 bitmap_rates[1] = 1 << (rate_index -
1388 MWIFIEX_RATE_INDEX_OFDM0);
1389 }
1390 }
1391
1392 /* Send request to firmware */
1393 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
1394 HostCmd_ACT_GEN_SET, 0, wait, bitmap_rates);
1395 if (!ret)
1396 ret = -EINPROGRESS;
1397
1398 return ret;
1399}
1400
1401/*
1402 * IOCTL request handler to set/get rate.
1403 *
1404 * This function can be used to set/get either the rate value or the
1405 * rate index.
1406 */
1407static int mwifiex_rate_ioctl_cfg(struct mwifiex_private *priv,
1408 struct mwifiex_wait_queue *wait,
1409 struct mwifiex_rate_cfg *rate_cfg)
1410{
1411 int status = 0;
1412
1413 if (!rate_cfg)
1414 return -1;
1415
1416 if (rate_cfg->action == HostCmd_ACT_GEN_GET)
1417 status = mwifiex_rate_ioctl_get_rate_value(
1418 priv, wait, rate_cfg);
1419 else
1420 status = mwifiex_rate_ioctl_set_rate_value(
1421 priv, wait, rate_cfg);
1422
1423 return status;
1424}
1425
1426/*
1427 * Sends IOCTL request to get the data rate.
1428 *
1429 * This function allocates the IOCTL request buffer, fills it
1430 * with requisite parameters and calls the IOCTL handler.
1431 */
1432int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
1433 struct mwifiex_rate_cfg *rate)
1434{
1435 int ret = 0;
1436 struct mwifiex_wait_queue *wait = NULL;
1437 u8 wait_option = MWIFIEX_IOCTL_WAIT;
1438
1439 /* Allocate wait buffer */
1440 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
1441 if (!wait)
1442 return -ENOMEM;
1443
1444 memset(rate, 0, sizeof(struct mwifiex_rate_cfg));
1445 rate->action = HostCmd_ACT_GEN_GET;
1446 ret = mwifiex_rate_ioctl_cfg(priv, wait, rate);
1447
1448 ret = mwifiex_request_ioctl(priv, wait, ret, wait_option);
1449 if (!ret) {
1450 if (rate && rate->is_rate_auto)
1451 rate->rate = mwifiex_index_to_data_rate(priv->adapter,
1452 priv->tx_rate, priv->tx_htinfo);
1453 else if (rate)
1454 rate->rate = priv->data_rate;
1455 } else {
1456 ret = -1;
1457 }
1458
1459 kfree(wait);
1460 return ret;
1461}
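/*
 * Illustrative sketch only: a minimal caller. On success, rate.rate
 * holds either the auto-selected rate (converted from the Tx rate
 * index) or the configured fixed data rate.
 *
 *	struct mwifiex_rate_cfg rate;
 *
 *	if (!mwifiex_drv_get_data_rate(priv, &rate))
 *		dev_dbg(priv->adapter->dev, "rate %u auto %u\n",
 *			rate.rate, rate.is_rate_auto);
 */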
1462
1463/*
1464 * IOCTL request handler to set tx power configuration.
1465 *
1466 * This function prepares the correct firmware command and
1467 * issues it.
1468 *
1469 * For non-auto power mode, all the following power groups are set -
1470 * - Modulation class HR/DSSS
1471 * - Modulation class OFDM
1472 * - Modulation class HTBW20
1473 * - Modulation class HTBW40
1474 */
1475static int mwifiex_power_ioctl_set_power(struct mwifiex_private *priv,
1476 struct mwifiex_wait_queue *wait,
1477 struct mwifiex_power_cfg *power_cfg)
1478{
1479 int ret = 0;
1480 struct host_cmd_ds_txpwr_cfg *txp_cfg = NULL;
1481 struct mwifiex_types_power_group *pg_tlv = NULL;
1482 struct mwifiex_power_group *pg = NULL;
1483 u8 *buf = NULL;
1484 u16 dbm = 0;
1485
1486 if (!power_cfg->is_power_auto) {
1487 dbm = (u16) power_cfg->power_level;
1488 if ((dbm < priv->min_tx_power_level) ||
1489 (dbm > priv->max_tx_power_level)) {
1490 dev_err(priv->adapter->dev, "txpower value %d dBm"
1491 " is out of range (%d dBm-%d dBm)\n",
1492 dbm, priv->min_tx_power_level,
1493 priv->max_tx_power_level);
1494 return -1;
1495 }
1496 }
1497 buf = kzalloc(MWIFIEX_SIZE_OF_CMD_BUFFER, GFP_KERNEL);
1498 if (!buf) {
1499 dev_err(priv->adapter->dev, "%s: failed to alloc cmd buffer\n",
1500 __func__);
1501 return -1;
1502 }
1503
1504 txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
1505 txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
1506 if (!power_cfg->is_power_auto) {
1507 txp_cfg->mode = cpu_to_le32(1);
1508 pg_tlv = (struct mwifiex_types_power_group *) (buf +
1509 sizeof(struct host_cmd_ds_txpwr_cfg));
1510 pg_tlv->type = TLV_TYPE_POWER_GROUP;
1511 pg_tlv->length = 4 * sizeof(struct mwifiex_power_group);
1512 pg = (struct mwifiex_power_group *) (buf +
1513 sizeof(struct host_cmd_ds_txpwr_cfg) +
1514 sizeof(struct mwifiex_types_power_group));
1515 /* Power group for modulation class HR/DSSS */
1516 pg->first_rate_code = 0x00;
1517 pg->last_rate_code = 0x03;
1518 pg->modulation_class = MOD_CLASS_HR_DSSS;
1519 pg->power_step = 0;
1520 pg->power_min = (s8) dbm;
1521 pg->power_max = (s8) dbm;
1522 pg++;
1523 /* Power group for modulation class OFDM */
1524 pg->first_rate_code = 0x00;
1525 pg->last_rate_code = 0x07;
1526 pg->modulation_class = MOD_CLASS_OFDM;
1527 pg->power_step = 0;
1528 pg->power_min = (s8) dbm;
1529 pg->power_max = (s8) dbm;
1530 pg++;
1531 /* Power group for modulation class HTBW20 */
1532 pg->first_rate_code = 0x00;
1533 pg->last_rate_code = 0x20;
1534 pg->modulation_class = MOD_CLASS_HT;
1535 pg->power_step = 0;
1536 pg->power_min = (s8) dbm;
1537 pg->power_max = (s8) dbm;
1538 pg->ht_bandwidth = HT_BW_20;
1539 pg++;
1540 /* Power group for modulation class HTBW40 */
1541 pg->first_rate_code = 0x00;
1542 pg->last_rate_code = 0x20;
1543 pg->modulation_class = MOD_CLASS_HT;
1544 pg->power_step = 0;
1545 pg->power_min = (s8) dbm;
1546 pg->power_max = (s8) dbm;
1547 pg->ht_bandwidth = HT_BW_40;
1548 }
1549 /* Send request to firmware */
1550 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_TXPWR_CFG,
1551 HostCmd_ACT_GEN_SET, 0, wait, buf);
1552 if (!ret)
1553 ret = -EINPROGRESS;
1554 kfree(buf);
1555
1556 return ret;
1557}
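/*
 * Command buffer layout built above for the fixed-power case (sketch):
 *
 *	host_cmd_ds_txpwr_cfg | mwifiex_types_power_group | 4 x mwifiex_power_group
 *
 * where all four power groups (HR/DSSS, OFDM, HTBW20, HTBW40) are
 * pinned to the same min/max dBm value.
 */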
1558
1559/*
1560 * IOCTL request handler to get power save mode.
1561 *
1562 * This function prepares the correct firmware command and
1563 * issues it.
1564 */
1565static int mwifiex_pm_ioctl_ps_mode(struct mwifiex_private *priv,
1566 struct mwifiex_wait_queue *wait,
1567 u32 *ps_mode, u16 action)
1568{
1569 int ret = 0;
1570 struct mwifiex_adapter *adapter = priv->adapter;
1571 u16 sub_cmd;
1572
1573 if (action == HostCmd_ACT_GEN_SET) {
1574 if (*ps_mode)
1575 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
1576 else
1577 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
1578 sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS;
1579 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
1580 sub_cmd, BITMAP_STA_PS, wait, NULL);
1581 if ((!ret) && (sub_cmd == DIS_AUTO_PS))
1582 ret = mwifiex_prepare_cmd(priv,
1583 HostCmd_CMD_802_11_PS_MODE_ENH, GET_PS,
1584 0, NULL, NULL);
1585 } else {
1586 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
1587 GET_PS, 0, wait, NULL);
1588 }
1589
1590 if (!ret)
1591 ret = -EINPROGRESS;
1592
1593 return ret;
1594}
1595
1596/*
1597 * IOCTL request handler to set/reset WPA IE.
1598 *
 1599 * The supplied WPA IE is treated as an opaque buffer. Only the first field
 1600 * is checked to determine the WPA version. If the buffer length is zero,
 1601 * the existing WPA IE is reset.
1602 */
1603static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
1604 u8 *ie_data_ptr, u16 ie_len)
1605{
1606 if (ie_len) {
1607 if (ie_len > sizeof(priv->wpa_ie)) {
1608 dev_err(priv->adapter->dev,
1609 "failed to copy WPA IE, too big\n");
1610 return -1;
1611 }
1612 memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
1613 priv->wpa_ie_len = (u8) ie_len;
1614 dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
1615 priv->wpa_ie_len, priv->wpa_ie[0]);
1616
1617 if (priv->wpa_ie[0] == WLAN_EID_WPA) {
1618 priv->sec_info.wpa_enabled = true;
1619 } else if (priv->wpa_ie[0] == WLAN_EID_RSN) {
1620 priv->sec_info.wpa2_enabled = true;
1621 } else {
1622 priv->sec_info.wpa_enabled = false;
1623 priv->sec_info.wpa2_enabled = false;
1624 }
1625 } else {
1626 memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie));
1627 priv->wpa_ie_len = 0;
1628 dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n",
1629 priv->wpa_ie_len, priv->wpa_ie[0]);
1630 priv->sec_info.wpa_enabled = false;
1631 priv->sec_info.wpa2_enabled = false;
1632 }
1633
1634 return 0;
1635}
1636
1637/*
1638 * IOCTL request handler to set/reset WAPI IE.
1639 *
 1640 * The supplied WAPI IE is treated as an opaque buffer. Only the first field
 1641 * is checked to internally enable WAPI. If the buffer length is zero,
 1642 * the existing WAPI IE is reset.
1643 */
1644static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
1645 u8 *ie_data_ptr, u16 ie_len)
1646{
1647 if (ie_len) {
1648 if (ie_len > sizeof(priv->wapi_ie)) {
1649 dev_dbg(priv->adapter->dev,
1650 "info: failed to copy WAPI IE, too big\n");
1651 return -1;
1652 }
1653 memcpy(priv->wapi_ie, ie_data_ptr, ie_len);
1654 priv->wapi_ie_len = ie_len;
1655 dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n",
1656 priv->wapi_ie_len, priv->wapi_ie[0]);
1657
1658 if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY)
1659 priv->sec_info.wapi_enabled = true;
1660 } else {
1661 memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie));
1662 priv->wapi_ie_len = ie_len;
1663 dev_dbg(priv->adapter->dev,
1664 "info: Reset wapi_ie_len=%d IE=%#x\n",
1665 priv->wapi_ie_len, priv->wapi_ie[0]);
1666 priv->sec_info.wapi_enabled = false;
1667 }
1668 return 0;
1669}
1670
1671/*
1672 * IOCTL request handler to set WAPI key.
1673 *
1674 * This function prepares the correct firmware command and
1675 * issues it.
1676 */
1677static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_adapter *adapter,
1678 struct mwifiex_wait_queue *wait,
1679 struct mwifiex_ds_encrypt_key *encrypt_key)
1680{
1681 int ret = 0;
1682 struct mwifiex_private *priv = adapter->priv[wait->bss_index];
1683
1684 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
1685 HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
1686 wait, encrypt_key);
1687 if (!ret)
1688 ret = -EINPROGRESS;
1689
1690 return ret;
1691}
1692
1693/*
1694 * IOCTL request handler to set/get authentication mode.
1695 */
1696static int mwifiex_set_auth_mode(struct mwifiex_private *priv, u32 auth_mode)
1697{
1698 int ret = 0;
1699
1700 priv->sec_info.authentication_mode = auth_mode;
1701 if (priv->sec_info.authentication_mode == MWIFIEX_AUTH_MODE_NETWORKEAP)
1702 ret = mwifiex_set_wpa_ie_helper(priv, NULL, 0);
1703
1704 return ret;
1705}
1706
1707/*
1708 * IOCTL request handler to set WEP network key.
1709 *
1710 * This function prepares the correct firmware command and
1711 * issues it, after validation checks.
1712 */
1713static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_adapter *adapter,
1714 struct mwifiex_wait_queue *wait,
1715 struct mwifiex_ds_encrypt_key *encrypt_key)
1716{
1717 int ret = 0;
1718 struct mwifiex_private *priv = adapter->priv[wait->bss_index];
1719 struct mwifiex_wep_key *wep_key = NULL;
1720 int index;
1721
1722 if (priv->wep_key_curr_index >= NUM_WEP_KEYS)
1723 priv->wep_key_curr_index = 0;
1724 wep_key = &priv->wep_key[priv->wep_key_curr_index];
1725 index = encrypt_key->key_index;
1726 if (encrypt_key->key_disable) {
1727 priv->sec_info.wep_status = MWIFIEX_802_11_WEP_DISABLED;
1728 } else if (!encrypt_key->key_len) {
1729 /* Copy the required key as the current key */
1730 wep_key = &priv->wep_key[index];
1731 if (!wep_key->key_length) {
1732 dev_err(adapter->dev,
1733 "key not set, so cannot enable it\n");
1734 return -1;
1735 }
1736 priv->wep_key_curr_index = (u16) index;
1737 priv->sec_info.wep_status = MWIFIEX_802_11_WEP_ENABLED;
1738 } else {
1739 wep_key = &priv->wep_key[index];
1740 /* Cleanup */
1741 memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
1742 /* Copy the key in the driver */
1743 memcpy(wep_key->key_material,
1744 encrypt_key->key_material,
1745 encrypt_key->key_len);
1746 wep_key->key_index = index;
1747 wep_key->key_length = encrypt_key->key_len;
1748 priv->sec_info.wep_status = MWIFIEX_802_11_WEP_ENABLED;
1749 }
1750 if (wep_key->key_length) {
1751 /* Send request to firmware */
1752 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
1753 HostCmd_ACT_GEN_SET, 0, NULL, NULL);
1754 if (ret)
1755 return ret;
1756 }
1757 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
1758 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
1759 else
1760 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
1761
1762 /* Send request to firmware */
1763 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_MAC_CONTROL,
1764 HostCmd_ACT_GEN_SET, 0, wait,
1765 &priv->curr_pkt_filter);
1766 if (!ret)
1767 ret = -EINPROGRESS;
1768
1769 return ret;
1770}
1771
1772/*
1773 * IOCTL request handler to set WPA key.
1774 *
1775 * This function prepares the correct firmware command and
1776 * issues it, after validation checks.
1777 *
 1778 * The current driver only supports key lengths of up to 32 bytes.
1779 *
1780 * This function can also be used to disable a currently set key.
1781 */
1782static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_adapter *adapter,
1783 struct mwifiex_wait_queue *wait,
1784 struct mwifiex_ds_encrypt_key *encrypt_key)
1785{
1786 int ret = 0;
1787 struct mwifiex_private *priv = adapter->priv[wait->bss_index];
1788 u8 remove_key = false;
1789 struct host_cmd_ds_802_11_key_material *ibss_key;
1790
 1791 	/* The current driver only supports key lengths of up to 32 bytes */
1792 if (encrypt_key->key_len > MWIFIEX_MAX_KEY_LENGTH) {
1793 dev_err(adapter->dev, "key length too long\n");
1794 return -1;
1795 }
1796
1797 if (priv->bss_mode == MWIFIEX_BSS_MODE_IBSS) {
1798 /*
1799 * IBSS/WPA-None uses only one key (Group) for both receiving
1800 * and sending unicast and multicast packets.
1801 */
1802 /* Send the key as PTK to firmware */
1803 encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
1804 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
1805 HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
1806 NULL, encrypt_key);
1807 if (ret)
1808 return ret;
1809
1810 ibss_key = &priv->aes_key;
1811 memset(ibss_key, 0,
1812 sizeof(struct host_cmd_ds_802_11_key_material));
1813 /* Copy the key in the driver */
1814 memcpy(ibss_key->key_param_set.key, encrypt_key->key_material,
1815 encrypt_key->key_len);
1816 memcpy(&ibss_key->key_param_set.key_len, &encrypt_key->key_len,
1817 sizeof(ibss_key->key_param_set.key_len));
1818 ibss_key->key_param_set.key_type_id
1819 = cpu_to_le16(KEY_TYPE_ID_TKIP);
1820 ibss_key->key_param_set.key_info
1821 = cpu_to_le16(KEY_INFO_TKIP_ENABLED);
1822
1823 /* Send the key as GTK to firmware */
1824 encrypt_key->key_index = ~MWIFIEX_KEY_INDEX_UNICAST;
1825 }
1826
1827 if (!encrypt_key->key_index)
1828 encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
1829
1830 if (remove_key)
1831 /* Send request to firmware */
1832 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
1833 HostCmd_ACT_GEN_SET,
1834 !(KEY_INFO_ENABLED),
1835 wait, encrypt_key);
1836 else
1837 /* Send request to firmware */
1838 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
1839 HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
1840 wait, encrypt_key);
1841
1842 if (!ret)
1843 ret = -EINPROGRESS;
1844
1845 return ret;
1846}
1847
1848/*
1849 * IOCTL request handler to set/get network keys.
1850 *
1851 * This is a generic key handling function which supports WEP, WPA
1852 * and WAPI.
1853 */
1854static int
1855mwifiex_sec_ioctl_encrypt_key(struct mwifiex_private *priv,
1856 struct mwifiex_wait_queue *wait,
1857 struct mwifiex_ds_encrypt_key *encrypt_key)
1858{
1859 int status = 0;
1860 struct mwifiex_adapter *adapter = priv->adapter;
1861
1862 if (encrypt_key->is_wapi_key)
1863 status = mwifiex_sec_ioctl_set_wapi_key(adapter, wait,
1864 encrypt_key);
1865 else if (encrypt_key->key_len > WLAN_KEY_LEN_WEP104)
1866 status = mwifiex_sec_ioctl_set_wpa_key(adapter, wait,
1867 encrypt_key);
1868 else
1869 status = mwifiex_sec_ioctl_set_wep_key(adapter, wait,
1870 encrypt_key);
1871 return status;
1872}
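/*
 * Dispatch rule used above: WAPI keys are flagged explicitly, anything
 * longer than a WEP104 key (WLAN_KEY_LEN_WEP104, 13 bytes) is treated
 * as a WPA/WPA2 key, and everything else falls through to the WEP
 * handler.
 */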
1873
1874/*
1875 * This function returns the driver version.
1876 */
1877int
1878mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
1879 int max_len)
1880{
1881 union {
1882 u32 l;
1883 u8 c[4];
1884 } ver;
1885 char fw_ver[32];
1886
1887 ver.l = adapter->fw_release_number;
1888 sprintf(fw_ver, "%u.%u.%u.p%u", ver.c[2], ver.c[1], ver.c[0], ver.c[3]);
1889
1890 snprintf(version, max_len, driver_version, fw_ver);
1891
1892 dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version);
1893
1894 return 0;
1895}
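/*
 * The union above decodes adapter->fw_release_number byte-wise; the
 * "%u.%u.%u.p%u" ordering (c[2].c[1].c[0].pc[3]) assumes the firmware
 * packs the release number little-endian with the patch level in the
 * most significant byte.
 */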
1896
1897/*
1898 * Sends IOCTL request to set Tx power. It can be set to either auto
1899 * or a fixed value.
1900 *
1901 * This function allocates the IOCTL request buffer, fills it
1902 * with requisite parameters and calls the IOCTL handler.
1903 */
1904int
1905mwifiex_set_tx_power(struct mwifiex_private *priv, int type, int dbm)
1906{
1907 struct mwifiex_power_cfg power_cfg;
1908 struct mwifiex_wait_queue *wait = NULL;
1909 int status = 0;
1910 int ret = 0;
1911
1912 wait = mwifiex_alloc_fill_wait_queue(priv, MWIFIEX_IOCTL_WAIT);
1913 if (!wait)
1914 return -ENOMEM;
1915
1916 if (type == NL80211_TX_POWER_FIXED) {
1917 power_cfg.is_power_auto = 0;
1918 power_cfg.power_level = dbm;
1919 } else {
1920 power_cfg.is_power_auto = 1;
1921 }
1922 status = mwifiex_power_ioctl_set_power(priv, wait, &power_cfg);
1923
1924 ret = mwifiex_request_ioctl(priv, wait, status, MWIFIEX_IOCTL_WAIT);
1925
1926 kfree(wait);
1927 return ret;
1928}
1929
1930/*
1931 * Sends IOCTL request to get scan table.
1932 *
1933 * This function allocates the IOCTL request buffer, fills it
1934 * with requisite parameters and calls the IOCTL handler.
1935 */
1936int mwifiex_get_scan_table(struct mwifiex_private *priv, u8 wait_option,
1937 struct mwifiex_scan_resp *scan_resp)
1938{
1939 struct mwifiex_wait_queue *wait = NULL;
1940 struct mwifiex_scan_resp scan;
1941 int status = 0;
1942
1943 /* Allocate wait buffer */
1944 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
1945 if (!wait)
1946 return -ENOMEM;
1947
1948 status = mwifiex_scan_networks(priv, wait, HostCmd_ACT_GEN_GET,
1949 NULL, &scan);
1950
1951 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
1952 if (!status) {
1953 if (scan_resp)
1954 memcpy(scan_resp, &scan,
1955 sizeof(struct mwifiex_scan_resp));
1956 }
1957
1958 if (wait && (status != -EINPROGRESS))
1959 kfree(wait);
1960 return status;
1961}
1962
1963/*
1964 * Sends IOCTL request to get signal information.
1965 *
1966 * This function allocates the IOCTL request buffer, fills it
1967 * with requisite parameters and calls the IOCTL handler.
1968 */
1969int mwifiex_get_signal_info(struct mwifiex_private *priv, u8 wait_option,
1970 struct mwifiex_ds_get_signal *signal)
1971{
1972 struct mwifiex_ds_get_signal info;
1973 struct mwifiex_wait_queue *wait = NULL;
1974 int status = 0;
1975
1976 /* Allocate wait buffer */
1977 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
1978 if (!wait)
1979 return -ENOMEM;
1980
1981 info.selector = ALL_RSSI_INFO_MASK;
1982
1983 status = mwifiex_get_info_signal(priv, wait, &info);
1984
1985 status = mwifiex_request_ioctl(priv, wait, status, wait_option);
1986 if (!status) {
1987 if (signal)
1988 memcpy(signal, &info,
1989 sizeof(struct mwifiex_ds_get_signal));
1990 if (info.selector & BCN_RSSI_AVG_MASK)
1991 priv->w_stats.qual.level = info.bcn_rssi_avg;
1992 if (info.selector & BCN_NF_AVG_MASK)
1993 priv->w_stats.qual.noise = info.bcn_nf_avg;
1994 }
1995
1996 if (wait && (status != -EINPROGRESS))
1997 kfree(wait);
1998 return status;
1999}
2000
2001/*
2002 * Sends IOCTL request to set encryption mode.
2003 *
2004 * This function allocates the IOCTL request buffer, fills it
2005 * with requisite parameters and calls the IOCTL handler.
2006 */
2007static int mwifiex_set_encrypt_mode(struct mwifiex_private *priv,
2008 u8 wait_option, u32 encrypt_mode)
2009{
2010 priv->sec_info.encryption_mode = encrypt_mode;
2011 return 0;
2012}
2013
2014/*
 2015 * This function sets the authentication parameters. It sets both the
 2016 * encryption mode and the authentication mode, and also enables WPA if required.
2017 */
2018int
2019mwifiex_set_auth(struct mwifiex_private *priv, int encrypt_mode,
2020 int auth_mode, int wpa_enabled)
2021{
2022 if (mwifiex_set_encrypt_mode(priv, MWIFIEX_IOCTL_WAIT, encrypt_mode))
2023 return -EFAULT;
2024
2025 if (mwifiex_set_auth_mode(priv, auth_mode))
2026 return -EFAULT;
2027
2028 return 0;
2029}
2030
2031/*
2032 * Sends IOCTL request to set encoding parameters.
2033 *
2034 * This function allocates the IOCTL request buffer, fills it
2035 * with requisite parameters and calls the IOCTL handler.
2036 */
2037int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
2038 int key_len, u8 key_index, int disable)
2039{
2040 struct mwifiex_wait_queue *wait = NULL;
2041 struct mwifiex_ds_encrypt_key encrypt_key;
2042 int status = 0;
2043 int ret = 0;
2044
2045 wait = mwifiex_alloc_fill_wait_queue(priv, MWIFIEX_IOCTL_WAIT);
2046 if (!wait)
2047 return -ENOMEM;
2048
2049 memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
2050 encrypt_key.key_len = key_len;
2051 if (!disable) {
2052 encrypt_key.key_index = key_index;
2053 if (key_len)
2054 memcpy(encrypt_key.key_material, key, key_len);
2055 } else {
2056 encrypt_key.key_disable = true;
2057 }
2058
2059 status = mwifiex_sec_ioctl_encrypt_key(priv, wait, &encrypt_key);
2060
2061 if (mwifiex_request_ioctl(priv, wait, status, MWIFIEX_IOCTL_WAIT))
2062 ret = -EFAULT;
2063
2064 kfree(wait);
2065 return ret;
2066}
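/*
 * Illustrative sketch only: setting a 13-byte (WEP104) key at index 0
 * versus disabling the current key. Key material longer than 13 bytes
 * would be routed to the WPA handler instead.
 *
 *	mwifiex_set_encode(priv, key, 13, 0, 0);	/- set and enable -/
 *	mwifiex_set_encode(priv, NULL, 0, 0, 1);	/- disable -/
 */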
2067
2068/*
2069 * Sends IOCTL request to set power management parameters.
2070 *
2071 * This function allocates the IOCTL request buffer, fills it
2072 * with requisite parameters and calls the IOCTL handler.
2073 */
2074int
2075mwifiex_drv_set_power(struct mwifiex_private *priv, bool power_on)
2076{
2077 int ret = 0;
2078 int status = 0;
2079 struct mwifiex_wait_queue *wait = NULL;
2080 u32 ps_mode;
2081
2082 wait = mwifiex_alloc_fill_wait_queue(priv, MWIFIEX_IOCTL_WAIT);
2083 if (!wait)
2084 return -ENOMEM;
2085
2086 ps_mode = power_on;
2087 status = mwifiex_pm_ioctl_ps_mode(priv, wait, &ps_mode,
2088 HostCmd_ACT_GEN_SET);
2089
2090 ret = mwifiex_request_ioctl(priv, wait, status, MWIFIEX_IOCTL_WAIT);
2091
2092 kfree(wait);
2093 return ret;
2094}
2095
2096/*
2097 * Sends IOCTL request to get extended version.
2098 *
2099 * This function allocates the IOCTL request buffer, fills it
2100 * with requisite parameters and calls the IOCTL handler.
2101 */
2102int
2103mwifiex_get_ver_ext(struct mwifiex_private *priv)
2104{
2105 struct mwifiex_ver_ext ver_ext;
2106 struct mwifiex_wait_queue *wait = NULL;
2107 int status = 0;
2108 int ret = 0;
2109 u8 wait_option = MWIFIEX_IOCTL_WAIT;
2110
2111 /* Allocate wait buffer */
2112 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
2113 if (!wait)
2114 return -ENOMEM;
2115
2116 /* get fw version */
 2117 	memset(&ver_ext, 0, sizeof(struct mwifiex_ver_ext));
2118 status = mwifiex_get_info_ver_ext(priv, wait, &ver_ext);
2119
2120 ret = mwifiex_request_ioctl(priv, wait, status, wait_option);
2121
2122 if (ret)
2123 ret = -1;
2124
2125 kfree(wait);
2126 return ret;
2127}
2128
2129/*
2130 * Sends IOCTL request to get statistics information.
2131 *
2132 * This function allocates the IOCTL request buffer, fills it
2133 * with requisite parameters and calls the IOCTL handler.
2134 */
2135int
2136mwifiex_get_stats_info(struct mwifiex_private *priv,
2137 struct mwifiex_ds_get_stats *log)
2138{
2139 int ret = 0;
2140 int status = 0;
2141 struct mwifiex_wait_queue *wait = NULL;
2142 struct mwifiex_ds_get_stats get_log;
2143 u8 wait_option = MWIFIEX_IOCTL_WAIT;
2144
2145 /* Allocate wait buffer */
2146 wait = mwifiex_alloc_fill_wait_queue(priv, wait_option);
2147 if (!wait)
2148 return -ENOMEM;
2149
2150 memset(&get_log, 0, sizeof(struct mwifiex_ds_get_stats));
2151 status = mwifiex_get_info_stats(priv, wait, &get_log);
2152
2153 /* Send IOCTL request to MWIFIEX */
2154 ret = mwifiex_request_ioctl(priv, wait, status, wait_option);
2155 if (!ret) {
2156 if (log)
2157 memcpy(log, &get_log, sizeof(struct
2158 mwifiex_ds_get_stats));
2159 priv->w_stats.discard.fragment = get_log.fcs_error;
2160 priv->w_stats.discard.retries = get_log.retry;
2161 priv->w_stats.discard.misc = get_log.ack_failure;
2162 }
2163
2164 kfree(wait);
2165 return ret;
2166}
2167
2168/*
2169 * IOCTL request handler to read/write register.
2170 *
2171 * This function prepares the correct firmware command and
2172 * issues it.
2173 *
2174 * Access to the following registers are supported -
2175 * - MAC
2176 * - BBP
2177 * - RF
2178 * - PMIC
2179 * - CAU
2180 */
2181static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv,
2182 struct mwifiex_wait_queue *wait,
2183 struct mwifiex_ds_reg_rw *reg_rw,
2184 u16 action)
2185{
2186 int ret = 0;
2187 u16 cmd_no;
2188
2189 switch (le32_to_cpu(reg_rw->type)) {
2190 case MWIFIEX_REG_MAC:
2191 cmd_no = HostCmd_CMD_MAC_REG_ACCESS;
2192 break;
2193 case MWIFIEX_REG_BBP:
2194 cmd_no = HostCmd_CMD_BBP_REG_ACCESS;
2195 break;
2196 case MWIFIEX_REG_RF:
2197 cmd_no = HostCmd_CMD_RF_REG_ACCESS;
2198 break;
2199 case MWIFIEX_REG_PMIC:
2200 cmd_no = HostCmd_CMD_PMIC_REG_ACCESS;
2201 break;
2202 case MWIFIEX_REG_CAU:
2203 cmd_no = HostCmd_CMD_CAU_REG_ACCESS;
2204 break;
2205 default:
2206 return -1;
2207 }
2208
2209 /* Send request to firmware */
2210 ret = mwifiex_prepare_cmd(priv, cmd_no, action, 0, wait, reg_rw);
2211
2212 if (!ret)
2213 ret = -EINPROGRESS;
2214
2215 return ret;
2216}
2217
2218/*
2219 * Sends IOCTL request to write to a register.
2220 *
2221 * This function allocates the IOCTL request buffer, fills it
2222 * with requisite parameters and calls the IOCTL handler.
2223 */
2224int
2225mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type,
2226 u32 reg_offset, u32 reg_value)
2227{
2228 int ret = 0;
2229 int status = 0;
2230 struct mwifiex_wait_queue *wait = NULL;
2231 struct mwifiex_ds_reg_rw reg_rw;
2232
2233 wait = mwifiex_alloc_fill_wait_queue(priv, MWIFIEX_IOCTL_WAIT);
2234 if (!wait)
2235 return -ENOMEM;
2236
2237 reg_rw.type = cpu_to_le32(reg_type);
2238 reg_rw.offset = cpu_to_le32(reg_offset);
2239 reg_rw.value = cpu_to_le32(reg_value);
2240 status = mwifiex_reg_mem_ioctl_reg_rw(priv, wait, &reg_rw,
2241 HostCmd_ACT_GEN_SET);
2242
2243 ret = mwifiex_request_ioctl(priv, wait, status, MWIFIEX_IOCTL_WAIT);
2244
2245 kfree(wait);
2246 return ret;
2247}
2248
2249/*
2250 * Sends IOCTL request to read from a register.
2251 *
2252 * This function allocates the IOCTL request buffer, fills it
2253 * with requisite parameters and calls the IOCTL handler.
2254 */
2255int
2256mwifiex_reg_read(struct mwifiex_private *priv, u32 reg_type,
2257 u32 reg_offset, u32 *value)
2258{
2259 int ret = 0;
2260 int status = 0;
2261 struct mwifiex_wait_queue *wait = NULL;
2262 struct mwifiex_ds_reg_rw reg_rw;
2263
2264 wait = mwifiex_alloc_fill_wait_queue(priv, MWIFIEX_IOCTL_WAIT);
2265 if (!wait)
2266 return -ENOMEM;
2267
2268 reg_rw.type = cpu_to_le32(reg_type);
2269 reg_rw.offset = cpu_to_le32(reg_offset);
2270 status = mwifiex_reg_mem_ioctl_reg_rw(priv, wait, &reg_rw,
2271 HostCmd_ACT_GEN_GET);
2272
2273 ret = mwifiex_request_ioctl(priv, wait, status, MWIFIEX_IOCTL_WAIT);
2274 if (ret)
2275 goto done;
2276
2277 *value = le32_to_cpu(reg_rw.value);
2278
2279done:
2280 kfree(wait);
2281 return ret;
2282}
2283
2284/*
2285 * IOCTL request handler to read EEPROM.
2286 *
2287 * This function prepares the correct firmware command and
2288 * issues it.
2289 */
2290static int
2291mwifiex_reg_mem_ioctl_read_eeprom(struct mwifiex_private *priv,
2292 struct mwifiex_wait_queue *wait,
2293 struct mwifiex_ds_read_eeprom *rd_eeprom)
2294{
2295 int ret = 0;
2296
2297 /* Send request to firmware */
2298 ret = mwifiex_prepare_cmd(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
2299 HostCmd_ACT_GEN_GET, 0, wait, rd_eeprom);
2300
2301 if (!ret)
2302 ret = -EINPROGRESS;
2303
2304 return ret;
2305}
2306
2307/*
2308 * Sends IOCTL request to read from EEPROM.
2309 *
2310 * This function allocates the IOCTL request buffer, fills it
2311 * with requisite parameters and calls the IOCTL handler.
2312 */
2313int
2314mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
2315 u8 *value)
2316{
2317 int ret = 0;
2318 int status = 0;
2319 struct mwifiex_wait_queue *wait = NULL;
2320 struct mwifiex_ds_read_eeprom rd_eeprom;
2321
2322 wait = mwifiex_alloc_fill_wait_queue(priv, MWIFIEX_IOCTL_WAIT);
2323 if (!wait)
2324 return -ENOMEM;
2325
2326 rd_eeprom.offset = cpu_to_le16((u16) offset);
2327 rd_eeprom.byte_count = cpu_to_le16((u16) bytes);
2328 status = mwifiex_reg_mem_ioctl_read_eeprom(priv, wait, &rd_eeprom);
2329
2330 ret = mwifiex_request_ioctl(priv, wait, status, MWIFIEX_IOCTL_WAIT);
2331 if (ret)
2332 goto done;
2333
 2334 	memcpy(value, rd_eeprom.value, min_t(u16, bytes, MAX_EEPROM_DATA));
2335done:
2336 kfree(wait);
2337 return ret;
2338}
2339
2340/*
2341 * This function sets a generic IE. In addition to generic IE, it can
2342 * also handle WPA, WPA2 and WAPI IEs.
2343 */
2344static int
2345mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
2346 u16 ie_len)
2347{
2348 int ret = 0;
2349 struct ieee_types_vendor_header *pvendor_ie;
2350 const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 };
2351 const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 };
2352
2353 /* If the passed length is zero, reset the buffer */
2354 if (!ie_len) {
2355 priv->gen_ie_buf_len = 0;
2356 priv->wps.session_enable = false;
2357
2358 return 0;
2359 } else if (!ie_data_ptr) {
2360 return -1;
2361 }
2362 pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
2363 /* Test to see if it is a WPA IE, if not, then it is a gen IE */
2364 if (((pvendor_ie->element_id == WLAN_EID_WPA)
2365 && (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui))))
2366 || (pvendor_ie->element_id == WLAN_EID_RSN)) {
2367
2368 /* IE is a WPA/WPA2 IE so call set_wpa function */
2369 ret = mwifiex_set_wpa_ie_helper(priv, ie_data_ptr, ie_len);
2370 priv->wps.session_enable = false;
2371
2372 return ret;
2373 } else if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) {
2374 /* IE is a WAPI IE so call set_wapi function */
2375 ret = mwifiex_set_wapi_ie(priv, ie_data_ptr, ie_len);
2376
2377 return ret;
2378 }
2379 /*
2380 * Verify that the passed length is not larger than the
2381 * available space remaining in the buffer
2382 */
2383 if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) {
2384
2385 /* Test to see if it is a WPS IE, if so, enable
2386 * wps session flag
2387 */
2388 pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
2389 if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC)
2390 && (!memcmp(pvendor_ie->oui, wps_oui,
2391 sizeof(wps_oui)))) {
2392 priv->wps.session_enable = true;
2393 dev_dbg(priv->adapter->dev,
2394 "info: WPS Session Enabled.\n");
2395 }
2396
 2397 		/* Append the passed data to the end of the
 2398 		   gen_ie_buf */
2399 memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr,
2400 ie_len);
2401 /* Increment the stored buffer length by the
2402 size passed */
2403 priv->gen_ie_buf_len += ie_len;
2404 } else {
2405 /* Passed data does not fit in the remaining
2406 buffer space */
2407 ret = -1;
2408 }
2409
2410 /* Return 0, or -1 for error case */
2411 return ret;
2412}
2413
2414/*
2415 * IOCTL request handler to set/get generic IE.
2416 *
2417 * In addition to various generic IEs, this function can also be
2418 * used to set the ARP filter.
2419 */
2420static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
2421 struct mwifiex_ds_misc_gen_ie *gen_ie,
2422 u16 action)
2423{
2424 struct mwifiex_adapter *adapter = priv->adapter;
2425
2426 switch (gen_ie->type) {
2427 case MWIFIEX_IE_TYPE_GEN_IE:
2428 if (action == HostCmd_ACT_GEN_GET) {
2429 gen_ie->len = priv->wpa_ie_len;
2430 memcpy(gen_ie->ie_data, priv->wpa_ie, gen_ie->len);
2431 } else {
2432 mwifiex_set_gen_ie_helper(priv, gen_ie->ie_data,
2433 (u16) gen_ie->len);
2434 }
2435 break;
2436 case MWIFIEX_IE_TYPE_ARP_FILTER:
2437 memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter));
2438 if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) {
2439 adapter->arp_filter_size = 0;
2440 dev_err(adapter->dev, "invalid ARP filter size\n");
2441 return -1;
2442 } else {
2443 memcpy(adapter->arp_filter, gen_ie->ie_data,
2444 gen_ie->len);
2445 adapter->arp_filter_size = gen_ie->len;
2446 }
2447 break;
2448 default:
2449 dev_err(adapter->dev, "invalid IE type\n");
2450 return -1;
2451 }
2452 return 0;
2453}
2454
2455/*
2456 * Sends IOCTL request to set a generic IE.
2457 *
2458 * This function allocates the IOCTL request buffer, fills it
2459 * with requisite parameters and calls the IOCTL handler.
2460 */
2461int
2462mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
2463{
2464 struct mwifiex_ds_misc_gen_ie gen_ie;
2465 int status = 0;
2466
2467 if (ie_len > IW_CUSTOM_MAX)
2468 return -EFAULT;
2469
2470 gen_ie.type = MWIFIEX_IE_TYPE_GEN_IE;
2471 gen_ie.len = ie_len;
2472 memcpy(gen_ie.ie_data, ie, ie_len);
2473 status = mwifiex_misc_ioctl_gen_ie(priv, &gen_ie, HostCmd_ACT_GEN_SET);
2474 if (status)
2475 return -EFAULT;
2476
2477 return 0;
2478}
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
new file mode 100644
index 00000000000..8282679e64f
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -0,0 +1,182 @@
1/*
2 * Marvell Wireless LAN device driver: station RX data handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "11n_aggr.h"
26#include "11n_rxreorder.h"
27
28/*
29 * This function processes the received packet and forwards it
30 * to kernel/upper layer.
31 *
32 * This function parses through the received packet and determines
33 * if it is a debug packet or normal packet.
34 *
35 * For non-debug packets, the function chops off unnecessary leading
36 * header bytes, reconstructs the packet as an ethernet frame or
37 * 802.2/llc/snap frame as required, and sends it to kernel/upper layer.
38 *
 39 * The completion callback is called after processing is complete.
40 */
41int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
42 struct sk_buff *skb)
43{
44 int ret = 0;
45 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
46 struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
47 struct rx_packet_hdr *rx_pkt_hdr;
48 struct rxpd *local_rx_pd;
49 int hdr_chop;
50 struct ethhdr *eth_hdr;
51 u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
52
53 local_rx_pd = (struct rxpd *) (skb->data);
54
55 rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
56 local_rx_pd->rx_pkt_offset);
57
58 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
59 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
60 /*
 61 * Replace the 802.3 header and rfc1042 header (llc/snap) with an
62 * EthernetII header, keep the src/dst and snap_type
63 * (ethertype).
64 * The firmware only passes up SNAP frames converting
65 * all RX Data from 802.11 to 802.2/LLC/SNAP frames.
66 * To create the Ethernet II, just move the src, dst address
67 * right before the snap_type.
68 */
69 eth_hdr = (struct ethhdr *)
70 ((u8 *) &rx_pkt_hdr->eth803_hdr
71 + sizeof(rx_pkt_hdr->eth803_hdr) +
72 sizeof(rx_pkt_hdr->rfc1042_hdr)
73 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
74 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
75 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
76
77 memcpy(eth_hdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
78 sizeof(eth_hdr->h_source));
79 memcpy(eth_hdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
80 sizeof(eth_hdr->h_dest));
81
 82 		/* Chop off the rxpd plus the part of the 802.2/llc/snap
 83 		   header that was replaced by the EthernetII header. */
84 hdr_chop = (u8 *) eth_hdr - (u8 *) local_rx_pd;
85 } else {
86 /* Chop off the rxpd */
87 hdr_chop = (u8 *) &rx_pkt_hdr->eth803_hdr -
88 (u8 *) local_rx_pd;
89 }
90
 91 	/* Chop off the leading header bytes so that skb->data points to the
 92 	   start of either the reconstructed EthII frame or the 802.2/llc/snap frame */
93 skb_pull(skb, hdr_chop);
94
95 priv->rxpd_rate = local_rx_pd->rx_rate;
96
97 priv->rxpd_htinfo = local_rx_pd->ht_info;
98
99 ret = mwifiex_recv_packet(adapter, skb);
100 if (ret == -1)
101 dev_err(adapter->dev, "recv packet failed\n");
102
103 return ret;
104}
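/*
 * The header rewrite above, pictorially (sketch):
 *
 *	[rxpd][802.3: dst | src | len][LLC/SNAP ... snap_type][payload]
 * becomes
 *	[dst | src | snap_type (acts as ethertype)][payload]
 *
 * dst/src are copied back so they sit immediately before snap_type,
 * and everything in front of them is chopped off with skb_pull().
 */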
105
106/*
107 * This function processes the received buffer.
108 *
109 * The function looks into the RxPD and performs sanity tests on the
 110 * received buffer to ensure it is a valid packet, before processing it
111 * further. If the packet is determined to be aggregated, it is
112 * de-aggregated accordingly. Non-unicast packets are sent directly to
113 * the kernel/upper layers. Unicast packets are handed over to the
114 * Rx reordering routine if 11n is enabled.
115 *
 116 * The completion callback is called after processing is complete.
117 */
118int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
119 struct sk_buff *skb)
120{
121 int ret = 0;
122 struct rxpd *local_rx_pd;
123 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
124 struct rx_packet_hdr *rx_pkt_hdr;
125 u8 ta[ETH_ALEN];
126 u16 rx_pkt_type = 0;
127 struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
128
129 local_rx_pd = (struct rxpd *) (skb->data);
130 rx_pkt_type = local_rx_pd->rx_pkt_type;
131
132 rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
133 local_rx_pd->rx_pkt_offset);
134
135 if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) >
136 (u16) skb->len) {
137 dev_err(adapter->dev, "wrong rx packet: len=%d,"
138 " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
139 local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
140 priv->stats.rx_dropped++;
141 dev_kfree_skb_any(skb);
142 return ret;
143 }
144 if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) {
145 mwifiex_11n_deaggregate_pkt(priv, skb);
146 return ret;
147 }
148 /*
 149 	 * If the packet is not a unicast packet, send it directly to
 150 	 * the OS without passing it through RX reordering.
151 */
152 if (!IS_11N_ENABLED(priv) ||
153 memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
154 mwifiex_process_rx_packet(adapter, skb);
155 return ret;
156 }
157
158 if (mwifiex_queuing_ra_based(priv)) {
159 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
160 } else {
161 if (rx_pkt_type != PKT_TYPE_BAR)
162 priv->rx_seq[local_rx_pd->priority] =
163 local_rx_pd->seq_num;
164 memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
165 ETH_ALEN);
166 }
167
168 /* Reorder and send to OS */
169 ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
170 local_rx_pd->priority, ta,
171 (u8) local_rx_pd->rx_pkt_type,
172 (void *) skb);
173
174 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
175 if (priv && (ret == -1))
176 priv->stats.rx_dropped++;
177
178 dev_kfree_skb_any(skb);
179 }
180
181 return ret;
182}
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
new file mode 100644
index 00000000000..e8db6bd021c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -0,0 +1,202 @@
1/*
2 * Marvell Wireless LAN device driver: station TX data handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26
27/*
28 * This function fills the TxPD for tx packets.
29 *
30 * The Tx buffer received by this function should already have the
31 * header space allocated for TxPD.
32 *
33 * This function inserts the TxPD in between interface header and actual
34 * data and adjusts the buffer pointers accordingly.
35 *
36 * The following TxPD fields are set by this function, as required -
37 * - BSS number
38 * - Tx packet length and offset
39 * - Priority
40 * - Packet delay
41 * - Priority specific Tx control
42 * - Flags
43 */
44void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
45 struct sk_buff *skb)
46{
47 struct mwifiex_adapter *adapter = priv->adapter;
48 struct txpd *local_tx_pd;
49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
50
51 if (!skb->len) {
52 dev_err(adapter->dev, "Tx: bad packet length: %d\n",
53 skb->len);
54 tx_info->status_code = MWIFIEX_ERROR_PKT_SIZE_INVALID;
55 return skb->data;
56 }
57
58 BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd) + INTF_HEADER_LEN));
59 skb_push(skb, sizeof(*local_tx_pd));
60
61 local_tx_pd = (struct txpd *) skb->data;
62 memset(local_tx_pd, 0, sizeof(struct txpd));
63 local_tx_pd->bss_num = priv->bss_num;
64 local_tx_pd->bss_type = priv->bss_type;
65 local_tx_pd->tx_pkt_length = cpu_to_le16((u16) (skb->len -
66 sizeof(struct txpd)));
67
68 local_tx_pd->priority = (u8) skb->priority;
69 local_tx_pd->pkt_delay_2ms =
70 mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
71
72 if (local_tx_pd->priority <
73 ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
74 /*
 75 		 * Set the priority-specific tx_control field; a value of 0
 76 		 * causes the default value to be applied later in this function
77 */
78 local_tx_pd->tx_control =
79 cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[local_tx_pd->
80 priority]);
81
82 if (adapter->pps_uapsd_mode) {
83 if (mwifiex_check_last_packet_indication(priv)) {
84 adapter->tx_lock_flag = true;
85 local_tx_pd->flags =
86 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
87 }
88 }
89
90 /* Offset of actual data */
91 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
92
93 /* make space for INTF_HEADER_LEN */
94 skb_push(skb, INTF_HEADER_LEN);
95
96 if (!local_tx_pd->tx_control)
97 /* TxCtrl set by user or default */
98 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
99
100 return skb->data;
101}
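/*
 * Resulting buffer layout handed back to the caller (sketch):
 *
 *	[INTF_HEADER_LEN][txpd][payload]
 *
 * tx_pkt_offset is the payload offset measured from the start of the
 * txpd, so the firmware can locate the data independently of the
 * interface header size.
 */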
102
103/*
104 * This function tells firmware to send a NULL data packet.
105 *
106 * The function creates a NULL data packet with TxPD and sends to the
107 * firmware for transmission, with highest priority setting.
108 */
109int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
110{
111 struct mwifiex_adapter *adapter = priv->adapter;
112 struct txpd *local_tx_pd;
113/* sizeof(struct txpd) + Interface specific header */
114#define NULL_PACKET_HDR 64
115 u32 data_len = NULL_PACKET_HDR;
116 struct sk_buff *skb = NULL;
117 int ret = 0;
118 struct mwifiex_txinfo *tx_info = NULL;
119
120 if (adapter->surprise_removed)
121 return -1;
122
123 if (!priv->media_connected)
124 return -1;
125
126 if (adapter->data_sent)
127 return -1;
128
129 skb = dev_alloc_skb(data_len);
130 if (!skb)
131 return -1;
132
133 tx_info = MWIFIEX_SKB_TXCB(skb);
134 tx_info->bss_index = priv->bss_index;
135 skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
136 skb_push(skb, sizeof(struct txpd));
137
138 local_tx_pd = (struct txpd *) skb->data;
139 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
140 local_tx_pd->flags = flags;
141 local_tx_pd->priority = WMM_HIGHEST_PRIORITY;
142 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
143 local_tx_pd->bss_num = priv->bss_num;
144 local_tx_pd->bss_type = priv->bss_type;
145
146 skb_push(skb, INTF_HEADER_LEN);
147
148 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
149 skb->data, skb->len, NULL);
150 switch (ret) {
151 case -EBUSY:
152 adapter->data_sent = true;
 153 		/* Fall through to failure handling */
154 case -1:
155 dev_kfree_skb_any(skb);
156 dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
157 __func__, ret);
158 adapter->dbg.num_tx_host_to_card_failure++;
159 break;
160 case 0:
161 dev_kfree_skb_any(skb);
162 dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
163 __func__);
164 adapter->tx_lock_flag = true;
165 break;
166 case -EINPROGRESS:
167 break;
168 default:
169 break;
170 }
171
172 return ret;
173}
174
175/*
176 * This function checks if we need to send last packet indication.
177 */
178u8
179mwifiex_check_last_packet_indication(struct mwifiex_private *priv)
180{
181 struct mwifiex_adapter *adapter = priv->adapter;
182 u8 ret = false;
183 u8 prop_ps = true;
184
185 if (!adapter->sleep_period.period)
186 return ret;
187 if (mwifiex_wmm_lists_empty(adapter)) {
188 if ((priv->curr_bss_params.wmm_uapsd_enabled &&
189 priv->wmm_qosinfo) || prop_ps)
190 ret = true;
191 }
192
193 if (ret && !adapter->cmd_sent && !adapter->curr_cmd
194 && !is_command_pending(adapter)) {
195 adapter->delay_null_pkt = false;
196 ret = true;
197 } else {
198 ret = false;
199 adapter->delay_null_pkt = true;
200 }
201 return ret;
202}
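The pps/U-APSD handshake above is easiest to see end to end: when the TX queues drain and no command is in flight, the driver marks the moment with a NULL data packet carrying the power-management flag. A minimal standalone sketch of that decision (not driver code; all names are invented for illustration):

/*
 * Only indicate the last packet when a sleep period is configured,
 * the WMM TX lists are drained, and no command activity would keep
 * the firmware awake anyway.
 */
#include <stdbool.h>
#include <stdio.h>

struct model {
	bool sleep_period_set;
	bool wmm_lists_empty;
	bool cmd_pending;
};

static bool last_packet_indication(const struct model *m)
{
	return m->sleep_period_set && m->wmm_lists_empty && !m->cmd_pending;
}

int main(void)
{
	struct model m = { true, true, false };

	if (last_packet_indication(&m))
		printf("send NULL data packet, POWER_MGMT_LAST_PACKET set\n");
	return 0;
}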
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
new file mode 100644
index 00000000000..f06923cb1c4
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -0,0 +1,202 @@
1/*
2 * Marvell Wireless LAN device driver: generic TX/RX data handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26
27/*
28 * This function processes the received buffer.
29 *
30 * The main responsibility of this function is to parse the RxPD,
31 * identify the correct interface this packet is headed for, and
32 * forward it to the associated handling function, where the
33 * packet will be further processed and sent to kernel/upper layer
34 * if required.
35 */
36int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
37 struct sk_buff *skb)
38{
39 int ret = 0;
40 struct mwifiex_private *priv =
41 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
42 struct rxpd *local_rx_pd;
43 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
44
45 local_rx_pd = (struct rxpd *) (skb->data);
46 /* Get the BSS number from rxpd, get corresponding priv */
47 priv = mwifiex_get_priv_by_id(adapter, local_rx_pd->bss_num &
48 BSS_NUM_MASK, local_rx_pd->bss_type);
49 if (!priv)
50 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
51
52 rx_info->bss_index = priv->bss_index;
53 ret = mwifiex_process_sta_rx_packet(adapter, skb);
54
55 return ret;
56}
57EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
58
59/*
60 * This function sends a packet to device.
61 *
62 * It processes the packet to add the TxPD, checks conditions and
63 * sends the processed packet to firmware for transmission.
64 *
65 * On successful completion, the function calls the completion callback
66 * and logs the time.
67 */
68int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
69 struct mwifiex_tx_param *tx_param)
70{
71 int ret = -1;
72 struct mwifiex_adapter *adapter = priv->adapter;
73 u8 *head_ptr = NULL;
74 struct txpd *local_tx_pd = NULL;
75
76 head_ptr = (u8 *) mwifiex_process_sta_txpd(priv, skb);
77 if (head_ptr) {
78 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
79 local_tx_pd =
80 (struct txpd *) (head_ptr + INTF_HEADER_LEN);
81
82 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
83 skb->data, skb->len, tx_param);
84 }
85
86 switch (ret) {
87 case -EBUSY:
88 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
89 (adapter->pps_uapsd_mode) &&
90 (adapter->tx_lock_flag)) {
91 priv->adapter->tx_lock_flag = false;
92 local_tx_pd->flags = 0;
93 }
94 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
95 break;
96 case -1:
97 adapter->data_sent = false;
98 dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
99 ret);
100 adapter->dbg.num_tx_host_to_card_failure++;
101 mwifiex_write_data_complete(adapter, skb, ret);
102 break;
103 case -EINPROGRESS:
104 adapter->data_sent = false;
105 break;
106 case 0:
107 mwifiex_write_data_complete(adapter, skb, ret);
108 break;
109 default:
110 break;
111 }
112
113 return ret;
114}
115
116/*
117 * Packet send completion callback handler.
118 *
119 * It either frees the buffer directly or forwards it to another
120 * completion callback which checks conditions, updates statistics,
121 * wakes up stalled traffic queues if required, and then frees the buffer.
122 */
123int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
124 struct sk_buff *skb, int status)
125{
126 struct mwifiex_private *priv = NULL, *tpriv = NULL;
127 struct mwifiex_txinfo *tx_info = NULL;
128 int i;
129
130 if (!skb)
131 return 0;
132
133 tx_info = MWIFIEX_SKB_TXCB(skb);
134 priv = mwifiex_bss_index_to_priv(adapter, tx_info->bss_index);
135 if (!priv)
136 goto done;
137
138 priv->netdev->trans_start = jiffies;
139 if (!status) {
140 priv->stats.tx_packets++;
141 priv->stats.tx_bytes += skb->len;
142 } else {
143 priv->stats.tx_errors++;
144 }
145 atomic_dec(&adapter->tx_pending);
146
147 for (i = 0; i < adapter->priv_num; i++) {
148
149 tpriv = adapter->priv[i];
150
151 if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
152 && (tpriv->media_connected)) {
153 if (netif_queue_stopped(tpriv->netdev))
154 netif_wake_queue(tpriv->netdev);
155 }
156 }
157done:
158 dev_kfree_skb_any(skb);
159
160 return 0;
161}
162
163/*
164 * Packet receive completion callback handler.
165 *
166 * This function calls another completion callback handler which
167 * updates the statistics, and optionally updates the parent buffer
168 * use count before freeing the received packet.
169 */
170int mwifiex_recv_packet_complete(struct mwifiex_adapter *adapter,
171 struct sk_buff *skb, int status)
172{
173 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
174 struct mwifiex_rxinfo *rx_info_parent = NULL;
175 struct mwifiex_private *priv;
176 struct sk_buff *skb_parent = NULL;
177 unsigned long flags;
178
179 priv = adapter->priv[rx_info->bss_index];
180
181 if (priv && (status == -1))
182 priv->stats.rx_dropped++;
183
184 if (rx_info->parent) {
185 skb_parent = rx_info->parent;
186 rx_info_parent = MWIFIEX_SKB_RXCB(skb_parent);
187
188 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
189 --rx_info_parent->use_count;
190
191 if (!rx_info_parent->use_count) {
192 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
193 dev_kfree_skb_any(skb_parent);
194 } else {
195 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
196 }
197 } else {
198 dev_kfree_skb_any(skb);
199 }
200
201 return 0;
202}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
new file mode 100644
index 00000000000..205022aa52f
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -0,0 +1,252 @@
1/*
2 * Marvell Wireless LAN device driver: utility functions
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * Firmware initialization complete callback handler.
30 *
31 * This function wakes up the function waiting on the init
32 * wait queue for the firmware initialization to complete.
33 */
34int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
35{
36
37 adapter->init_wait_q_woken = true;
38 wake_up_interruptible(&adapter->init_wait_q);
39 return 0;
40}
41
42/*
43 * Firmware shutdown complete callback handler.
44 *
45 * This function sets the hardware status to not ready and wakes up
46 * the function waiting on the init wait queue for the firmware
47 * shutdown to complete.
48 */
49int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter)
50{
51 adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY;
52 adapter->init_wait_q_woken = true;
53 wake_up_interruptible(&adapter->init_wait_q);
54 return 0;
55}
56
57/*
58 * IOCTL request handler to send function init/shutdown command
59 * to firmware.
60 *
61 * This function prepares the correct firmware command and
62 * issues it.
63 */
64int mwifiex_misc_ioctl_init_shutdown(struct mwifiex_adapter *adapter,
65 struct mwifiex_wait_queue *wait,
66 u32 func_init_shutdown)
67{
68 struct mwifiex_private *priv = adapter->priv[wait->bss_index];
69 int ret;
70 u16 cmd;
71
72 if (func_init_shutdown == MWIFIEX_FUNC_INIT) {
73 cmd = HostCmd_CMD_FUNC_INIT;
74 } else if (func_init_shutdown == MWIFIEX_FUNC_SHUTDOWN) {
75 cmd = HostCmd_CMD_FUNC_SHUTDOWN;
76 } else {
77 dev_err(adapter->dev, "unsupported parameter\n");
78 return -1;
79 }
80
81 /* Send command to firmware */
82 ret = mwifiex_prepare_cmd(priv, cmd, HostCmd_ACT_GEN_SET,
83 0, wait, NULL);
84
85 if (!ret)
86 ret = -EINPROGRESS;
87
88 return ret;
89}
90
91/*
92 * IOCTL request handler to set/get debug information.
93 *
94 * This function collates/sets the information from/to different driver
95 * structures.
96 */
97int mwifiex_get_debug_info(struct mwifiex_private *priv,
98 struct mwifiex_debug_info *info)
99{
100 struct mwifiex_adapter *adapter = priv->adapter;
101
102 if (info) {
103 memcpy(info->packets_out,
104 priv->wmm.packets_out,
105 sizeof(priv->wmm.packets_out));
106 info->max_tx_buf_size = (u32) adapter->max_tx_buf_size;
107 info->tx_buf_size = (u32) adapter->tx_buf_size;
108 info->rx_tbl_num = mwifiex_get_rx_reorder_tbl(
109 priv, info->rx_tbl);
110 info->tx_tbl_num = mwifiex_get_tx_ba_stream_tbl(
111 priv, info->tx_tbl);
112 info->ps_mode = adapter->ps_mode;
113 info->ps_state = adapter->ps_state;
114 info->is_deep_sleep = adapter->is_deep_sleep;
115 info->pm_wakeup_card_req = adapter->pm_wakeup_card_req;
116 info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
117 info->is_hs_configured = adapter->is_hs_configured;
118 info->hs_activated = adapter->hs_activated;
119 info->num_cmd_host_to_card_failure
120 = adapter->dbg.num_cmd_host_to_card_failure;
121 info->num_cmd_sleep_cfm_host_to_card_failure
122 = adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure;
123 info->num_tx_host_to_card_failure
124 = adapter->dbg.num_tx_host_to_card_failure;
125 info->num_event_deauth = adapter->dbg.num_event_deauth;
126 info->num_event_disassoc = adapter->dbg.num_event_disassoc;
127 info->num_event_link_lost = adapter->dbg.num_event_link_lost;
128 info->num_cmd_deauth = adapter->dbg.num_cmd_deauth;
129 info->num_cmd_assoc_success =
130 adapter->dbg.num_cmd_assoc_success;
131 info->num_cmd_assoc_failure =
132 adapter->dbg.num_cmd_assoc_failure;
133 info->num_tx_timeout = adapter->dbg.num_tx_timeout;
134 info->num_cmd_timeout = adapter->dbg.num_cmd_timeout;
135 info->timeout_cmd_id = adapter->dbg.timeout_cmd_id;
136 info->timeout_cmd_act = adapter->dbg.timeout_cmd_act;
137 memcpy(info->last_cmd_id, adapter->dbg.last_cmd_id,
138 sizeof(adapter->dbg.last_cmd_id));
139 memcpy(info->last_cmd_act, adapter->dbg.last_cmd_act,
140 sizeof(adapter->dbg.last_cmd_act));
141 info->last_cmd_index = adapter->dbg.last_cmd_index;
142 memcpy(info->last_cmd_resp_id, adapter->dbg.last_cmd_resp_id,
143 sizeof(adapter->dbg.last_cmd_resp_id));
144 info->last_cmd_resp_index = adapter->dbg.last_cmd_resp_index;
145 memcpy(info->last_event, adapter->dbg.last_event,
146 sizeof(adapter->dbg.last_event));
147 info->last_event_index = adapter->dbg.last_event_index;
148 info->data_sent = adapter->data_sent;
149 info->cmd_sent = adapter->cmd_sent;
150 info->cmd_resp_received = adapter->cmd_resp_received;
151 }
152
153 return 0;
154}
155
156/*
157 * This function processes the received packet before sending it to the
158 * kernel.
159 *
160 * It extracts the SKB from the received buffer and sends it to the kernel.
161 * In case the received buffer does not contain the data in SKB format,
162 * the function creates a blank SKB, fills it with the data from the
163 * received buffer and then sends this new SKB to the kernel.
164 */
165int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
166{
167 struct mwifiex_rxinfo *rx_info = NULL;
168 struct mwifiex_private *priv = NULL;
169
170 if (!skb)
171 return -1;
172
173 rx_info = MWIFIEX_SKB_RXCB(skb);
174 priv = mwifiex_bss_index_to_priv(adapter, rx_info->bss_index);
175 if (!priv)
176 return -1;
177
178 skb->dev = priv->netdev;
179 skb->protocol = eth_type_trans(skb, priv->netdev);
180 skb->ip_summed = CHECKSUM_NONE;
181 priv->stats.rx_bytes += skb->len;
182 priv->stats.rx_packets++;
183 if (in_interrupt())
184 netif_rx(skb);
185 else
186 netif_rx_ni(skb);
187
188 return 0;
189}
190
191/*
192 * Receive packet completion callback handler.
193 *
194 * This function updates the statistics and frees the buffer SKB.
195 */
196int mwifiex_recv_complete(struct mwifiex_adapter *adapter,
197 struct sk_buff *skb, int status)
198{
199 struct mwifiex_private *priv = NULL;
200 struct mwifiex_rxinfo *rx_info = NULL;
201
202 if (!skb)
203 return 0;
204
205 rx_info = MWIFIEX_SKB_RXCB(skb);
206 priv = mwifiex_bss_index_to_priv(adapter, rx_info->bss_index);
207
208 if (priv && (status == -1))
209 priv->stats.rx_dropped++;
210
211 dev_kfree_skb_any(skb);
212
213 return 0;
214}
215
216/*
217 * IOCTL completion callback handler.
218 *
219 * This function is called when a pending IOCTL is completed.
220 *
221 * If work queue support is enabled, the function wakes up the
222 * corresponding waiting function. Otherwise, it processes the
223 * IOCTL response and frees the response buffer.
224 */
225int mwifiex_ioctl_complete(struct mwifiex_adapter *adapter,
226 struct mwifiex_wait_queue *wait_queue,
227 int status)
228{
229 enum mwifiex_error_code status_code =
230 (enum mwifiex_error_code) wait_queue->status;
231
232 atomic_dec(&adapter->ioctl_pending);
233
234 dev_dbg(adapter->dev, "cmd: IOCTL completed: status=%d,"
235 " status_code=%#x\n", status, status_code);
236
237 if (wait_queue->enabled) {
238 *wait_queue->condition = true;
239 wait_queue->status = status;
240 if (status && (status_code == MWIFIEX_ERROR_CMD_TIMEOUT))
241 dev_err(adapter->dev, "cmd timeout\n");
242 else
243 wake_up_interruptible(wait_queue->wait);
244 } else {
245 if (status)
246 dev_err(adapter->dev, "cmd failed: status_code=%#x\n",
247 status_code);
248 kfree(wait_queue);
249 }
250
251 return 0;
252}
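mwifiex_ioctl_complete() is one half of a classic sleep/wake completion pattern. A rough userspace analogue using POSIX threads (illustrative only; the kernel side uses wait queues, not pthreads; build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

static void *completer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	done = true;                /* *wait_queue->condition = true; */
	pthread_cond_signal(&cond); /* wake_up_interruptible(...); */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("ioctl completed\n");
	return 0;
}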
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
new file mode 100644
index 00000000000..9506afc6c0e
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -0,0 +1,32 @@
1/*
2 * Marvell Wireless LAN device driver: utility functions
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_UTIL_H_
21#define _MWIFIEX_UTIL_H_
22
23static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
24{
25 return (struct mwifiex_rxinfo *)skb->cb;
26}
27
28static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
29{
30 return (struct mwifiex_txinfo *)skb->cb;
31}
32#endif /* !_MWIFIEX_UTIL_H_ */
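Both helpers overlay a driver-private struct on skb->cb, the 48-byte per-packet scratch area the network stack hands to drivers. A standalone sketch of the idiom (struct names invented; the real structs live in the driver headers):

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct sk_buff_model { char cb[48]; };	/* stands in for skb->cb */
struct txinfo_model { unsigned char bss_index; unsigned char flags; };

int main(void)
{
	struct sk_buff_model skb;
	struct txinfo_model *tx = (struct txinfo_model *)skb.cb;

	/* The overlaid struct must never outgrow the control buffer. */
	assert(sizeof(struct txinfo_model) <= sizeof(skb.cb));
	memset(skb.cb, 0, sizeof(skb.cb));
	tx->bss_index = 1;
	printf("bss_index stored in cb: %u\n", tx->bss_index);
	return 0;
}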
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
new file mode 100644
index 00000000000..1cfbc6bed69
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -0,0 +1,1237 @@
1/*
2 * Marvell Wireless LAN device driver: WMM
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28
29/* Maximum value FW can accept for driver delay in packet transmission */
30#define DRV_PKT_DELAY_TO_FW_MAX 512
31
32
33#define WMM_QUEUED_PACKET_LOWER_LIMIT 180
34
35#define WMM_QUEUED_PACKET_UPPER_LIMIT 200
36
37/* Offset for TOS field in the IP header */
38#define IPTOS_OFFSET 5
39
40/* WMM information IE */
41static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
42 0x00, 0x50, 0xf2, 0x02,
43 0x00, 0x01, 0x00
44};
45
46static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
47 WMM_AC_BK,
48 WMM_AC_VI,
49 WMM_AC_VO
50};
51
52static u8 tos_to_tid[] = {
53 /* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
54 0x01, /* 0 1 0 AC_BK */
55 0x02, /* 0 0 0 AC_BK */
56 0x00, /* 0 0 1 AC_BE */
57 0x03, /* 0 1 1 AC_BE */
58 0x04, /* 1 0 0 AC_VI */
59 0x05, /* 1 0 1 AC_VI */
60 0x06, /* 1 1 0 AC_VO */
61 0x07 /* 1 1 1 AC_VO */
62};
63
64/*
65 * This table inverts the tos_to_tid operation to get a priority
66 * which is in sequential order and can be compared.
67 * Use this to compare the priority of two different TIDs.
68 */
69static u8 tos_to_tid_inv[] = {
70 0x02, /* from tos_to_tid[2] = 0 */
71 0x00, /* from tos_to_tid[0] = 1 */
72 0x01, /* from tos_to_tid[1] = 2 */
73 0x03,
74 0x04,
75 0x05,
76 0x06,
77 0x07};
78
79static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
80
81/*
82 * This function debug prints the priority parameters for a WMM AC.
83 */
84static void
85mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
86{
87 const char *ac_str[] = { "BK", "BE", "VI", "VO" };
88
89 pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
90 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
91 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
92 & MWIFIEX_ACI) >> 5]],
93 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
94 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
95 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
96 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
97 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
98 le16_to_cpu(ac_param->tx_op_limit));
99}
100
101/*
102 * This function allocates a route address list.
103 *
104 * The function also initializes the list with the provided RA.
105 */
106static struct mwifiex_ra_list_tbl *
107mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
108{
109 struct mwifiex_ra_list_tbl *ra_list;
110
111 ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
112
113 if (!ra_list) {
114 dev_err(adapter->dev, "%s: failed to alloc ra_list\n",
115 __func__);
116 return NULL;
117 }
118 INIT_LIST_HEAD(&ra_list->list);
119 skb_queue_head_init(&ra_list->skb_head);
120
121 memcpy(ra_list->ra, ra, ETH_ALEN);
122
123 ra_list->total_pkts_size = 0;
124
125 dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
126
127 return ra_list;
128}
129
130/*
131 * This function allocates and adds a RA list for all TIDs
132 * with the given RA.
133 */
134void
135mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
136{
137 int i;
138 struct mwifiex_ra_list_tbl *ra_list;
139 struct mwifiex_adapter *adapter = priv->adapter;
140
141 for (i = 0; i < MAX_NUM_TID; ++i) {
142 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
143 dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);
144
145 if (!ra_list)
146 break;
147
148 if (!mwifiex_queuing_ra_based(priv))
149 ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
150 else
151 ra_list->is_11n_enabled = false;
152
153 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
154 ra_list, ra_list->is_11n_enabled);
155
156 list_add_tail(&ra_list->list,
157 &priv->wmm.tid_tbl_ptr[i].ra_list);
158
159 if (!priv->wmm.tid_tbl_ptr[i].ra_list_curr)
160 priv->wmm.tid_tbl_ptr[i].ra_list_curr = ra_list;
161 }
162}
163
164/*
165 * This function sets the WMM queue priorities to their default values.
166 */
167static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
168{
169 /* Default queue priorities: VO->VI->BE->BK */
170 priv->wmm.queue_priority[0] = WMM_AC_VO;
171 priv->wmm.queue_priority[1] = WMM_AC_VI;
172 priv->wmm.queue_priority[2] = WMM_AC_BE;
173 priv->wmm.queue_priority[3] = WMM_AC_BK;
174}
175
176/*
177 * This function maps ACs to TIDs.
178 */
179static void
180mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv,
181 u8 queue_priority[])
182{
183 int i;
184
185 for (i = 0; i < 4; ++i) {
186 tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
187 tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
188 }
189}
190
191/*
192 * This function initializes WMM priority queues.
193 */
194void
195mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
196 struct ieee_types_wmm_parameter *wmm_ie)
197{
198 u16 cw_min, avg_back_off, tmp[4];
199 u32 i, j, num_ac;
200 u8 ac_idx;
201
202 if (!wmm_ie || !priv->wmm_enabled) {
203 /* WMM is not enabled, just set the defaults and return */
204 mwifiex_wmm_default_queue_priorities(priv);
205 return;
206 }
207
208 dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
209 "qos_info Parameter Set Count=%d, Reserved=%#x\n",
210 wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
211 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
212 wmm_ie->reserved);
213
214 for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
215 cw_min = (1 << (wmm_ie->ac_params[num_ac].ecw_bitmap &
216 MWIFIEX_ECW_MIN)) - 1;
217 avg_back_off = (cw_min >> 1) +
218 (wmm_ie->ac_params[num_ac].aci_aifsn_bitmap &
219 MWIFIEX_AIFSN);
220
221 ac_idx = wmm_aci_to_qidx_map[(wmm_ie->ac_params[num_ac].
222 aci_aifsn_bitmap &
223 MWIFIEX_ACI) >> 5];
224 priv->wmm.queue_priority[ac_idx] = ac_idx;
225 tmp[ac_idx] = avg_back_off;
226
227 dev_dbg(priv->adapter->dev, "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
228 (1 << ((wmm_ie->ac_params[num_ac].ecw_bitmap &
229 MWIFIEX_ECW_MAX) >> 4)) - 1,
230 cw_min, avg_back_off);
231 mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
232 }
233
234 /* Bubble sort */
235 for (i = 0; i < num_ac; i++) {
236 for (j = 1; j < num_ac - i; j++) {
237 if (tmp[j - 1] > tmp[j]) {
238 swap(tmp[j - 1], tmp[j]);
239 swap(priv->wmm.queue_priority[j - 1],
240 priv->wmm.queue_priority[j]);
241 } else if (tmp[j - 1] == tmp[j]) {
242 if (priv->wmm.queue_priority[j - 1]
243 < priv->wmm.queue_priority[j])
244 swap(priv->wmm.queue_priority[j - 1],
245 priv->wmm.queue_priority[j]);
246 }
247 }
248 }
249
250 mwifiex_wmm_queue_priorities_tid(priv, priv->wmm.queue_priority);
251}
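The ordering rule above can be exercised in isolation: each AC gets an average back-off of (CWmin / 2) + AIFSN and the ACs are bubble-sorted so the smallest back-off ranks first; the driver additionally breaks ties by AC index. A standalone sketch with made-up back-off values (not taken from a real WMM IE):

#include <stdio.h>

#define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)

int main(void)
{
	int avg_back_off[4]   = { 9, 5, 3, 2 };	/* AC_BK..AC_VO (example) */
	int queue_priority[4] = { 0, 1, 2, 3 };	/* AC indices */
	int i, j;

	/* Smaller back-off = more aggressive access = higher rank. */
	for (i = 0; i < 4; i++)
		for (j = 1; j < 4 - i; j++)
			if (avg_back_off[j - 1] > avg_back_off[j]) {
				SWAP(avg_back_off[j - 1], avg_back_off[j]);
				SWAP(queue_priority[j - 1], queue_priority[j]);
			}

	for (i = 0; i < 4; i++)
		printf("rank %d: AC %d (avg back-off %d)\n",
		       i, queue_priority[i], avg_back_off[i]);
	return 0;
}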
252
253/*
254 * This function evaluates whether or not an AC is to be downgraded.
255 *
256 * In case the AC is not enabled, the highest enabled AC that does
257 * not require admission control is returned.
258 */
259static enum mwifiex_wmm_ac_e
260mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
261 enum mwifiex_wmm_ac_e eval_ac)
262{
263 int down_ac;
264 enum mwifiex_wmm_ac_e ret_ac;
265 struct mwifiex_wmm_ac_status *ac_status;
266
267 ac_status = &priv->wmm.ac_status[eval_ac];
268
269 if (!ac_status->disabled)
270 /* Okay to use this AC, it's enabled */
271 return eval_ac;
272
273 /* Setup a default return value of the lowest priority */
274 ret_ac = WMM_AC_BK;
275
276 /*
277 * Find the highest AC that is enabled and does not require
278 * admission control. The spec disallows downgrading to an AC
279 * which is enabled due to a completed admission control.
280 * Unadmitted traffic is not to be sent on an AC with admitted
281 * traffic.
282 */
283 for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
284 ac_status = &priv->wmm.ac_status[down_ac];
285
286 if (!ac_status->disabled && !ac_status->flow_required)
287 /* AC is enabled and does not require admission
288 control */
289 ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
290 }
291
292 return ret_ac;
293}
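Put another way: scan from AC_BK up to (but not including) the requested AC and remember the highest one that is enabled and needs no admission control. A compact standalone model of that rule (status values invented; AC indices follow the driver's BK(0) < BE(1) < VI(2) < VO(3) ordering):

#include <stdbool.h>
#include <stdio.h>

struct ac_status_model { bool disabled; bool flow_required; };

static int downgrade_ac(const struct ac_status_model st[4], int want)
{
	int ac, best = 0;	/* AC_BK is the default fallback */

	if (!st[want].disabled)
		return want;
	for (ac = 0; ac < want; ac++)
		if (!st[ac].disabled && !st[ac].flow_required)
			best = ac;
	return best;
}

int main(void)
{
	/* Example: AC_VO disabled by ACM, AC_VI needs admission control. */
	struct ac_status_model st[4] = {
		{ false, false }, { false, false },
		{ false, true },  { true,  false },
	};

	printf("AC_VO(3) downgrades to AC %d\n", downgrade_ac(st, 3));
	return 0;
}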
294
295/*
296 * This function downgrades WMM priority queue.
297 */
298void
299mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
300{
301 int ac_val;
302
303 dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:"
304 "BK(0), BE(1), VI(2), VO(3)\n");
305
306 if (!priv->wmm_enabled) {
307 /* WMM is not enabled, default priorities */
308 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
309 priv->wmm.ac_down_graded_vals[ac_val] =
310 (enum mwifiex_wmm_ac_e) ac_val;
311 } else {
312 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
313 priv->wmm.ac_down_graded_vals[ac_val]
314 = mwifiex_wmm_eval_downgrade_ac(priv,
315 (enum mwifiex_wmm_ac_e) ac_val);
316 dev_dbg(priv->adapter->dev, "info: WMM: AC PRIO %d maps to %d\n",
317 ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
318 }
319 }
320}
321
322/*
323 * This function converts the IP TOS field to a WMM AC
324 * queue assignment.
325 */
326static enum mwifiex_wmm_ac_e
327mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
328{
329 /* Map of TOS UP values to WMM AC */
330 const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
331 WMM_AC_BK,
332 WMM_AC_BK,
333 WMM_AC_BE,
334 WMM_AC_VI,
335 WMM_AC_VI,
336 WMM_AC_VO,
337 WMM_AC_VO
338 };
339
340 if (tos >= ARRAY_SIZE(tos_to_ac))
341 return WMM_AC_BE;
342
343 return tos_to_ac[tos];
344}
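This is the standard 802.1d user-priority to access-category mapping. A tiny standalone demo that prints the whole table (enum ordering mirrors the driver's BK < BE < VI < VO):

#include <stdio.h>

enum ac_model { AC_BK, AC_BE, AC_VI, AC_VO };

int main(void)
{
	static const enum ac_model up_to_ac[8] = {
		AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
	};
	static const char *name[] = { "BK", "BE", "VI", "VO" };
	unsigned int up;

	for (up = 0; up < 8; up++)
		printf("UP %u -> AC_%s\n", up, name[up_to_ac[up]]);
	return 0;
}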
345
346/*
347 * This function evaluates a given TID and downgrades it to a lower
348 * TID if the WMM Parameter IE received from the AP indicates that the
349 * corresponding AC is disabled (due to the call admission control
350 * (ACM) bit). Mapping of TID to AC is taken care of internally.
351 */
352static u8
353mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
354{
355 enum mwifiex_wmm_ac_e ac, ac_down;
356 u8 new_tid;
357
358 ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
359 ac_down = priv->wmm.ac_down_graded_vals[ac];
360
361 /* Send the index to the tid array; picking from the array will be
362 * taken care of by the dequeuing function
363 */
364 new_tid = ac_to_tid[ac_down][tid % 2];
365
366 return new_tid;
367}
368
369/*
370 * This function initializes the WMM state information and the
371 * WMM data path queues.
372 */
373void
374mwifiex_wmm_init(struct mwifiex_adapter *adapter)
375{
376 int i, j;
377 struct mwifiex_private *priv;
378
379 for (j = 0; j < adapter->priv_num; ++j) {
380 priv = adapter->priv[j];
381 if (!priv)
382 continue;
383
384 for (i = 0; i < MAX_NUM_TID; ++i) {
385 priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
386 priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
387 priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
388 priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
389 }
390
391 priv->aggr_prio_tbl[6].amsdu
392 = priv->aggr_prio_tbl[6].ampdu_ap
393 = priv->aggr_prio_tbl[6].ampdu_user
394 = BA_STREAM_NOT_ALLOWED;
395
396 priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
397 = priv->aggr_prio_tbl[7].ampdu_user
398 = BA_STREAM_NOT_ALLOWED;
399
400 priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
401 priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
402 priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
403 }
404}
405
406/*
407 * This function checks if the WMM Tx queues are empty.
408 */
409int
410mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
411{
412 int i, j;
413 struct mwifiex_private *priv;
414
415 for (j = 0; j < adapter->priv_num; ++j) {
416 priv = adapter->priv[j];
417 if (priv) {
418 for (i = 0; i < MAX_NUM_TID; i++)
419 if (!mwifiex_wmm_is_ra_list_empty(adapter,
420 &priv->wmm.tid_tbl_ptr[i].ra_list))
421 return false;
422 }
423 }
424
425 return true;
426}
427
428/*
429 * This function deletes all packets in an RA list node.
430 *
431 * The packet send completion callback handlers are called with
432 * status failure after the packets are dequeued, to ensure proper
433 * cleanup. The RA list node itself is freed at the end.
434 */
435static void
436mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
437 struct mwifiex_ra_list_tbl *ra_list)
438{
439 struct mwifiex_adapter *adapter = priv->adapter;
440 struct sk_buff *skb, *tmp;
441
442 skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
443 mwifiex_write_data_complete(adapter, skb, -1);
444}
445
446/*
447 * This function deletes all packets in an RA list.
448 *
449 * Each nodes in the RA list are freed individually first, and then
450 * the RA list itself is freed.
451 */
452static void
453mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
454 struct list_head *ra_list_head)
455{
456 struct mwifiex_ra_list_tbl *ra_list;
457
458 list_for_each_entry(ra_list, ra_list_head, list)
459 mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
460}
461
462/*
463 * This function deletes all packets in all RA lists.
464 */
465static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
466{
467 int i;
468
469 for (i = 0; i < MAX_NUM_TID; i++)
470 mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
471 ra_list);
472}
473
474/*
475 * This function deletes all route addresses from all RA lists.
476 */
477static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
478{
479 struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
480 int i;
481
482 for (i = 0; i < MAX_NUM_TID; ++i) {
483 dev_dbg(priv->adapter->dev,
484 "info: ra_list: freeing buf for tid %d\n", i);
485 list_for_each_entry_safe(ra_list, tmp_node,
486 &priv->wmm.tid_tbl_ptr[i].ra_list, list) {
487 list_del(&ra_list->list);
488 kfree(ra_list);
489 }
490
491 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
492
493 priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
494 }
495}
496
497/*
498 * This function cleans up the Tx and Rx queues.
499 *
500 * Cleanup includes -
501 * - All packets in RA lists
502 * - All entries in Rx reorder table
503 * - All entries in Tx BA stream table
504 * - MPA buffer (if required)
505 * - All RA lists
506 */
507void
508mwifiex_clean_txrx(struct mwifiex_private *priv)
509{
510 unsigned long flags;
511
512 mwifiex_11n_cleanup_reorder_tbl(priv);
513 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
514
515 mwifiex_wmm_cleanup_queues(priv);
516 mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
517
518 if (priv->adapter->if_ops.cleanup_mpa_buf)
519 priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);
520
521 mwifiex_wmm_delete_all_ralist(priv);
522 memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
523
524 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
525}
526
527/*
528 * This function retrieves a particular RA list node, matching with the
529 * given TID and RA address.
530 */
531static struct mwifiex_ra_list_tbl *
532mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
533 u8 *ra_addr)
534{
535 struct mwifiex_ra_list_tbl *ra_list;
536
537 list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
538 list) {
539 if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
540 return ra_list;
541 }
542
543 return NULL;
544}
545
546/*
547 * This function retrieves an RA list node for a given TID and
548 * RA address pair.
549 *
550 * If no such node is found, a new node is added first and then
551 * retrieved.
552 */
553static struct mwifiex_ra_list_tbl *
554mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
555{
556 struct mwifiex_ra_list_tbl *ra_list;
557
558 ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
559 if (ra_list)
560 return ra_list;
561 mwifiex_ralist_add(priv, ra_addr);
562
563 return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
564}
565
566/*
567 * This function checks if a particular RA list node exists in a given TID
568 * table index.
569 */
570int
571mwifiex_is_ralist_valid(struct mwifiex_private *priv,
572 struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
573{
574 struct mwifiex_ra_list_tbl *rlist;
575
576 list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
577 list) {
578 if (rlist == ra_list)
579 return true;
580 }
581
582 return false;
583}
584
585/*
586 * This function adds a packet to WMM queue.
587 *
588 * In disconnected state the packet is immediately dropped and the
589 * packet send completion callback is called with status failure.
590 *
591 * Otherwise, the correct RA list node is located and the packet
592 * is queued at the list tail.
593 */
594void
595mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
596 struct sk_buff *skb)
597{
598 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
599 struct mwifiex_private *priv = adapter->priv[tx_info->bss_index];
600 u32 tid;
601 struct mwifiex_ra_list_tbl *ra_list;
602 u8 ra[ETH_ALEN], tid_down;
603 unsigned long flags;
604
605 if (!priv->media_connected) {
606 dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
607 mwifiex_write_data_complete(adapter, skb, -1);
608 return;
609 }
610
611 tid = skb->priority;
612
613 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
614
615 tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
616
617 /* In infrastructure mode the list was already created during
618 association, so there is no need to call get_queue_raptr; there
619 is only one RA pointer per TID in that case */
620 if (!mwifiex_queuing_ra_based(priv)) {
621 if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
622 ra_list = list_first_entry(
623 &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
624 struct mwifiex_ra_list_tbl, list);
625 else
626 ra_list = NULL;
627 } else {
628 memcpy(ra, skb->data, ETH_ALEN);
629 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
630 }
631
632 if (!ra_list) {
633 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
634 mwifiex_write_data_complete(adapter, skb, -1);
635 return;
636 }
637
638 skb_queue_tail(&ra_list->skb_head, skb);
639
640 ra_list->total_pkts_size += skb->len;
641
642 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
643}
644
645/*
646 * This function processes the get WMM status command response from firmware.
647 *
648 * The response may contain multiple TLVs -
649 * - AC Queue status TLVs
650 * - Current WMM Parameter IE TLV
651 * - Admission Control action frame TLVs
652 *
653 * This function parses the TLVs and then calls further specific functions
654 * to process any changes in the queue priorities or state.
655 */
656int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
657 const struct host_cmd_ds_command *resp)
658{
659 u8 *curr = (u8 *) &resp->params.get_wmm_status;
660 uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
661 int valid = true;
662
663 struct mwifiex_ie_types_data *tlv_hdr;
664 struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
665 struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
666 struct mwifiex_wmm_ac_status *ac_status;
667
668 dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
669 resp_len);
670
671 while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
672 tlv_hdr = (struct mwifiex_ie_types_data *) curr;
673 tlv_len = le16_to_cpu(tlv_hdr->header.len);
674
675 switch (le16_to_cpu(tlv_hdr->header.type)) {
676 case TLV_TYPE_WMMQSTATUS:
677 tlv_wmm_qstatus =
678 (struct mwifiex_ie_types_wmm_queue_status *)
679 tlv_hdr;
680 dev_dbg(priv->adapter->dev,
681 "info: CMD_RESP: WMM_GET_STATUS:"
682 " QSTATUS TLV: %d, %d, %d\n",
683 tlv_wmm_qstatus->queue_index,
684 tlv_wmm_qstatus->flow_required,
685 tlv_wmm_qstatus->disabled);
686
687 ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
688 queue_index];
689 ac_status->disabled = tlv_wmm_qstatus->disabled;
690 ac_status->flow_required =
691 tlv_wmm_qstatus->flow_required;
692 ac_status->flow_created = tlv_wmm_qstatus->flow_created;
693 break;
694
695 case WLAN_EID_VENDOR_SPECIFIC:
696 /*
697 * Point the regular IEEE IE 2 bytes into the Marvell IE
698 * and set up the IEEE IE type and length byte fields
699 */
700
701 wmm_param_ie =
702 (struct ieee_types_wmm_parameter *) (curr +
703 2);
704 wmm_param_ie->vend_hdr.len = (u8) tlv_len;
705 wmm_param_ie->vend_hdr.element_id =
706 WLAN_EID_VENDOR_SPECIFIC;
707
708 dev_dbg(priv->adapter->dev,
709 "info: CMD_RESP: WMM_GET_STATUS:"
710 " WMM Parameter Set Count: %d\n",
711 wmm_param_ie->qos_info_bitmap &
712 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);
713
714 memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
715 wmm_ie, wmm_param_ie,
716 wmm_param_ie->vend_hdr.len + 2);
717
718 break;
719
720 default:
721 valid = false;
722 break;
723 }
724
725 curr += (tlv_len + sizeof(tlv_hdr->header));
726 resp_len -= (tlv_len + sizeof(tlv_hdr->header));
727 }
728
729 mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
730 mwifiex_wmm_setup_ac_downgrade(priv);
731
732 return 0;
733}
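The parsing loop above follows the usual type-length-value walk: read a header, consume header-plus-length bytes, repeat until the response is exhausted or an unknown type stops the scan. A standalone sketch of that walk (generic TLV layout, little-endian host assumed to match the over-the-wire format; the driver's real TLV types are omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr_model { uint16_t type; uint16_t len; };

static void walk_tlvs(const uint8_t *buf, size_t buf_len)
{
	struct tlv_hdr_model hdr;

	while (buf_len >= sizeof(hdr)) {
		memcpy(&hdr, buf, sizeof(hdr));
		if (buf_len < sizeof(hdr) + hdr.len)
			break;	/* truncated TLV, stop */
		printf("TLV type %u, len %u\n", hdr.type, hdr.len);
		buf += sizeof(hdr) + hdr.len;
		buf_len -= sizeof(hdr) + hdr.len;
	}
}

int main(void)
{
	const uint8_t buf[] = {
		0x01, 0x00, 0x02, 0x00, 0xaa, 0xbb,	/* type 1, len 2 */
		0xdd, 0x00, 0x01, 0x00, 0xcc,		/* type 221, len 1 */
	};

	walk_tlvs(buf, sizeof(buf));
	return 0;
}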
734
735/*
736 * Callback handler from the command module to allow insertion of a WMM TLV.
737 *
738 * If the BSS we are associating to supports WMM, this function adds the
739 * required WMM Information IE to the association request command buffer in
740 * the form of a Marvell extended IEEE IE.
741 */
742u32
743mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
744 u8 **assoc_buf,
745 struct ieee_types_wmm_parameter *wmm_ie,
746 struct ieee80211_ht_cap *ht_cap)
747{
748 struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
749 u32 ret_len = 0;
750
751 /* Null checks */
752 if (!assoc_buf)
753 return 0;
754 if (!(*assoc_buf))
755 return 0;
756
757 if (!wmm_ie)
758 return 0;
759
760 dev_dbg(priv->adapter->dev, "info: WMM: process assoc req:"
761 "bss->wmmIe=0x%x\n",
762 wmm_ie->vend_hdr.element_id);
763
764 if ((priv->wmm_required
765 || (ht_cap && (priv->adapter->config_bands & BAND_GN
766 || priv->adapter->config_bands & BAND_AN))
767 )
768 && wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
769 wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
770 wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
771 wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
772 memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
773 le16_to_cpu(wmm_tlv->header.len));
774 if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
775 memcpy((u8 *) (wmm_tlv->wmm_ie
776 + le16_to_cpu(wmm_tlv->header.len)
777 - sizeof(priv->wmm_qosinfo)),
778 &priv->wmm_qosinfo,
779 sizeof(priv->wmm_qosinfo));
780
781 ret_len = sizeof(wmm_tlv->header)
782 + le16_to_cpu(wmm_tlv->header.len);
783
784 *assoc_buf += ret_len;
785 }
786
787 return ret_len;
788}
789
790/*
791 * This function computes the time delay in the driver queues for a
792 * given packet.
793 *
794 * When the packet is received at the OS/Driver interface, the current
795 * time is set in the packet structure. The difference between the present
796 * time and that received time is computed in this function and limited
797 * based on pre-compiled limits in the driver.
798 */
799u8
800mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
801 const struct sk_buff *skb)
802{
803 u8 ret_val = 0;
804 struct timeval out_tstamp, in_tstamp;
805 u32 queue_delay;
806
807 do_gettimeofday(&out_tstamp);
808 in_tstamp = ktime_to_timeval(skb->tstamp);
809
810 queue_delay = (out_tstamp.tv_sec - in_tstamp.tv_sec) * 1000;
811 queue_delay += (out_tstamp.tv_usec - in_tstamp.tv_usec) / 1000;
812
813 /*
814 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
815 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
816 *
817 * Pass max value if queue_delay is beyond the uint8 range
818 */
819 ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
820
821 dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
822 " %d ms sent to FW\n", queue_delay, ret_val);
823
824 return ret_val;
825}
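The encoding is worth a worked example: the delay in milliseconds is clamped to the driver maximum and shifted right by one, so the u8 handed to firmware counts 2 ms units. A standalone sketch assuming a 510 ms cap (the driver clamps against priv->wmm.drv_pkt_delay_max):

#include <stdio.h>

static unsigned char encode_delay(unsigned int delay_ms, unsigned int max_ms)
{
	/* Clamp first, then halve: one u8 unit equals 2 ms. */
	if (delay_ms > max_ms)
		delay_ms = max_ms;
	return (unsigned char)(delay_ms >> 1);
}

int main(void)
{
	printf("7 ms   -> %u (firmware sees 6 ms)\n", encode_delay(7, 510));
	printf("900 ms -> %u (clamped to 510 ms)\n", encode_delay(900, 510));
	return 0;
}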
826
827/*
828 * This function retrieves the highest priority RA list table pointer.
829 */
830static struct mwifiex_ra_list_tbl *
831mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
832 struct mwifiex_private **priv, int *tid)
833{
834 struct mwifiex_private *priv_tmp;
835 struct mwifiex_ra_list_tbl *ptr, *head;
836 struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
837 struct mwifiex_tid_tbl *tid_ptr;
838 int is_list_empty;
839 unsigned long flags;
840 int i, j;
841
842 for (j = adapter->priv_num - 1; j >= 0; --j) {
843 spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
844 flags);
845 is_list_empty = list_empty(&adapter->bss_prio_tbl[j]
846 .bss_prio_head);
847 spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
848 flags);
849 if (is_list_empty)
850 continue;
851
852 if (adapter->bss_prio_tbl[j].bss_prio_cur ==
853 (struct mwifiex_bss_prio_node *)
854 &adapter->bss_prio_tbl[j].bss_prio_head) {
855 bssprio_node =
856 list_first_entry(&adapter->bss_prio_tbl[j]
857 .bss_prio_head,
858 struct mwifiex_bss_prio_node,
859 list);
860 bssprio_head = bssprio_node;
861 } else {
862 bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
863 bssprio_head = bssprio_node;
864 }
865
866 do {
867 priv_tmp = bssprio_node->priv;
868
869 for (i = HIGH_PRIO_TID; i >= LOW_PRIO_TID; --i) {
870
871 tid_ptr = &(priv_tmp)->wmm.
872 tid_tbl_ptr[tos_to_tid[i]];
873
874 spin_lock_irqsave(&tid_ptr->tid_tbl_lock,
875 flags);
876 /* check this TID's RA list, not bss_prio_head */
877 is_list_empty =
878 list_empty(&tid_ptr->ra_list);
879 spin_unlock_irqrestore(&tid_ptr->tid_tbl_lock,
880 flags);
881 if (is_list_empty)
882 continue;
883
884 /*
885 * Always start from the RA we transmitted to
886 * last time; this way the RAs are picked in
887 * round-robin fashion.
888 */
889 ptr = list_first_entry(
890 &tid_ptr->ra_list_curr->list,
891 struct mwifiex_ra_list_tbl,
892 list);
893
894 head = ptr;
895 if (ptr == (struct mwifiex_ra_list_tbl *)
896 &tid_ptr->ra_list) {
897 /* Get next ra */
898 ptr = list_first_entry(&ptr->list,
899 struct mwifiex_ra_list_tbl, list);
900 head = ptr;
901 }
902
903 do {
904 is_list_empty =
905 skb_queue_empty(&ptr->skb_head);
906 if (!is_list_empty) {
907 *priv = priv_tmp;
908 *tid = tos_to_tid[i];
909 return ptr;
910 }
911 /* Get next ra */
912 ptr = list_first_entry(&ptr->list,
913 struct mwifiex_ra_list_tbl,
914 list);
915 if (ptr ==
916 (struct mwifiex_ra_list_tbl *)
917 &tid_ptr->ra_list)
918 ptr = list_first_entry(
919 &ptr->list,
920 struct mwifiex_ra_list_tbl,
921 list);
922 } while (ptr != head);
923 }
924
925 /* Get next bss priority node */
926 bssprio_node = list_first_entry(&bssprio_node->list,
927 struct mwifiex_bss_prio_node,
928 list);
929
930 if (bssprio_node ==
931 (struct mwifiex_bss_prio_node *)
932 &adapter->bss_prio_tbl[j].bss_prio_head)
933 /* Get next bss priority node */
934 bssprio_node = list_first_entry(
935 &bssprio_node->list,
936 struct mwifiex_bss_prio_node,
937 list);
938 } while (bssprio_node != bssprio_head);
939 }
940 return NULL;
941}
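Stripped of the kernel list_head plumbing, the RA selection is a round-robin scan: start after the entry served last time, wrap once around, and return the first backlogged entry. A standalone model (an array stands in for the circular list; backlog values invented):

#include <stdio.h>

#define NUM_RA 4

static int pick_next_ra(const int backlog[NUM_RA], int last_served)
{
	int i;

	for (i = 1; i <= NUM_RA; i++) {
		int idx = (last_served + i) % NUM_RA;

		if (backlog[idx] > 0)
			return idx;
	}
	return -1;	/* every RA queue is empty */
}

int main(void)
{
	const int backlog[NUM_RA] = { 0, 3, 0, 5 };

	printf("after RA 1, serve RA %d\n", pick_next_ra(backlog, 1));
	return 0;
}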
942
943/*
944 * This function gets the number of packets in the Tx queue of a
945 * particular RA list.
946 */
947static int
948mwifiex_num_pkts_in_txq(struct mwifiex_private *priv,
949 struct mwifiex_ra_list_tbl *ptr, int max_buf_size)
950{
951 int count = 0, total_size = 0;
952 struct sk_buff *skb, *tmp;
953
954 skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
955 total_size += skb->len;
956 if (total_size < max_buf_size)
957 ++count;
958 else
959 break;
960 }
961
962 return count;
963}
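The count feeds the AMSDU decision below: walk the queued packet lengths and stop once the running total would no longer fit the aggregation buffer. Worked standalone with made-up sizes:

#include <stdio.h>

int main(void)
{
	const int pkt_len[] = { 1500, 1500, 1500, 1500 };
	const int max_buf_size = 4096;
	int i, total = 0, count = 0;

	for (i = 0; i < 4; i++) {
		total += pkt_len[i];
		if (total < max_buf_size)
			count++;
		else
			break;
	}
	printf("%d packets fit an AMSDU of %d bytes\n", count, max_buf_size);
	return 0;
}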
964
965/*
966 * This function sends a single packet to firmware for transmission.
967 */
968static void
969mwifiex_send_single_packet(struct mwifiex_private *priv,
970 struct mwifiex_ra_list_tbl *ptr, int ptr_index,
971 unsigned long ra_list_flags)
972 __releases(&priv->wmm.ra_list_spinlock)
973{
974 struct sk_buff *skb, *skb_next;
975 struct mwifiex_tx_param tx_param;
976 struct mwifiex_adapter *adapter = priv->adapter;
977 int status = 0;
978 struct mwifiex_txinfo *tx_info;
979
980 if (skb_queue_empty(&ptr->skb_head)) {
981 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
982 ra_list_flags);
983 dev_dbg(adapter->dev, "data: nothing to send\n");
984 return;
985 }
986
987 skb = skb_dequeue(&ptr->skb_head);
988
989 tx_info = MWIFIEX_SKB_TXCB(skb);
990 dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
991
992 ptr->total_pkts_size -= skb->len;
993
994 if (!skb_queue_empty(&ptr->skb_head))
995 skb_next = skb_peek(&ptr->skb_head);
996 else
997 skb_next = NULL;
998
999 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1000
1001 tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
1002 sizeof(struct txpd) : 0);
1003
1004 status = mwifiex_process_tx(priv, skb, &tx_param);
1005
1006 if (status == -EBUSY) {
1007 /* Queue the packet back at the head */
1008 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1009
1010 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1011 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1012 ra_list_flags);
1013 mwifiex_write_data_complete(adapter, skb, -1);
1014 return;
1015 }
1016
1017 skb_queue_tail(&ptr->skb_head, skb);
1018
1019 ptr->total_pkts_size += skb->len;
1020 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1021 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1022 ra_list_flags);
1023 } else {
1024 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1025 if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1026 priv->wmm.packets_out[ptr_index]++;
1027 priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
1028 }
1029 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
1030 list_first_entry(
1031 &adapter->bss_prio_tbl[priv->bss_priority]
1032 .bss_prio_cur->list,
1033 struct mwifiex_bss_prio_node,
1034 list);
1035 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1036 ra_list_flags);
1037 }
1038}
1039
1040/*
1041 * This function checks if the first packet in the given RA list
1042 * is already processed or not.
1043 */
1044static int
1045mwifiex_is_ptr_processed(struct mwifiex_private *priv,
1046 struct mwifiex_ra_list_tbl *ptr)
1047{
1048 struct sk_buff *skb;
1049 struct mwifiex_txinfo *tx_info;
1050
1051 if (skb_queue_empty(&ptr->skb_head))
1052 return false;
1053
1054 skb = skb_peek(&ptr->skb_head);
1055
1056 tx_info = MWIFIEX_SKB_TXCB(skb);
1057 if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
1058 return true;
1059
1060 return false;
1061}
1062
1063/*
1064 * This function sends a single processed packet to firmware for
1065 * transmission.
1066 */
1067static void
1068mwifiex_send_processed_packet(struct mwifiex_private *priv,
1069 struct mwifiex_ra_list_tbl *ptr, int ptr_index,
1070 unsigned long ra_list_flags)
1071 __releases(&priv->wmm.ra_list_spinlock)
1072{
1073 struct mwifiex_tx_param tx_param;
1074 struct mwifiex_adapter *adapter = priv->adapter;
1075 int ret = -1;
1076 struct sk_buff *skb, *skb_next;
1077 struct mwifiex_txinfo *tx_info;
1078
1079 if (skb_queue_empty(&ptr->skb_head)) {
1080 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1081 ra_list_flags);
1082 return;
1083 }
1084
1085 skb = skb_dequeue(&ptr->skb_head);
1086
1087 if (!skb_queue_empty(&ptr->skb_head))
1088 skb_next = skb_peek(&ptr->skb_head);
1089 else
1090 skb_next = NULL;
1091
1092 tx_info = MWIFIEX_SKB_TXCB(skb);
1093
1094 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1095 tx_param.next_pkt_len =
1096 ((skb_next) ? skb_next->len +
1097 sizeof(struct txpd) : 0);
1098 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
1099 skb->data, skb->len, &tx_param);
1100 switch (ret) {
1101 case -EBUSY:
1102 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
1103 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1104
1105 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1106 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1107 ra_list_flags);
1108 mwifiex_write_data_complete(adapter, skb, -1);
1109 return;
1110 }
1111
1112 skb_queue_tail(&ptr->skb_head, skb);
1113
1114 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1115 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1116 ra_list_flags);
1117 break;
1118 case -1:
1119 adapter->data_sent = false;
1120 dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
1121 adapter->dbg.num_tx_host_to_card_failure++;
1122 mwifiex_write_data_complete(adapter, skb, ret);
1123 break;
1124 case -EINPROGRESS:
1125 adapter->data_sent = false;
1126 default:
1127 break;
1128 }
1129 if (ret != -EBUSY) {
1130 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1131 if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1132 priv->wmm.packets_out[ptr_index]++;
1133 priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
1134 }
1135 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
1136 list_first_entry(
1137 &adapter->bss_prio_tbl[priv->bss_priority]
1138 .bss_prio_cur->list,
1139 struct mwifiex_bss_prio_node,
1140 list);
1141 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1142 ra_list_flags);
1143 }
1144}
1145
1146/*
1147 * This function dequeues a packet from the highest priority list
1148 * and transmits it.
1149 */
1150static int
1151mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1152{
1153 struct mwifiex_ra_list_tbl *ptr;
1154 struct mwifiex_private *priv = NULL;
1155 int ptr_index = 0;
1156 u8 ra[ETH_ALEN];
1157 int tid_del = 0, tid = 0;
1158 unsigned long flags;
1159
1160 ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
1161 if (!ptr)
1162 return -1;
1163
1164 tid = mwifiex_get_tid(priv->adapter, ptr);
1165
1166 dev_dbg(adapter->dev, "data: tid=%d\n", tid);
1167
1168 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
1169 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1170 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
1171 return -1;
1172 }
1173
1174 if (mwifiex_is_ptr_processed(priv, ptr)) {
1175 mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
1176 /* ra_list_spinlock has been freed in
1177 mwifiex_send_processed_packet() */
1178 return 0;
1179 }
1180
1181 if (!ptr->is_11n_enabled || mwifiex_is_ba_stream_setup(priv, ptr, tid)
1182 || ((priv->sec_info.wpa_enabled
1183 || priv->sec_info.wpa2_enabled) && !priv->wpa_is_gtk_set)
1184 ) {
1185 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
1186 /* ra_list_spinlock has been freed in
1187 mwifiex_send_single_packet() */
1188 } else {
1189 if (mwifiex_is_ampdu_allowed(priv, ptr, tid)) {
1190 if (mwifiex_is_ba_stream_avail(priv)) {
1191 mwifiex_11n_create_tx_ba_stream_tbl(priv,
1192 ptr->ra, tid,
1193 BA_STREAM_SETUP_INPROGRESS);
1194 mwifiex_send_addba(priv, tid, ptr->ra);
1195 } else if (mwifiex_find_stream_to_delete
1196 (priv, ptr, tid, &tid_del, ra)) {
1197 mwifiex_11n_create_tx_ba_stream_tbl(priv,
1198 ptr->ra, tid,
1199 BA_STREAM_SETUP_INPROGRESS);
1200 mwifiex_send_delba(priv, tid_del, ra, 1);
1201 }
1202 }
1203/* Minimum number of AMSDU */
1204#define MIN_NUM_AMSDU 2
1205 if (mwifiex_is_amsdu_allowed(priv, ptr, tid) &&
1206 (mwifiex_num_pkts_in_txq(priv, ptr, adapter->tx_buf_size) >=
1207 MIN_NUM_AMSDU))
1208 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
1209 ptr_index, flags);
1210 /* ra_list_spinlock has been freed in
1211 mwifiex_11n_aggregate_pkt() */
1212 else
1213 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
1214 /* ra_list_spinlock has been freed in
1215 mwifiex_send_single_packet() */
1216 }
1217 return 0;
1218}
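The branch structure above reduces to a small decision function: send a single packet unless 11n is active with no BA stream already set up and enough packets are queued to justify an AMSDU. A simplified standalone sketch (conditions collapsed to booleans; the real checks also cover WPA GTK state and BA stream availability):

#include <stdbool.h>
#include <stdio.h>

#define MIN_NUM_AMSDU 2

static const char *choose_tx(bool is_11n, bool ba_stream_setup,
			     bool amsdu_allowed, int pkts_in_queue)
{
	if (!is_11n || ba_stream_setup)
		return "send single packet";
	if (amsdu_allowed && pkts_in_queue >= MIN_NUM_AMSDU)
		return "aggregate into AMSDU";
	return "send single packet";
}

int main(void)
{
	printf("%s\n", choose_tx(true, false, true, 3));	/* aggregate */
	printf("%s\n", choose_tx(false, false, true, 3));	/* single */
	return 0;
}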
1219
1220/*
1221 * This function transmits the highest priority packet waiting in the
1222 * WMM queues.
1223 */
1224void
1225mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
1226{
1227 do {
1228 /* Check if busy */
1229 if (adapter->data_sent || adapter->tx_lock_flag)
1230 break;
1231
1232 if (mwifiex_dequeue_tx_packet(adapter))
1233 break;
1234 } while (true);
1235
1236 return;
1237}
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
new file mode 100644
index 00000000000..241f1b0b77f
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -0,0 +1,112 @@
1/*
2 * Marvell Wireless LAN device driver: WMM
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_WMM_H_
21#define _MWIFIEX_WMM_H_
22
23enum ieee_types_wmm_aciaifsn_bitmasks {
24 MWIFIEX_AIFSN = (BIT(0) | BIT(1) | BIT(2) | BIT(3)),
25 MWIFIEX_ACM = BIT(4),
26 MWIFIEX_ACI = (BIT(5) | BIT(6)),
27};
28
29enum ieee_types_wmm_ecw_bitmasks {
30 MWIFIEX_ECW_MIN = (BIT(0) | BIT(1) | BIT(2) | BIT(3)),
31 MWIFIEX_ECW_MAX = (BIT(4) | BIT(5) | BIT(6) | BIT(7)),
32};
33
34/*
35 * This function retrieves the TID of the given RA list.
36 */
37static inline int
38mwifiex_get_tid(struct mwifiex_adapter *adapter,
39 struct mwifiex_ra_list_tbl *ptr)
40{
41 struct sk_buff *skb;
42
43 if (skb_queue_empty(&ptr->skb_head))
44 return 0;
45
46 skb = skb_peek(&ptr->skb_head);
47
48 return skb->priority;
49}
50
51/*
52 * This function gets the length of a list.
53 */
54static inline int
55mwifiex_wmm_list_len(struct mwifiex_adapter *adapter, struct list_head *head)
56{
57 struct list_head *pos;
58 int count = 0;
59
60 list_for_each(pos, head)
61 ++count;
62
63 return count;
64}
65
66/*
67 * This function checks if an RA list is empty.
68 */
69static inline u8
70mwifiex_wmm_is_ra_list_empty(struct mwifiex_adapter *adapter,
71 struct list_head *ra_list_hhead)
72{
73 struct mwifiex_ra_list_tbl *ra_list;
74 int is_list_empty;
75
76 list_for_each_entry(ra_list, ra_list_hhead, list) {
77 is_list_empty = skb_queue_empty(&ra_list->skb_head);
78 if (!is_list_empty)
79 return false;
80 }
81
82 return true;
83}
84
85void mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
86 struct sk_buff *skb);
87void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
88
89int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
90void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
91int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
92 struct mwifiex_ra_list_tbl *ra_list, int tid);
93
94u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
95 const struct sk_buff *skb);
96void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
97
98extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
99 u8 **assoc_buf,
100 struct ieee_types_wmm_parameter
101 *wmmie,
102 struct ieee80211_ht_cap
103 *htcap);
104
105void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
106 struct ieee_types_wmm_parameter
107 *wmm_ie);
108void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
109extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
110 const struct host_cmd_ds_command *resp);
111
112#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 36952274950..ae56d2f32b2 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -63,6 +63,7 @@ MODULE_PARM_DESC(ap_mode_default,
 #define MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL	0x00000c38
 #define MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK	0x00000c3c
 #define MWL8K_A2H_INT_DUMMY			(1 << 20)
+#define MWL8K_A2H_INT_BA_WATCHDOG		(1 << 14)
 #define MWL8K_A2H_INT_CHNL_SWITCHED		(1 << 11)
 #define MWL8K_A2H_INT_QUEUE_EMPTY		(1 << 10)
 #define MWL8K_A2H_INT_RADAR_DETECT		(1 << 7)
@@ -82,10 +83,14 @@ MODULE_PARM_DESC(ap_mode_default,
	 MWL8K_A2H_INT_MAC_EVENT | \
	 MWL8K_A2H_INT_OPC_DONE | \
	 MWL8K_A2H_INT_RX_READY | \
-	 MWL8K_A2H_INT_TX_DONE)
+	 MWL8K_A2H_INT_TX_DONE | \
+	 MWL8K_A2H_INT_BA_WATCHDOG)
 
 #define MWL8K_RX_QUEUES		1
-#define MWL8K_TX_QUEUES		4
+#define MWL8K_TX_WMM_QUEUES	4
+#define MWL8K_MAX_AMPDU_QUEUES	8
+#define MWL8K_MAX_TX_QUEUES	(MWL8K_TX_WMM_QUEUES + MWL8K_MAX_AMPDU_QUEUES)
+#define mwl8k_tx_queues(priv)	(MWL8K_TX_WMM_QUEUES + (priv)->num_ampdu_queues)
 
 struct rxd_ops {
 	int rxd_size;
@@ -134,6 +139,21 @@ struct mwl8k_tx_queue {
 	struct sk_buff **skb;
 };
 
+enum {
+	AMPDU_NO_STREAM,
+	AMPDU_STREAM_NEW,
+	AMPDU_STREAM_IN_PROGRESS,
+	AMPDU_STREAM_ACTIVE,
+};
+
+struct mwl8k_ampdu_stream {
+	struct ieee80211_sta *sta;
+	u8 tid;
+	u8 state;
+	u8 idx;
+	u8 txq_idx; /* index of this stream in priv->txq */
+};
+
 struct mwl8k_priv {
 	struct ieee80211_hw *hw;
 	struct pci_dev *pdev;
@@ -159,6 +179,12 @@ struct mwl8k_priv {
 	u32 ap_macids_supported;
 	u32 sta_macids_supported;
 
+	/* Ampdu stream information */
+	u8 num_ampdu_queues;
+	spinlock_t stream_lock;
+	struct mwl8k_ampdu_stream ampdu[MWL8K_MAX_AMPDU_QUEUES];
+	struct work_struct watchdog_ba_handle;
+
 	/* firmware access */
 	struct mutex fw_mutex;
 	struct task_struct *fw_mutex_owner;
@@ -190,7 +216,8 @@ struct mwl8k_priv {
 	int pending_tx_pkts;
 
 	struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
-	struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
+	struct mwl8k_tx_queue txq[MWL8K_MAX_TX_QUEUES];
+	u32 txq_offset[MWL8K_MAX_TX_QUEUES];
 
 	bool radio_on;
 	bool radio_short_preamble;
@@ -223,7 +250,7 @@ struct mwl8k_priv {
 	 * preserve the queue configurations so they can be restored if/when
 	 * the firmware image is swapped.
 	 */
-	struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_QUEUES];
+	struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES];
 
 	/* async firmware loading state */
 	unsigned fw_state;
@@ -264,6 +291,7 @@ struct mwl8k_vif {
 struct mwl8k_sta {
 	/* Index into station database. Returned by UPDATE_STADB. */
 	u8 peer_id;
+	u8 is_ampdu_allowed;
 };
 #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
 
@@ -351,10 +379,12 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
 #define MWL8K_CMD_ENABLE_SNIFFER	0x0150
 #define MWL8K_CMD_SET_MAC_ADDR		0x0202		/* per-vif */
 #define MWL8K_CMD_SET_RATEADAPT_MODE	0x0203
+#define MWL8K_CMD_GET_WATCHDOG_BITMAP	0x0205
 #define MWL8K_CMD_BSS_START		0x1100		/* per-vif */
 #define MWL8K_CMD_SET_NEW_STN		0x1111		/* per-vif */
 #define MWL8K_CMD_UPDATE_ENCRYPTION	0x1122		/* per-vif */
 #define MWL8K_CMD_UPDATE_STADB		0x1123
+#define MWL8K_CMD_BASTREAM		0x1125
 
 static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
 {
@@ -394,6 +424,8 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
 		MWL8K_CMDNAME(SET_NEW_STN);
 		MWL8K_CMDNAME(UPDATE_ENCRYPTION);
 		MWL8K_CMDNAME(UPDATE_STADB);
+		MWL8K_CMDNAME(BASTREAM);
+		MWL8K_CMDNAME(GET_WATCHDOG_BITMAP);
 	default:
 		snprintf(buf, bufsize, "0x%x", cmd);
 	}
@@ -1126,6 +1158,9 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
 	struct mwl8k_rx_queue *rxq = priv->rxq + index;
 	int i;
 
+	if (rxq->rxd == NULL)
+		return;
+
 	for (i = 0; i < MWL8K_RX_DESCS; i++) {
 		if (rxq->buf[i].skb != NULL) {
 			pci_unmap_single(priv->pdev,
@@ -1318,7 +1353,7 @@ struct mwl8k_tx_desc {
 	__le16 pkt_len;
 	__u8 dest_MAC_addr[ETH_ALEN];
 	__le32 next_txd_phys_addr;
-	__le32 reserved;
+	__le32 timestamp;
 	__le16 rate_info;
 	__u8 peer_id;
 	__u8 tx_frag_cnt;
@@ -1382,7 +1417,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
 	struct mwl8k_priv *priv = hw->priv;
 	int i;
 
-	for (i = 0; i < MWL8K_TX_QUEUES; i++) {
+	for (i = 0; i < mwl8k_tx_queues(priv); i++) {
 		struct mwl8k_tx_queue *txq = priv->txq + i;
 		int fw_owned = 0;
 		int drv_owned = 0;
@@ -1483,6 +1518,54 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
 		 MWL8K_TXD_STATUS_OK_RETRY | \
 		 MWL8K_TXD_STATUS_OK_MORE_RETRY))
 
+static int mwl8k_tid_queue_mapping(u8 tid)
+{
+	BUG_ON(tid > 7);
+
+	switch (tid) {
+	case 0:
+	case 3:
+		return IEEE80211_AC_BE;
+	case 1:
+	case 2:
+		return IEEE80211_AC_BK;
+	case 4:
+	case 5:
+		return IEEE80211_AC_VI;
+	case 6:
+	case 7:
+		return IEEE80211_AC_VO;
+	default:
+		return -1;
+	}
+}
+
+/* The firmware will fill in the rate information for each packet that
+ * gets queued in the hardware in this structure.
+ */
+struct rateinfo {
+	__le16 format:1;
+	__le16 short_gi:1;
+	__le16 band_width:1;
+	__le16 rate_id_mcs:6;
+	__le16 adv_coding:2;
+	__le16 antenna:2;
+	__le16 act_sub_chan:2;
+	__le16 preamble_type:1;
+	__le16 power_id:4;
+	__le16 antenna2:1;
+	__le16 reserved:1;
+	__le16 tx_bf_frame:1;
+	__le16 green_field:1;
+} __packed;
+
 static int
 mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
 {
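A note on struct rateinfo above: it overlays the descriptor's rate_info word with C bitfields, whose bit ordering is implementation-defined. An equivalent mask-based decode of the low word, assuming the declared little-endian bit order (a sketch, not part of the patch):

	u16 ri = le16_to_cpu(tx_desc->rate_info);
	u8 is_ht = ri & 0x0001;		/* format: 0 = legacy, 1 = HT */
	u8 sgi = (ri >> 1) & 0x0001;	/* short_gi */
	u8 ht40 = (ri >> 2) & 0x0001;	/* band_width */
	u8 mcs = (ri >> 3) & 0x003f;	/* rate_id_mcs */

Only format and rate_id_mcs are consulted by mwl8k_txq_reclaim() below, which clears is_ampdu_allowed for legacy peers and for MCS 0.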
@@ -1499,6 +1582,11 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
 	u32 status;
+	struct ieee80211_sta *sta;
+	struct mwl8k_sta *sta_info = NULL;
+	u16 rate_info;
+	struct rateinfo *rate;
+	struct ieee80211_hdr *wh;
 
 	tx = txq->head;
 	tx_desc = txq->txd + tx;
@@ -1527,11 +1615,34 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
 
 	mwl8k_remove_dma_header(skb, tx_desc->qos_control);
 
+	wh = (struct ieee80211_hdr *) skb->data;
+
 	/* Mark descriptor as unused */
 	tx_desc->pkt_phys_addr = 0;
 	tx_desc->pkt_len = 0;
 
 	info = IEEE80211_SKB_CB(skb);
+	if (ieee80211_is_data(wh->frame_control)) {
+		sta = info->control.sta;
+		if (sta) {
+			sta_info = MWL8K_STA(sta);
+			BUG_ON(sta_info == NULL);
+			rate_info = le16_to_cpu(tx_desc->rate_info);
+			rate = (struct rateinfo *)&rate_info;
+			/* If the rate is below 6.5 Mbps for an HT station,
+			 * do not form an AMPDU. If the station is a legacy
+			 * station (format = 0), do not form an AMPDU.
+			 */
+			if (rate->rate_id_mcs < 1 ||
+			    rate->format == 0) {
+				sta_info->is_ampdu_allowed = false;
+			} else {
+				sta_info->is_ampdu_allowed = true;
+			}
+		}
+	}
+
 	ieee80211_tx_info_clear_status(info);
 
 	/* Rate control is happening in the firmware.
@@ -1548,7 +1659,8 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
 		processed++;
 	}
 
-	if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
+	if (index < MWL8K_TX_WMM_QUEUES && processed && priv->radio_on &&
+	    !mutex_is_locked(&priv->fw_mutex))
 		ieee80211_wake_queue(hw, index);
 
 	return processed;
@@ -1560,6 +1672,9 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
 	struct mwl8k_priv *priv = hw->priv;
 	struct mwl8k_tx_queue *txq = priv->txq + index;
 
+	if (txq->txd == NULL)
+		return;
+
 	mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
 
 	kfree(txq->skb);
@@ -1571,12 +1686,81 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
 	txq->txd = NULL;
 }
 
+/* caller must hold priv->stream_lock when calling the stream functions */
+struct mwl8k_ampdu_stream *
+mwl8k_add_stream(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 tid)
+{
+	struct mwl8k_ampdu_stream *stream;
+	struct mwl8k_priv *priv = hw->priv;
+	int i;
+
+	for (i = 0; i < priv->num_ampdu_queues; i++) {
+		stream = &priv->ampdu[i];
+		if (stream->state == AMPDU_NO_STREAM) {
+			stream->sta = sta;
+			stream->state = AMPDU_STREAM_NEW;
+			stream->tid = tid;
+			stream->idx = i;
+			stream->txq_idx = MWL8K_TX_WMM_QUEUES + i;
+			wiphy_debug(hw->wiphy, "Added a new stream for %pM %d\n",
+				    sta->addr, tid);
+			return stream;
+		}
+	}
+	return NULL;
+}
+
+static int
+mwl8k_start_stream(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
+{
+	int ret;
+
+	/* if the stream has already been started, don't start it again */
+	if (stream->state != AMPDU_STREAM_NEW)
+		return 0;
+	ret = ieee80211_start_tx_ba_session(stream->sta, stream->tid, 0);
+	if (ret)
+		wiphy_debug(hw->wiphy, "Failed to start stream for %pM %d: "
+			    "%d\n", stream->sta->addr, stream->tid, ret);
+	else
+		wiphy_debug(hw->wiphy, "Started stream for %pM %d\n",
+			    stream->sta->addr, stream->tid);
+	return ret;
+}
+
+static void
+mwl8k_remove_stream(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
+{
+	wiphy_debug(hw->wiphy, "Remove stream for %pM %d\n", stream->sta->addr,
+		    stream->tid);
+	memset(stream, 0, sizeof(*stream));
+}
+
+static struct mwl8k_ampdu_stream *
+mwl8k_lookup_stream(struct ieee80211_hw *hw, u8 *addr, u8 tid)
+{
+	struct mwl8k_priv *priv = hw->priv;
+	int i;
+
+	for (i = 0; i < priv->num_ampdu_queues; i++) {
+		struct mwl8k_ampdu_stream *stream;
+		stream = &priv->ampdu[i];
+		if (stream->state == AMPDU_NO_STREAM)
+			continue;
+		if (!memcmp(stream->sta->addr, addr, ETH_ALEN) &&
+		    stream->tid == tid)
+			return stream;
+	}
+	return NULL;
+}
+
 static void
 mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 {
 	struct mwl8k_priv *priv = hw->priv;
 	struct ieee80211_tx_info *tx_info;
 	struct mwl8k_vif *mwl8k_vif;
+	struct ieee80211_sta *sta;
 	struct ieee80211_hdr *wh;
 	struct mwl8k_tx_queue *txq;
 	struct mwl8k_tx_desc *tx;
@@ -1584,6 +1768,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 	u32 txstatus;
 	u8 txdatarate;
 	u16 qos;
+	int txpriority;
+	u8 tid = 0;
+	struct mwl8k_ampdu_stream *stream = NULL;
+	bool start_ba_session = false;
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
 
 	wh = (struct ieee80211_hdr *)skb->data;
 	if (ieee80211_is_data_qos(wh->frame_control))
@@ -1599,6 +1788,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 	wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
 	tx_info = IEEE80211_SKB_CB(skb);
+	sta = tx_info->control.sta;
 	mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
 
 	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -1626,12 +1816,90 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 		qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
 	}
 
+	/* Queue ADDBA request in the respective data queue. While setting up
+	 * the ampdu stream, mac80211 queues further packets for that
+	 * particular ra/tid pair. However, packets piled up in the hardware
+	 * for that ra/tid pair will still go out. ADDBA request and the
+	 * related data packets going out from different queues asynchronously
+	 * will cause a shift in the receiver window which might result in
+	 * ampdu packets getting dropped at the receiver after the stream has
+	 * been set up.
+	 */
+	if (unlikely(ieee80211_is_action(wh->frame_control) &&
+	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ &&
+	    priv->ap_fw)) {
+		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+		tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+		index = mwl8k_tid_queue_mapping(tid);
+	}
+
+	txpriority = index;
+
+	if (ieee80211_is_data_qos(wh->frame_control) &&
+	    skb->protocol != cpu_to_be16(ETH_P_PAE) &&
+	    sta->ht_cap.ht_supported && priv->ap_fw) {
+		tid = qos & 0xf;
+		spin_lock(&priv->stream_lock);
+		stream = mwl8k_lookup_stream(hw, sta->addr, tid);
+		if (stream != NULL) {
+			if (stream->state == AMPDU_STREAM_ACTIVE) {
+				txpriority = stream->txq_idx;
+				index = stream->txq_idx;
+			} else if (stream->state == AMPDU_STREAM_NEW) {
+				/* We get here if the driver sends us packets
+				 * after we've initiated a stream, but before
+				 * our ampdu_action routine has been called
+				 * with IEEE80211_AMPDU_TX_START to get the SSN
+				 * for the ADDBA request. So this packet can
+				 * go out with no risk of sequence number
+				 * mismatch. No special handling is required.
+				 */
+			} else {
+				/* Drop packets that would go out after the
+				 * ADDBA request was sent but before the ADDBA
+				 * response is received. If we don't do this,
+				 * the recipient would probably receive it
+				 * after the ADDBA request with SSN 0. This
+				 * will cause the recipient's BA receive window
+				 * to shift, which would cause the subsequent
+				 * packets in the BA stream to be discarded.
+				 * mac80211 queues our packets for us in this
+				 * case, so this is really just a safety check.
+				 */
+				wiphy_warn(hw->wiphy,
+					   "Cannot send packet while ADDBA "
+					   "dialog is underway.\n");
+				spin_unlock(&priv->stream_lock);
+				dev_kfree_skb(skb);
+				return;
+			}
+		} else {
+			/* Defer calling mwl8k_start_stream so that the current
+			 * skb can go out before the ADDBA request. This
+			 * prevents sequence number mismatch at the recipient
+			 * as described above.
+			 */
+			if (MWL8K_STA(sta)->is_ampdu_allowed) {
+				stream = mwl8k_add_stream(hw, sta, tid);
+				if (stream != NULL)
+					start_ba_session = true;
+			}
+		}
+		spin_unlock(&priv->stream_lock);
+	}
+
 	dma = pci_map_single(priv->pdev, skb->data,
 			     skb->len, PCI_DMA_TODEVICE);
 
 	if (pci_dma_mapping_error(priv->pdev, dma)) {
 		wiphy_debug(hw->wiphy,
 			    "failed to dma map skb, dropping TX frame.\n");
+		if (start_ba_session) {
+			spin_lock(&priv->stream_lock);
+			mwl8k_remove_stream(hw, stream);
+			spin_unlock(&priv->stream_lock);
+		}
 		dev_kfree_skb(skb);
 		return;
 	}
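The ADDBA special case above recovers the TID from the Block Ack parameter set rather than from a QoS header, since action frames carry no QoS control field. With IEEE80211_ADDBA_PARAM_TID_MASK being 0x003c, a capability field of 0x0016, for instance, decodes as:

	tid = (0x0016 & 0x003c) >> 2;	/* 0x14 >> 2 = 5, i.e. TID 5 -> AC_VI */

mwl8k_tid_queue_mapping() then steers the ADDBA request onto the same WMM queue that carries the data frames for that RA/TID pair, preserving ordering at the recipient.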
@@ -1640,12 +1908,22 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 
 	txq = priv->txq + index;
 
+	if (index >= MWL8K_TX_WMM_QUEUES && txq->len >= MWL8K_TX_DESCS) {
+		/* This is the case in which the tx packet is destined for an
+		 * AMPDU queue and that AMPDU queue is full. Because we don't
+		 * start and stop the AMPDU queues, we must drop these packets.
+		 */
+		dev_kfree_skb(skb);
+		spin_unlock_bh(&priv->tx_lock);
+		return;
+	}
+
 	BUG_ON(txq->skb[txq->tail] != NULL);
 	txq->skb[txq->tail] = skb;
 
 	tx = txq->txd + txq->tail;
 	tx->data_rate = txdatarate;
-	tx->tx_priority = index;
+	tx->tx_priority = txpriority;
 	tx->qos_control = cpu_to_le16(qos);
 	tx->pkt_phys_addr = cpu_to_le32(dma);
 	tx->pkt_len = cpu_to_le16(skb->len);
@@ -1664,12 +1942,20 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 	if (txq->tail == MWL8K_TX_DESCS)
 		txq->tail = 0;
 
-	if (txq->head == txq->tail)
+	if (txq->head == txq->tail && index < MWL8K_TX_WMM_QUEUES)
 		ieee80211_stop_queue(hw, index);
 
 	mwl8k_tx_start(priv);
 
 	spin_unlock_bh(&priv->tx_lock);
+
+	/* Initiate the ampdu session here */
+	if (start_ba_session) {
+		spin_lock(&priv->stream_lock);
+		if (mwl8k_start_stream(hw, stream))
+			mwl8k_remove_stream(hw, stream);
+		spin_unlock(&priv->stream_lock);
+	}
 }
 
 
@@ -1867,7 +2153,7 @@ struct mwl8k_cmd_get_hw_spec_sta {
 	__u8 mcs_bitmap[16];
 	__le32 rx_queue_ptr;
 	__le32 num_tx_queues;
-	__le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
+	__le32 tx_queue_ptrs[MWL8K_TX_WMM_QUEUES];
 	__le32 caps2;
 	__le32 num_tx_desc_per_queue;
 	__le32 total_rxd;
@@ -1973,8 +2259,8 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
 	memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
 	cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
 	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
-	cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
-	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+	cmd->num_tx_queues = cpu_to_le32(mwl8k_tx_queues(priv));
+	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
 	cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
 	cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
@@ -2016,13 +2302,16 @@ struct mwl8k_cmd_get_hw_spec_ap {
 	__le32 wcbbase2;
 	__le32 wcbbase3;
 	__le32 fw_api_version;
+	__le32 caps;
+	__le32 num_of_ampdu_queues;
+	__le32 wcbbase_ampdu[MWL8K_MAX_AMPDU_QUEUES];
 } __packed;
 
 static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
 {
 	struct mwl8k_priv *priv = hw->priv;
 	struct mwl8k_cmd_get_hw_spec_ap *cmd;
-	int rc;
+	int rc, i;
 	u32 api_version;
 
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2054,27 +2343,31 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
 	priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
 	priv->fw_rev = le32_to_cpu(cmd->fw_rev);
 	priv->hw_rev = cmd->hw_rev;
-	mwl8k_setup_2ghz_band(hw);
+	mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
 	priv->ap_macids_supported = 0x000000ff;
 	priv->sta_macids_supported = 0x00000000;
-
-	off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
-	iowrite32(priv->txq[0].txd_dma, priv->sram + off);
-
+	priv->num_ampdu_queues = le32_to_cpu(cmd->num_of_ampdu_queues);
+	if (priv->num_ampdu_queues > MWL8K_MAX_AMPDU_QUEUES) {
+		wiphy_warn(hw->wiphy, "fw reported %d ampdu queues"
+			   " but we only support %d.\n",
+			   priv->num_ampdu_queues,
+			   MWL8K_MAX_AMPDU_QUEUES);
+		priv->num_ampdu_queues = MWL8K_MAX_AMPDU_QUEUES;
+	}
 	off = le32_to_cpu(cmd->rxwrptr) & 0xffff;
 	iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
 
 	off = le32_to_cpu(cmd->rxrdptr) & 0xffff;
 	iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
 
-	off = le32_to_cpu(cmd->wcbbase1) & 0xffff;
-	iowrite32(priv->txq[1].txd_dma, priv->sram + off);
+	priv->txq_offset[0] = le32_to_cpu(cmd->wcbbase0) & 0xffff;
+	priv->txq_offset[1] = le32_to_cpu(cmd->wcbbase1) & 0xffff;
+	priv->txq_offset[2] = le32_to_cpu(cmd->wcbbase2) & 0xffff;
+	priv->txq_offset[3] = le32_to_cpu(cmd->wcbbase3) & 0xffff;
 
-	off = le32_to_cpu(cmd->wcbbase2) & 0xffff;
-	iowrite32(priv->txq[2].txd_dma, priv->sram + off);
-
-	off = le32_to_cpu(cmd->wcbbase3) & 0xffff;
-	iowrite32(priv->txq[3].txd_dma, priv->sram + off);
+	for (i = 0; i < priv->num_ampdu_queues; i++)
+		priv->txq_offset[i + MWL8K_TX_WMM_QUEUES] =
+			le32_to_cpu(cmd->wcbbase_ampdu[i]) & 0xffff;
 	}
 
 done:
@@ -2097,12 +2390,20 @@ struct mwl8k_cmd_set_hw_spec {
 	__le32 caps;
 	__le32 rx_queue_ptr;
 	__le32 num_tx_queues;
-	__le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
+	__le32 tx_queue_ptrs[MWL8K_MAX_TX_QUEUES];
 	__le32 flags;
 	__le32 num_tx_desc_per_queue;
 	__le32 total_rxd;
 } __packed;
 
+/* If enabled, MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY will cause
+ * packets to expire 500 ms after the timestamp in the tx descriptor. That is,
+ * packets that have been queued for more than 500 ms will be dropped in the
+ * hardware. This helps minimize head-of-line blocking, where a slow client
+ * can hog the bandwidth and affect traffic to a faster client.
+ */
+#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY	0x00000400
 #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT		0x00000080
 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP	0x00000020
 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON		0x00000010
@@ -2123,7 +2424,7 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
 
 	cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
 	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
-	cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
+	cmd->num_tx_queues = cpu_to_le32(mwl8k_tx_queues(priv));
 
 	/*
 	 * Mac80211 stack has Q0 as highest priority and Q3 as lowest in
@@ -2131,8 +2432,8 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
 	 * in that order. Map Q3 of mac80211 to Q0 of firmware so that the
 	 * priority is interpreted the right way in firmware.
 	 */
-	for (i = 0; i < MWL8K_TX_QUEUES; i++) {
-		int j = MWL8K_TX_QUEUES - 1 - i;
+	for (i = 0; i < mwl8k_tx_queues(priv); i++) {
+		int j = mwl8k_tx_queues(priv) - 1 - i;
 		cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
 	}
 
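With the four WMM queues, the reversal above works out as follows (mac80211 queue on the left, firmware queue whose descriptor ring pointer is written on the right):

	i = 0 (VO, highest)  ->  txq[3]
	i = 1 (VI)           ->  txq[2]
	i = 2 (BE)           ->  txq[1]
	i = 3 (BK, lowest)   ->  txq[0]

The same j = n - 1 - i mapping is applied in mwl8k_conf_tx() when EDCA parameters are pushed to the firmware.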
@@ -3122,6 +3423,65 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
 }
 
 /*
+ * CMD_GET_WATCHDOG_BITMAP.
+ */
+struct mwl8k_cmd_get_watchdog_bitmap {
+	struct mwl8k_cmd_pkt header;
+	u8 bitmap;
+} __packed;
+
+static int mwl8k_cmd_get_watchdog_bitmap(struct ieee80211_hw *hw, u8 *bitmap)
+{
+	struct mwl8k_cmd_get_watchdog_bitmap *cmd;
+	int rc;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_WATCHDOG_BITMAP);
+	cmd->header.length = cpu_to_le16(sizeof(*cmd));
+
+	rc = mwl8k_post_cmd(hw, &cmd->header);
+	if (!rc)
+		*bitmap = cmd->bitmap;
+
+	kfree(cmd);
+
+	return rc;
+}
+
+#define INVALID_BA	0xAA
+static void mwl8k_watchdog_ba_events(struct work_struct *work)
+{
+	int rc;
+	u8 bitmap = 0, stream_index;
+	struct mwl8k_ampdu_stream *streams;
+	struct mwl8k_priv *priv =
+		container_of(work, struct mwl8k_priv, watchdog_ba_handle);
+
+	rc = mwl8k_cmd_get_watchdog_bitmap(priv->hw, &bitmap);
+	if (rc)
+		return;
+
+	if (bitmap == INVALID_BA)
+		return;
+
+	/* the bitmap is the hw queue number.  Map it to the ampdu queue. */
+	stream_index = bitmap - MWL8K_TX_WMM_QUEUES;
+
+	BUG_ON(stream_index >= priv->num_ampdu_queues);
+
+	streams = &priv->ampdu[stream_index];
+
+	if (streams->state == AMPDU_STREAM_ACTIVE)
+		ieee80211_stop_tx_ba_session(streams->sta, streams->tid);
+}
+
+/*
  * CMD_BSS_START.
  */
 struct mwl8k_cmd_bss_start {
@@ -3150,6 +3510,152 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
 }
 
 /*
+ * CMD_BASTREAM.
+ */
+
+/*
+ * UPSTREAM is tx direction
+ */
+#define BASTREAM_FLAG_DIRECTION_UPSTREAM	0x00
+#define BASTREAM_FLAG_IMMEDIATE_TYPE		0x01
+
+enum {
+	MWL8K_BA_CREATE,
+	MWL8K_BA_UPDATE,
+	MWL8K_BA_DESTROY,
+	MWL8K_BA_FLUSH,
+	MWL8K_BA_CHECK,
+} ba_stream_action_type;
+
+struct mwl8k_create_ba_stream {
+	__le32 flags;
+	__le32 idle_thrs;
+	__le32 bar_thrs;
+	__le32 window_size;
+	u8 peer_mac_addr[6];
+	u8 dialog_token;
+	u8 tid;
+	u8 queue_id;
+	u8 param_info;
+	__le32 ba_context;
+	u8 reset_seq_no_flag;
+	__le16 curr_seq_no;
+	u8 sta_src_mac_addr[6];
+} __packed;
+
+struct mwl8k_destroy_ba_stream {
+	__le32 flags;
+	__le32 ba_context;
+} __packed;
+
+struct mwl8k_cmd_bastream {
+	struct mwl8k_cmd_pkt header;
+	__le32 action;
+	union {
+		struct mwl8k_create_ba_stream create_params;
+		struct mwl8k_destroy_ba_stream destroy_params;
+	};
+} __packed;
+
+static int
+mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
+{
+	struct mwl8k_cmd_bastream *cmd;
+	int rc;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
+	cmd->header.length = cpu_to_le16(sizeof(*cmd));
+
+	cmd->action = cpu_to_le32(MWL8K_BA_CHECK);
+
+	cmd->create_params.queue_id = stream->idx;
+	memcpy(&cmd->create_params.peer_mac_addr[0], stream->sta->addr,
+	       ETH_ALEN);
+	cmd->create_params.tid = stream->tid;
+
+	cmd->create_params.flags =
+		cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE) |
+		cpu_to_le32(BASTREAM_FLAG_DIRECTION_UPSTREAM);
+
+	rc = mwl8k_post_cmd(hw, &cmd->header);
+
+	kfree(cmd);
+
+	return rc;
+}
+
+static int
+mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
+		u8 buf_size)
+{
+	struct mwl8k_cmd_bastream *cmd;
+	int rc;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
+	cmd->header.length = cpu_to_le16(sizeof(*cmd));
+
+	cmd->action = cpu_to_le32(MWL8K_BA_CREATE);
+
+	cmd->create_params.bar_thrs = cpu_to_le32((u32)buf_size);
+	cmd->create_params.window_size = cpu_to_le32((u32)buf_size);
+	cmd->create_params.queue_id = stream->idx;
+
+	memcpy(cmd->create_params.peer_mac_addr, stream->sta->addr, ETH_ALEN);
+	cmd->create_params.tid = stream->tid;
+	cmd->create_params.curr_seq_no = cpu_to_le16(0);
+	cmd->create_params.reset_seq_no_flag = 1;
+
+	cmd->create_params.param_info =
+		(stream->sta->ht_cap.ampdu_factor &
+		 IEEE80211_HT_AMPDU_PARM_FACTOR) |
+		((stream->sta->ht_cap.ampdu_density << 2) &
+		 IEEE80211_HT_AMPDU_PARM_DENSITY);
+
+	cmd->create_params.flags =
+		cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE |
+			    BASTREAM_FLAG_DIRECTION_UPSTREAM);
+
+	rc = mwl8k_post_cmd(hw, &cmd->header);
+
+	wiphy_debug(hw->wiphy, "Created a BA stream for %pM : tid %d\n",
+		    stream->sta->addr, stream->tid);
+	kfree(cmd);
+
+	return rc;
+}
+
+static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
+			     struct mwl8k_ampdu_stream *stream)
+{
+	struct mwl8k_cmd_bastream *cmd;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		return;
+
+	cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
+	cmd->header.length = cpu_to_le16(sizeof(*cmd));
+	cmd->action = cpu_to_le32(MWL8K_BA_DESTROY);
+
+	cmd->destroy_params.ba_context = cpu_to_le32(stream->idx);
+	mwl8k_post_cmd(hw, &cmd->header);
+
+	wiphy_debug(hw->wiphy, "Deleted BA stream index %d\n", stream->idx);
+
+	kfree(cmd);
+}
+
+/*
  * CMD_SET_NEW_STN.
  */
 struct mwl8k_cmd_set_new_stn {
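The param_info byte built in mwl8k_create_ba() mirrors the A-MPDU parameters field of the peer's HT capability element. With IEEE80211_HT_AMPDU_PARM_FACTOR = 0x03 and IEEE80211_HT_AMPDU_PARM_DENSITY = 0x1c, a peer advertising ampdu_factor = 3 (64 KB maximum A-MPDU) and ampdu_density = 5 (4 us minimum MPDU spacing) yields, as a worked example:

	param_info = (3 & 0x03) | ((5 << 2) & 0x1c);	/* 0x03 | 0x14 = 0x17 */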
@@ -3670,6 +4176,11 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
 		tasklet_schedule(&priv->poll_rx_task);
 	}
 
+	if (status & MWL8K_A2H_INT_BA_WATCHDOG) {
+		status &= ~MWL8K_A2H_INT_BA_WATCHDOG;
+		ieee80211_queue_work(hw, &priv->watchdog_ba_handle);
+	}
+
 	if (status)
 		iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
 
@@ -3698,7 +4209,7 @@ static void mwl8k_tx_poll(unsigned long data)
 
 	spin_lock_bh(&priv->tx_lock);
 
-	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
 
 	if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
@@ -3822,6 +4333,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
 
 	/* Stop finalize join worker */
 	cancel_work_sync(&priv->finalize_join_worker);
+	cancel_work_sync(&priv->watchdog_ba_handle);
 	if (priv->beacon_skb != NULL)
 		dev_kfree_skb(priv->beacon_skb);
 
@@ -3830,7 +4342,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
 	tasklet_disable(&priv->poll_rx_task);
 
 	/* Return all skbs to mac80211 */
-	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
 }
 
@@ -4305,6 +4817,8 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
 	ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
 	if (ret >= 0) {
 		MWL8K_STA(sta)->peer_id = ret;
+		if (sta->ht_cap.ht_supported)
+			MWL8K_STA(sta)->is_ampdu_allowed = true;
 		ret = 0;
 	}
 
@@ -4328,14 +4842,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
 
 	rc = mwl8k_fw_lock(hw);
 	if (!rc) {
-		BUG_ON(queue > MWL8K_TX_QUEUES - 1);
+		BUG_ON(queue > MWL8K_TX_WMM_QUEUES - 1);
 		memcpy(&priv->wmm_params[queue], params, sizeof(*params));
 
 		if (!priv->wmm_enabled)
 			rc = mwl8k_cmd_set_wmm_mode(hw, 1);
 
 		if (!rc) {
-			int q = MWL8K_TX_QUEUES - 1 - queue;
+			int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
 			rc = mwl8k_cmd_set_edca_params(hw, q,
 						       params->cw_min,
 						       params->cw_max,
@@ -4371,21 +4885,118 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
 	return 0;
 }
 
+#define MAX_AMPDU_ATTEMPTS	5
+
 static int
 mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		   enum ieee80211_ampdu_mlme_action action,
 		   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
 		   u8 buf_size)
 {
+	int i, rc = 0;
+	struct mwl8k_priv *priv = hw->priv;
+	struct mwl8k_ampdu_stream *stream;
+	u8 *addr = sta->addr;
+
+	if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+		return -ENOTSUPP;
+
+	spin_lock(&priv->stream_lock);
+	stream = mwl8k_lookup_stream(hw, addr, tid);
+
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
 	case IEEE80211_AMPDU_RX_STOP:
-		if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
-			return -ENOTSUPP;
-		return 0;
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		/* By the time we get here the hw queues may contain outgoing
+		 * packets for this RA/TID that are not part of this BA
+		 * session.  The hw will assign sequence numbers to these
+		 * packets as they go out.  So if we query the hw for its next
+		 * sequence number and use that for the SSN here, it may end up
+		 * being wrong, which will lead to sequence number mismatch at
+		 * the recipient.  To avoid this, we reset the sequence number
+		 * to 0 for the first MPDU in this BA stream.
+		 */
+		*ssn = 0;
+		if (stream == NULL) {
+			/* This means that somebody outside this driver called
+			 * ieee80211_start_tx_ba_session.  This is unexpected
+			 * because we do our own rate control.  Just warn and
+			 * move on.
+			 */
+			wiphy_warn(hw->wiphy, "Unexpected call to %s. "
+				   "Proceeding anyway.\n", __func__);
+			stream = mwl8k_add_stream(hw, sta, tid);
+		}
+		if (stream == NULL) {
+			wiphy_debug(hw->wiphy, "no free AMPDU streams\n");
+			rc = -EBUSY;
+			break;
+		}
+		stream->state = AMPDU_STREAM_IN_PROGRESS;
+
+		/* Release the lock before we do the time consuming stuff */
+		spin_unlock(&priv->stream_lock);
+		for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
+			rc = mwl8k_check_ba(hw, stream);
+			if (!rc)
+				break;
+			/* HW queues take time to be flushed, give them
+			 * sufficient time
+			 */
+			msleep(1000);
+		}
+		spin_lock(&priv->stream_lock);
+		if (rc) {
+			wiphy_err(hw->wiphy, "Stream for tid %d busy after %d"
+				  " attempts\n", tid, MAX_AMPDU_ATTEMPTS);
+			mwl8k_remove_stream(hw, stream);
+			rc = -EBUSY;
+			break;
+		}
+		ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP:
+		if (stream == NULL)
+			break;
+		if (stream->state == AMPDU_STREAM_ACTIVE) {
+			spin_unlock(&priv->stream_lock);
+			mwl8k_destroy_ba(hw, stream);
+			spin_lock(&priv->stream_lock);
+		}
+		mwl8k_remove_stream(hw, stream);
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		BUG_ON(stream == NULL);
+		BUG_ON(stream->state != AMPDU_STREAM_IN_PROGRESS);
+		spin_unlock(&priv->stream_lock);
+		rc = mwl8k_create_ba(hw, stream, buf_size);
+		spin_lock(&priv->stream_lock);
+		if (!rc)
+			stream->state = AMPDU_STREAM_ACTIVE;
+		else {
+			spin_unlock(&priv->stream_lock);
+			mwl8k_destroy_ba(hw, stream);
+			spin_lock(&priv->stream_lock);
+			wiphy_debug(hw->wiphy,
+				    "Failed adding stream for sta %pM tid %d\n",
+				    addr, tid);
+			mwl8k_remove_stream(hw, stream);
+		}
+		break;
 	default:
-		return -ENOTSUPP;
+		rc = -ENOTSUPP;
 	}
+
+	spin_unlock(&priv->stream_lock);
+	return rc;
 }
 
 static const struct ieee80211_ops mwl8k_ops = {
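Taken together, the ampdu_action cases above drive each stream through a small state machine, with every transition made under priv->stream_lock:

	AMPDU_NO_STREAM          --mwl8k_add_stream()-->           AMPDU_STREAM_NEW
	AMPDU_STREAM_NEW         --AMPDU_TX_START (ssn = 0)-->     AMPDU_STREAM_IN_PROGRESS
	AMPDU_STREAM_IN_PROGRESS --TX_OPERATIONAL, create_ba()-->  AMPDU_STREAM_ACTIVE
	any state                --TX_STOP, error or watchdog-->   AMPDU_NO_STREAM

mwl8k_remove_stream() simply zeroes the slot, which relies on AMPDU_NO_STREAM being the zero-valued enumerator.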
@@ -4434,7 +5045,7 @@ enum {
 	MWL8366,
 };
 
-#define MWL8K_8366_AP_FW_API 1
+#define MWL8K_8366_AP_FW_API 2
 #define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
 #define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
 
@@ -4600,6 +5211,23 @@ static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
 	return rc;
 }
 
+static int mwl8k_init_txqs(struct ieee80211_hw *hw)
+{
+	struct mwl8k_priv *priv = hw->priv;
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < mwl8k_tx_queues(priv); i++) {
+		rc = mwl8k_txq_init(hw, i);
+		if (rc)
+			break;
+		if (priv->ap_fw)
+			iowrite32(priv->txq[i].txd_dma,
+				  priv->sram + priv->txq_offset[i]);
+	}
+	return rc;
+}
+
 /* initialize hw after successfully loading a firmware image */
 static int mwl8k_probe_hw(struct ieee80211_hw *hw)
 {
@@ -4627,15 +5255,23 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
 		goto err_stop_firmware;
 	rxq_refill(hw, 0, INT_MAX);
 
-	for (i = 0; i < MWL8K_TX_QUEUES; i++) {
-		rc = mwl8k_txq_init(hw, i);
+	/* For the sta firmware, we need to know the dma addresses of tx
+	 * queues before sending MWL8K_CMD_GET_HW_SPEC.  So we must initialize
+	 * them prior to issuing this command.  But for the AP case, we learn
+	 * the total number of queues from the result of CMD_GET_HW_SPEC, so
+	 * for this case we must initialize the tx queues after.
+	 */
+	priv->num_ampdu_queues = 0;
+	if (!priv->ap_fw) {
+		rc = mwl8k_init_txqs(hw);
 		if (rc)
 			goto err_free_queues;
 	}
 
 	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
 	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
-	iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
+	iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY |
+		  MWL8K_A2H_INT_BA_WATCHDOG,
 		  priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
 	iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
 
@@ -4646,6 +5282,8 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
 		goto err_free_queues;
 	}
 
+	memset(priv->ampdu, 0, sizeof(priv->ampdu));
+
 	/*
 	 * Temporarily enable interrupts.  Initial firmware host
 	 * commands use interrupts and avoid polling.  Disable
@@ -4657,6 +5295,8 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
 	if (priv->ap_fw) {
 		rc = mwl8k_cmd_get_hw_spec_ap(hw);
 		if (!rc)
+			rc = mwl8k_init_txqs(hw);
+		if (!rc)
 			rc = mwl8k_cmd_set_hw_spec(hw);
 	} else {
 		rc = mwl8k_cmd_get_hw_spec_sta(hw);
@@ -4698,7 +5338,7 @@ err_free_irq:
 	free_irq(priv->pdev->irq, hw);
 
 err_free_queues:
-	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		mwl8k_txq_deinit(hw, i);
 	mwl8k_rxq_deinit(hw, 0);
 
@@ -4720,7 +5360,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
 	mwl8k_stop(hw);
 	mwl8k_rxq_deinit(hw, 0);
 
-	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		mwl8k_txq_deinit(hw, i);
 
 	rc = mwl8k_init_firmware(hw, fw_image, false);
@@ -4739,7 +5379,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
 	if (rc)
 		goto fail;
 
-	for (i = 0; i < MWL8K_TX_QUEUES; i++) {
+	for (i = 0; i < MWL8K_TX_WMM_QUEUES; i++) {
 		rc = mwl8k_conf_tx(hw, i, &priv->wmm_params[i]);
 		if (rc)
 			goto fail;
@@ -4773,7 +5413,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
 
 	hw->channel_change_time = 10;
 
-	hw->queues = MWL8K_TX_QUEUES;
+	hw->queues = MWL8K_TX_WMM_QUEUES;
 
 	/* Set rssi values to dBm */
 	hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
@@ -4789,6 +5429,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
 
 	/* Finalize join worker */
 	INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
+	/* Handle watchdog ba events */
+	INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events);
 
 	/* TX reclaim and RX tasklets. */
 	tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
@@ -4808,6 +5450,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
 
 	spin_lock_init(&priv->tx_lock);
 
+	spin_lock_init(&priv->stream_lock);
+
 	priv->tx_wait = NULL;
 
 	rc = mwl8k_probe_hw(hw);
@@ -4829,7 +5473,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
 	return 0;
 
 err_unprobe_hw:
-	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		mwl8k_txq_deinit(hw, i);
 	mwl8k_rxq_deinit(hw, 0);
 
@@ -4988,10 +5632,10 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
 	mwl8k_hw_reset(priv);
 
 	/* Return all skbs to mac80211 */
-	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
 
-	for (i = 0; i < MWL8K_TX_QUEUES; i++)
+	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		mwl8k_txq_deinit(hw, i);
 
 	mwl8k_rxq_deinit(hw, 0);
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 09fae2f0ea0..736bbb9bd1d 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -153,6 +153,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
 	priv->scan_request = request;
 
 	err = orinoco_hw_trigger_scan(priv, request->ssids);
+	/* On error we aren't processing the request */
+	if (err)
+		priv->scan_request = NULL;
 
 	return err;
 }
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index f3d396e7544..62c6b2b37db 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1376,13 +1376,13 @@ static void orinoco_process_scan_results(struct work_struct *work)
 
 	spin_lock_irqsave(&priv->scan_lock, flags);
 	list_for_each_entry_safe(sd, temp, &priv->scan_list, list) {
-		spin_unlock_irqrestore(&priv->scan_lock, flags);
 
 		buf = sd->buf;
 		len = sd->len;
 		type = sd->type;
 
 		list_del(&sd->list);
+		spin_unlock_irqrestore(&priv->scan_lock, flags);
 		kfree(sd);
 
 		if (len > 0) {
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4daa5..0494d7b102d 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
 	while (i != idx) {
 		u16 len;
 		struct sk_buff *skb;
+		dma_addr_t dma_addr;
 		desc = &ring[i];
 		len = le16_to_cpu(desc->len);
 		skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
 
 			len = priv->common.rx_mtu;
 		}
+		dma_addr = le32_to_cpu(desc->host_addr);
+		pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+			priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 		skb_put(skb, len);
 
 		if (p54_rx(dev, skb)) {
-			pci_unmap_single(priv->pdev,
-				le32_to_cpu(desc->host_addr),
-				priv->common.rx_mtu + 32,
-				PCI_DMA_FROMDEVICE);
+			pci_unmap_single(priv->pdev, dma_addr,
+				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 			rx_buf[i] = NULL;
-			desc->host_addr = 0;
+			desc->host_addr = cpu_to_le32(0);
 		} else {
 			skb_trim(skb, 0);
+			pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
 		}
 
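The fix above applies the standard streaming-DMA ownership rule: a buffer that stays mapped across hardware use must be synced for the CPU before the driver inspects it, and synced back for the device before it is handed to hardware again. A hedged sketch of the general pattern (generic names, not this driver's code):

	pci_dma_sync_single_for_cpu(pdev, dma_addr, buf_len, PCI_DMA_FROMDEVICE);
	/* the CPU may now safely read the received data */
	if (!consumed_by_stack)
		pci_dma_sync_single_for_device(pdev, dma_addr, buf_len,
					       PCI_DMA_FROMDEVICE);
	/* ownership of the buffer returns to the device */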
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 18d24b7b1e3..7ecc0bda57b 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -649,8 +649,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
 		goto err_free_common;
 	}
 
-	set_irq_type(gpio_to_irq(p54spi_gpio_irq),
-		     IRQ_TYPE_EDGE_RISING);
+	irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING);
 
 	disable_irq(gpio_to_irq(p54spi_gpio_irq));
 
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7638c..9b344a921e7 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 	{USB_DEVICE(0x1413, 0x5400)},   /* Telsey 802.11g USB2.0 Adapter */
 	{USB_DEVICE(0x1435, 0x0427)},   /* Inventel UR054G */
 	{USB_DEVICE(0x1668, 0x1050)},   /* Actiontec 802UIG-1 */
+	{USB_DEVICE(0x1740, 0x1000)},   /* Senao NUB-350 */
 	{USB_DEVICE(0x2001, 0x3704)},   /* DLink DWL-G122 rev A2 */
 	{USB_DEVICE(0x2001, 0x3705)},   /* D-Link DWL-G120 rev C1 */
 	{USB_DEVICE(0x413c, 0x5513)},   /* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2cce24..518542b4bf9 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
 	__le32 mode;
 	int ret;
 
+	if (priv->device_type != RNDIS_BCM4320B)
+		return -ENOTSUPP;
+
 	netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
 		   enabled ? "enabled" : "disabled",
 		   timeout);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index f1a92144996..4e368657a83 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -719,6 +719,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
719 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 719 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
720 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 720 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
721 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, 721 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
722 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
722 /* AzureWave */ 723 /* AzureWave */
723 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 724 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
724 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, 725 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -913,7 +914,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
913 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, 914 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
914 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, 915 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
915 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, 916 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
916 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
917 /* AzureWave */ 917 /* AzureWave */
918 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 918 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
919 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, 919 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -937,6 +937,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
937 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 937 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
938 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, 938 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
939 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, 939 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
940 /* Edimax */
941 { USB_DEVICE(0x7392, 0x4085), USB_DEVICE_DATA(&rt2800usb_ops) },
940 /* Encore */ 942 /* Encore */
941 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, 943 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
942 /* Gemtek */ 944 /* Gemtek */
@@ -961,6 +963,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
961 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 963 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
962 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) }, 964 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
963 /* Planex */ 965 /* Planex */
966 { USB_DEVICE(0x2019, 0x5201), USB_DEVICE_DATA(&rt2800usb_ops) },
964 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 967 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
965 /* Qcom */ 968 /* Qcom */
966 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, 969 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -972,6 +975,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
972 /* Sweex */ 975 /* Sweex */
973 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, 976 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
974 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, 977 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
978 /* Toshiba */
979 { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
975 /* Zyxel */ 980 /* Zyxel */
976 { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, 981 { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
977#endif 982#endif
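The rt2800usb hunks above only add, move, or reorder USB_DEVICE(vendor, product) entries, but the mechanism they feed is worth making concrete: at probe time the USB core walks the driver's usb_device_id table and binds on the first vendor/product match. A minimal userspace sketch of that first-match walk, using mock types rather than the kernel's struct usb_device_id (all names below are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Mock of the vendor/product pair carried by struct usb_device_id. */
struct mock_usb_id {
        uint16_t vendor;
        uint16_t product;
};

/* Table in the spirit of rt2800usb_device_table, zero-terminated. */
static const struct mock_usb_id table[] = {
        { 0x0b05, 0x1732 },     /* ASUS */
        { 0x13d3, 0x3247 },     /* AzureWave */
        { 0x7392, 0x4085 },     /* Edimax entry added above */
        { 0, 0 }
};

/* First-match walk, as the USB core conceptually does at probe time. */
static const struct mock_usb_id *match(uint16_t vendor, uint16_t product)
{
        const struct mock_usb_id *id;

        for (id = table; id->vendor; id++)
                if (id->vendor == vendor && id->product == product)
                        return id;
        return NULL;
}

int main(void)
{
        printf("0x7392:0x4085 %s\n",
               match(0x7392, 0x4085) ? "matches" : "does not match");
        return 0;
}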
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 4f92cba6810..f74a8701c67 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -410,8 +410,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
410 410
411 if (!efuse_shadow_update_chk(hw)) { 411 if (!efuse_shadow_update_chk(hw)) {
412 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 412 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
413 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 413 memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
414 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 414 &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
415 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 415 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
416 416
417 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 417 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
@@ -446,9 +446,9 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
446 446
447 if (word_en != 0x0F) { 447 if (word_en != 0x0F) {
448 u8 tmpdata[8]; 448 u8 tmpdata[8];
449 memcpy((void *)tmpdata, 449 memcpy(tmpdata,
450 (void *)(&rtlefuse-> 450 &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
451 efuse_map[EFUSE_MODIFY_MAP][base]), 8); 451 8);
452 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, 452 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
453 ("U-efuse\n"), tmpdata, 8); 453 ("U-efuse\n"), tmpdata, 8);
454 454
@@ -465,8 +465,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
465 efuse_power_switch(hw, true, false); 465 efuse_power_switch(hw, true, false);
466 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 466 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
467 467
468 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 468 memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
469 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 469 &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
470 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 470 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
471 471
472 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n")); 472 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n"));
@@ -479,13 +479,12 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
479 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 479 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
480 480
481 if (rtlefuse->autoload_failflag == true) { 481 if (rtlefuse->autoload_failflag == true) {
482 memset((void *)(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), 128, 482 memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128);
483 0xFF);
484 } else 483 } else
485 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 484 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
486 485
487 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 486 memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
488 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 487 &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
489 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 488 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
490 489
491} 490}
@@ -694,8 +693,8 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
694 if (offset > 15) 693 if (offset > 15)
695 return false; 694 return false;
696 695
697 memset((void *)data, PGPKT_DATA_SIZE * sizeof(u8), 0xff); 696 memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
698 memset((void *)tmpdata, PGPKT_DATA_SIZE * sizeof(u8), 0xff); 697 memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
699 698
700 while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { 699 while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
701 if (readstate & PG_STATE_HEADER) { 700 if (readstate & PG_STATE_HEADER) {
@@ -862,7 +861,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
862 861
863 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); 862 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
864 863
865 memset((void *)originaldata, 8 * sizeof(u8), 0xff); 864 memset(originaldata, 0xff, 8 * sizeof(u8));
866 865
867 if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { 866 if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
868 badworden = efuse_word_enable_data_write(hw, 867 badworden = efuse_word_enable_data_write(hw,
@@ -917,7 +916,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
917 target_pkt.offset = offset; 916 target_pkt.offset = offset;
918 target_pkt.word_en = word_en; 917 target_pkt.word_en = word_en;
919 918
920 memset((void *)target_pkt.data, 8 * sizeof(u8), 0xFF); 919 memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
921 920
922 efuse_word_enable_data_read(word_en, data, target_pkt.data); 921 efuse_word_enable_data_read(word_en, data, target_pkt.data);
923 target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); 922 target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
@@ -1022,7 +1021,7 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
1022 u8 badworden = 0x0F; 1021 u8 badworden = 0x0F;
1023 u8 tmpdata[8]; 1022 u8 tmpdata[8];
1024 1023
1025 memset((void *)tmpdata, PGPKT_DATA_SIZE, 0xff); 1024 memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
1026 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 1025 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
1027 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); 1026 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
1028 1027
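Every efuse.c hunk above corrects the same class of bug, which is worth isolating: memset(s, c, n) takes the fill byte second and the length third, so the old memset(tmpdata, PGPKT_DATA_SIZE, 0xff) wrote 0xff (255) bytes of the value PGPKT_DATA_SIZE into an 8-byte stack buffer. The casts dropped from the memcpy calls are redundant for a related reason: both parameters are already void pointers. A small standalone sketch of the wrong and right orderings:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char buf[8], copy[8];

        /*
         * The old argument order would fill 0xff (255) bytes with the
         * value 8, overrunning the 8-byte buffer; it is shown only as
         * a comment because executing it is undefined behaviour:
         *
         *      memset(buf, sizeof(buf), 0xff);
         */

        /* Correct order: fill sizeof(buf) bytes with the value 0xff. */
        memset(buf, 0xff, sizeof(buf));

        /* And the memcpy cleanup: no (void *) casts needed, since the
         * parameters are already void pointers. */
        memcpy(copy, buf, sizeof(copy));

        printf("buf[0]=0x%02x copy[7]=0x%02x\n", buf[0], copy[7]);
        return 0;
}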
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 1f18bf7df74..9cd7703c2a3 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1477,13 +1477,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1477 struct pci_dev *bridge_pdev = pdev->bus->self; 1477 struct pci_dev *bridge_pdev = pdev->bus->self;
1478 u16 venderid; 1478 u16 venderid;
1479 u16 deviceid; 1479 u16 deviceid;
1480 u8 revisionid;
1481 u16 irqline; 1480 u16 irqline;
1482 u8 tmp; 1481 u8 tmp;
1483 1482
1484 venderid = pdev->vendor; 1483 venderid = pdev->vendor;
1485 deviceid = pdev->device; 1484 deviceid = pdev->device;
1486 pci_read_config_byte(pdev, 0x8, &revisionid);
1487 pci_read_config_word(pdev, 0x3C, &irqline); 1485 pci_read_config_word(pdev, 0x3C, &irqline);
1488 1486
1489 if (deviceid == RTL_PCI_8192_DID || 1487 if (deviceid == RTL_PCI_8192_DID ||
@@ -1494,7 +1492,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1494 deviceid == RTL_PCI_8173_DID || 1492 deviceid == RTL_PCI_8173_DID ||
1495 deviceid == RTL_PCI_8172_DID || 1493 deviceid == RTL_PCI_8172_DID ||
1496 deviceid == RTL_PCI_8171_DID) { 1494 deviceid == RTL_PCI_8171_DID) {
1497 switch (revisionid) { 1495 switch (pdev->revision) {
1498 case RTL_PCI_REVISION_ID_8192PCIE: 1496 case RTL_PCI_REVISION_ID_8192PCIE:
1499 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1497 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1500 ("8192 PCI-E is found - " 1498 ("8192 PCI-E is found - "
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index 803adcc80c9..b0b0b13dd0a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -532,9 +532,9 @@
532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
533do { \ 533do { \
534 if (_size > TX_DESC_NEXT_DESC_OFFSET) \ 534 if (_size > TX_DESC_NEXT_DESC_OFFSET) \
535 memset((void *)__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ 535 memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
536 else \ 536 else \
537 memset((void *)__pdesc, 0, _size); \ 537 memset(__pdesc, 0, _size); \
538} while (0); 538} while (0);
539 539
540#define RX_HAL_IS_CCK_RATE(_pdesc)\ 540#define RX_HAL_IS_CCK_RATE(_pdesc)\
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index d0b0d43b9a6..3f0cb81c424 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -656,7 +656,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
656 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 656 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
657 __le16 fc = hdr->frame_control; 657 __le16 fc = hdr->frame_control;
658 658
659 memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE); 659 memset(pdesc, 0, RTL_TX_HEADER_SIZE);
660 if (firstseg) 660 if (firstseg)
661 SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE); 661 SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
662 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); 662 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 9d0c01a86d0..01226f8e70f 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -34,6 +34,7 @@
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/version.h> 35#include <linux/version.h>
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <linux/vmalloc.h>
37#include <linux/usb.h> 38#include <linux/usb.h>
38#include <net/mac80211.h> 39#include <net/mac80211.h>
39#include "debug.h" 40#include "debug.h"
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index d550b5e68d3..f51a0241a44 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -265,7 +265,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
265 goto disable; 265 goto disable;
266 } 266 }
267 267
268 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 268 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
269 disable_irq(wl->irq); 269 disable_irq(wl->irq);
270 270
271 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; 271 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index ac872b38960..af6448c4d3e 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -286,7 +286,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
286 goto out_free; 286 goto out_free;
287 } 287 }
288 288
289 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 289 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
290 290
291 disable_irq(wl->irq); 291 disable_irq(wl->irq);
292 292
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index 1907eafb9b1..5728a918e50 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -5,7 +5,5 @@ zd1211rw-objs := zd_chip.o zd_mac.o \
5 zd_rf_al7230b.o zd_rf_uw2453.o \ 5 zd_rf_al7230b.o zd_rf_uw2453.o \
6 zd_rf.o zd_usb.o 6 zd_rf.o zd_usb.o
7 7
8ifeq ($(CONFIG_ZD1211RW_DEBUG),y) 8ccflags-$(CONFIG_ZD1211RW_DEBUG) := -DDEBUG
9EXTRA_CFLAGS += -DDEBUG
10endif
11 9
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 81e80489a05..58236e6d092 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -60,6 +60,7 @@ static struct usb_device_id usb_ids[] = {
60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, 60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
61 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
62 { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, 62 { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 },
63 { USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 },
63 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, 64 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
64 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 65 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
65 /* ZD1211B */ 66 /* ZD1211B */
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
new file mode 100644
index 00000000000..e346e8125ef
--- /dev/null
+++ b/drivers/net/xen-netback/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
2
3xen-netback-y := netback.o xenbus.o interface.o
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
new file mode 100644
index 00000000000..5d7bbf2b2ee
--- /dev/null
+++ b/drivers/net/xen-netback/common.h
@@ -0,0 +1,161 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License version 2
4 * as published by the Free Software Foundation; or, when distributed
5 * separately from the Linux kernel or incorporated into other
6 * software packages, subject to the following license:
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this source file (the "Software"), to deal in the Software without
10 * restriction, including without limitation the rights to use, copy, modify,
11 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * IN THE SOFTWARE.
25 */
26
27#ifndef __XEN_NETBACK__COMMON_H__
28#define __XEN_NETBACK__COMMON_H__
29
30#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
31
32#include <linux/module.h>
33#include <linux/interrupt.h>
34#include <linux/slab.h>
35#include <linux/ip.h>
36#include <linux/in.h>
37#include <linux/io.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/wait.h>
41#include <linux/sched.h>
42
43#include <xen/interface/io/netif.h>
44#include <xen/interface/grant_table.h>
45#include <xen/grant_table.h>
46#include <xen/xenbus.h>
47
48struct xen_netbk;
49
50struct xenvif {
51 /* Unique identifier for this interface. */
52 domid_t domid;
53 unsigned int handle;
54
55 /* Reference to netback processing backend. */
56 struct xen_netbk *netbk;
57
58 u8 fe_dev_addr[6];
59
60 /* Physical parameters of the comms window. */
61 grant_handle_t tx_shmem_handle;
62 grant_ref_t tx_shmem_ref;
63 grant_handle_t rx_shmem_handle;
64 grant_ref_t rx_shmem_ref;
65 unsigned int irq;
66
67 /* List of frontends to notify after a batch of frames sent. */
68 struct list_head notify_list;
69
70 /* The shared rings and indexes. */
71 struct xen_netif_tx_back_ring tx;
72 struct xen_netif_rx_back_ring rx;
73 struct vm_struct *tx_comms_area;
74 struct vm_struct *rx_comms_area;
75
76 /* Flags that must not be set in dev->features */
77 u32 features_disabled;
78
79 /* Frontend feature information. */
80 u8 can_sg:1;
81 u8 gso:1;
82 u8 gso_prefix:1;
83 u8 csum:1;
84
85 /* Internal feature information. */
86 u8 can_queue:1; /* can queue packets for receiver? */
87
88 /*
89 * Allow xenvif_start_xmit() to peek ahead in the rx request
90 * ring. This is a prediction of what rx_req_cons will be
91 * once all queued skbs are put on the ring.
92 */
93 RING_IDX rx_req_cons_peek;
94
95 /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
96 unsigned long credit_bytes;
97 unsigned long credit_usec;
98 unsigned long remaining_credit;
99 struct timer_list credit_timeout;
100
101 /* Statistics */
102 unsigned long rx_gso_checksum_fixup;
103
104 /* Miscellaneous private stuff. */
105 struct list_head schedule_list;
106 atomic_t refcnt;
107 struct net_device *dev;
108
109 wait_queue_head_t waiting_to_free;
110};
111
112#define XEN_NETIF_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
113#define XEN_NETIF_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
114
115struct xenvif *xenvif_alloc(struct device *parent,
116 domid_t domid,
117 unsigned int handle);
118
119int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
120 unsigned long rx_ring_ref, unsigned int evtchn);
121void xenvif_disconnect(struct xenvif *vif);
122
123void xenvif_get(struct xenvif *vif);
124void xenvif_put(struct xenvif *vif);
125
126int xenvif_xenbus_init(void);
127
128int xenvif_schedulable(struct xenvif *vif);
129
130int xen_netbk_rx_ring_full(struct xenvif *vif);
131
132int xen_netbk_must_stop_queue(struct xenvif *vif);
133
134/* (Un)Map communication rings. */
135void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
136int xen_netbk_map_frontend_rings(struct xenvif *vif,
137 grant_ref_t tx_ring_ref,
138 grant_ref_t rx_ring_ref);
139
140/* (De)Register a xenvif with the netback backend. */
141void xen_netbk_add_xenvif(struct xenvif *vif);
142void xen_netbk_remove_xenvif(struct xenvif *vif);
143
144/* (De)Schedule backend processing for a xenvif */
145void xen_netbk_schedule_xenvif(struct xenvif *vif);
146void xen_netbk_deschedule_xenvif(struct xenvif *vif);
147
148/* Check for SKBs from frontend and schedule backend processing */
149void xen_netbk_check_rx_xenvif(struct xenvif *vif);
150/* Receive an SKB from the frontend */
151void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
152
153/* Queue an SKB for transmission to the frontend */
154void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
155/* Notify xenvif that ring now has space to send an skb to the frontend */
156void xenvif_notify_tx_completion(struct xenvif *vif);
157
158/* Returns number of ring slots required to send an skb to the frontend */
159unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
160
161#endif /* __XEN_NETBACK__COMMON_H__ */
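XEN_NETIF_TX_RING_SIZE and XEN_NETIF_RX_RING_SIZE expand, via __RING_SIZE, to the number of request/response slots that fit in one page after the shared-ring header, rounded down to a power of two so producer and consumer indices can wrap with a simple mask. A plain-C sketch of that computation; the header and entry sizes below are illustrative, not the real netif layout:

#include <stdio.h>

#define PAGE_SIZE       4096
#define RING_HDR        64      /* illustrative header size */
#define ENTRY_SIZE      40      /* illustrative slot size */

/* Round down to a power of two, as __RING_SIZE's __RD32() does. */
static unsigned int rounddown_pow_of_two(unsigned int x)
{
        unsigned int r = 1;

        while (r * 2 <= x)
                r *= 2;
        return r;
}

int main(void)
{
        unsigned int raw = (PAGE_SIZE - RING_HDR) / ENTRY_SIZE;
        unsigned int size = rounddown_pow_of_two(raw);

        /* A power-of-two size lets "idx & (size - 1)" replace "%". */
        printf("raw=%u ring_size=%u\n", raw, size);
        return 0;
}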
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
new file mode 100644
index 00000000000..de569cc19da
--- /dev/null
+++ b/drivers/net/xen-netback/interface.c
@@ -0,0 +1,424 @@
1/*
2 * Network-device interface management.
3 *
4 * Copyright (c) 2004-2005, Keir Fraser
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation; or, when distributed
9 * separately from the Linux kernel or incorporated into other
10 * software packages, subject to the following license:
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this source file (the "Software"), to deal in the Software without
14 * restriction, including without limitation the rights to use, copy, modify,
15 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
16 * and to permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 * IN THE SOFTWARE.
29 */
30
31#include "common.h"
32
33#include <linux/ethtool.h>
34#include <linux/rtnetlink.h>
35#include <linux/if_vlan.h>
36
37#include <xen/events.h>
38#include <asm/xen/hypercall.h>
39
40#define XENVIF_QUEUE_LENGTH 32
41
42void xenvif_get(struct xenvif *vif)
43{
44 atomic_inc(&vif->refcnt);
45}
46
47void xenvif_put(struct xenvif *vif)
48{
49 if (atomic_dec_and_test(&vif->refcnt))
50 wake_up(&vif->waiting_to_free);
51}
52
53int xenvif_schedulable(struct xenvif *vif)
54{
55 return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
56}
57
58static int xenvif_rx_schedulable(struct xenvif *vif)
59{
60 return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
61}
62
63static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
64{
65 struct xenvif *vif = dev_id;
66
67 if (vif->netbk == NULL)
68 return IRQ_NONE;
69
70 xen_netbk_schedule_xenvif(vif);
71
72 if (xenvif_rx_schedulable(vif))
73 netif_wake_queue(vif->dev);
74
75 return IRQ_HANDLED;
76}
77
78static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
79{
80 struct xenvif *vif = netdev_priv(dev);
81
82 BUG_ON(skb->dev != dev);
83
84 if (vif->netbk == NULL)
85 goto drop;
86
87 /* Drop the packet if the target domain has no receive buffers. */
88 if (!xenvif_rx_schedulable(vif))
89 goto drop;
90
91 /* Reserve ring slots for the worst-case number of fragments. */
92 vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
93 xenvif_get(vif);
94
95 if (vif->can_queue && xen_netbk_must_stop_queue(vif))
96 netif_stop_queue(dev);
97
98 xen_netbk_queue_tx_skb(vif, skb);
99
100 return NETDEV_TX_OK;
101
102 drop:
103 vif->dev->stats.tx_dropped++;
104 dev_kfree_skb(skb);
105 return NETDEV_TX_OK;
106}
107
108void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
109{
110 netif_rx_ni(skb);
111}
112
113void xenvif_notify_tx_completion(struct xenvif *vif)
114{
115 if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
116 netif_wake_queue(vif->dev);
117}
118
119static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
120{
121 struct xenvif *vif = netdev_priv(dev);
122 return &vif->dev->stats;
123}
124
125static void xenvif_up(struct xenvif *vif)
126{
127 xen_netbk_add_xenvif(vif);
128 enable_irq(vif->irq);
129 xen_netbk_check_rx_xenvif(vif);
130}
131
132static void xenvif_down(struct xenvif *vif)
133{
134 disable_irq(vif->irq);
135 xen_netbk_deschedule_xenvif(vif);
136 xen_netbk_remove_xenvif(vif);
137}
138
139static int xenvif_open(struct net_device *dev)
140{
141 struct xenvif *vif = netdev_priv(dev);
142 if (netif_carrier_ok(dev))
143 xenvif_up(vif);
144 netif_start_queue(dev);
145 return 0;
146}
147
148static int xenvif_close(struct net_device *dev)
149{
150 struct xenvif *vif = netdev_priv(dev);
151 if (netif_carrier_ok(dev))
152 xenvif_down(vif);
153 netif_stop_queue(dev);
154 return 0;
155}
156
157static int xenvif_change_mtu(struct net_device *dev, int mtu)
158{
159 struct xenvif *vif = netdev_priv(dev);
160 int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;
161
162 if (mtu > max)
163 return -EINVAL;
164 dev->mtu = mtu;
165 return 0;
166}
167
168static void xenvif_set_features(struct xenvif *vif)
169{
170 struct net_device *dev = vif->dev;
171 u32 features = dev->features;
172
173 if (vif->can_sg)
174 features |= NETIF_F_SG;
175 if (vif->gso || vif->gso_prefix)
176 features |= NETIF_F_TSO;
177 if (vif->csum)
178 features |= NETIF_F_IP_CSUM;
179
180 features &= ~(vif->features_disabled);
181
182 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN)
183 dev->mtu = ETH_DATA_LEN;
184
185 dev->features = features;
186}
187
188static int xenvif_set_tx_csum(struct net_device *dev, u32 data)
189{
190 struct xenvif *vif = netdev_priv(dev);
191 if (data) {
192 if (!vif->csum)
193 return -EOPNOTSUPP;
194 vif->features_disabled &= ~NETIF_F_IP_CSUM;
195 } else {
196 vif->features_disabled |= NETIF_F_IP_CSUM;
197 }
198
199 xenvif_set_features(vif);
200 return 0;
201}
202
203static int xenvif_set_sg(struct net_device *dev, u32 data)
204{
205 struct xenvif *vif = netdev_priv(dev);
206 if (data) {
207 if (!vif->can_sg)
208 return -EOPNOTSUPP;
209 vif->features_disabled &= ~NETIF_F_SG;
210 } else {
211 vif->features_disabled |= NETIF_F_SG;
212 }
213
214 xenvif_set_features(vif);
215 return 0;
216}
217
218static int xenvif_set_tso(struct net_device *dev, u32 data)
219{
220 struct xenvif *vif = netdev_priv(dev);
221 if (data) {
222 if (!vif->gso && !vif->gso_prefix)
223 return -EOPNOTSUPP;
224 vif->features_disabled &= ~NETIF_F_TSO;
225 } else {
226 vif->features_disabled |= NETIF_F_TSO;
227 }
228
229 xenvif_set_features(vif);
230 return 0;
231}
232
233static const struct xenvif_stat {
234 char name[ETH_GSTRING_LEN];
235 u16 offset;
236} xenvif_stats[] = {
237 {
238 "rx_gso_checksum_fixup",
239 offsetof(struct xenvif, rx_gso_checksum_fixup)
240 },
241};
242
243static int xenvif_get_sset_count(struct net_device *dev, int string_set)
244{
245 switch (string_set) {
246 case ETH_SS_STATS:
247 return ARRAY_SIZE(xenvif_stats);
248 default:
249 return -EINVAL;
250 }
251}
252
253static void xenvif_get_ethtool_stats(struct net_device *dev,
254 struct ethtool_stats *stats, u64 * data)
255{
256 void *vif = netdev_priv(dev);
257 int i;
258
259 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
260 data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
261}
262
263static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
264{
265 int i;
266
267 switch (stringset) {
268 case ETH_SS_STATS:
269 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
270 memcpy(data + i * ETH_GSTRING_LEN,
271 xenvif_stats[i].name, ETH_GSTRING_LEN);
272 break;
273 }
274}
275
276static struct ethtool_ops xenvif_ethtool_ops = {
277 .get_tx_csum = ethtool_op_get_tx_csum,
278 .set_tx_csum = xenvif_set_tx_csum,
279 .get_sg = ethtool_op_get_sg,
280 .set_sg = xenvif_set_sg,
281 .get_tso = ethtool_op_get_tso,
282 .set_tso = xenvif_set_tso,
283 .get_link = ethtool_op_get_link,
284
285 .get_sset_count = xenvif_get_sset_count,
286 .get_ethtool_stats = xenvif_get_ethtool_stats,
287 .get_strings = xenvif_get_strings,
288};
289
290static struct net_device_ops xenvif_netdev_ops = {
291 .ndo_start_xmit = xenvif_start_xmit,
292 .ndo_get_stats = xenvif_get_stats,
293 .ndo_open = xenvif_open,
294 .ndo_stop = xenvif_close,
295 .ndo_change_mtu = xenvif_change_mtu,
296};
297
298struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
299 unsigned int handle)
300{
301 int err;
302 struct net_device *dev;
303 struct xenvif *vif;
304 char name[IFNAMSIZ] = {};
305
306 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
307 dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
308 if (dev == NULL) {
309 pr_warn("Could not allocate netdev\n");
310 return ERR_PTR(-ENOMEM);
311 }
312
313 SET_NETDEV_DEV(dev, parent);
314
315 vif = netdev_priv(dev);
316 vif->domid = domid;
317 vif->handle = handle;
318 vif->netbk = NULL;
319 vif->can_sg = 1;
320 vif->csum = 1;
321 atomic_set(&vif->refcnt, 1);
322 init_waitqueue_head(&vif->waiting_to_free);
323 vif->dev = dev;
324 INIT_LIST_HEAD(&vif->schedule_list);
325 INIT_LIST_HEAD(&vif->notify_list);
326
327 vif->credit_bytes = vif->remaining_credit = ~0UL;
328 vif->credit_usec = 0UL;
329 init_timer(&vif->credit_timeout);
330 /* Initialize 'expires' now: it's used to track the credit window. */
331 vif->credit_timeout.expires = jiffies;
332
333 dev->netdev_ops = &xenvif_netdev_ops;
334 xenvif_set_features(vif);
335 SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
336
337 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
338
339 /*
340 * Initialise a dummy MAC address. We choose the numerically
341 * largest non-broadcast address to prevent the address getting
342 * stolen by an Ethernet bridge for STP purposes.
343 * (FE:FF:FF:FF:FF:FF)
344 */
345 memset(dev->dev_addr, 0xFF, ETH_ALEN);
346 dev->dev_addr[0] &= ~0x01;
347
348 netif_carrier_off(dev);
349
350 err = register_netdev(dev);
351 if (err) {
352 netdev_warn(dev, "Could not register device: err=%d\n", err);
353 free_netdev(dev);
354 return ERR_PTR(err);
355 }
356
357 netdev_dbg(dev, "Successfully created xenvif\n");
358 return vif;
359}
360
361int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
362 unsigned long rx_ring_ref, unsigned int evtchn)
363{
364 int err = -ENOMEM;
365
 366 /* Already connected? */
367 if (vif->irq)
368 return 0;
369
370 xenvif_set_features(vif);
371
372 err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
373 if (err < 0)
374 goto err;
375
376 err = bind_interdomain_evtchn_to_irqhandler(
377 vif->domid, evtchn, xenvif_interrupt, 0,
378 vif->dev->name, vif);
379 if (err < 0)
380 goto err_unmap;
381 vif->irq = err;
382 disable_irq(vif->irq);
383
384 xenvif_get(vif);
385
386 rtnl_lock();
387 netif_carrier_on(vif->dev);
388 if (netif_running(vif->dev))
389 xenvif_up(vif);
390 rtnl_unlock();
391
392 return 0;
393err_unmap:
394 xen_netbk_unmap_frontend_rings(vif);
395err:
396 return err;
397}
398
399void xenvif_disconnect(struct xenvif *vif)
400{
401 struct net_device *dev = vif->dev;
402 if (netif_carrier_ok(dev)) {
403 rtnl_lock();
404 netif_carrier_off(dev); /* discard queued packets */
405 if (netif_running(dev))
406 xenvif_down(vif);
407 rtnl_unlock();
408 xenvif_put(vif);
409 }
410
411 atomic_dec(&vif->refcnt);
412 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
413
414 del_timer_sync(&vif->credit_timeout);
415
416 if (vif->irq)
417 unbind_from_irqhandler(vif->irq, vif);
418
419 unregister_netdev(vif->dev);
420
421 xen_netbk_unmap_frontend_rings(vif);
422
423 free_netdev(vif->dev);
424}
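xenvif_disconnect() above is the payoff of the reference-counting scheme declared in common.h: the vif is born with one reference, xenvif_get()/xenvif_put() bracket every outstanding user (queued skbs, the event channel), and teardown drops the birth reference and sleeps until the count reaches zero. A single-threaded plain-C sketch of that counting discipline; the kernel versions use atomic_t and a waitqueue, and the names below are illustrative:

#include <stdio.h>

struct mock_vif {
        int refcnt;     /* the kernel uses atomic_t + a waitqueue */
};

static void vif_get(struct mock_vif *vif)
{
        vif->refcnt++;
}

static void vif_put(struct mock_vif *vif)
{
        if (--vif->refcnt == 0)
                printf("last reference dropped: safe to free\n");
}

int main(void)
{
        struct mock_vif vif = { .refcnt = 1 };  /* birth reference */

        vif_get(&vif);  /* e.g. a queued skb holds a reference */
        vif_put(&vif);  /* skb completed */

        /* Teardown: drop the birth reference; in the kernel,
         * wait_event() then blocks until refcnt reaches zero. */
        vif_put(&vif);
        return 0;
}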
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
new file mode 100644
index 00000000000..0e4851b8a77
--- /dev/null
+++ b/drivers/net/xen-netback/netback.c
@@ -0,0 +1,1745 @@
1/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40
41#include <net/tcp.h>
42
43#include <xen/events.h>
44#include <xen/interface/memory.h>
45
46#include <asm/xen/hypercall.h>
47#include <asm/xen/page.h>
48
49struct pending_tx_info {
50 struct xen_netif_tx_request req;
51 struct xenvif *vif;
52};
53typedef unsigned int pending_ring_idx_t;
54
55struct netbk_rx_meta {
56 int id;
57 int size;
58 int gso_size;
59};
60
61#define MAX_PENDING_REQS 256
62
63#define MAX_BUFFER_OFFSET PAGE_SIZE
64
65/* extra field used in struct page */
66union page_ext {
67 struct {
68#if BITS_PER_LONG < 64
69#define IDX_WIDTH 8
70#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
71 unsigned int group:GROUP_WIDTH;
72 unsigned int idx:IDX_WIDTH;
73#else
74 unsigned int group, idx;
75#endif
76 } e;
77 void *mapping;
78};
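The union above overlays a {group, idx} pair on the pointer-sized page->mapping field, letting netback recover which processing group and pending slot a granted page belongs to without allocating any side storage; the BUILD_BUG_ON in set_page_ext() below guards the size assumption. A standalone sketch of the same encode/decode trick, with illustrative fixed bitfield widths rather than the BITS_PER_LONG-derived ones:

#include <assert.h>
#include <stdio.h>
#include <string.h>

union pack {
        struct {
                unsigned int group : 24;        /* illustrative widths */
                unsigned int idx   : 8;
        } e;
        void *mapping;  /* pointer-sized storage, as page->mapping */
};

int main(void)
{
        union pack p, q;
        void *stash;

        /* Mirrors the BUILD_BUG_ON() in set_page_ext(). */
        assert(sizeof(p.e) <= sizeof(p.mapping));

        memset(&p, 0, sizeof(p));
        p.e.group = 3 + 1;      /* biased by one so 0 means "not ours" */
        p.e.idx = 42;
        stash = p.mapping;      /* park both values in one pointer field */

        q.mapping = stash;      /* ...and recover them on the other side */
        printf("group=%u idx=%u\n", q.e.group - 1, q.e.idx);
        return 0;
}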
79
80struct xen_netbk {
81 wait_queue_head_t wq;
82 struct task_struct *task;
83
84 struct sk_buff_head rx_queue;
85 struct sk_buff_head tx_queue;
86
87 struct timer_list net_timer;
88
89 struct page *mmap_pages[MAX_PENDING_REQS];
90
91 pending_ring_idx_t pending_prod;
92 pending_ring_idx_t pending_cons;
93 struct list_head net_schedule_list;
94
95 /* Protect the net_schedule_list in netif. */
96 spinlock_t net_schedule_list_lock;
97
98 atomic_t netfront_count;
99
100 struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
101 struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
102
103 u16 pending_ring[MAX_PENDING_REQS];
104
105 /*
106 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
107 * head/fragment page uses 2 copy operations because it
108 * straddles two buffers in the frontend.
109 */
110 struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
111 struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
112};
113
114static struct xen_netbk *xen_netbk;
115static int xen_netbk_group_nr;
116
117void xen_netbk_add_xenvif(struct xenvif *vif)
118{
119 int i;
120 int min_netfront_count;
121 int min_group = 0;
122 struct xen_netbk *netbk;
123
124 min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
125 for (i = 0; i < xen_netbk_group_nr; i++) {
126 int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
127 if (netfront_count < min_netfront_count) {
128 min_group = i;
129 min_netfront_count = netfront_count;
130 }
131 }
132
133 netbk = &xen_netbk[min_group];
134
135 vif->netbk = netbk;
136 atomic_inc(&netbk->netfront_count);
137}
138
139void xen_netbk_remove_xenvif(struct xenvif *vif)
140{
141 struct xen_netbk *netbk = vif->netbk;
142 vif->netbk = NULL;
143 atomic_dec(&netbk->netfront_count);
144}
145
146static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
147static void make_tx_response(struct xenvif *vif,
148 struct xen_netif_tx_request *txp,
149 s8 st);
150static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
151 u16 id,
152 s8 st,
153 u16 offset,
154 u16 size,
155 u16 flags);
156
157static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
158 unsigned int idx)
159{
160 return page_to_pfn(netbk->mmap_pages[idx]);
161}
162
163static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
164 unsigned int idx)
165{
166 return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
167}
168
169/* extra field used in struct page */
170static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
171 unsigned int idx)
172{
173 unsigned int group = netbk - xen_netbk;
174 union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
175
176 BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
177 pg->mapping = ext.mapping;
178}
179
180static int get_page_ext(struct page *pg,
181 unsigned int *pgroup, unsigned int *pidx)
182{
183 union page_ext ext = { .mapping = pg->mapping };
184 struct xen_netbk *netbk;
185 unsigned int group, idx;
186
187 group = ext.e.group - 1;
188
189 if (group < 0 || group >= xen_netbk_group_nr)
190 return 0;
191
192 netbk = &xen_netbk[group];
193
194 idx = ext.e.idx;
195
196 if ((idx < 0) || (idx >= MAX_PENDING_REQS))
197 return 0;
198
199 if (netbk->mmap_pages[idx] != pg)
200 return 0;
201
202 *pgroup = group;
203 *pidx = idx;
204
205 return 1;
206}
207
208/*
209 * This is the amount of packet we copy rather than map, so that the
210 * guest can't fiddle with the contents of the headers while we do
211 * packet processing on them (netfilter, routing, etc).
212 */
213#define PKT_PROT_LEN (ETH_HLEN + \
214 VLAN_HLEN + \
215 sizeof(struct iphdr) + MAX_IPOPTLEN + \
216 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
217
218static inline pending_ring_idx_t pending_index(unsigned i)
219{
220 return i & (MAX_PENDING_REQS-1);
221}
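pending_index() depends on MAX_PENDING_REQS being a power of two (here 256): masking with size - 1 reduces a free-running counter to a slot index and keeps working across unsigned wraparound, which the prod/cons arithmetic in nr_pending_reqs() below also relies on. A quick demonstration:

#include <stdio.h>

#define MAX_PENDING_REQS 256    /* must stay a power of two */

static unsigned int pending_index(unsigned int i)
{
        return i & (MAX_PENDING_REQS - 1);
}

int main(void)
{
        unsigned int near_wrap = 0xffffffffu;

        /* Counters are allowed to run past the ring size... */
        printf("%u -> %u\n", 300u, pending_index(300));  /* 44 */

        /* ...and even to wrap around the unsigned range. */
        printf("%u -> %u\n", near_wrap, pending_index(near_wrap));
        printf("%u -> %u\n", near_wrap + 1, pending_index(near_wrap + 1));
        return 0;
}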
222
223static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
224{
225 return MAX_PENDING_REQS -
226 netbk->pending_prod + netbk->pending_cons;
227}
228
229static void xen_netbk_kick_thread(struct xen_netbk *netbk)
230{
231 wake_up(&netbk->wq);
232}
233
234static int max_required_rx_slots(struct xenvif *vif)
235{
236 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
237
238 if (vif->can_sg || vif->gso || vif->gso_prefix)
239 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
240
241 return max;
242}
243
244int xen_netbk_rx_ring_full(struct xenvif *vif)
245{
246 RING_IDX peek = vif->rx_req_cons_peek;
247 RING_IDX needed = max_required_rx_slots(vif);
248
249 return ((vif->rx.sring->req_prod - peek) < needed) ||
250 ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
251}
252
253int xen_netbk_must_stop_queue(struct xenvif *vif)
254{
255 if (!xen_netbk_rx_ring_full(vif))
256 return 0;
257
258 vif->rx.sring->req_event = vif->rx_req_cons_peek +
259 max_required_rx_slots(vif);
260 mb(); /* request notification /then/ check the queue */
261
262 return xen_netbk_rx_ring_full(vif);
263}
264
265/*
266 * Returns true if we should start a new receive buffer instead of
267 * adding 'size' bytes to a buffer which currently contains 'offset'
268 * bytes.
269 */
270static bool start_new_rx_buffer(int offset, unsigned long size, int head)
271{
272 /* simple case: we have completely filled the current buffer. */
273 if (offset == MAX_BUFFER_OFFSET)
274 return true;
275
276 /*
277 * complex case: start a fresh buffer if the current frag
278 * would overflow the current buffer but only if:
279 * (i) this frag would fit completely in the next buffer
280 * and (ii) there is already some data in the current buffer
281 * and (iii) this is not the head buffer.
282 *
283 * Where:
284 * - (i) stops us splitting a frag into two copies
285 * unless the frag is too large for a single buffer.
286 * - (ii) stops us from leaving a buffer pointlessly empty.
287 * - (iii) stops us leaving the first buffer
288 * empty. Strictly speaking this is already covered
289 * by (ii) but is explicitly checked because
290 * netfront relies on the first buffer being
291 * non-empty and can crash otherwise.
292 *
 293 * This means we will effectively linearise small
 294 * frags but do not needlessly split large buffers
 295 * into multiple copies, tending to give large frags
 296 * their own buffers as before.
297 */
298 if ((offset + size > MAX_BUFFER_OFFSET) &&
299 (size <= MAX_BUFFER_OFFSET) && offset && !head)
300 return true;
301
302 return false;
303}
304
305/*
306 * Figure out how many ring slots we're going to need to send @skb to
307 * the guest. This function is essentially a dry run of
308 * netbk_gop_frag_copy.
309 */
310unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
311{
312 unsigned int count;
313 int i, copy_off;
314
315 count = DIV_ROUND_UP(
316 offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
317
318 copy_off = skb_headlen(skb) % PAGE_SIZE;
319
320 if (skb_shinfo(skb)->gso_size)
321 count++;
322
323 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
324 unsigned long size = skb_shinfo(skb)->frags[i].size;
325 unsigned long bytes;
326 while (size > 0) {
327 BUG_ON(copy_off > MAX_BUFFER_OFFSET);
328
329 if (start_new_rx_buffer(copy_off, size, 0)) {
330 count++;
331 copy_off = 0;
332 }
333
334 bytes = size;
335 if (copy_off + bytes > MAX_BUFFER_OFFSET)
336 bytes = MAX_BUFFER_OFFSET - copy_off;
337
338 copy_off += bytes;
339 size -= bytes;
340 }
341 }
342 return count;
343}
344
345struct netrx_pending_operations {
346 unsigned copy_prod, copy_cons;
347 unsigned meta_prod, meta_cons;
348 struct gnttab_copy *copy;
349 struct netbk_rx_meta *meta;
350 int copy_off;
351 grant_ref_t copy_gref;
352};
353
354static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
355 struct netrx_pending_operations *npo)
356{
357 struct netbk_rx_meta *meta;
358 struct xen_netif_rx_request *req;
359
360 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
361
362 meta = npo->meta + npo->meta_prod++;
363 meta->gso_size = 0;
364 meta->size = 0;
365 meta->id = req->id;
366
367 npo->copy_off = 0;
368 npo->copy_gref = req->gref;
369
370 return meta;
371}
372
373/*
374 * Set up the grant operations for this fragment. If it's a flipping
375 * interface, we also set up the unmap request from here.
376 */
377static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
378 struct netrx_pending_operations *npo,
379 struct page *page, unsigned long size,
380 unsigned long offset, int *head)
381{
382 struct gnttab_copy *copy_gop;
383 struct netbk_rx_meta *meta;
384 /*
 385 * These variables are used iff get_page_ext returns true,
386 * in which case they are guaranteed to be initialized.
387 */
388 unsigned int uninitialized_var(group), uninitialized_var(idx);
389 int foreign = get_page_ext(page, &group, &idx);
390 unsigned long bytes;
391
392 /* Data must not cross a page boundary. */
393 BUG_ON(size + offset > PAGE_SIZE);
394
395 meta = npo->meta + npo->meta_prod - 1;
396
397 while (size > 0) {
398 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
399
400 if (start_new_rx_buffer(npo->copy_off, size, *head)) {
401 /*
402 * Netfront requires there to be some data in the head
403 * buffer.
404 */
405 BUG_ON(*head);
406
407 meta = get_next_rx_buffer(vif, npo);
408 }
409
410 bytes = size;
411 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
412 bytes = MAX_BUFFER_OFFSET - npo->copy_off;
413
414 copy_gop = npo->copy + npo->copy_prod++;
415 copy_gop->flags = GNTCOPY_dest_gref;
416 if (foreign) {
417 struct xen_netbk *netbk = &xen_netbk[group];
418 struct pending_tx_info *src_pend;
419
420 src_pend = &netbk->pending_tx_info[idx];
421
422 copy_gop->source.domid = src_pend->vif->domid;
423 copy_gop->source.u.ref = src_pend->req.gref;
424 copy_gop->flags |= GNTCOPY_source_gref;
425 } else {
426 void *vaddr = page_address(page);
427 copy_gop->source.domid = DOMID_SELF;
428 copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
429 }
430 copy_gop->source.offset = offset;
431 copy_gop->dest.domid = vif->domid;
432
433 copy_gop->dest.offset = npo->copy_off;
434 copy_gop->dest.u.ref = npo->copy_gref;
435 copy_gop->len = bytes;
436
437 npo->copy_off += bytes;
438 meta->size += bytes;
439
440 offset += bytes;
441 size -= bytes;
442
443 /* Leave a gap for the GSO descriptor. */
444 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
445 vif->rx.req_cons++;
446
447 *head = 0; /* There must be something in this buffer now. */
448
449 }
450}
451
452/*
453 * Prepare an SKB to be transmitted to the frontend.
454 *
455 * This function is responsible for allocating grant operations, meta
456 * structures, etc.
457 *
458 * It returns the number of meta structures consumed. The number of
459 * ring slots used is always equal to the number of meta slots used
460 * plus the number of GSO descriptors used. Currently, we use either
461 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
462 * frontend-side LRO).
463 */
464static int netbk_gop_skb(struct sk_buff *skb,
465 struct netrx_pending_operations *npo)
466{
467 struct xenvif *vif = netdev_priv(skb->dev);
468 int nr_frags = skb_shinfo(skb)->nr_frags;
469 int i;
470 struct xen_netif_rx_request *req;
471 struct netbk_rx_meta *meta;
472 unsigned char *data;
473 int head = 1;
474 int old_meta_prod;
475
476 old_meta_prod = npo->meta_prod;
477
478 /* Set up a GSO prefix descriptor, if necessary */
479 if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
480 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
481 meta = npo->meta + npo->meta_prod++;
482 meta->gso_size = skb_shinfo(skb)->gso_size;
483 meta->size = 0;
484 meta->id = req->id;
485 }
486
487 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
488 meta = npo->meta + npo->meta_prod++;
489
490 if (!vif->gso_prefix)
491 meta->gso_size = skb_shinfo(skb)->gso_size;
492 else
493 meta->gso_size = 0;
494
495 meta->size = 0;
496 meta->id = req->id;
497 npo->copy_off = 0;
498 npo->copy_gref = req->gref;
499
500 data = skb->data;
501 while (data < skb_tail_pointer(skb)) {
502 unsigned int offset = offset_in_page(data);
503 unsigned int len = PAGE_SIZE - offset;
504
505 if (data + len > skb_tail_pointer(skb))
506 len = skb_tail_pointer(skb) - data;
507
508 netbk_gop_frag_copy(vif, skb, npo,
509 virt_to_page(data), len, offset, &head);
510 data += len;
511 }
512
513 for (i = 0; i < nr_frags; i++) {
514 netbk_gop_frag_copy(vif, skb, npo,
515 skb_shinfo(skb)->frags[i].page,
516 skb_shinfo(skb)->frags[i].size,
517 skb_shinfo(skb)->frags[i].page_offset,
518 &head);
519 }
520
521 return npo->meta_prod - old_meta_prod;
522}
523
524/*
525 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
 526 * used to set up the operations at the top of
527 * netrx_pending_operations, which have since been done. Check that
528 * they didn't give any errors and advance over them.
529 */
530static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
531 struct netrx_pending_operations *npo)
532{
533 struct gnttab_copy *copy_op;
534 int status = XEN_NETIF_RSP_OKAY;
535 int i;
536
537 for (i = 0; i < nr_meta_slots; i++) {
538 copy_op = npo->copy + npo->copy_cons++;
539 if (copy_op->status != GNTST_okay) {
540 netdev_dbg(vif->dev,
541 "Bad status %d from copy to DOM%d.\n",
542 copy_op->status, vif->domid);
543 status = XEN_NETIF_RSP_ERROR;
544 }
545 }
546
547 return status;
548}
549
550static void netbk_add_frag_responses(struct xenvif *vif, int status,
551 struct netbk_rx_meta *meta,
552 int nr_meta_slots)
553{
554 int i;
555 unsigned long offset;
556
557 /* No fragments used */
558 if (nr_meta_slots <= 1)
559 return;
560
561 nr_meta_slots--;
562
563 for (i = 0; i < nr_meta_slots; i++) {
564 int flags;
565 if (i == nr_meta_slots - 1)
566 flags = 0;
567 else
568 flags = XEN_NETRXF_more_data;
569
570 offset = 0;
571 make_rx_response(vif, meta[i].id, status, offset,
572 meta[i].size, flags);
573 }
574}
575
576struct skb_cb_overlay {
577 int meta_slots_used;
578};
579
580static void xen_netbk_rx_action(struct xen_netbk *netbk)
581{
582 struct xenvif *vif = NULL, *tmp;
583 s8 status;
584 u16 irq, flags;
585 struct xen_netif_rx_response *resp;
586 struct sk_buff_head rxq;
587 struct sk_buff *skb;
588 LIST_HEAD(notify);
589 int ret;
590 int nr_frags;
591 int count;
592 unsigned long offset;
593 struct skb_cb_overlay *sco;
594
595 struct netrx_pending_operations npo = {
596 .copy = netbk->grant_copy_op,
597 .meta = netbk->meta,
598 };
599
600 skb_queue_head_init(&rxq);
601
602 count = 0;
603
604 while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
605 vif = netdev_priv(skb->dev);
606 nr_frags = skb_shinfo(skb)->nr_frags;
607
608 sco = (struct skb_cb_overlay *)skb->cb;
609 sco->meta_slots_used = netbk_gop_skb(skb, &npo);
610
611 count += nr_frags + 1;
612
613 __skb_queue_tail(&rxq, skb);
614
615 /* Filled the batch queue? */
616 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
617 break;
618 }
619
620 BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
621
622 if (!npo.copy_prod)
623 return;
624
625 BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
626 ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
627 npo.copy_prod);
628 BUG_ON(ret != 0);
629
630 while ((skb = __skb_dequeue(&rxq)) != NULL) {
631 sco = (struct skb_cb_overlay *)skb->cb;
632
633 vif = netdev_priv(skb->dev);
634
635 if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
636 resp = RING_GET_RESPONSE(&vif->rx,
637 vif->rx.rsp_prod_pvt++);
638
639 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
640
641 resp->offset = netbk->meta[npo.meta_cons].gso_size;
642 resp->id = netbk->meta[npo.meta_cons].id;
643 resp->status = sco->meta_slots_used;
644
645 npo.meta_cons++;
646 sco->meta_slots_used--;
647 }
648
649
650 vif->dev->stats.tx_bytes += skb->len;
651 vif->dev->stats.tx_packets++;
652
653 status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
654
655 if (sco->meta_slots_used == 1)
656 flags = 0;
657 else
658 flags = XEN_NETRXF_more_data;
659
660 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
661 flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
662 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
663 /* remote but checksummed. */
664 flags |= XEN_NETRXF_data_validated;
665
666 offset = 0;
667 resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
668 status, offset,
669 netbk->meta[npo.meta_cons].size,
670 flags);
671
672 if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
673 struct xen_netif_extra_info *gso =
674 (struct xen_netif_extra_info *)
675 RING_GET_RESPONSE(&vif->rx,
676 vif->rx.rsp_prod_pvt++);
677
678 resp->flags |= XEN_NETRXF_extra_info;
679
680 gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
681 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
682 gso->u.gso.pad = 0;
683 gso->u.gso.features = 0;
684
685 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
686 gso->flags = 0;
687 }
688
689 netbk_add_frag_responses(vif, status,
690 netbk->meta + npo.meta_cons + 1,
691 sco->meta_slots_used);
692
693 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
694 irq = vif->irq;
695 if (ret && list_empty(&vif->notify_list))
696 list_add_tail(&vif->notify_list, &notify);
697
698 xenvif_notify_tx_completion(vif);
699
700 xenvif_put(vif);
701 npo.meta_cons += sco->meta_slots_used;
702 dev_kfree_skb(skb);
703 }
704
705 list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
706 notify_remote_via_irq(vif->irq);
707 list_del_init(&vif->notify_list);
708 }
709
710 /* More work to do? */
711 if (!skb_queue_empty(&netbk->rx_queue) &&
712 !timer_pending(&netbk->net_timer))
713 xen_netbk_kick_thread(netbk);
714}
715
716void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
717{
718 struct xen_netbk *netbk = vif->netbk;
719
720 skb_queue_tail(&netbk->rx_queue, skb);
721
722 xen_netbk_kick_thread(netbk);
723}
724
725static void xen_netbk_alarm(unsigned long data)
726{
727 struct xen_netbk *netbk = (struct xen_netbk *)data;
728 xen_netbk_kick_thread(netbk);
729}
730
731static int __on_net_schedule_list(struct xenvif *vif)
732{
733 return !list_empty(&vif->schedule_list);
734}
735
736/* Must be called with net_schedule_list_lock held */
737static void remove_from_net_schedule_list(struct xenvif *vif)
738{
739 if (likely(__on_net_schedule_list(vif))) {
740 list_del_init(&vif->schedule_list);
741 xenvif_put(vif);
742 }
743}
744
745static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
746{
747 struct xenvif *vif = NULL;
748
749 spin_lock_irq(&netbk->net_schedule_list_lock);
750 if (list_empty(&netbk->net_schedule_list))
751 goto out;
752
753 vif = list_first_entry(&netbk->net_schedule_list,
754 struct xenvif, schedule_list);
755 if (!vif)
756 goto out;
757
758 xenvif_get(vif);
759
760 remove_from_net_schedule_list(vif);
761out:
762 spin_unlock_irq(&netbk->net_schedule_list_lock);
763 return vif;
764}
765
766void xen_netbk_schedule_xenvif(struct xenvif *vif)
767{
768 unsigned long flags;
769 struct xen_netbk *netbk = vif->netbk;
770
771 if (__on_net_schedule_list(vif))
772 goto kick;
773
774 spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
775 if (!__on_net_schedule_list(vif) &&
776 likely(xenvif_schedulable(vif))) {
777 list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
778 xenvif_get(vif);
779 }
780 spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
781
782kick:
783 smp_mb();
784 if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
785 !list_empty(&netbk->net_schedule_list))
786 xen_netbk_kick_thread(netbk);
787}
788
789void xen_netbk_deschedule_xenvif(struct xenvif *vif)
790{
791 struct xen_netbk *netbk = vif->netbk;
792 spin_lock_irq(&netbk->net_schedule_list_lock);
793 remove_from_net_schedule_list(vif);
794 spin_unlock_irq(&netbk->net_schedule_list_lock);
795}
796
797void xen_netbk_check_rx_xenvif(struct xenvif *vif)
798{
799 int more_to_do;
800
801 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
802
803 if (more_to_do)
804 xen_netbk_schedule_xenvif(vif);
805}
806
807static void tx_add_credit(struct xenvif *vif)
808{
809 unsigned long max_burst, max_credit;
810
811 /*
812 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
813 * Otherwise the interface can seize up due to insufficient credit.
814 */
815 max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
816 max_burst = min(max_burst, 131072UL);
817 max_burst = max(max_burst, vif->credit_bytes);
818
819 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
820 max_credit = vif->remaining_credit + vif->credit_bytes;
821 if (max_credit < vif->remaining_credit)
822 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
823
824 vif->remaining_credit = min(max_credit, max_burst);
825}
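The clamp at the end of tx_add_credit() is a textbook unsigned-overflow guard: if remaining_credit + credit_bytes wraps past ULONG_MAX, the sum comes out smaller than an operand, and the code saturates at ULONG_MAX rather than collapsing the credit window to a tiny value. The same check in isolation:

#include <limits.h>
#include <stdio.h>

/* Add credit, saturating at ULONG_MAX instead of wrapping to zero. */
static unsigned long add_clamped(unsigned long remaining,
                                 unsigned long chunk)
{
        unsigned long sum = remaining + chunk;

        /* Unsigned overflow wraps, so the sum dips below an operand. */
        if (sum < remaining)
                sum = ULONG_MAX;
        return sum;
}

int main(void)
{
        printf("%lu\n", add_clamped(100, 28));           /* 128 */
        printf("%lu\n", add_clamped(ULONG_MAX - 1, 28)); /* clamped */
        return 0;
}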
826
827static void tx_credit_callback(unsigned long data)
828{
829 struct xenvif *vif = (struct xenvif *)data;
830 tx_add_credit(vif);
831 xen_netbk_check_rx_xenvif(vif);
832}
833
834static void netbk_tx_err(struct xenvif *vif,
835 struct xen_netif_tx_request *txp, RING_IDX end)
836{
837 RING_IDX cons = vif->tx.req_cons;
838
839 do {
840 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
841 if (cons >= end)
842 break;
843 txp = RING_GET_REQUEST(&vif->tx, cons++);
844 } while (1);
845 vif->tx.req_cons = cons;
846 xen_netbk_check_rx_xenvif(vif);
847 xenvif_put(vif);
848}
849
850static int netbk_count_requests(struct xenvif *vif,
851 struct xen_netif_tx_request *first,
852 struct xen_netif_tx_request *txp,
853 int work_to_do)
854{
855 RING_IDX cons = vif->tx.req_cons;
856 int frags = 0;
857
858 if (!(first->flags & XEN_NETTXF_more_data))
859 return 0;
860
861 do {
862 if (frags >= work_to_do) {
863 netdev_dbg(vif->dev, "Need more frags\n");
864 return -frags;
865 }
866
867 if (unlikely(frags >= MAX_SKB_FRAGS)) {
868 netdev_dbg(vif->dev, "Too many frags\n");
869 return -frags;
870 }
871
872 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
873 sizeof(*txp));
874 if (txp->size > first->size) {
875 netdev_dbg(vif->dev, "Frags galore\n");
876 return -frags;
877 }
878
879 first->size -= txp->size;
880 frags++;
881
882 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
883 netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
884 txp->offset, txp->size);
885 return -frags;
886 }
887 } while ((txp++)->flags & XEN_NETTXF_more_data);
888 return frags;
889}
890
891static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
892 struct sk_buff *skb,
893 unsigned long pending_idx)
894{
895 struct page *page;
896 page = alloc_page(GFP_KERNEL|__GFP_COLD);
897 if (!page)
898 return NULL;
899 set_page_ext(page, netbk, pending_idx);
900 netbk->mmap_pages[pending_idx] = page;
901 return page;
902}
903
904static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
905 struct xenvif *vif,
906 struct sk_buff *skb,
907 struct xen_netif_tx_request *txp,
908 struct gnttab_copy *gop)
909{
910 struct skb_shared_info *shinfo = skb_shinfo(skb);
911 skb_frag_t *frags = shinfo->frags;
912 unsigned long pending_idx = *((u16 *)skb->data);
913 int i, start;
914
915 /* Skip first skb fragment if it is on same page as header fragment. */
916 start = ((unsigned long)shinfo->frags[0].page == pending_idx);
917
918 for (i = start; i < shinfo->nr_frags; i++, txp++) {
919 struct page *page;
920 pending_ring_idx_t index;
921 struct pending_tx_info *pending_tx_info =
922 netbk->pending_tx_info;
923
924 index = pending_index(netbk->pending_cons++);
925 pending_idx = netbk->pending_ring[index];
926 page = xen_netbk_alloc_page(netbk, skb, pending_idx);
927 if (!page)
928 return NULL;
929
930 netbk->mmap_pages[pending_idx] = page;
931
932 gop->source.u.ref = txp->gref;
933 gop->source.domid = vif->domid;
934 gop->source.offset = txp->offset;
935
936 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
937 gop->dest.domid = DOMID_SELF;
938 gop->dest.offset = txp->offset;
939
940 gop->len = txp->size;
941 gop->flags = GNTCOPY_source_gref;
942
943 gop++;
944
945 memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
946 xenvif_get(vif);
947 pending_tx_info[pending_idx].vif = vif;
948 frags[i].page = (void *)pending_idx;
949 }
950
951 return gop;
952}
953
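/*
 * Note the temporary encoding above: frag->page holds the pending_idx
 * cast to a pointer, not a real struct page *. xen_netbk_fill_frags()
 * later swaps in the mapped page and copies size/offset out of the
 * saved request.
 */
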
954static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
955 struct sk_buff *skb,
956 struct gnttab_copy **gopp)
957{
958 struct gnttab_copy *gop = *gopp;
959 int pending_idx = *((u16 *)skb->data);
960 struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
961 struct xenvif *vif = pending_tx_info[pending_idx].vif;
962 struct xen_netif_tx_request *txp;
963 struct skb_shared_info *shinfo = skb_shinfo(skb);
964 int nr_frags = shinfo->nr_frags;
965 int i, err, start;
966
967 /* Check status of header. */
968 err = gop->status;
969 if (unlikely(err)) {
970 pending_ring_idx_t index;
971 index = pending_index(netbk->pending_prod++);
972 txp = &pending_tx_info[pending_idx].req;
973 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
974 netbk->pending_ring[index] = pending_idx;
975 xenvif_put(vif);
976 }
977
978 /* Skip first skb fragment if it is on same page as header fragment. */
979 start = ((unsigned long)shinfo->frags[0].page == pending_idx);
980
981 for (i = start; i < nr_frags; i++) {
982 int j, newerr;
983 pending_ring_idx_t index;
984
985 pending_idx = (unsigned long)shinfo->frags[i].page;
986
987 /* Check error status: if okay then remember grant handle. */
988 newerr = (++gop)->status;
989 if (likely(!newerr)) {
990 /* Had a previous error? Invalidate this fragment. */
991 if (unlikely(err))
992 xen_netbk_idx_release(netbk, pending_idx);
993 continue;
994 }
995
996 /* Error on this fragment: respond to client with an error. */
997 txp = &netbk->pending_tx_info[pending_idx].req;
998 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
999 index = pending_index(netbk->pending_prod++);
1000 netbk->pending_ring[index] = pending_idx;
1001 xenvif_put(vif);
1002
1003 /* Not the first error? Preceding frags already invalidated. */
1004 if (err)
1005 continue;
1006
1007 /* First error: invalidate header and preceding fragments. */
1008 pending_idx = *((u16 *)skb->data);
1009 xen_netbk_idx_release(netbk, pending_idx);
1010 for (j = start; j < i; j++) {
 1011			pending_idx = (unsigned long)shinfo->frags[j].page;
1012 xen_netbk_idx_release(netbk, pending_idx);
1013 }
1014
1015 /* Remember the error: invalidate all subsequent fragments. */
1016 err = newerr;
1017 }
1018
1019 *gopp = gop + 1;
1020 return err;
1021}
1022
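/*
 * Unwind summary: every slot receives exactly one response. A failed
 * copy answers with XEN_NETIF_RSP_ERROR here; slots invalidated because
 * of a failure elsewhere in the same skb are completed with
 * XEN_NETIF_RSP_OKAY via xen_netbk_idx_release().
 */
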
1023static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1024{
1025 struct skb_shared_info *shinfo = skb_shinfo(skb);
1026 int nr_frags = shinfo->nr_frags;
1027 int i;
1028
1029 for (i = 0; i < nr_frags; i++) {
1030 skb_frag_t *frag = shinfo->frags + i;
1031 struct xen_netif_tx_request *txp;
1032 unsigned long pending_idx;
1033
1034 pending_idx = (unsigned long)frag->page;
1035
1036 txp = &netbk->pending_tx_info[pending_idx].req;
1037 frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
1038 frag->size = txp->size;
1039 frag->page_offset = txp->offset;
1040
1041 skb->len += txp->size;
1042 skb->data_len += txp->size;
1043 skb->truesize += txp->size;
1044
1045 /* Take an extra reference to offset xen_netbk_idx_release */
1046 get_page(netbk->mmap_pages[pending_idx]);
1047 xen_netbk_idx_release(netbk, pending_idx);
1048 }
1049}
1050
1051static int xen_netbk_get_extras(struct xenvif *vif,
1052 struct xen_netif_extra_info *extras,
1053 int work_to_do)
1054{
1055 struct xen_netif_extra_info extra;
1056 RING_IDX cons = vif->tx.req_cons;
1057
1058 do {
1059 if (unlikely(work_to_do-- <= 0)) {
1060 netdev_dbg(vif->dev, "Missing extra info\n");
1061 return -EBADR;
1062 }
1063
1064 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1065 sizeof(extra));
1066 if (unlikely(!extra.type ||
1067 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1068 vif->tx.req_cons = ++cons;
1069 netdev_dbg(vif->dev,
1070 "Invalid extra type: %d\n", extra.type);
1071 return -EINVAL;
1072 }
1073
1074 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1075 vif->tx.req_cons = ++cons;
1076 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1077
1078 return work_to_do;
1079}
1080
1081static int netbk_set_skb_gso(struct xenvif *vif,
1082 struct sk_buff *skb,
1083 struct xen_netif_extra_info *gso)
1084{
1085 if (!gso->u.gso.size) {
1086 netdev_dbg(vif->dev, "GSO size must not be zero.\n");
1087 return -EINVAL;
1088 }
1089
1090 /* Currently only TCPv4 S.O. is supported. */
1091 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1092 netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1093 return -EINVAL;
1094 }
1095
1096 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1097 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1098
1099 /* Header must be checked, and gso_segs computed. */
1100 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1101 skb_shinfo(skb)->gso_segs = 0;
1102
1103 return 0;
1104}
1105
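/*
 * SKB_GSO_DODGY marks the GSO metadata as coming from an untrusted
 * source (the guest), so the stack re-derives gso_segs and validates
 * the headers before segmenting.
 */
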
1106static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1107{
1108 struct iphdr *iph;
1109 unsigned char *th;
1110 int err = -EPROTO;
1111 int recalculate_partial_csum = 0;
1112
1113 /*
1114 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1115 * peers can fail to set NETRXF_csum_blank when sending a GSO
1116 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1117 * recalculate the partial checksum.
1118 */
1119 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1120 vif->rx_gso_checksum_fixup++;
1121 skb->ip_summed = CHECKSUM_PARTIAL;
1122 recalculate_partial_csum = 1;
1123 }
1124
1125 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1126 if (skb->ip_summed != CHECKSUM_PARTIAL)
1127 return 0;
1128
1129 if (skb->protocol != htons(ETH_P_IP))
1130 goto out;
1131
1132 iph = (void *)skb->data;
1133 th = skb->data + 4 * iph->ihl;
1134 if (th >= skb_tail_pointer(skb))
1135 goto out;
1136
1137 skb->csum_start = th - skb->head;
1138 switch (iph->protocol) {
1139 case IPPROTO_TCP:
1140 skb->csum_offset = offsetof(struct tcphdr, check);
1141
1142 if (recalculate_partial_csum) {
1143 struct tcphdr *tcph = (struct tcphdr *)th;
1144 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1145 skb->len - iph->ihl*4,
1146 IPPROTO_TCP, 0);
1147 }
1148 break;
1149 case IPPROTO_UDP:
1150 skb->csum_offset = offsetof(struct udphdr, check);
1151
1152 if (recalculate_partial_csum) {
1153 struct udphdr *udph = (struct udphdr *)th;
1154 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1155 skb->len - iph->ihl*4,
1156 IPPROTO_UDP, 0);
1157 }
1158 break;
1159 default:
1160 if (net_ratelimit())
1161 netdev_err(vif->dev,
1162 "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
1163 iph->protocol);
1164 goto out;
1165 }
1166
1167 if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
1168 goto out;
1169
1170 err = 0;
1171
1172out:
1173 return err;
1174}
1175
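/*
 * CHECKSUM_PARTIAL contract (for reference): the stack or the NIC later
 * folds the checksum starting at skb->csum_start and stores the result
 * at csum_start + csum_offset, hence the bounds checks against the
 * linear data above.
 */
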
1176static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1177{
1178 unsigned long now = jiffies;
1179 unsigned long next_credit =
1180 vif->credit_timeout.expires +
1181 msecs_to_jiffies(vif->credit_usec / 1000);
1182
1183 /* Timer could already be pending in rare cases. */
1184 if (timer_pending(&vif->credit_timeout))
1185 return true;
1186
1187 /* Passed the point where we can replenish credit? */
1188 if (time_after_eq(now, next_credit)) {
1189 vif->credit_timeout.expires = now;
1190 tx_add_credit(vif);
1191 }
1192
1193 /* Still too big to send right now? Set a callback. */
1194 if (size > vif->remaining_credit) {
1195 vif->credit_timeout.data =
1196 (unsigned long)vif;
1197 vif->credit_timeout.function =
1198 tx_credit_callback;
1199 mod_timer(&vif->credit_timeout,
1200 next_credit);
1201
1202 return true;
1203 }
1204
1205 return false;
1206}
1207
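/*
 * Credit scheduling in brief: if a full replenish interval has passed,
 * top the credit up immediately; if the packet still exceeds the
 * remaining credit, arm credit_timeout so tx_credit_callback() re-adds
 * credit and re-schedules the vif when the interval expires.
 */
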
1208static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1209{
1210 struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
1211 struct sk_buff *skb;
1212 int ret;
1213
1214 while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
1215 !list_empty(&netbk->net_schedule_list)) {
1216 struct xenvif *vif;
1217 struct xen_netif_tx_request txreq;
1218 struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
1219 struct page *page;
1220 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1221 u16 pending_idx;
1222 RING_IDX idx;
1223 int work_to_do;
1224 unsigned int data_len;
1225 pending_ring_idx_t index;
1226
1227 /* Get a netif from the list with work to do. */
1228 vif = poll_net_schedule_list(netbk);
1229 if (!vif)
1230 continue;
1231
1232 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1233 if (!work_to_do) {
1234 xenvif_put(vif);
1235 continue;
1236 }
1237
1238 idx = vif->tx.req_cons;
1239 rmb(); /* Ensure that we see the request before we copy it. */
1240 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1241
1242 /* Credit-based scheduling. */
1243 if (txreq.size > vif->remaining_credit &&
1244 tx_credit_exceeded(vif, txreq.size)) {
1245 xenvif_put(vif);
1246 continue;
1247 }
1248
1249 vif->remaining_credit -= txreq.size;
1250
1251 work_to_do--;
1252 vif->tx.req_cons = ++idx;
1253
1254 memset(extras, 0, sizeof(extras));
1255 if (txreq.flags & XEN_NETTXF_extra_info) {
1256 work_to_do = xen_netbk_get_extras(vif, extras,
1257 work_to_do);
1258 idx = vif->tx.req_cons;
1259 if (unlikely(work_to_do < 0)) {
1260 netbk_tx_err(vif, &txreq, idx);
1261 continue;
1262 }
1263 }
1264
1265 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
1266 if (unlikely(ret < 0)) {
1267 netbk_tx_err(vif, &txreq, idx - ret);
1268 continue;
1269 }
1270 idx += ret;
1271
1272 if (unlikely(txreq.size < ETH_HLEN)) {
1273 netdev_dbg(vif->dev,
1274 "Bad packet size: %d\n", txreq.size);
1275 netbk_tx_err(vif, &txreq, idx);
1276 continue;
1277 }
1278
1279 /* No crossing a page as the payload mustn't fragment. */
1280 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1281 netdev_dbg(vif->dev,
1282 "txreq.offset: %x, size: %u, end: %lu\n",
1283 txreq.offset, txreq.size,
1284 (txreq.offset&~PAGE_MASK) + txreq.size);
1285 netbk_tx_err(vif, &txreq, idx);
1286 continue;
1287 }
1288
1289 index = pending_index(netbk->pending_cons);
1290 pending_idx = netbk->pending_ring[index];
1291
1292 data_len = (txreq.size > PKT_PROT_LEN &&
1293 ret < MAX_SKB_FRAGS) ?
1294 PKT_PROT_LEN : txreq.size;
1295
1296 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1297 GFP_ATOMIC | __GFP_NOWARN);
1298 if (unlikely(skb == NULL)) {
1299 netdev_dbg(vif->dev,
1300 "Can't allocate a skb in start_xmit.\n");
1301 netbk_tx_err(vif, &txreq, idx);
1302 break;
1303 }
1304
1305 /* Packets passed to netif_rx() must have some headroom. */
1306 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1307
1308 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1309 struct xen_netif_extra_info *gso;
1310 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1311
1312 if (netbk_set_skb_gso(vif, skb, gso)) {
1313 kfree_skb(skb);
1314 netbk_tx_err(vif, &txreq, idx);
1315 continue;
1316 }
1317 }
1318
1319 /* XXX could copy straight to head */
1320 page = xen_netbk_alloc_page(netbk, skb, pending_idx);
1321 if (!page) {
1322 kfree_skb(skb);
1323 netbk_tx_err(vif, &txreq, idx);
1324 continue;
1325 }
1326
1327 netbk->mmap_pages[pending_idx] = page;
1328
1329 gop->source.u.ref = txreq.gref;
1330 gop->source.domid = vif->domid;
1331 gop->source.offset = txreq.offset;
1332
1333 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1334 gop->dest.domid = DOMID_SELF;
1335 gop->dest.offset = txreq.offset;
1336
1337 gop->len = txreq.size;
1338 gop->flags = GNTCOPY_source_gref;
1339
1340 gop++;
1341
1342 memcpy(&netbk->pending_tx_info[pending_idx].req,
1343 &txreq, sizeof(txreq));
1344 netbk->pending_tx_info[pending_idx].vif = vif;
1345 *((u16 *)skb->data) = pending_idx;
1346
1347 __skb_put(skb, data_len);
1348
1349 skb_shinfo(skb)->nr_frags = ret;
1350 if (data_len < txreq.size) {
1351 skb_shinfo(skb)->nr_frags++;
1352 skb_shinfo(skb)->frags[0].page =
1353 (void *)(unsigned long)pending_idx;
1354 } else {
1355 /* Discriminate from any valid pending_idx value. */
1356 skb_shinfo(skb)->frags[0].page = (void *)~0UL;
1357 }
1358
1359 __skb_queue_tail(&netbk->tx_queue, skb);
1360
1361 netbk->pending_cons++;
1362
1363 request_gop = xen_netbk_get_requests(netbk, vif,
1364 skb, txfrags, gop);
1365 if (request_gop == NULL) {
1366 kfree_skb(skb);
1367 netbk_tx_err(vif, &txreq, idx);
1368 continue;
1369 }
1370 gop = request_gop;
1371
1372 vif->tx.req_cons = idx;
1373 xen_netbk_check_rx_xenvif(vif);
1374
1375 if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
1376 break;
1377 }
1378
1379 return gop - netbk->tx_copy_ops;
1380}
1381
1382static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1383{
1384 struct gnttab_copy *gop = netbk->tx_copy_ops;
1385 struct sk_buff *skb;
1386
1387 while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
1388 struct xen_netif_tx_request *txp;
1389 struct xenvif *vif;
1390 u16 pending_idx;
1391 unsigned data_len;
1392
1393 pending_idx = *((u16 *)skb->data);
1394 vif = netbk->pending_tx_info[pending_idx].vif;
1395 txp = &netbk->pending_tx_info[pending_idx].req;
1396
1397 /* Check the remap error code. */
1398 if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
1399 netdev_dbg(vif->dev, "netback grant failed.\n");
1400 skb_shinfo(skb)->nr_frags = 0;
1401 kfree_skb(skb);
1402 continue;
1403 }
1404
1405 data_len = skb->len;
1406 memcpy(skb->data,
1407 (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
1408 data_len);
1409 if (data_len < txp->size) {
1410 /* Append the packet payload as a fragment. */
1411 txp->offset += data_len;
1412 txp->size -= data_len;
1413 } else {
1414 /* Schedule a response immediately. */
1415 xen_netbk_idx_release(netbk, pending_idx);
1416 }
1417
1418 if (txp->flags & XEN_NETTXF_csum_blank)
1419 skb->ip_summed = CHECKSUM_PARTIAL;
1420 else if (txp->flags & XEN_NETTXF_data_validated)
1421 skb->ip_summed = CHECKSUM_UNNECESSARY;
1422
1423 xen_netbk_fill_frags(netbk, skb);
1424
1425 /*
1426 * If the initial fragment was < PKT_PROT_LEN then
1427 * pull through some bytes from the other fragments to
1428 * increase the linear region to PKT_PROT_LEN bytes.
1429 */
1430 if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
1431 int target = min_t(int, skb->len, PKT_PROT_LEN);
1432 __pskb_pull_tail(skb, target - skb_headlen(skb));
1433 }
1434
1435 skb->dev = vif->dev;
1436 skb->protocol = eth_type_trans(skb, skb->dev);
1437
1438 if (checksum_setup(vif, skb)) {
1439 netdev_dbg(vif->dev,
1440 "Can't setup checksum in net_tx_action\n");
1441 kfree_skb(skb);
1442 continue;
1443 }
1444
1445 vif->dev->stats.rx_bytes += skb->len;
1446 vif->dev->stats.rx_packets++;
1447
1448 xenvif_receive_skb(vif, skb);
1449 }
1450}
1451
1452/* Called after netfront has transmitted */
1453static void xen_netbk_tx_action(struct xen_netbk *netbk)
1454{
1455 unsigned nr_gops;
1456 int ret;
1457
1458 nr_gops = xen_netbk_tx_build_gops(netbk);
1459
1460 if (nr_gops == 0)
1461 return;
1462 ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
1463 netbk->tx_copy_ops, nr_gops);
1464 BUG_ON(ret);
1465
1466 xen_netbk_tx_submit(netbk);
1467
1468}
1469
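/*
 * TX is two-phase: xen_netbk_tx_build_gops() queues up to
 * ARRAY_SIZE(tx_copy_ops) grant copies plus the matching skbs, a single
 * GNTTABOP_copy hypercall executes the whole batch, and
 * xen_netbk_tx_submit() then checks per-op status before handing the
 * completed skbs to the stack.
 */
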
1470static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
1471{
1472 struct xenvif *vif;
1473 struct pending_tx_info *pending_tx_info;
1474 pending_ring_idx_t index;
1475
1476 /* Already complete? */
1477 if (netbk->mmap_pages[pending_idx] == NULL)
1478 return;
1479
1480 pending_tx_info = &netbk->pending_tx_info[pending_idx];
1481
1482 vif = pending_tx_info->vif;
1483
1484 make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
1485
1486 index = pending_index(netbk->pending_prod++);
1487 netbk->pending_ring[index] = pending_idx;
1488
1489 xenvif_put(vif);
1490
1491 netbk->mmap_pages[pending_idx]->mapping = 0;
1492 put_page(netbk->mmap_pages[pending_idx]);
1493 netbk->mmap_pages[pending_idx] = NULL;
1494}
1495
1496static void make_tx_response(struct xenvif *vif,
1497 struct xen_netif_tx_request *txp,
1498 s8 st)
1499{
1500 RING_IDX i = vif->tx.rsp_prod_pvt;
1501 struct xen_netif_tx_response *resp;
1502 int notify;
1503
1504 resp = RING_GET_RESPONSE(&vif->tx, i);
1505 resp->id = txp->id;
1506 resp->status = st;
1507
1508 if (txp->flags & XEN_NETTXF_extra_info)
1509 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1510
1511 vif->tx.rsp_prod_pvt = ++i;
1512 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1513 if (notify)
1514 notify_remote_via_irq(vif->irq);
1515}
1516
1517static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1518 u16 id,
1519 s8 st,
1520 u16 offset,
1521 u16 size,
1522 u16 flags)
1523{
1524 RING_IDX i = vif->rx.rsp_prod_pvt;
1525 struct xen_netif_rx_response *resp;
1526
1527 resp = RING_GET_RESPONSE(&vif->rx, i);
1528 resp->offset = offset;
1529 resp->flags = flags;
1530 resp->id = id;
1531 resp->status = (s16)size;
1532 if (st < 0)
1533 resp->status = (s16)st;
1534
1535 vif->rx.rsp_prod_pvt = ++i;
1536
1537 return resp;
1538}
1539
1540static inline int rx_work_todo(struct xen_netbk *netbk)
1541{
1542 return !skb_queue_empty(&netbk->rx_queue);
1543}
1544
1545static inline int tx_work_todo(struct xen_netbk *netbk)
1546{
1547
1548 if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
1549 !list_empty(&netbk->net_schedule_list))
1550 return 1;
1551
1552 return 0;
1553}
1554
1555static int xen_netbk_kthread(void *data)
1556{
1557 struct xen_netbk *netbk = data;
1558 while (!kthread_should_stop()) {
1559 wait_event_interruptible(netbk->wq,
1560 rx_work_todo(netbk) ||
1561 tx_work_todo(netbk) ||
1562 kthread_should_stop());
1563 cond_resched();
1564
1565 if (kthread_should_stop())
1566 break;
1567
1568 if (rx_work_todo(netbk))
1569 xen_netbk_rx_action(netbk);
1570
1571 if (tx_work_todo(netbk))
1572 xen_netbk_tx_action(netbk);
1573 }
1574
1575 return 0;
1576}
1577
1578void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1579{
1580 struct gnttab_unmap_grant_ref op;
1581
1582 if (vif->tx.sring) {
1583 gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
1584 GNTMAP_host_map, vif->tx_shmem_handle);
1585
1586 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
1587 BUG();
1588 }
1589
1590 if (vif->rx.sring) {
1591 gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
1592 GNTMAP_host_map, vif->rx_shmem_handle);
1593
1594 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
1595 BUG();
1596 }
1597 if (vif->rx_comms_area)
1598 free_vm_area(vif->rx_comms_area);
1599 if (vif->tx_comms_area)
1600 free_vm_area(vif->tx_comms_area);
1601}
1602
1603int xen_netbk_map_frontend_rings(struct xenvif *vif,
1604 grant_ref_t tx_ring_ref,
1605 grant_ref_t rx_ring_ref)
1606{
1607 struct gnttab_map_grant_ref op;
1608 struct xen_netif_tx_sring *txs;
1609 struct xen_netif_rx_sring *rxs;
1610
1611 int err = -ENOMEM;
1612
1613 vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
1614 if (vif->tx_comms_area == NULL)
1615 goto err;
1616
1617 vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
1618 if (vif->rx_comms_area == NULL)
1619 goto err;
1620
1621 gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
1622 GNTMAP_host_map, tx_ring_ref, vif->domid);
1623
1624 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
1625 BUG();
1626
1627 if (op.status) {
1628 netdev_warn(vif->dev,
1629 "failed to map tx ring. err=%d status=%d\n",
1630 err, op.status);
1631 err = op.status;
1632 goto err;
1633 }
1634
1635 vif->tx_shmem_ref = tx_ring_ref;
1636 vif->tx_shmem_handle = op.handle;
1637
1638 txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
1639 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1640
1641 gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
1642 GNTMAP_host_map, rx_ring_ref, vif->domid);
1643
1644 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
1645 BUG();
1646
1647 if (op.status) {
1648 netdev_warn(vif->dev,
1649 "failed to map rx ring. err=%d status=%d\n",
1650 err, op.status);
1651 err = op.status;
1652 goto err;
1653 }
1654
1655 vif->rx_shmem_ref = rx_ring_ref;
1656 vif->rx_shmem_handle = op.handle;
1657 vif->rx_req_cons_peek = 0;
1658
1659 rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
1660 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1661
1662 return 0;
1663
1664err:
1665 xen_netbk_unmap_frontend_rings(vif);
1666 return err;
1667}
1668
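/*
 * Each ring is one page of frontend memory: the grant reference is
 * mapped into a one-page vm area and BACK_RING_INIT() lays the shared
 * producer/consumer indices and the request/response union over it.
 */
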
1669static int __init netback_init(void)
1670{
1671 int i;
1672 int rc = 0;
1673 int group;
1674
1675 if (!xen_pv_domain())
1676 return -ENODEV;
1677
1678 xen_netbk_group_nr = num_online_cpus();
1679 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1680 if (!xen_netbk) {
1681 printk(KERN_ALERT "%s: out of memory\n", __func__);
1682 return -ENOMEM;
1683 }
1684
1685 for (group = 0; group < xen_netbk_group_nr; group++) {
1686 struct xen_netbk *netbk = &xen_netbk[group];
1687 skb_queue_head_init(&netbk->rx_queue);
1688 skb_queue_head_init(&netbk->tx_queue);
1689
1690 init_timer(&netbk->net_timer);
1691 netbk->net_timer.data = (unsigned long)netbk;
1692 netbk->net_timer.function = xen_netbk_alarm;
1693
1694 netbk->pending_cons = 0;
1695 netbk->pending_prod = MAX_PENDING_REQS;
1696 for (i = 0; i < MAX_PENDING_REQS; i++)
1697 netbk->pending_ring[i] = i;
1698
1699 init_waitqueue_head(&netbk->wq);
1700 netbk->task = kthread_create(xen_netbk_kthread,
1701 (void *)netbk,
1702 "netback/%u", group);
1703
1704 if (IS_ERR(netbk->task)) {
 1705			printk(KERN_ALERT "kthread_create() failed at netback\n");
1706 del_timer(&netbk->net_timer);
1707 rc = PTR_ERR(netbk->task);
1708 goto failed_init;
1709 }
1710
1711 kthread_bind(netbk->task, group);
1712
1713 INIT_LIST_HEAD(&netbk->net_schedule_list);
1714
1715 spin_lock_init(&netbk->net_schedule_list_lock);
1716
1717 atomic_set(&netbk->netfront_count, 0);
1718
1719 wake_up_process(netbk->task);
1720 }
1721
1722 rc = xenvif_xenbus_init();
1723 if (rc)
1724 goto failed_init;
1725
1726 return 0;
1727
1728failed_init:
1729 while (--group >= 0) {
1730 struct xen_netbk *netbk = &xen_netbk[group];
1731 for (i = 0; i < MAX_PENDING_REQS; i++) {
1732 if (netbk->mmap_pages[i])
1733 __free_page(netbk->mmap_pages[i]);
1734 }
1735 del_timer(&netbk->net_timer);
1736 kthread_stop(netbk->task);
1737 }
1738 vfree(xen_netbk);
1739 return rc;
1740
1741}
1742
1743module_init(netback_init);
1744
1745MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
new file mode 100644
index 00000000000..22b8c350599
--- /dev/null
+++ b/drivers/net/xen-netback/xenbus.c
@@ -0,0 +1,490 @@
1/*
2 * Xenbus code for netif backend
3 *
4 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
5 * Copyright (C) 2005 XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20*/
21
22#include "common.h"
23
24struct backend_info {
25 struct xenbus_device *dev;
26 struct xenvif *vif;
27 enum xenbus_state frontend_state;
28 struct xenbus_watch hotplug_status_watch;
 29	unsigned int have_hotplug_status_watch:1;
30};
31
32static int connect_rings(struct backend_info *);
33static void connect(struct backend_info *);
34static void backend_create_xenvif(struct backend_info *be);
35static void unregister_hotplug_status_watch(struct backend_info *be);
36
37static int netback_remove(struct xenbus_device *dev)
38{
39 struct backend_info *be = dev_get_drvdata(&dev->dev);
40
41 unregister_hotplug_status_watch(be);
42 if (be->vif) {
43 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
44 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
45 xenvif_disconnect(be->vif);
46 be->vif = NULL;
47 }
48 kfree(be);
49 dev_set_drvdata(&dev->dev, NULL);
50 return 0;
51}
52
53
54/**
55 * Entry point to this code when a new device is created. Allocate the basic
56 * structures and switch to InitWait.
57 */
58static int netback_probe(struct xenbus_device *dev,
59 const struct xenbus_device_id *id)
60{
61 const char *message;
62 struct xenbus_transaction xbt;
63 int err;
64 int sg;
65 struct backend_info *be = kzalloc(sizeof(struct backend_info),
66 GFP_KERNEL);
67 if (!be) {
68 xenbus_dev_fatal(dev, -ENOMEM,
69 "allocating backend structure");
70 return -ENOMEM;
71 }
72
73 be->dev = dev;
74 dev_set_drvdata(&dev->dev, be);
75
76 sg = 1;
77
78 do {
79 err = xenbus_transaction_start(&xbt);
80 if (err) {
81 xenbus_dev_fatal(dev, err, "starting transaction");
82 goto fail;
83 }
84
85 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
86 if (err) {
87 message = "writing feature-sg";
88 goto abort_transaction;
89 }
90
91 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
92 "%d", sg);
93 if (err) {
94 message = "writing feature-gso-tcpv4";
95 goto abort_transaction;
96 }
97
98 /* We support rx-copy path. */
99 err = xenbus_printf(xbt, dev->nodename,
100 "feature-rx-copy", "%d", 1);
101 if (err) {
102 message = "writing feature-rx-copy";
103 goto abort_transaction;
104 }
105
106 /*
107 * We don't support rx-flip path (except old guests who don't
108 * grok this feature flag).
109 */
110 err = xenbus_printf(xbt, dev->nodename,
111 "feature-rx-flip", "%d", 0);
112 if (err) {
113 message = "writing feature-rx-flip";
114 goto abort_transaction;
115 }
116
117 err = xenbus_transaction_end(xbt, 0);
118 } while (err == -EAGAIN);
119
120 if (err) {
121 xenbus_dev_fatal(dev, err, "completing transaction");
122 goto fail;
123 }
124
125 err = xenbus_switch_state(dev, XenbusStateInitWait);
126 if (err)
127 goto fail;
128
129 /* This kicks hotplug scripts, so do it immediately. */
130 backend_create_xenvif(be);
131
132 return 0;
133
134abort_transaction:
135 xenbus_transaction_end(xbt, 1);
136 xenbus_dev_fatal(dev, err, "%s", message);
137fail:
138 pr_debug("failed");
139 netback_remove(dev);
140 return err;
141}
142
143
144/*
145 * Handle the creation of the hotplug script environment. We add the script
146 * and vif variables to the environment, for the benefit of the vif-* hotplug
147 * scripts.
148 */
149static int netback_uevent(struct xenbus_device *xdev,
150 struct kobj_uevent_env *env)
151{
152 struct backend_info *be = dev_get_drvdata(&xdev->dev);
153 char *val;
154
155 val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
156 if (IS_ERR(val)) {
157 int err = PTR_ERR(val);
158 xenbus_dev_fatal(xdev, err, "reading script");
159 return err;
160 } else {
161 if (add_uevent_var(env, "script=%s", val)) {
162 kfree(val);
163 return -ENOMEM;
164 }
165 kfree(val);
166 }
167
168 if (!be || !be->vif)
169 return 0;
170
171 return add_uevent_var(env, "vif=%s", be->vif->dev->name);
172}
173
174
175static void backend_create_xenvif(struct backend_info *be)
176{
177 int err;
178 long handle;
179 struct xenbus_device *dev = be->dev;
180
181 if (be->vif != NULL)
182 return;
183
184 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
185 if (err != 1) {
186 xenbus_dev_fatal(dev, err, "reading handle");
187 return;
188 }
189
190 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
191 if (IS_ERR(be->vif)) {
192 err = PTR_ERR(be->vif);
193 be->vif = NULL;
194 xenbus_dev_fatal(dev, err, "creating interface");
195 return;
196 }
197
198 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
199}
200
201
202static void disconnect_backend(struct xenbus_device *dev)
203{
204 struct backend_info *be = dev_get_drvdata(&dev->dev);
205
206 if (be->vif) {
207 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
208 xenvif_disconnect(be->vif);
209 be->vif = NULL;
210 }
211}
212
213/**
214 * Callback received when the frontend's state changes.
215 */
216static void frontend_changed(struct xenbus_device *dev,
217 enum xenbus_state frontend_state)
218{
219 struct backend_info *be = dev_get_drvdata(&dev->dev);
220
221 pr_debug("frontend state %s", xenbus_strstate(frontend_state));
222
223 be->frontend_state = frontend_state;
224
225 switch (frontend_state) {
226 case XenbusStateInitialising:
227 if (dev->state == XenbusStateClosed) {
228 printk(KERN_INFO "%s: %s: prepare for reconnect\n",
229 __func__, dev->nodename);
230 xenbus_switch_state(dev, XenbusStateInitWait);
231 }
232 break;
233
234 case XenbusStateInitialised:
235 break;
236
237 case XenbusStateConnected:
238 if (dev->state == XenbusStateConnected)
239 break;
240 backend_create_xenvif(be);
241 if (be->vif)
242 connect(be);
243 break;
244
245 case XenbusStateClosing:
246 if (be->vif)
247 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
248 disconnect_backend(dev);
249 xenbus_switch_state(dev, XenbusStateClosing);
250 break;
251
252 case XenbusStateClosed:
253 xenbus_switch_state(dev, XenbusStateClosed);
254 if (xenbus_dev_is_online(dev))
255 break;
256 /* fall through if not online */
257 case XenbusStateUnknown:
258 device_unregister(&dev->dev);
259 break;
260
261 default:
262 xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
263 frontend_state);
264 break;
265 }
266}
267
268
269static void xen_net_read_rate(struct xenbus_device *dev,
270 unsigned long *bytes, unsigned long *usec)
271{
272 char *s, *e;
273 unsigned long b, u;
274 char *ratestr;
275
276 /* Default to unlimited bandwidth. */
277 *bytes = ~0UL;
278 *usec = 0;
279
280 ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
281 if (IS_ERR(ratestr))
282 return;
283
284 s = ratestr;
285 b = simple_strtoul(s, &e, 10);
286 if ((s == e) || (*e != ','))
287 goto fail;
288
289 s = e + 1;
290 u = simple_strtoul(s, &e, 10);
291 if ((s == e) || (*e != '\0'))
292 goto fail;
293
294 *bytes = b;
295 *usec = u;
296
297 kfree(ratestr);
298 return;
299
300 fail:
301 pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
302 kfree(ratestr);
303}
304
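/*
 * The "rate" node is "<bytes>,<interval in usec>". For example
 * (hypothetical values), "10000000,50000" grants 10MB of credit every
 * 50ms, i.e. roughly 1.6Gbit/s sustained.
 */
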
305static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
306{
307 char *s, *e, *macstr;
308 int i;
309
310 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
311 if (IS_ERR(macstr))
312 return PTR_ERR(macstr);
313
314 for (i = 0; i < ETH_ALEN; i++) {
315 mac[i] = simple_strtoul(s, &e, 16);
316 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
317 kfree(macstr);
318 return -ENOENT;
319 }
320 s = e+1;
321 }
322
323 kfree(macstr);
324 return 0;
325}
326
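/*
 * The "mac" node is six colon-separated hex octets, e.g.
 * "00:16:3e:2a:bc:de" (an illustrative address; 00:16:3e is the
 * Xen-assigned OUI).
 */
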
327static void unregister_hotplug_status_watch(struct backend_info *be)
328{
329 if (be->have_hotplug_status_watch) {
330 unregister_xenbus_watch(&be->hotplug_status_watch);
331 kfree(be->hotplug_status_watch.node);
332 }
333 be->have_hotplug_status_watch = 0;
334}
335
336static void hotplug_status_changed(struct xenbus_watch *watch,
337 const char **vec,
338 unsigned int vec_size)
339{
340 struct backend_info *be = container_of(watch,
341 struct backend_info,
342 hotplug_status_watch);
343 char *str;
344 unsigned int len;
345
346 str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
347 if (IS_ERR(str))
348 return;
349 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
350 xenbus_switch_state(be->dev, XenbusStateConnected);
351 /* Not interested in this watch anymore. */
352 unregister_hotplug_status_watch(be);
353 }
354 kfree(str);
355}
356
357static void connect(struct backend_info *be)
358{
359 int err;
360 struct xenbus_device *dev = be->dev;
361
362 err = connect_rings(be);
363 if (err)
364 return;
365
366 err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
367 if (err) {
368 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
369 return;
370 }
371
372 xen_net_read_rate(dev, &be->vif->credit_bytes,
373 &be->vif->credit_usec);
374 be->vif->remaining_credit = be->vif->credit_bytes;
375
376 unregister_hotplug_status_watch(be);
377 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
378 hotplug_status_changed,
379 "%s/%s", dev->nodename, "hotplug-status");
380 if (err) {
381 /* Switch now, since we can't do a watch. */
382 xenbus_switch_state(dev, XenbusStateConnected);
383 } else {
384 be->have_hotplug_status_watch = 1;
385 }
386
387 netif_wake_queue(be->vif->dev);
388}
389
390
391static int connect_rings(struct backend_info *be)
392{
393 struct xenvif *vif = be->vif;
394 struct xenbus_device *dev = be->dev;
395 unsigned long tx_ring_ref, rx_ring_ref;
396 unsigned int evtchn, rx_copy;
397 int err;
398 int val;
399
400 err = xenbus_gather(XBT_NIL, dev->otherend,
401 "tx-ring-ref", "%lu", &tx_ring_ref,
402 "rx-ring-ref", "%lu", &rx_ring_ref,
403 "event-channel", "%u", &evtchn, NULL);
404 if (err) {
405 xenbus_dev_fatal(dev, err,
406 "reading %s/ring-ref and event-channel",
407 dev->otherend);
408 return err;
409 }
410
411 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
412 &rx_copy);
413 if (err == -ENOENT) {
414 err = 0;
415 rx_copy = 0;
416 }
417 if (err < 0) {
418 xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
419 dev->otherend);
420 return err;
421 }
422 if (!rx_copy)
423 return -EOPNOTSUPP;
424
425 if (vif->dev->tx_queue_len != 0) {
426 if (xenbus_scanf(XBT_NIL, dev->otherend,
427 "feature-rx-notify", "%d", &val) < 0)
428 val = 0;
429 if (val)
430 vif->can_queue = 1;
431 else
432 /* Must be non-zero for pfifo_fast to work. */
433 vif->dev->tx_queue_len = 1;
434 }
435
436 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
437 "%d", &val) < 0)
438 val = 0;
439 vif->can_sg = !!val;
440
441 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
442 "%d", &val) < 0)
443 val = 0;
444 vif->gso = !!val;
445
446 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
447 "%d", &val) < 0)
448 val = 0;
449 vif->gso_prefix = !!val;
450
451 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
452 "%d", &val) < 0)
453 val = 0;
454 vif->csum = !val;
455
456 /* Map the shared frame, irq etc. */
457 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn);
458 if (err) {
459 xenbus_dev_fatal(dev, err,
460 "mapping shared-frames %lu/%lu port %u",
461 tx_ring_ref, rx_ring_ref, evtchn);
462 return err;
463 }
464 return 0;
465}
466
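/*
 * Negotiation summary: the frontend publishes tx-ring-ref, rx-ring-ref
 * and event-channel; this backend insists on request-rx-copy, while
 * feature-rx-notify, feature-sg, feature-gso-tcpv4 and
 * feature-no-csum-offload are optional toggles read above.
 */
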
467
468/* ** Driver Registration ** */
469
470
471static const struct xenbus_device_id netback_ids[] = {
472 { "vif" },
473 { "" }
474};
475
476
477static struct xenbus_driver netback = {
478 .name = "vif",
479 .owner = THIS_MODULE,
480 .ids = netback_ids,
481 .probe = netback_probe,
482 .remove = netback_remove,
483 .uevent = netback_uevent,
484 .otherend_changed = frontend_changed,
485};
486
487int xenvif_xenbus_init(void)
488{
489 return xenbus_register_backend(&netback);
490}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de574982..5c8d9c385be 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+	/* Statistics */
+	unsigned long rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -356,7 +359,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 			struct xen_netif_tx_response *txrsp;
 
 			txrsp = RING_GET_RESPONSE(&np->tx, cons);
-			if (txrsp->status == NETIF_RSP_NULL)
+			if (txrsp->status == XEN_NETIF_RSP_NULL)
 				continue;
 
 			id = txrsp->id;
@@ -413,7 +416,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 	   larger than a page), split it it into page-sized chunks. */
 	while (len > PAGE_SIZE - offset) {
 		tx->size = PAGE_SIZE - offset;
-		tx->flags |= NETTXF_more_data;
+		tx->flags |= XEN_NETTXF_more_data;
 		len -= tx->size;
 		data += tx->size;
 		offset = 0;
@@ -439,7 +442,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 
-		tx->flags |= NETTXF_more_data;
+		tx->flags |= XEN_NETTXF_more_data;
 
 		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 		np->tx_skbs[id].skb = skb_get(skb);
@@ -514,10 +517,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx->flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
-		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
+		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 		/* remote but checksummed. */
-		tx->flags |= NETTXF_data_validated;
+		tx->flags |= XEN_NETTXF_data_validated;
 
 	if (skb_shinfo(skb)->gso_size) {
 		struct xen_netif_extra_info *gso;
@@ -528,7 +531,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (extra)
 			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
 		else
-			tx->flags |= NETTXF_extra_info;
+			tx->flags |= XEN_NETTXF_extra_info;
 
 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
@@ -648,7 +651,7 @@ static int xennet_get_responses(struct netfront_info *np,
 	int err = 0;
 	unsigned long ret;
 
-	if (rx->flags & NETRXF_extra_info) {
+	if (rx->flags & XEN_NETRXF_extra_info) {
 		err = xennet_get_extras(np, extras, rp);
 		cons = np->rx.rsp_cons;
 	}
@@ -685,7 +688,7 @@ static int xennet_get_responses(struct netfront_info *np,
 		__skb_queue_tail(list, skb);
 
 next:
-		if (!(rx->flags & NETRXF_more_data))
+		if (!(rx->flags & XEN_NETRXF_more_data))
 			break;
 
 		if (cons + frags == rp) {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 	return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
 	struct iphdr *iph;
 	unsigned char *th;
 	int err = -EPROTO;
+	int recalculate_partial_csum = 0;
+
+	/*
+	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+	 * peers can fail to set NETRXF_csum_blank when sending a GSO
+	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+	 * recalculate the partial checksum.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+		struct netfront_info *np = netdev_priv(dev);
+		np->rx_gso_checksum_fixup++;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		recalculate_partial_csum = 1;
+	}
+
+	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
 		skb->csum_offset = offsetof(struct tcphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = (struct tcphdr *)th;
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_TCP, 0);
+		}
 		break;
 	case IPPROTO_UDP:
 		skb->csum_offset = offsetof(struct udphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = (struct udphdr *)th;
+			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_UDP, 0);
+		}
 		break;
 	default:
 		if (net_ratelimit())
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (skb_checksum_setup(skb)) {
-				kfree_skb(skb);
-				packets_dropped++;
-				dev->stats.rx_errors++;
-				continue;
-			}
+		if (checksum_setup(dev, skb)) {
+			kfree_skb(skb);
+			packets_dropped++;
+			dev->stats.rx_errors++;
+			continue;
 		}
 
 		dev->stats.rx_packets++;
@@ -950,9 +983,9 @@ err:
 		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
 		skb->len += skb->data_len;
 
-		if (rx->flags & NETRXF_csum_blank)
+		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
-		else if (rx->flags & NETRXF_data_validated)
+		else if (rx->flags & XEN_NETRXF_data_validated)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		__skb_queue_tail(&rxq, skb);
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
 	}
 }
 
+static const struct xennet_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} xennet_stats[] = {
+	{
+		"rx_gso_checksum_fixup",
+		offsetof(struct netfront_info, rx_gso_checksum_fixup)
+	},
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(xennet_stats);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 * data)
+{
+	void *np = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       xennet_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
 	.set_tx_csum = ethtool_op_set_tx_csum,
 	.set_sg = xennet_set_sg,
 	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
+
+	.get_sset_count = xennet_get_sset_count,
+	.get_ethtool_stats = xennet_get_ethtool_stats,
+	.get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index cad66ce1640..2642af4ee49 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -1101,8 +1101,7 @@ static struct net_device_ops xemaclite_netdev_ops;
  * Return:	0, if the driver is bound to the Emaclite device, or
  *		a negative error if there is failure.
  */
-static int __devinit xemaclite_of_probe(struct platform_device *ofdev,
-					const struct of_device_id *match)
+static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
 {
 	struct resource r_irq;		/* Interrupt resources */
 	struct resource r_mem;		/* IO mem resources */
@@ -1288,7 +1287,7 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
 };
 MODULE_DEVICE_TABLE(of, xemaclite_of_match);
 
-static struct of_platform_driver xemaclite_of_driver = {
+static struct platform_driver xemaclite_of_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
@@ -1306,7 +1305,7 @@ static struct of_platform_driver xemaclite_of_driver = {
 static int __init xemaclite_init(void)
 {
 	/* No kernel boot options used, we just need to register the driver */
-	return of_register_platform_driver(&xemaclite_of_driver);
+	return platform_driver_register(&xemaclite_of_driver);
 }
 
 /**
@@ -1314,7 +1313,7 @@ static int __init xemaclite_init(void)
  */
 static void __exit xemaclite_cleanup(void)
 {
-	of_unregister_platform_driver(&xemaclite_of_driver);
+	platform_driver_unregister(&xemaclite_of_driver);
 }
 
 module_init(xemaclite_init);